mirror of https://github.com/citusdata/citus.git

Compare commits (46 commits):

6663635593, 54978b738d, 29ba5a6251, 7fa26cd095, d8a8e530fb, 629ba8978f,
3c84828931, cb21065749, cca5a76090, ed3f298eb3, 5a1a1334d3, 5f04346408,
39348e2c3b, d695deae16, e19089503e, 81702af8d7, 6899e66f80, f5b297e149,
b66abbcba8, 88f2b8a60d, de39835da2, 3f2ac78cf6, 757446bc61, 0a48c0aec7,
d18757b0cd, a71e0b5c84, 143a3f2b28, 425ca713ff, ef2c6d51b2, bf3c0d7efd,
dee3c95992, 6fe7c32d9f, 819ac372e0, 877369fd36, 2813063059, 8e5b9a06ad,
aed6776d1f, a9dced4291, 34744501ce, 86b57f426b, 9801f743ce, 8e3246a2f3,
1feb7102b8, 9cde3d4122, 7da6d68675, a913b90ff3
@@ -158,6 +158,14 @@ jobs:
             cp core.* /tmp/core_dumps
           fi
         when: on_fail
+    - run:
+        name: 'Copy pg_upgrade logs for newData dir'
+        command: |
+          mkdir -p /tmp/pg_upgrade_newData_logs
+          if ls src/test/regress/tmp_upgrade/newData/*.log 1> /dev/null 2>&1; then
+            cp src/test/regress/tmp_upgrade/newData/*.log /tmp/pg_upgrade_newData_logs
+          fi
+        when: on_fail
     - store_artifacts:
         name: 'Save regressions'
         path: src/test/regress/regression.diffs

@@ -166,6 +174,9 @@ jobs:
         name: 'Save core dumps'
        path: /tmp/core_dumps
         when: on_fail
+    - store_artifacts:
+        name: 'Save pg_upgrade logs for newData dir'
+        path: /tmp/pg_upgrade_newData_logs
     - codecov/upload:
         flags: 'test_<< parameters.old_pg_major >>_<< parameters.new_pg_major >>,upgrade'

@@ -451,7 +462,7 @@ workflows:
     - build:
         name: build-14
        pg_major: 14
-        image_tag: '14beta3'
+        image_tag: '14.0'

     - check-style
     - check-sql-snapshots

@@ -607,74 +618,74 @@ workflows:
     - test-citus:
         name: 'test-14_check-multi'
         pg_major: 14
-        image_tag: '14beta3'
+        image_tag: '14.0'
         make: check-multi
         requires: [build-14]
     - test-citus:
         name: 'test-14_check-multi-1'
         pg_major: 14
-        image_tag: '14beta3'
+        image_tag: '14.0'
         make: check-multi-1
         requires: [build-14]
     - test-citus:
         name: 'test-14_check-mx'
         pg_major: 14
-        image_tag: '14beta3'
+        image_tag: '14.0'
         make: check-multi-mx
         requires: [build-14]
     - test-citus:
         name: 'test-14_check-vanilla'
         pg_major: 14
-        image_tag: '14beta3'
+        image_tag: '14.0'
         make: check-vanilla
         requires: [build-14]
     - test-citus:
         name: 'test-14_check-isolation'
         pg_major: 14
-        image_tag: '14beta3'
+        image_tag: '14.0'
         make: check-isolation
         requires: [build-14]
     - test-citus:
         name: 'test-14_check-worker'
         pg_major: 14
-        image_tag: '14beta3'
+        image_tag: '14.0'
         make: check-worker
         requires: [build-14]
     - test-citus:
         name: 'test-14_check-operations'
         pg_major: 14
-        image_tag: '14beta3'
+        image_tag: '14.0'
         make: check-operations
         requires: [build-14]
     - test-citus:
         name: 'test-14_check-follower-cluster'
         pg_major: 14
-        image_tag: '14beta3'
+        image_tag: '14.0'
         make: check-follower-cluster
         requires: [build-14]
     - test-citus:
         name: 'test-14_check-columnar'
         pg_major: 14
-        image_tag: '14beta3'
+        image_tag: '14.0'
         make: check-columnar
         requires: [build-14]
     - test-citus:
         name: 'test-14_check-columnar-isolation'
         pg_major: 14
-        image_tag: '14beta3'
+        image_tag: '14.0'
         make: check-columnar-isolation
         requires: [build-14]
     - tap-test-citus:
         name: 'test_14_tap-recovery'
         pg_major: 14
-        image_tag: '14beta3'
+        image_tag: '14.0'
         suite: recovery
         requires: [build-14]
     - test-citus:
         name: 'test-14_check-failure'
         pg_major: 14
         image: citus/failtester
-        image_tag: '14beta3'
+        image_tag: '14.0'
         make: check-failure
         requires: [build-14]

@@ -689,14 +700,14 @@ workflows:
         name: 'test-12-14_check-pg-upgrade'
         old_pg_major: 12
         new_pg_major: 14
-        image_tag: '12-13-14'
+        image_tag: '12.8-13.4-14.0'
         requires: [build-12,build-14]

     - test-pg-upgrade:
         name: 'test-13-14_check-pg-upgrade'
         old_pg_major: 13
         new_pg_major: 14
-        image_tag: '12-13-14'
+        image_tag: '12.8-13.4-14.0'
         requires: [build-13,build-14]

     - test-citus-upgrade:
CHANGELOG.md

@@ -1,3 +1,84 @@
+### citus v10.2.5 (March 15, 2022) ###
+
+* Fixes a bug that could cause `worker_save_query_explain_analyze` to fail on
+  custom types
+
+* Fixes a bug that limits usage of sequences in non-integer columns
+
+* Fixes a crash that occurs when an aggregate that cannot be pushed down
+  returns an empty result from a worker
+
+* Improves concurrent metadata syncing and metadata changing DDL operations
+
+### citus v10.2.4 (February 1, 2022) ###
+
+* Adds support for operator class parameters in indexes
+
+* Fixes a bug with distributed functions that have `OUT` parameters or
+  return `TABLE`
+
+* Fixes a build error that happens when `lz4` is not installed
+
+* Improves self-deadlock prevention for `CREATE INDEX` &
+  `REINDEX CONCURRENTLY` commands for builds using PG14 or higher
+
+* Fixes a bug that causes commands to fail when `application_name` is set
+
+### citus v10.2.3 (November 29, 2021) ###
+
+* Adds `fix_partition_shard_index_names` udf to fix currently broken
+  partition index names
+
+* Fixes a bug that could break `DROP SCHEMA/EXTENSION` commands when there is
+  a columnar table
+
+* Fixes a bug that could break pg upgrades due to missing `pg_depend` records
+  for columnar table access method
+
+* Fixes a missing `FROM` clause entry error
+
+* Fixes an unexpected error that occurs when writing to a columnar table
+  created in older versions
+
+* Fixes issue when compiling Citus from source with some compilers
+
+* Reinstates optimisation for uniform shard interval ranges
+
+* Relaxes table ownership check to privileges check while acquiring lock
+
+### citus v10.2.2 (October 14, 2021) ###
+
+* Fixes a bug that causes reading columnar metapage as all-zeros when
+  writing to a columnar table
+
+* Fixes a bug that could cause prerequisite columnar table access method
+  objects not being created during pg upgrades
+
+* Fixes a bug that could cause `CREATE INDEX` to fail for expressions when
+  using custom `search_path`
+
+* Fixes an unexpected error that occurs due to aborted writes to a columnar
+  table with an index
+
+### citus v10.2.1 (September 24, 2021) ###
+
+* Adds missing version-mismatch checks for columnar tables
+
+* Adds missing version-mismatch checks for internal functions
+
+* Fixes a bug that could cause partition shards not being co-located with
+  parent shards
+
+* Fixes a bug that prevents pushing down boolean expressions when using
+  columnar custom scan
+
+* Fixes a clog lookup failure that could occur when writing to a columnar table
+
+* Fixes an issue that could cause unexpected errors when there is an
+  in-progress write to a columnar table
+
+* Revokes read access to `columnar.chunk` from unprivileged user
+
 ### citus v10.2.0 (September 14, 2021) ###

 * Adds PostgreSQL 14 support
configure

@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for Citus 10.2devel.
+# Generated by GNU Autoconf 2.69 for Citus 10.2.5.
 #
 #
 # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.

@@ -579,8 +579,8 @@ MAKEFLAGS=
 # Identity of this package.
 PACKAGE_NAME='Citus'
 PACKAGE_TARNAME='citus'
-PACKAGE_VERSION='10.2devel'
-PACKAGE_STRING='Citus 10.2devel'
+PACKAGE_VERSION='10.2.5'
+PACKAGE_STRING='Citus 10.2.5'
 PACKAGE_BUGREPORT=''
 PACKAGE_URL=''

@@ -1260,7 +1260,7 @@ if test "$ac_init_help" = "long"; then
   # Omit some internal or obsolete options to make the list less imposing.
   # This message is too long to be a string in the A/UX 3.1 sh.
   cat <<_ACEOF
-\`configure' configures Citus 10.2devel to adapt to many kinds of systems.
+\`configure' configures Citus 10.2.5 to adapt to many kinds of systems.

 Usage: $0 [OPTION]... [VAR=VALUE]...

@@ -1322,7 +1322,7 @@ fi

 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of Citus 10.2devel:";;
+     short | recursive ) echo "Configuration of Citus 10.2.5:";;
   esac
   cat <<\_ACEOF

@@ -1425,7 +1425,7 @@ fi
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-Citus configure 10.2devel
+Citus configure 10.2.5
 generated by GNU Autoconf 2.69

 Copyright (C) 2012 Free Software Foundation, Inc.

@@ -1908,7 +1908,7 @@ cat >config.log <<_ACEOF
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.

-It was created by Citus $as_me 10.2devel, which was
+It was created by Citus $as_me 10.2.5, which was
 generated by GNU Autoconf 2.69. Invocation command line was

   $ $0 $@

@@ -4543,7 +4543,9 @@ if test "${with_lz4+set}" = set; then :
   withval=$with_lz4;
   case $withval in
     yes)
       :
+
+$as_echo "#define HAVE_CITUS_LIBLZ4 1" >>confdefs.h
       ;;
     no)
       :

@@ -4556,6 +4558,8 @@ if test "${with_lz4+set}" = set; then :
 else
   with_lz4=yes
+
+$as_echo "#define HAVE_CITUS_LIBLZ4 1" >>confdefs.h
 fi

@@ -5356,7 +5360,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by Citus $as_me 10.2devel, which was
+This file was extended by Citus $as_me 10.2.5, which was
 generated by GNU Autoconf 2.69. Invocation command line was

   CONFIG_FILES = $CONFIG_FILES

@@ -5418,7 +5422,7 @@ _ACEOF
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
 ac_cs_version="\\
-Citus config.status 10.2devel
+Citus config.status 10.2.5
 configured by $0, generated by GNU Autoconf 2.69,
 with options \\"\$ac_cs_config\\"
configure.ac

@@ -5,7 +5,7 @@
 # everyone needing autoconf installed, the resulting files are checked
 # into the SCM.

-AC_INIT([Citus], [10.2devel])
+AC_INIT([Citus], [10.2.5])
 AC_COPYRIGHT([Copyright (c) Citus Data, Inc.])

 # we'll need sed and awk for some of the version commands

@@ -220,7 +220,8 @@ AC_DEFINE_UNQUOTED(REPORTS_BASE_URL, "$REPORTS_BASE_URL",
 # LZ4
 #
 PGAC_ARG_BOOL(with, lz4, yes,
-              [do not use lz4])
+              [do not use lz4],
+              [AC_DEFINE([HAVE_CITUS_LIBLZ4], 1, [Define to 1 to build with lz4 support. (--with-lz4)])])
 AC_SUBST(with_lz4)

 if test "$with_lz4" = yes; then
@@ -29,7 +29,7 @@

 #if HAVE_LIBZSTD
 #define DEFAULT_COMPRESSION_TYPE COMPRESSION_ZSTD
-#elif HAVE_LIBLZ4
+#elif HAVE_CITUS_LIBLZ4
 #define DEFAULT_COMPRESSION_TYPE COMPRESSION_LZ4
 #else
 #define DEFAULT_COMPRESSION_TYPE COMPRESSION_PG_LZ

@@ -44,7 +44,7 @@ static const struct config_enum_entry columnar_compression_options[] =
 {
     { "none", COMPRESSION_NONE, false },
     { "pglz", COMPRESSION_PG_LZ, false },
-#if HAVE_LIBLZ4
+#if HAVE_CITUS_LIBLZ4
     { "lz4", COMPRESSION_LZ4, false },
 #endif
 #if HAVE_LIBZSTD
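The rename from HAVE_LIBLZ4 to HAVE_CITUS_LIBLZ4 (likely to avoid colliding with the same-named macro that PostgreSQL 14's own lz4 support can define) is what the `--with-lz4` configure change above now feeds. A minimal SQL-level sketch of its effect, using the existing `alter_columnar_table_set` UDF; the table name is illustrative:

```sql
-- 'lz4' is only accepted when the extension was built with lz4 support; on
-- a build configured with --without-lz4, HAVE_CITUS_LIBLZ4 is unset and the
-- "lz4" entry is compiled out of columnar_compression_options above.
CREATE TABLE measurements (v int) USING columnar;
SELECT alter_columnar_table_set('measurements', compression => 'lz4');
```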
@@ -19,7 +19,7 @@

 #include "columnar/columnar_compression.h"

-#if HAVE_LIBLZ4
+#if HAVE_CITUS_LIBLZ4
 #include <lz4.h>
 #endif

@@ -63,7 +63,7 @@ CompressBuffer(StringInfo inputBuffer,
 {
     switch (compressionType)
     {
-#if HAVE_LIBLZ4
+#if HAVE_CITUS_LIBLZ4
         case COMPRESSION_LZ4:
         {
             int maximumLength = LZ4_compressBound(inputBuffer->len);

@@ -170,7 +170,7 @@ DecompressBuffer(StringInfo buffer,
         return buffer;
     }

-#if HAVE_LIBLZ4
+#if HAVE_CITUS_LIBLZ4
         case COMPRESSION_LZ4:
         {
             StringInfo decompressedBuffer = makeStringInfo();
@@ -605,10 +605,11 @@ RelationIdGetNumberOfAttributes(Oid relationId)
 /*
  * CheckVarStats() checks whether a qual involving this Var is likely to be
  * useful based on the correlation stats. If so, or if stats are unavailable,
- * return true; otherwise return false.
+ * return true; otherwise return false and set absVarCorrelation in case the
+ * caller wants to use it for logging purposes.
  */
 static bool
-CheckVarStats(PlannerInfo *root, Var *var, Oid sortop)
+CheckVarStats(PlannerInfo *root, Var *var, Oid sortop, float4 *absVarCorrelation)
 {
     /*
      * Collect isunique, ndistinct, and varCorrelation.

@@ -642,6 +643,14 @@ CheckVarStats(PlannerInfo *root, Var *var, Oid sortop)
      */
     if (Abs(varCorrelation) < ColumnarQualPushdownCorrelationThreshold)
     {
+        if (absVarCorrelation)
+        {
+            /*
+             * Report absVarCorrelation if caller wants to know why given
+             * var is rejected.
+             */
+            *absVarCorrelation = Abs(varCorrelation);
+        }
         return false;
     }

@@ -674,7 +683,7 @@ ExprReferencesRelid(Expr *expr, Index relid)

 /*
- * CheckPushdownClause tests to see if clause is a candidate for pushing down
+ * ExtractPushdownClause extracts an Expr node from given clause for pushing down
  * into the given rel (including join clauses). This test may not be exact in
  * all cases; it's used to reduce the search space for parameterization.
  *

@@ -683,19 +692,134 @@ ExprReferencesRelid(Expr *expr, Index relid)
  * and that doesn't seem worth the effort. Here we just look for "Var op Expr"
  * or "Expr op Var", where Var references rel and Expr references other rels
  * (or no rels at all).
+ *
+ * Moreover, this function also looks into BoolExpr's to recursively extract
+ * pushdownable OpExpr's of them:
+ * i) AND_EXPR:
+ *    Take pushdownable args of AND expressions by ignoring the other args.
+ * ii) OR_EXPR:
+ *     Ignore the whole OR expression if we cannot extract a pushdownable Expr
+ *     from one of its args.
+ * iii) NOT_EXPR:
+ *      Simply ignore NOT expressions since we don't expect to see them before
+ *      an expression that we can pushdown, see the comment in function.
+ *
+ * The reasoning for those three rules could also be summarized as such:
+ * for any expression that we cannot push down, we must assume that it
+ * evaluates to true.
+ *
+ * For example, given the following WHERE clause:
+ * (
+ *     (a > random() OR a < 30)
+ *     AND
+ *     a < 200
+ * ) OR
+ * (
+ *     a = 300
+ *     OR
+ *     a > 400
+ * );
+ * Even if we can pushdown (a < 30), we cannot pushdown (a > random() OR a < 30)
+ * due to (a > random()). However, we can pushdown (a < 200), so we extract
+ * (a < 200) from the lhs of the top level OR expression.
+ *
+ * For the rhs of the top level OR expression, since we can pushdown both (a = 300)
+ * and (a > 400), we take this part as is.
+ *
+ * Finally, since both sides of the top level OR expression yielded pushdownable
+ * expressions, we will pushdown the following:
+ *   (a < 200) OR ((a = 300) OR (a > 400))
  */
-static bool
-CheckPushdownClause(PlannerInfo *root, RelOptInfo *rel, Expr *clause)
+static Expr *
+ExtractPushdownClause(PlannerInfo *root, RelOptInfo *rel, Node *node)
 {
-    if (!IsA(clause, OpExpr) || list_length(((OpExpr *) clause)->args) != 2)
+    CHECK_FOR_INTERRUPTS();
+    check_stack_depth();
+
+    if (node == NULL)
+    {
+        return NULL;
+    }
+
+    if (IsA(node, BoolExpr))
+    {
+        BoolExpr *boolExpr = castNode(BoolExpr, node);
+        if (boolExpr->boolop == NOT_EXPR)
+        {
+            /*
+             * Standard planner should have already applied de-morgan rule to
+             * simple NOT expressions. If we encounter such an expression
+             * here, then it can't be a pushdownable one, such as:
+             *   WHERE id NOT IN (SELECT id FROM something).
+             */
+            ereport(ColumnarPlannerDebugLevel,
+                    (errmsg("columnar planner: cannot push down clause: "
+                            "must not contain a subplan")));
+            return NULL;
+        }
+
+        List *pushdownableArgs = NIL;
+
+        Node *boolExprArg = NULL;
+        foreach_ptr(boolExprArg, boolExpr->args)
+        {
+            Expr *pushdownableArg = ExtractPushdownClause(root, rel,
+                                                          (Node *) boolExprArg);
+            if (pushdownableArg)
+            {
+                pushdownableArgs = lappend(pushdownableArgs, pushdownableArg);
+            }
+            else if (boolExpr->boolop == OR_EXPR)
+            {
+                ereport(ColumnarPlannerDebugLevel,
+                        (errmsg("columnar planner: cannot push down clause: "
+                                "all arguments of an OR expression must be "
+                                "pushdownable but one of them was not, due "
+                                "to the reason given above")));
+                return NULL;
+            }
+
+            /* simply skip AND args that we cannot pushdown */
+        }
+
+        int npushdownableArgs = list_length(pushdownableArgs);
+        if (npushdownableArgs == 0)
+        {
+            ereport(ColumnarPlannerDebugLevel,
+                    (errmsg("columnar planner: cannot push down clause: "
+                            "none of the arguments were pushdownable, "
+                            "due to the reason(s) given above ")));
+            return NULL;
+        }
+        else if (npushdownableArgs == 1)
+        {
+            return (Expr *) linitial(pushdownableArgs);
+        }
+
+        if (boolExpr->boolop == AND_EXPR)
+        {
+            return make_andclause(pushdownableArgs);
+        }
+        else if (boolExpr->boolop == OR_EXPR)
+        {
+            return make_orclause(pushdownableArgs);
+        }
+        else
+        {
+            /* already discarded NOT expr, so should not be reachable */
+            return NULL;
+        }
+    }
+
+    if (!IsA(node, OpExpr) || list_length(((OpExpr *) node)->args) != 2)
     {
         ereport(ColumnarPlannerDebugLevel,
                 (errmsg("columnar planner: cannot push down clause: "
                         "must be binary operator expression")));
-        return false;
+        return NULL;
     }

-    OpExpr *opExpr = castNode(OpExpr, clause);
+    OpExpr *opExpr = castNode(OpExpr, node);
     Expr *lhs = list_nth(opExpr->args, 0);
     Expr *rhs = list_nth(opExpr->args, 1);

@@ -721,15 +845,15 @@ CheckPushdownClause(PlannerInfo *root, RelOptInfo *rel, Expr *clause)
                         "must match 'Var <op> Expr' or 'Expr <op> Var'"),
                  errhint("Var must only reference this rel, "
                          "and Expr must not reference this rel")));
-        return false;
+        return NULL;
     }

     if (varSide->varattno <= 0)
     {
         ereport(ColumnarPlannerDebugLevel,
                 (errmsg("columnar planner: cannot push down clause: "
-                        "var is whole-row reference")));
-        return false;
+                        "var is whole-row reference or system column")));
+        return NULL;
     }

     if (contain_volatile_functions((Node *) exprSide))

@@ -737,7 +861,7 @@ CheckPushdownClause(PlannerInfo *root, RelOptInfo *rel, Expr *clause)
         ereport(ColumnarPlannerDebugLevel,
                 (errmsg("columnar planner: cannot push down clause: "
                         "expr contains volatile functions")));
-        return false;
+        return NULL;
     }

     /* only the default opclass is used for qual pushdown. */

@@ -753,7 +877,7 @@ CheckPushdownClause(PlannerInfo *root, RelOptInfo *rel, Expr *clause)
                 (errmsg("columnar planner: cannot push down clause: "
                         "cannot find default btree opclass and opfamily for type: %s",
                         format_type_be(varSide->vartype))));
-        return false;
+        return NULL;
     }

     if (!op_in_opfamily(opExpr->opno, varOpFamily))

@@ -762,7 +886,7 @@ CheckPushdownClause(PlannerInfo *root, RelOptInfo *rel, Expr *clause)
                 (errmsg("columnar planner: cannot push down clause: "
                         "operator %d not a member of opfamily %d",
                         opExpr->opno, varOpFamily)));
-        return false;
+        return NULL;
     }

     Oid sortop = get_opfamily_member(varOpFamily, varOpcInType,

@@ -773,15 +897,20 @@ CheckPushdownClause(PlannerInfo *root, RelOptInfo *rel, Expr *clause)
      * Check that statistics on the Var support the utility of this
      * clause.
      */
-    if (!CheckVarStats(root, varSide, sortop))
+    float4 absVarCorrelation = 0;
+    if (!CheckVarStats(root, varSide, sortop, &absVarCorrelation))
     {
         ereport(ColumnarPlannerDebugLevel,
                 (errmsg("columnar planner: cannot push down clause: "
-                        "var attribute %d is uncorrelated", varSide->varattno)));
-        return false;
+                        "absolute correlation (%.3f) of var attribute %d is "
+                        "smaller than the value configured in "
+                        "\"columnar.qual_pushdown_correlation_threshold\" "
+                        "(%.3f)", absVarCorrelation, varSide->varattno,
+                        ColumnarQualPushdownCorrelationThreshold)));
+        return NULL;
     }

-    return true;
+    return (Expr *) node;
 }

@@ -806,12 +935,19 @@ FilterPushdownClauses(PlannerInfo *root, RelOptInfo *rel, List *inputClauses)
          * there's something we should do with pseudoconstants here.
          */
         if (rinfo->pseudoconstant ||
-            !bms_is_member(rel->relid, rinfo->required_relids) ||
-            !CheckPushdownClause(root, rel, rinfo->clause))
+            !bms_is_member(rel->relid, rinfo->required_relids))
+        {
+            continue;
+        }
+
+        Expr *pushdownableExpr = ExtractPushdownClause(root, rel, (Node *) rinfo->clause);
+        if (!pushdownableExpr)
         {
             continue;
         }

+        rinfo = copyObject(rinfo);
+        rinfo->clause = pushdownableExpr;
         filteredClauses = lappend(filteredClauses, rinfo);
     }
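To see the extraction in action, here is a sketch that reuses the exact WHERE clause from the function comment above. It assumes the `columnar.planner_debug_level` GUC that ships with this version (and that 'notice' is an accepted level on this build) to surface the per-clause decisions:

```sql
CREATE TABLE events (a int) USING columnar;
INSERT INTO events SELECT generate_series(1, 100000);

SET columnar.planner_debug_level TO 'notice';

-- (a > random() OR a < 30) is rejected as a whole because random() is
-- volatile, but (a < 200) survives, and (a = 300 OR a > 400) is fully
-- pushdownable, so the extracted chunk filter becomes:
--     (a < 200) OR ((a = 300) OR (a > 400))
EXPLAIN (COSTS OFF)
SELECT count(*) FROM events
WHERE ((a > random() OR a < 30) AND a < 200)
   OR (a = 300 OR a > 400);
```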
@@ -335,8 +335,13 @@ DeleteColumnarTableOptions(Oid regclass, bool missingOk)
      */
     Assert(!IsBinaryUpgrade);

-    Relation columnarOptions = relation_open(ColumnarOptionsRelationId(),
-                                             RowExclusiveLock);
+    Relation columnarOptions = try_relation_open(ColumnarOptionsRelationId(),
+                                                 RowExclusiveLock);
+    if (columnarOptions == NULL)
+    {
+        /* extension has been dropped */
+        return false;
+    }

     /* find existing item to remove */
     ScanKeyData scanKey[1] = { 0 };

@@ -1178,8 +1183,18 @@ UpdateStripeMetadataRow(uint64 storageId, uint64 stripeId, bool *update,

     heap_inplace_update(columnarStripes, modifiedTuple);

+    /*
+     * Existing tuple now contains modifications, because we used
+     * heap_inplace_update().
+     */
+    HeapTuple newTuple = oldTuple;
+
+    /*
+     * Must not pass modifiedTuple, because BuildStripeMetadata expects a real
+     * heap tuple with MVCC fields.
+     */
     StripeMetadata *modifiedStripeMetadata = BuildStripeMetadata(columnarStripes,
-                                                                 modifiedTuple);
+                                                                 newTuple);

     CommandCounterIncrement();

@@ -1233,6 +1248,8 @@ ReadDataFileStripeList(uint64 storageId, Snapshot snapshot)

 /*
  * BuildStripeMetadata builds a StripeMetadata object from given heap tuple.
+ *
+ * NB: heapTuple must be a proper heap tuple with MVCC fields.
  */
 static StripeMetadata *
 BuildStripeMetadata(Relation columnarStripes, HeapTuple heapTuple)

@@ -1269,7 +1286,8 @@ BuildStripeMetadata(Relation columnarStripes, HeapTuple heapTuple)
      * subtransaction id here.
      */
     TransactionId entryXmin = HeapTupleHeaderGetXmin(heapTuple->t_data);
-    stripeMetadata->aborted = TransactionIdDidAbort(entryXmin);
+    stripeMetadata->aborted = !TransactionIdIsInProgress(entryXmin) &&
+                              TransactionIdDidAbort(entryXmin);
     stripeMetadata->insertedByCurrentXact =
         TransactionIdIsCurrentTransactionId(entryXmin);
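A sketch of the race the added TransactionIdIsInProgress() check closes, using two hypothetical psql sessions; before the fix, stamping a stripe as aborted consulted the clog directly, which is not reliable for a transaction that is still running:

```sql
-- session 1: leave a columnar write in progress
CREATE TABLE t (a int) USING columnar;
BEGIN;
INSERT INTO t SELECT generate_series(1, 10000);

-- session 2, while session 1 is still open: readers classify the stripe by
-- its xmin, and the stripe written above must be treated as in-progress
-- (simply not yet visible), not as aborted
SELECT count(*) FROM t;

-- session 1:
COMMIT;
```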
@@ -104,6 +104,10 @@ typedef struct PhysicalAddr
     "version or run \"ALTER EXTENSION citus UPDATE\"."


+/* only for testing purposes */
+PG_FUNCTION_INFO_V1(test_columnar_storage_write_new_page);
+
+
 /*
  * Map logical offsets to a physical page and offset where the data is kept.
  */

@@ -667,6 +671,7 @@ ReadFromBlock(Relation rel, BlockNumber blockno, uint32 offset, char *buf,
               uint32 len, bool force)
 {
     Buffer buffer = ReadBuffer(rel, blockno);
+    LockBuffer(buffer, BUFFER_LOCK_SHARE);
     Page page = BufferGetPage(buffer);
     PageHeader phdr = (PageHeader) page;

@@ -678,7 +683,7 @@ ReadFromBlock(Relation rel, BlockNumber blockno, uint32 offset, char *buf,
     }

     memcpy_s(buf, len, page + offset, len);
-    ReleaseBuffer(buffer);
+    UnlockReleaseBuffer(buffer);
 }

@@ -703,13 +708,32 @@ WriteToBlock(Relation rel, BlockNumber blockno, uint32 offset, char *buf,
         PageInit(page, BLCKSZ, 0);
     }

-    if (phdr->pd_lower != offset || phdr->pd_upper - offset < len)
+    if (phdr->pd_lower < offset || phdr->pd_upper - offset < len)
     {
         elog(ERROR,
              "attempt to write columnar data of length %d to offset %d of block %d of relation %d",
              len, offset, blockno, rel->rd_id);
     }

+    /*
+     * After a transaction has been rolled back, we might be
+     * over-writing the rolled-back write, so phdr->pd_lower can be
+     * different from addr.offset.
+     *
+     * We reset pd_lower to reset the rolled-back write.
+     *
+     * Given that we always align page reservation to the next page as of
+     * 10.2, having such a disk page is only possible if a write operation
+     * failed in an older version of columnar, but now the user attempts
+     * writing to that table in version >= 10.2.
+     */
+    if (phdr->pd_lower > offset)
+    {
+        ereport(DEBUG4, (errmsg("overwriting page %u", blockno),
+                         errdetail("This can happen after a roll-back.")));
+        phdr->pd_lower = offset;
+    }
+
     START_CRIT_SECTION();

     memcpy_s(page + phdr->pd_lower, phdr->pd_upper - phdr->pd_lower, buf, len);

@@ -819,3 +843,36 @@ ColumnarMetapageCheckVersion(Relation rel, ColumnarMetapage *metapage)
                         errhint(OLD_METAPAGE_VERSION_HINT)));
     }
 }
+
+
+/*
+ * test_columnar_storage_write_new_page is a UDF only used for testing
+ * purposes. It could make more sense to define this in columnar_debug.c,
+ * but the storage layer doesn't expose ColumnarMetapage to any other files,
+ * so we define it here.
+ */
+Datum
+test_columnar_storage_write_new_page(PG_FUNCTION_ARGS)
+{
+    Oid relationId = PG_GETARG_OID(0);
+
+    Relation relation = relation_open(relationId, AccessShareLock);
+
+    /*
+     * Allocate a new page, write some data to there, and set reserved offset
+     * to the start of that page. That way, for a subsequent write operation,
+     * storage layer would try to overwrite the page that we allocated here.
+     */
+    uint64 newPageOffset = ColumnarStorageGetReservedOffset(relation, false);
+
+    ColumnarStorageReserveData(relation, 100);
+    ColumnarStorageWrite(relation, newPageOffset, "foo_bar", 8);
+
+    ColumnarMetapage metapage = ColumnarMetapageRead(relation, false);
+    metapage.reservedOffset = newPageOffset;
+    ColumnarOverwriteMetapage(relation, metapage);
+
+    relation_close(relation, AccessShareLock);
+
+    PG_RETURN_VOID();
+}
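test_columnar_storage_write_new_page is only wired up to SQL inside the regression tests. A sketch of how such a wrapper could look and how it reaches the new pd_lower reset path; the CREATE FUNCTION statement here is an assumption, since only the C symbol is given above:

```sql
CREATE FUNCTION test_columnar_storage_write_new_page(relid regclass)
RETURNS void STRICT LANGUAGE c
AS 'citus', $$test_columnar_storage_write_new_page$$;

CREATE TABLE t_overwrite (a int) USING columnar;
INSERT INTO t_overwrite VALUES (1);

-- simulate a reserved-then-abandoned write, as an older columnar version
-- could leave behind ...
SELECT test_columnar_storage_write_new_page('t_overwrite'::regclass);

-- ... so the next write targets a page whose pd_lower is ahead of the
-- write offset; WriteToBlock() now resets pd_lower instead of erroring.
INSERT INTO t_overwrite VALUES (2);
```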
@@ -85,7 +85,6 @@ typedef struct ColumnarScanDescData
     List *scanQual;
 } ColumnarScanDescData;

-typedef struct ColumnarScanDescData *ColumnarScanDesc;

 /*
  * IndexFetchColumnarData is the scan state passed between index_fetch_begin,

@@ -173,6 +172,8 @@ columnar_beginscan(Relation relation, Snapshot snapshot,
                    ParallelTableScanDesc parallel_scan,
                    uint32 flags)
 {
+    CheckCitusVersion(ERROR);
+
     int natts = relation->rd_att->natts;

     /* attr_needed represents 0-indexed attribute numbers */

@@ -418,6 +419,8 @@ columnar_parallelscan_reinitialize(Relation rel, ParallelTableScanDesc pscan)
 static IndexFetchTableData *
 columnar_index_fetch_begin(Relation rel)
 {
+    CheckCitusVersion(ERROR);
+
     Oid relfilenode = rel->rd_node.relNode;
     if (PendingWritesInUpperTransactions(relfilenode, GetCurrentSubTransactionId()))
     {

@@ -472,8 +475,11 @@ columnar_index_fetch_tuple(struct IndexFetchTableData *sscan,
     *call_again = false;

     /*
-     * No dead tuples are possible in columnar, set it to false if it's
-     * passed to be non-NULL.
+     * Initialize all_dead to false if passed to be non-NULL.
+     *
+     * XXX: For aborted writes, we should set all_dead to true but this would
+     * require implementing columnar_index_delete_tuples for simple deletion
+     * of dead tuples (TM_IndexDeleteOp.bottomup = false).
      */
     if (all_dead)
     {

@@ -638,6 +644,8 @@ static bool
 columnar_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot,
                                   Snapshot snapshot)
 {
+    CheckCitusVersion(ERROR);
+
     uint64 rowNumber = tid_to_row_number(slot->tts_tid);
     StripeMetadata *stripeMetadata = FindStripeByRowNumber(rel, rowNumber, snapshot);
     return stripeMetadata != NULL;

@@ -649,7 +657,47 @@ static TransactionId
 columnar_index_delete_tuples(Relation rel,
                              TM_IndexDeleteOp *delstate)
 {
-    elog(ERROR, "columnar_index_delete_tuples not implemented");
+    CheckCitusVersion(ERROR);
+
+    /*
+     * XXX: We didn't bother implementing index_delete_tuples for either of
+     * the simple deletion and bottom-up deletion cases. There is no
+     * particular reason for that, just to keep things simple.
+     *
+     * See the rest of this function to see how we deal with
+     * index_delete_tuples requests made to columnarAM.
+     */
+
+    if (delstate->bottomup)
+    {
+        /*
+         * Ignore any bottom-up deletion requests.
+         *
+         * Currently the only caller in postgres that does bottom-up deletion
+         * is _bt_bottomupdel_pass, which in turn calls
+         * _bt_delitems_delete_check. And this function is okay with ndeltids
+         * being set to 0 by the tableAM for bottom-up deletion.
+         */
+        delstate->ndeltids = 0;
+        return InvalidTransactionId;
+    }
+    else
+    {
+        /*
+         * TableAM is not expected to set ndeltids to 0 for the simple
+         * deletion case, so here we cannot do the same trick that we do for
+         * bottom-up deletion.
+         * See the assertion around the table_index_delete_tuples call in pg
+         * function index_compute_xid_horizon_for_tuples.
+         *
+         * For this reason, to avoid receiving simple deletion requests for
+         * columnar tables (bottomup = false), columnar_index_fetch_tuple
+         * doesn't ever set all_dead to true in order to prevent triggering
+         * simple deletion of index tuples. But let's throw an error to be on
+         * the safe side.
+         */
+        elog(ERROR, "columnar_index_delete_tuples not implemented for simple deletion");
+    }
 }

@@ -670,6 +718,8 @@ static void
 columnar_tuple_insert(Relation relation, TupleTableSlot *slot, CommandId cid,
                       int options, BulkInsertState bistate)
 {
+    CheckCitusVersion(ERROR);
+
     /*
      * columnar_init_write_state allocates the write state in a longer
      * lasting context, so no need to worry about it.

@@ -716,6 +766,8 @@ static void
 columnar_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
                       CommandId cid, int options, BulkInsertState bistate)
 {
+    CheckCitusVersion(ERROR);
+
     ColumnarWriteState *writeState = columnar_init_write_state(relation,
                                                                RelationGetDescr(relation),
                                                                GetCurrentSubTransactionId());

@@ -790,6 +842,8 @@ columnar_relation_set_new_filenode(Relation rel,
                                    TransactionId *freezeXid,
                                    MultiXactId *minmulti)
 {
+    CheckCitusVersion(ERROR);
+
     if (persistence == RELPERSISTENCE_UNLOGGED)
     {
         ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),

@@ -825,6 +879,8 @@ columnar_relation_set_new_filenode(Relation rel,
 static void
 columnar_relation_nontransactional_truncate(Relation rel)
 {
+    CheckCitusVersion(ERROR);
+
     RelFileNode relfilenode = rel->rd_node;

     NonTransactionDropWriteState(relfilenode.relNode);

@@ -871,6 +927,8 @@ columnar_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap,
                                    double *tups_vacuumed,
                                    double *tups_recently_dead)
 {
+    CheckCitusVersion(ERROR);
+
     TupleDesc sourceDesc = RelationGetDescr(OldHeap);
     TupleDesc targetDesc = RelationGetDescr(NewHeap);

@@ -967,6 +1025,15 @@ static void
 columnar_vacuum_rel(Relation rel, VacuumParams *params,
                     BufferAccessStrategy bstrategy)
 {
+    if (!CheckCitusVersion(WARNING))
+    {
+        /*
+         * Skip if the extension catalogs are not up-to-date, but avoid
+         * erroring during auto-vacuum.
+         */
+        return;
+    }
+
     /*
      * If metapage version of relation is older, then we hint users to VACUUM
      * the relation in ColumnarMetapageCheckVersion. So if needed, upgrade

@@ -1276,6 +1343,8 @@ columnar_index_build_range_scan(Relation columnarRelation,
                                 void *callback_state,
                                 TableScanDesc scan)
 {
+    CheckCitusVersion(ERROR);
+
     if (start_blockno != 0 || numblocks != InvalidBlockNumber)
     {
         /*

@@ -1524,6 +1593,8 @@ columnar_index_validate_scan(Relation columnarRelation,
                              ValidateIndexState *
                              validateIndexState)
 {
+    CheckCitusVersion(ERROR);
+
     ColumnarReportTotalVirtualBlocks(columnarRelation, snapshot,
                                      PROGRESS_SCAN_BLOCKS_TOTAL);

@@ -1694,6 +1765,8 @@ TupleSortSkipSmallerItemPointers(Tuplesortstate *tupleSort, ItemPointer targetIt
 static uint64
 columnar_relation_size(Relation rel, ForkNumber forkNumber)
 {
+    CheckCitusVersion(ERROR);
+
     uint64 nblocks = 0;

     /* Open it at the smgr level if not already done */

@@ -1719,6 +1792,8 @@ columnar_relation_size(Relation rel, ForkNumber forkNumber)
 static bool
 columnar_relation_needs_toast_table(Relation rel)
 {
+    CheckCitusVersion(ERROR);
+
     return false;
 }

@@ -1728,6 +1803,8 @@ columnar_estimate_rel_size(Relation rel, int32 *attr_widths,
                            BlockNumber *pages, double *tuples,
                            double *allvisfrac)
 {
+    CheckCitusVersion(ERROR);
+
     RelationOpenSmgr(rel);
     *pages = smgrnblocks(rel->rd_smgr, MAIN_FORKNUM);
     *tuples = ColumnarTableRowCount(rel);

@@ -1899,6 +1976,8 @@ ColumnarTableDropHook(Oid relid)

     if (IsColumnarTableAmTable(relid))
     {
+        CheckCitusVersion(ERROR);
+
         /*
          * Drop metadata. No need to drop storage here since for
          * tableam tables storage is managed by postgres.

@@ -2020,6 +2099,8 @@ ColumnarProcessUtility(PlannedStmt *pstmt,
                                       GetCreateIndexRelationLockMode(indexStmt));
         if (rel->rd_tableam == GetColumnarTableAmRoutine())
         {
+            CheckCitusVersion(ERROR);
+
             if (!ColumnarSupportsIndexAM(indexStmt->accessMethod))
             {
                 ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),

@@ -2356,6 +2437,8 @@ PG_FUNCTION_INFO_V1(alter_columnar_table_set);
 Datum
 alter_columnar_table_set(PG_FUNCTION_ARGS)
 {
+    CheckCitusVersion(ERROR);
+
     Oid relationId = PG_GETARG_OID(0);

     Relation rel = table_open(relationId, AccessExclusiveLock); /* ALTER TABLE LOCK */

@@ -2483,6 +2566,8 @@ PG_FUNCTION_INFO_V1(alter_columnar_table_reset);
 Datum
 alter_columnar_table_reset(PG_FUNCTION_ARGS)
 {
+    CheckCitusVersion(ERROR);
+
     Oid relationId = PG_GETARG_OID(0);

     Relation rel = table_open(relationId, AccessExclusiveLock); /* ALTER TABLE LOCK */
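The CheckCitusVersion() calls added across the table AM guard against running new binaries over not-yet-upgraded catalogs. A sketch of the user-visible flow; the table name and exact error wording are illustrative:

```sql
SELECT * FROM my_columnar_table;
-- ERROR: loaded Citus library version differs from installed extension version
ALTER EXTENSION citus UPDATE;   -- the remedy
-- columnar_vacuum_rel is the deliberate exception: it only warns and
-- returns, so autovacuum keeps running without erroring repeatedly.
```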
@@ -0,0 +1,5 @@
+-- columnar--10.2-1--10.2-2.sql
+
+-- revoke read access for columnar.chunk from unprivileged
+-- user as it contains chunk min/max values
+REVOKE SELECT ON columnar.chunk FROM PUBLIC;

@@ -0,0 +1,15 @@
+-- columnar--10.2-2--10.2-3.sql
+
+-- Since stripe_first_row_number_idx is required to scan a columnar table, we
+-- need to make sure that it is created before doing anything with columnar
+-- tables during pg upgrades.
+--
+-- However, a plain btree index is not a dependency of a table, so pg_upgrade
+-- cannot guarantee that stripe_first_row_number_idx gets created when
+-- creating columnar.stripe, unless we make it a unique "constraint".
+--
+-- To do that, drop stripe_first_row_number_idx and create a unique
+-- constraint with the same name to keep the code change at a minimum.
+DROP INDEX columnar.stripe_first_row_number_idx;
+ALTER TABLE columnar.stripe ADD CONSTRAINT stripe_first_row_number_idx
+UNIQUE (storage_id, first_row_number);

@@ -0,0 +1,5 @@
+-- columnar--10.2-3--10.2-4.sql
+
+#include "udfs/columnar_ensure_am_depends_catalog/10.2-4.sql"
+
+SELECT citus_internal.columnar_ensure_am_depends_catalog();

@@ -0,0 +1,4 @@
+-- columnar--10.2-2--10.2-1.sql
+
+-- grant read access for columnar.chunk to unprivileged user
+GRANT SELECT ON columnar.chunk TO PUBLIC;

@@ -0,0 +1,4 @@
+-- columnar--10.2-3--10.2-2.sql
+
+ALTER TABLE columnar.stripe DROP CONSTRAINT stripe_first_row_number_idx;
+CREATE INDEX stripe_first_row_number_idx ON columnar.stripe USING BTREE(storage_id, first_row_number);

@@ -0,0 +1,6 @@
+-- columnar--10.2-4--10.2-3.sql
+
+DROP FUNCTION citus_internal.columnar_ensure_am_depends_catalog();
+
+-- Note that we intentionally do not delete pg_depend records that we inserted
+-- via columnar--10.2-3--10.2-4.sql (by using columnar_ensure_am_depends_catalog).

@@ -0,0 +1,40 @@
+CREATE OR REPLACE FUNCTION citus_internal.columnar_ensure_am_depends_catalog()
+  RETURNS void
+  LANGUAGE plpgsql
+  SET search_path = pg_catalog
+AS $func$
+BEGIN
+  INSERT INTO pg_depend
+  SELECT -- Define a dependency edge from "columnar table access method" ..
+         'pg_am'::regclass::oid as classid,
+         (select oid from pg_am where amname = 'columnar') as objid,
+         0 as objsubid,
+         -- ... to each object that is registered to pg_class and that lives
+         -- in "columnar" schema. That contains catalog tables, indexes
+         -- created on them and the sequences created in "columnar" schema.
+         --
+         -- Given the possibility that users might have created their own
+         -- objects in the columnar schema, we explicitly specify the list of
+         -- objects that we are interested in.
+         'pg_class'::regclass::oid as refclassid,
+         columnar_schema_members.relname::regclass::oid as refobjid,
+         0 as refobjsubid,
+         'n' as deptype
+  FROM (VALUES ('columnar.chunk'),
+               ('columnar.chunk_group'),
+               ('columnar.chunk_group_pkey'),
+               ('columnar.chunk_pkey'),
+               ('columnar.options'),
+               ('columnar.options_pkey'),
+               ('columnar.storageid_seq'),
+               ('columnar.stripe'),
+               ('columnar.stripe_first_row_number_idx'),
+               ('columnar.stripe_pkey')
+       ) columnar_schema_members(relname)
+  -- Avoid inserting duplicate entries into pg_depend.
+  EXCEPT TABLE pg_depend;
+END;
+$func$;
+COMMENT ON FUNCTION citus_internal.columnar_ensure_am_depends_catalog()
+  IS 'internal function responsible for creating dependencies from columnar '
+     'table access method to the rel objects in columnar schema';

@@ -0,0 +1,40 @@
+CREATE OR REPLACE FUNCTION citus_internal.columnar_ensure_am_depends_catalog()
+  RETURNS void
+  LANGUAGE plpgsql
+  SET search_path = pg_catalog
+AS $func$
+BEGIN
+  INSERT INTO pg_depend
+  SELECT -- Define a dependency edge from "columnar table access method" ..
+         'pg_am'::regclass::oid as classid,
+         (select oid from pg_am where amname = 'columnar') as objid,
+         0 as objsubid,
+         -- ... to each object that is registered to pg_class and that lives
+         -- in "columnar" schema. That contains catalog tables, indexes
+         -- created on them and the sequences created in "columnar" schema.
+         --
+         -- Given the possibility that users might have created their own
+         -- objects in the columnar schema, we explicitly specify the list of
+         -- objects that we are interested in.
+         'pg_class'::regclass::oid as refclassid,
+         columnar_schema_members.relname::regclass::oid as refobjid,
+         0 as refobjsubid,
+         'n' as deptype
+  FROM (VALUES ('columnar.chunk'),
+               ('columnar.chunk_group'),
+               ('columnar.chunk_group_pkey'),
+               ('columnar.chunk_pkey'),
+               ('columnar.options'),
+               ('columnar.options_pkey'),
+               ('columnar.storageid_seq'),
+               ('columnar.stripe'),
+               ('columnar.stripe_first_row_number_idx'),
+               ('columnar.stripe_pkey')
+       ) columnar_schema_members(relname)
+  -- Avoid inserting duplicate entries into pg_depend.
+  EXCEPT TABLE pg_depend;
+END;
+$func$;
+COMMENT ON FUNCTION citus_internal.columnar_ensure_am_depends_catalog()
+  IS 'internal function responsible for creating dependencies from columnar '
+     'table access method to the rel objects in columnar schema';
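A quick way to verify what columnar_ensure_am_depends_catalog() recorded; each catalog object listed in the VALUES clause above should come back as a dependency of the columnar access method:

```sql
SELECT refobjid::regclass
FROM pg_depend
WHERE classid = 'pg_am'::regclass
  AND objid = (SELECT oid FROM pg_am WHERE amname = 'columnar')
  AND refclassid = 'pg_class'::regclass;
```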
citus.control

@@ -1,6 +1,6 @@
 # Citus extension
 comment = 'Citus distributed database'
-default_version = '10.2-1'
+default_version = '10.2-4'
 module_pathname = '$libdir/citus'
 relocatable = false
 schema = pg_catalog
@@ -31,6 +31,7 @@
 #include "catalog/pg_opclass.h"
 #include "catalog/pg_proc.h"
 #include "catalog/pg_trigger.h"
+#include "catalog/pg_type.h"
 #include "commands/defrem.h"
 #include "commands/extension.h"
 #include "commands/sequence.h"

@@ -554,11 +555,16 @@ CreateDistributedTable(Oid relationId, Var *distributionColumn, char distributio
     {
         List *partitionList = PartitionList(relationId);
         Oid partitionRelationId = InvalidOid;
+        Oid namespaceId = get_rel_namespace(relationId);
+        char *schemaName = get_namespace_name(namespaceId);
+        char *relationName = get_rel_name(relationId);
+        char *parentRelationName = quote_qualified_identifier(schemaName, relationName);
+
         foreach_oid(partitionRelationId, partitionList)
         {
             CreateDistributedTable(partitionRelationId, distributionColumn,
                                    distributionMethod, shardCount, false,
-                                   colocateWithTableName, viaDeprecatedAPI);
+                                   parentRelationName, viaDeprecatedAPI);
         }
     }

@@ -592,7 +598,7 @@ CreateDistributedTable(Oid relationId, Var *distributionColumn, char distributio
  * Otherwise, the condition is ensured.
  */
 void
-EnsureSequenceTypeSupported(Oid seqOid, Oid seqTypId)
+EnsureSequenceTypeSupported(Oid seqOid, Oid attributeTypeId)
 {
     List *citusTableIdList = CitusTableTypeIdList(ANY_CITUS_TABLE_TYPE);
     Oid citusTableId = InvalidOid;

@@ -617,9 +623,9 @@ EnsureSequenceTypeSupported(Oid seqOid, Oid seqTypId)
          */
         if (currentSeqOid == seqOid)
         {
-            Oid currentSeqTypId = GetAttributeTypeOid(citusTableId,
-                                                      currentAttnum);
-            if (seqTypId != currentSeqTypId)
+            Oid currentAttributeTypId = GetAttributeTypeOid(citusTableId,
+                                                            currentAttnum);
+            if (attributeTypeId != currentAttributeTypId)
             {
                 char *sequenceName = generate_qualified_relation_name(
                     seqOid);

@@ -711,17 +717,29 @@ EnsureDistributedSequencesHaveOneType(Oid relationId, List *dependentSequenceLis
          * We should make sure that the type of the column that uses
          * that sequence is supported
          */
-        Oid seqTypId = GetAttributeTypeOid(relationId, attnum);
-        EnsureSequenceTypeSupported(sequenceOid, seqTypId);
+        Oid attributeTypeId = GetAttributeTypeOid(relationId, attnum);
+        EnsureSequenceTypeSupported(sequenceOid, attributeTypeId);

         /*
         * Alter the sequence's data type in the coordinator if needed.
          *
+         * First, we should only change the sequence type if the column
+         * is a supported sequence type. For example, if a sequence is used
+         * in an expression which then becomes a text, we should not try to
+         * alter the sequence type to text. Postgres only supports int2, int4
+         * and int8 as the sequence type.
+         *
          * A sequence's type is bigint by default and it doesn't change even if
          * it's used in an int column. We should change the type if needed,
          * and not allow future ALTER SEQUENCE ... TYPE ... commands for
-         * sequences used as defaults in distributed tables
+         * sequences used as defaults in distributed tables.
          */
-        AlterSequenceType(sequenceOid, seqTypId);
+        if (attributeTypeId == INT2OID ||
+            attributeTypeId == INT4OID ||
+            attributeTypeId == INT8OID)
+        {
+            AlterSequenceType(sequenceOid, attributeTypeId);
+        }
     }
 }
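The INT2OID/INT4OID/INT8OID guard above, restated at the SQL level with a hypothetical schema; the second column routes a sequence through an expression that yields text, so the sequence itself must keep its integer type:

```sql
CREATE SEQUENCE user_code_seq;
CREATE TABLE users (
    id bigserial,
    code text DEFAULT 'u' || nextval('user_code_seq')::text
);
-- Previously distributing this could attempt ALTER SEQUENCE
-- user_code_seq ... AS text, which Postgres rejects (sequences are
-- int2/int4/int8 only); now the retype is simply skipped for
-- non-integer columns.
SELECT create_distributed_table('users', 'id');
```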
@@ -149,16 +149,6 @@ PostprocessCreateExtensionStmt(Node *node, const char *queryString)
     /* extension management can only be done via coordinator node */
     EnsureCoordinator();

-    /*
-     * Make sure that no new nodes are added after this point until the end of the
-     * transaction by taking a RowShareLock on pg_dist_node, which conflicts with the
-     * ExclusiveLock taken by citus_add_node.
-     * This guarantees that all active nodes will have the extension, because they will
-     * either get it now, or get it in citus_add_node after this transaction finishes and
-     * the pg_dist_object record becomes visible.
-     */
-    LockRelationOid(DistNodeRelationId(), RowShareLock);
-
     /*
      * Make sure that the current transaction is already in sequential mode,
      * or can still safely be put in sequential mode

@@ -262,16 +252,6 @@ PreprocessDropExtensionStmt(Node *node, const char *queryString,
     /* extension management can only be done via coordinator node */
     EnsureCoordinator();

-    /*
-     * Make sure that no new nodes are added after this point until the end of the
-     * transaction by taking a RowShareLock on pg_dist_node, which conflicts with the
-     * ExclusiveLock taken by citus_add_node.
-     * This guarantees that all active nodes will drop the extension, because they will
-     * either get it now, or get it in citus_add_node after this transaction finishes and
-     * the pg_dist_object record becomes visible.
-     */
-    LockRelationOid(DistNodeRelationId(), RowShareLock);
-
     /*
      * Make sure that the current transaction is already in sequential mode,
      * or can still safely be put in sequential mode

@@ -398,15 +378,6 @@ PreprocessAlterExtensionSchemaStmt(Node *node, const char *queryString,
     /* extension management can only be done via coordinator node */
     EnsureCoordinator();

-    /*
-     * Make sure that no new nodes are added after this point until the end of the
-     * transaction by taking a RowShareLock on pg_dist_node, which conflicts with the
-     * ExclusiveLock taken by citus_add_node.
-     * This guarantees that all active nodes will update the extension schema after
-     * this transaction finishes and the pg_dist_object record becomes visible.
-     */
-    LockRelationOid(DistNodeRelationId(), RowShareLock);
-
     /*
      * Make sure that the current transaction is already in sequential mode,
      * or can still safely be put in sequential mode

@@ -466,16 +437,6 @@ PreprocessAlterExtensionUpdateStmt(Node *node, const char *queryString,
     /* extension management can only be done via coordinator node */
     EnsureCoordinator();

-    /*
-     * Make sure that no new nodes are added after this point until the end of the
-     * transaction by taking a RowShareLock on pg_dist_node, which conflicts with the
-     * ExclusiveLock taken by citus_add_node.
-     * This guarantees that all active nodes will update the extension version, because
-     * they will either get it now, or get it in citus_add_node after this transaction
-     * finishes and the pg_dist_object record becomes visible.
-     */
-    LockRelationOid(DistNodeRelationId(), RowShareLock);
-
     /*
      * Make sure that the current transaction is already in sequential mode,
      * or can still safely be put in sequential mode
@@ -83,6 +83,7 @@ static void EnsureSequentialModeForFunctionDDL(void);
 static void TriggerSyncMetadataToPrimaryNodes(void);
 static bool ShouldPropagateCreateFunction(CreateFunctionStmt *stmt);
 static bool ShouldPropagateAlterFunction(const ObjectAddress *address);
+static bool ShouldAddFunctionSignature(FunctionParameterMode mode);
 static ObjectAddress FunctionToObjectAddress(ObjectType objectType,
                                              ObjectWithArgs *objectWithArgs,
                                              bool missing_ok);

@@ -1298,7 +1299,11 @@ CreateFunctionStmtObjectAddress(Node *node, bool missing_ok)
     FunctionParameter *funcParam = NULL;
     foreach_ptr(funcParam, stmt->parameters)
     {
-        objectWithArgs->objargs = lappend(objectWithArgs->objargs, funcParam->argType);
+        if (ShouldAddFunctionSignature(funcParam->mode))
+        {
+            objectWithArgs->objargs = lappend(objectWithArgs->objargs,
+                                              funcParam->argType);
+        }
     }

     return FunctionToObjectAddress(objectType, objectWithArgs, missing_ok);

@@ -1855,8 +1860,7 @@ ObjectWithArgsFromOid(Oid funcOid)

     for (int i = 0; i < numargs; i++)
     {
-        if (argModes == NULL ||
-            argModes[i] != PROARGMODE_OUT || argModes[i] != PROARGMODE_TABLE)
+        if (argModes == NULL || ShouldAddFunctionSignature(argModes[i]))
         {
             objargs = lappend(objargs, makeTypeNameFromOid(argTypes[i], -1));
         }

@@ -1869,6 +1873,35 @@ ObjectWithArgsFromOid(Oid funcOid)
 }

+/*
+ * ShouldAddFunctionSignature takes a FunctionParameterMode and returns true if it should
+ * be included in the function signature. Returns false otherwise.
+ */
+static bool
+ShouldAddFunctionSignature(FunctionParameterMode mode)
+{
+    /* only input parameters should be added to the generated signature */
+    switch (mode)
+    {
+        case FUNC_PARAM_IN:
+        case FUNC_PARAM_INOUT:
+        case FUNC_PARAM_VARIADIC:
+        {
+            return true;
+        }
+
+        case FUNC_PARAM_OUT:
+        case FUNC_PARAM_TABLE:
+        {
+            return false;
+        }
+
+        default:
+            return true;
+    }
+}

 /*
  * FunctionToObjectAddress returns the ObjectAddress of a Function or Procedure based on
  * its type and ObjectWithArgs describing the Function/Procedure. If missing_ok is set to
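ShouldAddFunctionSignature in user terms, with a hypothetical function: OUT and TABLE parameters are not part of a function's identity, so the signature Citus generates for lookups must list only IN/INOUT/VARIADIC parameters:

```sql
CREATE FUNCTION get_sum(a int, b int, OUT total int)
RETURNS int LANGUAGE sql AS 'SELECT $1 + $2;';

-- Before the fix, the OUT argument leaked into the generated signature and
-- the lookup failed; now the function distributes cleanly.
SELECT create_distributed_function('get_sum(int,int)');
```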
@@ -140,13 +140,6 @@ PostprocessAlterRoleStmt(Node *node, const char *queryString)

     AlterRoleStmt *stmt = castNode(AlterRoleStmt, node);

-    /*
-     * Make sure that no new nodes are added after this point until the end of the
-     * transaction by taking a RowShareLock on pg_dist_node, which conflicts with the
-     * ExclusiveLock taken by citus_add_node.
-     */
-    LockRelationOid(DistNodeRelationId(), RowShareLock);
-
     DefElem *option = NULL;
     foreach_ptr(option, stmt->options)
     {
|
@ -130,16 +130,6 @@ PreprocessCompositeTypeStmt(Node *node, const char *queryString,
|
|||
*/
|
||||
EnsureCoordinator();
|
||||
|
||||
/*
|
||||
* Make sure that no new nodes are added after this point until the end of the
|
||||
* transaction by taking a RowShareLock on pg_dist_node, which conflicts with the
|
||||
* ExclusiveLock taken by citus_add_node.
|
||||
* This guarantees that all active nodes will have the object, because they will
|
||||
* either get it now, or get it in citus_add_node after this transaction finishes and
|
||||
* the pg_dist_object record becomes visible.
|
||||
*/
|
||||
LockRelationOid(DistNodeRelationId(), RowShareLock);
|
||||
|
||||
/* fully qualify before lookup and later deparsing */
|
||||
QualifyTreeNode(node);
|
||||
|
||||
|
|
|
@ -33,7 +33,9 @@
|
|||
#include "access/attnum.h"
|
||||
#include "access/heapam.h"
|
||||
#include "access/htup_details.h"
|
||||
#if PG_VERSION_NUM < 140000
|
||||
#include "access/xact.h"
|
||||
#endif
|
||||
#include "catalog/catalog.h"
|
||||
#include "catalog/dependency.h"
|
||||
#include "commands/dbcommands.h"
|
||||
|
@ -51,7 +53,9 @@
|
|||
#include "distributed/local_executor.h"
|
||||
#include "distributed/maintenanced.h"
|
||||
#include "distributed/coordinator_protocol.h"
|
||||
#if PG_VERSION_NUM < 140000
|
||||
#include "distributed/metadata_cache.h"
|
||||
#endif
|
||||
#include "distributed/metadata_sync.h"
|
||||
#include "distributed/multi_executor.h"
|
||||
#include "distributed/multi_explain.h"
|
||||
|
@ -67,6 +71,7 @@
|
|||
#include "tcop/utility.h"
|
||||
#include "utils/builtins.h"
|
||||
#include "utils/lsyscache.h"
|
||||
#include "utils/snapmgr.h"
|
||||
#include "utils/syscache.h"
|
||||
|
||||
bool EnableDDLPropagation = true; /* ddl propagation is enabled */
|
||||
|
@ -88,6 +93,9 @@ static void ProcessUtilityInternal(PlannedStmt *pstmt,
|
|||
struct QueryEnvironment *queryEnv,
|
||||
DestReceiver *dest,
|
||||
QueryCompletionCompat *completionTag);
|
||||
#if PG_VERSION_NUM >= 140000
|
||||
static void set_indexsafe_procflags(void);
|
||||
#endif
|
||||
static char * SetSearchPathToCurrentSearchPathCommand(void);
|
||||
static char * CurrentSearchPath(void);
|
||||
static void IncrementUtilityHookCountersIfNecessary(Node *parsetree);
|
||||
|

@ -906,9 +914,35 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
	/*
	 * Start a new transaction to make sure CONCURRENTLY commands
	 * on localhost do not block waiting for this transaction to finish.
	 *
	 * In addition to doing that, we also need to tell other backends
	 * --including the ones spawned for connections opened to localhost to
	 * build indexes on shards of this relation-- that concurrent index
	 * builds can safely ignore us.
	 *
	 * Normally, DefineIndex() only does that if index doesn't have any
	 * predicates (i.e.: where clause) and no index expressions at all.
	 * However, now that we already called standard process utility,
	 * index build on the shell table is finished anyway.
	 *
	 * The reason behind doing so is that we cannot guarantee not
	 * grabbing any snapshots via adaptive executor, and the backends
	 * creating indexes on local shards (if any) might block on waiting
	 * for current xact of the current backend to finish, which would
	 * cause self deadlocks that are not detectable.
	 */
	if (ddlJob->startNewTransaction)
	{
#if PG_VERSION_NUM < 140000

		/*
		 * Older versions of postgres don't have the PROC_IN_SAFE_IC flag,
		 * so we cannot use set_indexsafe_procflags in those versions.
		 *
		 * For this reason, we do our best to ensure not grabbing any
		 * snapshots later in the executor.
		 */

		/*
		 * If cache is not populated, system catalog lookups will cause
		 * the xmin of current backend to change. Then the last phase

@ -929,8 +963,34 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
		 * will already be in the hash table, hence we won't be holding any snapshots.
		 */
		WarmUpConnParamsHash();
#endif

		/*
		 * Since it is not certain whether the code-path that we followed
		 * until reaching here caused grabbing any snapshots or not, we
		 * need to pop the active snapshot if we had any, to ensure not
		 * leaking any snapshots.
		 *
		 * For example, EnsureCoordinator might return without grabbing
		 * any snapshots if we didn't receive any invalidation messages,
		 * but the opposite is also possible.
		 */
		if (ActiveSnapshotSet())
		{
			PopActiveSnapshot();
		}

		CommitTransactionCommand();
		StartTransactionCommand();

#if PG_VERSION_NUM >= 140000

		/*
		 * Tell other backends to ignore us, even if we grab any
		 * snapshots via adaptive executor.
		 */
		set_indexsafe_procflags();
#endif
	}

	/* save old commit protocol to restore at xact end */

@ -997,6 +1057,33 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
}


#if PG_VERSION_NUM >= 140000

/*
 * set_indexsafe_procflags sets PROC_IN_SAFE_IC flag in MyProc->statusFlags.
 *
 * The flag is reset automatically at transaction end, so it must be set
 * for each transaction.
 *
 * Copied from pg/src/backend/commands/indexcmds.c
 * Also see pg commit c98763bf51bf610b3ee7e209fc76c3ff9a6b3163.
 */
static void
set_indexsafe_procflags(void)
{
	Assert(MyProc->xid == InvalidTransactionId &&
		   MyProc->xmin == InvalidTransactionId);

	LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
	MyProc->statusFlags |= PROC_IN_SAFE_IC;
	ProcGlobal->statusFlags[MyProc->pgxactoff] = MyProc->statusFlags;
	LWLockRelease(ProcArrayLock);
}


#endif


/*
 * CreateCustomDDLTaskList creates a DDLJob which will apply a command to all placements
 * of shards of a distributed table. The command to be applied is generated by the

@ -1260,7 +1347,8 @@ DDLTaskList(Oid relationId, const char *commandString)
List *
NodeDDLTaskList(TargetWorkerSet targets, List *commands)
{
	List *workerNodes = TargetWorkerSetNodeList(targets, NoLock);
	/* don't allow concurrent node list changes that require an exclusive lock */
	List *workerNodes = TargetWorkerSetNodeList(targets, RowShareLock);

	if (list_length(workerNodes) <= 0)
	{

@ -72,8 +72,8 @@ InitConnParams()
/*
 * ResetConnParams frees all strings in the keywords and parameters arrays,
 * sets their elements to null, and resets the ConnParamsSize to zero before
 * adding back any hardcoded global connection settings (at present, only the
 * fallback_application_name of 'citus').
 * adding back any hardcoded global connection settings (at present, there
 * are none).
 */
void
ResetConnParams()

@ -89,8 +89,6 @@ ResetConnParams()
	ConnParams.size = 0;

	InvalidateConnParamsHashEntries();

	AddConnParam("fallback_application_name", CITUS_APPLICATION_NAME);
}


@ -253,14 +251,16 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values,
		"port",
		"dbname",
		"user",
		"client_encoding"
		"client_encoding",
		"application_name"
	};
	const char *runtimeValues[] = {
		key->hostname,
		nodePortString,
		key->database,
		key->user,
		GetDatabaseEncodingName()
		GetDatabaseEncodingName(),
		CITUS_APPLICATION_NAME
	};
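
One observable effect of passing CITUS_APPLICATION_NAME as an explicit application_name runtime parameter (rather than the former fallback_application_name) is that libpq no longer lets the client environment override it, so Citus-internal connections become easy to spot. A hedged way to check:

-- illustrative: inspect the application_name of current backends
SELECT application_name, count(*)
FROM pg_stat_activity
GROUP BY application_name;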

	/*

@ -36,6 +36,7 @@
#include "distributed/version_compat.h"
#include "distributed/worker_log_messages.h"
#include "mb/pg_wchar.h"
#include "pg_config.h"
#include "portability/instr_time.h"
#include "storage/ipc.h"
#include "utils/hsearch.h"

@ -1155,6 +1156,8 @@ StartConnectionEstablishment(MultiConnection *connection, ConnectionHashKey *key
}


#if PG_VERSION_NUM < 140000

/*
 * WarmUpConnParamsHash warms up the ConnParamsHash by loading all the
 * conn params for active primary nodes.

@ -1176,6 +1179,9 @@ WarmUpConnParamsHash(void)
}


#endif


/*
 * FindOrCreateConnParamsEntry searches ConnParamsHash for the given key,
 * if it is not found, it is created.

@ -48,6 +48,7 @@
#include "distributed/metadata_cache.h"
#include "distributed/metadata_sync.h"
#include "distributed/metadata_utility.h"
#include "distributed/namespace_utils.h"
#include "distributed/relay_utility.h"
#include "distributed/version_compat.h"
#include "distributed/worker_protocol.h"

@ -739,6 +740,12 @@ deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, int64 shardid,
					 relationName),
					 indexStmt->accessMethod);

	/*
	 * Switch to empty search_path to deparse_index_columns to produce fully-
	 * qualified names in expressions.
	 */
	PushOverrideEmptySearchPath(CurrentMemoryContext);

	/* index column or expression list begins here */
	appendStringInfoChar(buffer, '(');
	deparse_index_columns(buffer, indexStmt->indexParams, deparseContext);

@ -749,10 +756,15 @@ deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, int64 shardid,
	{
		appendStringInfoString(buffer, "INCLUDE (");
		deparse_index_columns(buffer, indexStmt->indexIncludingParams, deparseContext);
		appendStringInfoChar(buffer, ')');
		appendStringInfoString(buffer, ") ");
	}

	AppendStorageParametersToString(buffer, indexStmt->options);
	if (indexStmt->options != NIL)
	{
		appendStringInfoString(buffer, "WITH (");
		AppendStorageParametersToString(buffer, indexStmt->options);
		appendStringInfoString(buffer, ") ");
	}

	if (indexStmt->whereClause != NULL)
	{

@ -760,6 +772,9 @@ deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, int64 shardid,
						 deparseContext, false,
						 false));
	}

	/* revert back to original search_path */
	PopOverrideSearchPath();
}


@ -948,8 +963,9 @@ deparse_index_columns(StringInfo buffer, List *indexParameterList, List *deparse
		/* Commit on postgres: 911e70207703799605f5a0e8aad9f06cff067c63 */
		if (indexElement->opclassopts != NIL)
		{
			ereport(ERROR, errmsg(
				"citus currently doesn't support operator class parameters in indexes"));
			appendStringInfoString(buffer, "(");
			AppendStorageParametersToString(buffer, indexElement->opclassopts);
			appendStringInfoString(buffer, ") ");
		}
#endif


@ -1081,13 +1097,6 @@ AppendStorageParametersToString(StringInfo stringBuffer, List *optionList)
	ListCell *optionCell = NULL;
	bool firstOptionPrinted = false;

	if (optionList == NIL)
	{
		return;
	}

	appendStringInfo(stringBuffer, " WITH (");

	foreach(optionCell, optionList)
	{
		DefElem *option = (DefElem *) lfirst(optionCell);

@ -1104,8 +1113,6 @@ AppendStorageParametersToString(StringInfo stringBuffer, List *optionList)
						 quote_identifier(optionName),
						 quote_literal_cstr(optionValue));
	}

	appendStringInfo(stringBuffer, ")");
}


@ -2061,6 +2061,8 @@ ShouldInitiateMetadataSync(bool *lockFailure)
Datum
citus_internal_add_partition_metadata(PG_FUNCTION_ARGS)
{
	CheckCitusVersion(ERROR);

	PG_ENSURE_ARGNOTNULL(0, "relation");
	Oid relationId = PG_GETARG_OID(0);

@ -2211,6 +2213,8 @@ EnsurePartitionMetadataIsSane(Oid relationId, char distributionMethod, int coloc
Datum
citus_internal_add_shard_metadata(PG_FUNCTION_ARGS)
{
	CheckCitusVersion(ERROR);

	PG_ENSURE_ARGNOTNULL(0, "relation");
	Oid relationId = PG_GETARG_OID(0);

@ -2426,6 +2430,8 @@ EnsureShardMetadataIsSane(Oid relationId, int64 shardId, char storageType,
Datum
citus_internal_add_placement_metadata(PG_FUNCTION_ARGS)
{
	CheckCitusVersion(ERROR);

	int64 shardId = PG_GETARG_INT64(0);
	int32 shardState = PG_GETARG_INT32(1);
	int64 shardLength = PG_GETARG_INT64(2);

@ -2537,6 +2543,8 @@ ShouldSkipMetadataChecks(void)
Datum
citus_internal_update_placement_metadata(PG_FUNCTION_ARGS)
{
	CheckCitusVersion(ERROR);

	int64 shardId = PG_GETARG_INT64(0);
	int32 sourceGroupId = PG_GETARG_INT32(1);
	int32 targetGroupId = PG_GETARG_INT32(2);

@ -2602,6 +2610,8 @@ citus_internal_update_placement_metadata(PG_FUNCTION_ARGS)
Datum
citus_internal_delete_shard_metadata(PG_FUNCTION_ARGS)
{
	CheckCitusVersion(ERROR);

	int64 shardId = PG_GETARG_INT64(0);

	if (!ShouldSkipMetadataChecks())

@ -2640,6 +2650,8 @@ citus_internal_delete_shard_metadata(PG_FUNCTION_ARGS)
Datum
citus_internal_update_relation_colocation(PG_FUNCTION_ARGS)
{
	CheckCitusVersion(ERROR);

	Oid relationId = PG_GETARG_OID(0);
	uint32 tagetColocationId = PG_GETARG_UINT32(1);


@ -180,9 +180,6 @@ citus_set_coordinator_host(PG_FUNCTION_ARGS)
	Name nodeClusterName = PG_GETARG_NAME(3);
	nodeMetadata.nodeCluster = NameStr(*nodeClusterName);

	/* prevent concurrent modification */
	LockRelationOid(DistNodeRelationId(), RowShareLock);

	bool isCoordinatorInMetadata = false;
	WorkerNode *coordinatorNode = PrimaryNodeForGroup(COORDINATOR_GROUP_ID,
													  &isCoordinatorInMetadata);

@ -1410,12 +1407,6 @@ AddNodeMetadata(char *nodeName, int32 nodePort,

	*nodeAlreadyExists = false;

	/*
	 * Prevent / wait for concurrent modification before checking whether
	 * the worker already exists in pg_dist_node.
	 */
	LockRelationOid(DistNodeRelationId(), RowShareLock);

	WorkerNode *workerNode = FindWorkerNodeAnyCluster(nodeName, nodePort);
	if (workerNode != NULL)
	{

@ -39,7 +39,6 @@
#include "utils/syscache.h"


static void AddInsertAliasIfNeeded(Query *query);
static void UpdateTaskQueryString(Query *query, Task *task);
static RelationShard * FindRelationShard(Oid inputRelationId, List *relationShardList);
static void ConvertRteToSubqueryWithEmptyResult(RangeTblEntry *rte);

@ -159,7 +158,7 @@ RebuildQueryStrings(Job *workerJob)
 * deparsing issues (e.g. RETURNING might reference the original table name,
 * which has been replaced by a shard name).
 */
static void
void
AddInsertAliasIfNeeded(Query *query)
{
	Assert(query->commandType == CMD_INSERT);

@ -174,6 +174,8 @@ DeparseLocalShardQuery(Query *jobQuery, List *relationShardList, Oid
	 */
	Assert(!CheckInsertSelectQuery(jobQuery));

	AddInsertAliasIfNeeded(jobQuery);

	/*
	 * For INSERT queries we cannot use pg_get_query_def. Mainly because we
	 * cannot run UpdateRelationToShardNames on an INSERT query. This is

@ -1487,7 +1487,9 @@ WrapQueryForExplainAnalyze(const char *queryString, TupleDesc tupleDesc)
		}

		Form_pg_attribute attr = &tupleDesc->attrs[columnIndex];
		char *attrType = format_type_with_typemod(attr->atttypid, attr->atttypmod);
		char *attrType = format_type_extended(attr->atttypid, attr->atttypmod,
											  FORMAT_TYPE_TYPEMOD_GIVEN |
											  FORMAT_TYPE_FORCE_QUALIFY);

		appendStringInfo(columnDef, "field_%d %s", columnIndex, attrType);
	}

@ -1616,7 +1616,19 @@ MasterAggregateExpression(Aggref *originalAggregate,
		Expr *directarg;
		foreach_ptr(directarg, originalAggregate->aggdirectargs)
		{
			if (!IsA(directarg, Const) && !IsA(directarg, Param))
			/*
			 * Need to replace nodes that contain any Vars with Vars referring
			 * to the related column of the result set returned for the worker
			 * aggregation.
			 *
			 * When there are no Vars, then the expression can be fully evaluated
			 * on the coordinator, so we skip it here. This is not just an
			 * optimization, but the result of the expression might require
			 * calling the final function of the aggregate, and doing so when
			 * there are no input rows (i.e.: with an empty tuple slot) is not
			 * desirable for the node-executor methods.
			 */
			if (pull_var_clause_default((Node *) directarg) != NIL)
			{
				Var *var = makeVar(masterTableId, walkerContext->columnId,
								   exprType((Node *) directarg),

@ -3070,7 +3082,13 @@ WorkerAggregateExpressionList(Aggref *originalAggregate,
		Expr *directarg;
		foreach_ptr(directarg, originalAggregate->aggdirectargs)
		{
			if (!IsA(directarg, Const) && !IsA(directarg, Param))
			/*
			 * The worker aggregation should execute any node that contains any
			 * Var nodes and return the result in the targetlist, so that the
			 * combine query can then fetch the result via remote scan; see
			 * MasterAggregateExpression.
			 */
			if (pull_var_clause_default((Node *) directarg) != NIL)
			{
				workerAggregateList = lappend(workerAggregateList, directarg);
			}
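
A rough illustration of the distinction drawn by pull_var_clause_default above, reusing the aggdata table from the regression tests further below (a sketch, not part of the change itself):

-- the direct argument (key / 10.0) contains a Var, so the worker computes it
-- and ships it back for the combine query
select key, percentile_cont(key / 10.0) within group (order by val)
from aggdata group by key;
-- a Var-free direct argument can be evaluated entirely on the coordinator
select percentile_cont(0.5) within group (order by val) from aggdata;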

@ -0,0 +1,5 @@
-- citus--10.2-1--10.2-2

-- bump version to 10.2-2

#include "../../columnar/sql/columnar--10.2-1--10.2-2.sql"

@ -0,0 +1,5 @@
-- citus--10.2-2--10.2-3

-- bump version to 10.2-3

#include "../../columnar/sql/columnar--10.2-2--10.2-3.sql"

@ -0,0 +1,10 @@
-- citus--10.2-3--10.2-4

-- bump version to 10.2-4

#include "../../columnar/sql/columnar--10.2-3--10.2-4.sql"

#include "udfs/fix_partition_shard_index_names/10.2-4.sql"
#include "udfs/fix_all_partition_shard_index_names/10.2-4.sql"
#include "udfs/worker_fix_partition_shard_index_names/10.2-4.sql"
#include "udfs/citus_finish_pg_upgrade/10.2-4.sql"

@ -0,0 +1,3 @@
-- citus--10.2-2--10.2-1

#include "../../../columnar/sql/downgrades/columnar--10.2-2--10.2-1.sql"

@ -0,0 +1,3 @@
-- citus--10.2-3--10.2-2

#include "../../../columnar/sql/downgrades/columnar--10.2-3--10.2-2.sql"

@ -0,0 +1,12 @@
-- citus--10.2-4--10.2-3

DROP FUNCTION pg_catalog.fix_all_partition_shard_index_names();
DROP FUNCTION pg_catalog.fix_partition_shard_index_names(regclass);
DROP FUNCTION pg_catalog.worker_fix_partition_shard_index_names(regclass, text, text);

#include "../udfs/citus_finish_pg_upgrade/10.2-1.sql"

-- This needs to be done after downgrading citus_finish_pg_upgrade. This is
-- because citus_finish_pg_upgrade/10.2-4 depends on columnar_ensure_am_depends_catalog,
-- which is dropped by columnar--10.2-4--10.2-3.sql
#include "../../../columnar/sql/downgrades/columnar--10.2-4--10.2-3.sql"

@ -0,0 +1,144 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_finish_pg_upgrade()
    RETURNS void
    LANGUAGE plpgsql
    SET search_path = pg_catalog
    AS $cppu$
DECLARE
    table_name regclass;
    command text;
    trigger_name text;
BEGIN

    IF substring(current_Setting('server_version'), '\d+')::int >= 14 THEN
    EXECUTE $cmd$
        CREATE AGGREGATE array_cat_agg(anycompatiblearray) (SFUNC = array_cat, STYPE = anycompatiblearray);
        COMMENT ON AGGREGATE array_cat_agg(anycompatiblearray)
        IS 'concatenate input arrays into a single array';
    $cmd$;
    ELSE
    EXECUTE $cmd$
        CREATE AGGREGATE array_cat_agg(anyarray) (SFUNC = array_cat, STYPE = anyarray);
        COMMENT ON AGGREGATE array_cat_agg(anyarray)
        IS 'concatenate input arrays into a single array';
    $cmd$;
    END IF;

    --
    -- Citus creates the array_cat_agg but because of a compatibility
    -- issue between pg13-pg14, we drop and create it during upgrade.
    -- And as Citus creates it, there needs to be a dependency to the
    -- Citus extension, so we create that dependency here.
    -- We are not using:
    --  ALTER EXTENSION citus DROP/CREATE AGGREGATE array_cat_agg
    -- because we don't have an easy way to check if the aggregate
    -- exists with anyarray type or anycompatiblearray type.

    INSERT INTO pg_depend
    SELECT
        'pg_proc'::regclass::oid as classid,
        (SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') as objid,
        0 as objsubid,
        'pg_extension'::regclass::oid as refclassid,
        (select oid from pg_extension where extname = 'citus') as refobjid,
        0 as refobjsubid ,
        'e' as deptype;

    --
    -- restore citus catalog tables
    --
    INSERT INTO pg_catalog.pg_dist_partition SELECT * FROM public.pg_dist_partition;
    INSERT INTO pg_catalog.pg_dist_shard SELECT * FROM public.pg_dist_shard;
    INSERT INTO pg_catalog.pg_dist_placement SELECT * FROM public.pg_dist_placement;
    INSERT INTO pg_catalog.pg_dist_node_metadata SELECT * FROM public.pg_dist_node_metadata;
    INSERT INTO pg_catalog.pg_dist_node SELECT * FROM public.pg_dist_node;
    INSERT INTO pg_catalog.pg_dist_local_group SELECT * FROM public.pg_dist_local_group;
    INSERT INTO pg_catalog.pg_dist_transaction SELECT * FROM public.pg_dist_transaction;
    INSERT INTO pg_catalog.pg_dist_colocation SELECT * FROM public.pg_dist_colocation;
    -- enterprise catalog tables
    INSERT INTO pg_catalog.pg_dist_authinfo SELECT * FROM public.pg_dist_authinfo;
    INSERT INTO pg_catalog.pg_dist_poolinfo SELECT * FROM public.pg_dist_poolinfo;

    INSERT INTO pg_catalog.pg_dist_rebalance_strategy SELECT
        name,
        default_strategy,
        shard_cost_function::regprocedure::regproc,
        node_capacity_function::regprocedure::regproc,
        shard_allowed_on_node_function::regprocedure::regproc,
        default_threshold,
        minimum_threshold,
        improvement_threshold
    FROM public.pg_dist_rebalance_strategy;

    --
    -- drop backup tables
    --
    DROP TABLE public.pg_dist_authinfo;
    DROP TABLE public.pg_dist_colocation;
    DROP TABLE public.pg_dist_local_group;
    DROP TABLE public.pg_dist_node;
    DROP TABLE public.pg_dist_node_metadata;
    DROP TABLE public.pg_dist_partition;
    DROP TABLE public.pg_dist_placement;
    DROP TABLE public.pg_dist_poolinfo;
    DROP TABLE public.pg_dist_shard;
    DROP TABLE public.pg_dist_transaction;
    DROP TABLE public.pg_dist_rebalance_strategy;

    --
    -- reset sequences
    --
    PERFORM setval('pg_catalog.pg_dist_shardid_seq', (SELECT MAX(shardid)+1 AS max_shard_id FROM pg_dist_shard), false);
    PERFORM setval('pg_catalog.pg_dist_placement_placementid_seq', (SELECT MAX(placementid)+1 AS max_placement_id FROM pg_dist_placement), false);
    PERFORM setval('pg_catalog.pg_dist_groupid_seq', (SELECT MAX(groupid)+1 AS max_group_id FROM pg_dist_node), false);
    PERFORM setval('pg_catalog.pg_dist_node_nodeid_seq', (SELECT MAX(nodeid)+1 AS max_node_id FROM pg_dist_node), false);
    PERFORM setval('pg_catalog.pg_dist_colocationid_seq', (SELECT MAX(colocationid)+1 AS max_colocation_id FROM pg_dist_colocation), false);

    --
    -- register triggers
    --
    FOR table_name IN SELECT logicalrelid FROM pg_catalog.pg_dist_partition
    LOOP
        trigger_name := 'truncate_trigger_' || table_name::oid;
        command := 'create trigger ' || trigger_name || ' after truncate on ' || table_name || ' execute procedure pg_catalog.citus_truncate_trigger()';
        EXECUTE command;
        command := 'update pg_trigger set tgisinternal = true where tgname = ' || quote_literal(trigger_name);
        EXECUTE command;
    END LOOP;

    --
    -- set dependencies
    --
    INSERT INTO pg_depend
    SELECT
        'pg_class'::regclass::oid as classid,
        p.logicalrelid::regclass::oid as objid,
        0 as objsubid,
        'pg_extension'::regclass::oid as refclassid,
        (select oid from pg_extension where extname = 'citus') as refobjid,
        0 as refobjsubid ,
        'n' as deptype
    FROM pg_catalog.pg_dist_partition p;

    -- set dependencies for columnar table access method
    PERFORM citus_internal.columnar_ensure_am_depends_catalog();

    -- restore pg_dist_object from the stable identifiers
    TRUNCATE citus.pg_dist_object;
    INSERT INTO citus.pg_dist_object (classid, objid, objsubid, distribution_argument_index, colocationid)
    SELECT
        address.classid,
        address.objid,
        address.objsubid,
        naming.distribution_argument_index,
        naming.colocationid
    FROM
        public.pg_dist_object naming,
        pg_catalog.pg_get_object_address(naming.type, naming.object_names, naming.object_args) address;

    DROP TABLE public.pg_dist_object;
END;
$cppu$;

COMMENT ON FUNCTION pg_catalog.citus_finish_pg_upgrade()
    IS 'perform tasks to restore citus settings from a location that has been prepared before pg_upgrade';
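
A hedged usage sketch: after pg_upgrade has run and the new server is started, the documented flow invokes this function to restore the Citus catalogs, and the recreated aggregate can be sanity-checked right after:

-- illustrative only
SELECT citus_finish_pg_upgrade();
SELECT array_cat_agg(v) FROM (VALUES (ARRAY[1, 2]), (ARRAY[3])) AS t(v);
-- expected: {1,2,3}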

@ -120,6 +120,9 @@ BEGIN
        'n' as deptype
    FROM pg_catalog.pg_dist_partition p;

    -- set dependencies for columnar table access method
    PERFORM citus_internal.columnar_ensure_am_depends_catalog();

    -- restore pg_dist_object from the stable identifiers
    TRUNCATE citus.pg_dist_object;
    INSERT INTO citus.pg_dist_object (classid, objid, objsubid, distribution_argument_index, colocationid)

21 src/backend/distributed/sql/udfs/fix_all_partition_shard_index_names/10.2-4.sql generated Normal file

@ -0,0 +1,21 @@
CREATE OR REPLACE FUNCTION pg_catalog.fix_all_partition_shard_index_names()
  RETURNS SETOF regclass
  LANGUAGE plpgsql
  AS $$
DECLARE
  dist_partitioned_table_name regclass;
BEGIN
  FOR dist_partitioned_table_name IN SELECT p.logicalrelid
      FROM pg_dist_partition p
      JOIN pg_class c ON p.logicalrelid = c.oid
      WHERE c.relkind = 'p'
      ORDER BY c.relname, c.oid
  LOOP
    EXECUTE 'SELECT fix_partition_shard_index_names( ' || quote_literal(dist_partitioned_table_name) || ' )';
    RETURN NEXT dist_partitioned_table_name;
  END LOOP;
  RETURN;
END;
$$;
COMMENT ON FUNCTION pg_catalog.fix_all_partition_shard_index_names()
  IS 'fix index names on partition shards of all tables';
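
Illustrative usage on the coordinator; the table name in the second call is hypothetical:

SELECT fix_all_partition_shard_index_names();
-- or, for a single distributed partitioned table:
SELECT fix_partition_shard_index_names('events_partitioned'::regclass);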

@ -0,0 +1,6 @@
CREATE FUNCTION pg_catalog.fix_partition_shard_index_names(table_name regclass)
  RETURNS void
  LANGUAGE C STRICT
  AS 'MODULE_PATHNAME', $$fix_partition_shard_index_names$$;
COMMENT ON FUNCTION pg_catalog.fix_partition_shard_index_names(table_name regclass)
  IS 'fix index names on partition shards of given table';

10 src/backend/distributed/sql/udfs/worker_fix_partition_shard_index_names/10.2-4.sql generated Normal file

@ -0,0 +1,10 @@
CREATE FUNCTION pg_catalog.worker_fix_partition_shard_index_names(parent_shard_index regclass,
                                                                  partition_shard text,
                                                                  new_partition_shard_index_name text)
  RETURNS void
  LANGUAGE C STRICT
  AS 'MODULE_PATHNAME', $$worker_fix_partition_shard_index_names$$;
COMMENT ON FUNCTION pg_catalog.worker_fix_partition_shard_index_names(parent_shard_index regclass,
                                                                      partition_shard text,
                                                                      new_partition_shard_index_name text)
  IS 'fix the name of the index on given partition shard that is child of given parent_index';

@ -11,11 +11,13 @@
#include "access/genam.h"
#include "access/heapam.h"
#include "access/htup_details.h"
#include "catalog/index.h"
#include "catalog/indexing.h"
#include "catalog/partition.h"
#include "catalog/pg_class.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_inherits.h"
#include "commands/tablecmds.h"
#include "common/string.h"
#include "distributed/citus_nodes.h"
#include "distributed/adaptive_executor.h"

@ -26,13 +28,16 @@
#include "distributed/deparse_shard_query.h"
#include "distributed/listutils.h"
#include "distributed/metadata_utility.h"
#include "distributed/multi_executor.h"
#include "distributed/multi_partitioning_utils.h"
#include "distributed/multi_physical_planner.h"
#include "distributed/relay_utility.h"
#include "distributed/resource_lock.h"
#include "distributed/shardinterval_utils.h"
#include "distributed/version_compat.h"
#include "distributed/worker_protocol.h"
#include "lib/stringinfo.h"
#include "nodes/makefuncs.h"
#include "nodes/pg_list.h"
#include "pgstat.h"
#include "partitioning/partdesc.h"

@ -41,12 +46,22 @@
#include "utils/lsyscache.h"
#include "utils/rel.h"
#include "utils/syscache.h"
#include "utils/varlena.h"

static char * PartitionBound(Oid partitionId);
static Relation try_relation_open_nolock(Oid relationId);
static List * CreateFixPartitionConstraintsTaskList(Oid relationId);
static List * WorkerFixPartitionConstraintCommandList(Oid relationId, uint64 shardId,
													  List *checkConstraintList);
static List * CreateFixPartitionShardIndexNamesTaskList(Oid parentRelationId);
static List * WorkerFixPartitionShardIndexNamesCommandList(uint64 parentShardId,
														   List *indexIdList);
static List * WorkerFixPartitionShardIndexNamesCommandListForParentShardIndex(
	char *qualifiedParentShardIndexName, Oid parentIndexId);
static List * WorkerFixPartitionShardIndexNamesCommandListForPartitionIndex(Oid
																			partitionIndexId,
																			char *
																			qualifiedParentShardIndexName);
static List * CheckConstraintNameListForRelation(Oid relationId);
static bool RelationHasConstraint(Oid relationId, char *constraintName);
static char * RenameConstraintCommand(Oid relationId, char *constraintName,

@ -55,6 +70,8 @@ static char * RenameConstraintCommand(Oid relationId, char *constraintName,

PG_FUNCTION_INFO_V1(fix_pre_citus10_partitioned_table_constraint_names);
PG_FUNCTION_INFO_V1(worker_fix_pre_citus10_partitioned_table_constraint_names);
PG_FUNCTION_INFO_V1(fix_partition_shard_index_names);
PG_FUNCTION_INFO_V1(worker_fix_partition_shard_index_names);


/*

@ -130,6 +147,167 @@ worker_fix_pre_citus10_partitioned_table_constraint_names(PG_FUNCTION_ARGS)
}


/*
 * fix_partition_shard_index_names fixes the index names of shards of partitions of
 * partitioned tables on workers.
 *
 * When running CREATE INDEX on parent_table, we didn't explicitly create the index on
 * each partition as well. Postgres created indexes for partitions in the coordinator,
 * and also in the workers. Actually, Postgres auto-generates index names when auto-creating
 * indexes on each partition shard of the parent shards. If index name is too long, it
 * truncates the name and adds _idx postfix to it. However, when truncating the name, the
 * shardId of the partition shard can be lost. This may result in the same index name used for
 * the partition shell table and one of the partition shards.
 * For more details, check issue #4962 https://github.com/citusdata/citus/issues/4962
 *
 * fix_partition_shard_index_names renames indexes of shards of partition tables to include
 * the shardId at the end of the name, regardless of whether index name was long or short.
 * As a result there will be no index name ending in _idx, rather all will end in _{shardid}.
 * Algorithm is:
 * foreach parentShard in shardListOfParentTableId:
 *  foreach parentIndex on parent:
 *   generate qualifiedParentShardIndexName -> parentShardIndex
 *   foreach inheritedPartitionIndex on parentIndex:
 *    get table relation of inheritedPartitionIndex -> partitionId
 *    foreach partitionShard in shardListOfPartitionid:
 *     generate qualifiedPartitionShardName -> partitionShard
 *     generate newPartitionShardIndexName
 *     (the following happens in the worker node)
 *     foreach inheritedPartitionShardIndex on parentShardIndex:
 *      if table relation of inheritedPartitionShardIndex is partitionShard:
 *       if inheritedPartitionShardIndex does not have proper name:
 *        Rename(inheritedPartitionShardIndex, newPartitionShardIndexName)
 *       break
 */
Datum
fix_partition_shard_index_names(PG_FUNCTION_ARGS)
{
	CheckCitusVersion(ERROR);
	EnsureCoordinator();

	Oid relationId = PG_GETARG_OID(0);

	Relation relation = try_relation_open(relationId, AccessExclusiveLock);

	if (relation == NULL)
	{
		ereport(NOTICE, (errmsg("relation with OID %u does not exist, skipping",
								relationId)));
		PG_RETURN_VOID();
	}

	if (relation->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
	{
		relation_close(relation, NoLock);
		ereport(ERROR, (errmsg(
							"Fixing shard index names is only applicable to partitioned"
							" tables, and \"%s\" is not a partitioned table",
							RelationGetRelationName(relation))));
	}

	if (!IsCitusTable(relationId))
	{
		relation_close(relation, NoLock);
		ereport(ERROR, (errmsg("fix_partition_shard_index_names can "
							   "only be called for distributed partitioned tables")));
	}

	EnsureTableOwner(relationId);

	List *taskList = CreateFixPartitionShardIndexNamesTaskList(relationId);

	/* do not do anything if there are no index names to fix */
	if (taskList != NIL)
	{
		bool localExecutionSupported = true;
		RowModifyLevel modLevel = ROW_MODIFY_NONE;
		ExecutionParams *execParams = CreateBasicExecutionParams(modLevel, taskList,
																 MaxAdaptiveExecutorPoolSize,
																 localExecutionSupported);
		ExecuteTaskListExtended(execParams);
	}

	relation_close(relation, NoLock);

	PG_RETURN_VOID();
}


/*
 * worker_fix_partition_shard_index_names fixes the index name of the index on given
 * partition shard that has parent the given parent index.
 * The parent index should be the index of a shard of a distributed partitioned table.
 */
Datum
worker_fix_partition_shard_index_names(PG_FUNCTION_ARGS)
{
	Oid parentShardIndexId = PG_GETARG_OID(0);

	text *partitionShardName = PG_GETARG_TEXT_P(1);

	/* resolve partitionShardId from passed in schema and partition shard name */
	List *partitionShardNameList = textToQualifiedNameList(partitionShardName);
	RangeVar *partitionShard = makeRangeVarFromNameList(partitionShardNameList);

	/* lock the relation with the lock mode */
	bool missing_ok = true;
	Oid partitionShardId = RangeVarGetRelid(partitionShard, NoLock, missing_ok);

	if (!OidIsValid(partitionShardId))
	{
		PG_RETURN_VOID();
	}

	CheckCitusVersion(ERROR);
	EnsureTableOwner(partitionShardId);

	text *newPartitionShardIndexNameText = PG_GETARG_TEXT_P(2);
	char *newPartitionShardIndexName = text_to_cstring(
		newPartitionShardIndexNameText);

	if (!has_subclass(parentShardIndexId))
	{
		ereport(ERROR, (errmsg("could not fix child index names: "
							   "index is not partitioned")));
	}

	List *partitionShardIndexIds = find_inheritance_children(parentShardIndexId,
															  ShareRowExclusiveLock);
	Oid partitionShardIndexId = InvalidOid;
	foreach_oid(partitionShardIndexId, partitionShardIndexIds)
	{
		if (IndexGetRelation(partitionShardIndexId, false) == partitionShardId)
		{
			char *partitionShardIndexName = get_rel_name(partitionShardIndexId);
			if (ExtractShardIdFromTableName(partitionShardIndexName, missing_ok) ==
				INVALID_SHARD_ID)
			{
				/*
				 * ExtractShardIdFromTableName will return INVALID_SHARD_ID if
				 * partitionShardIndexName doesn't end in _shardid. In that case,
				 * we want to rename this partition shard index to newPartitionShardIndexName,
				 * which ends in _shardid, hence we maintain naming consistency:
				 * we can reach this partition shard index by conventional Citus naming
				 */
				RenameStmt *stmt = makeNode(RenameStmt);

				stmt->renameType = OBJECT_INDEX;
				stmt->missing_ok = false;
				char *idxNamespace = get_namespace_name(get_rel_namespace(
															partitionShardIndexId));
				stmt->relation = makeRangeVar(idxNamespace, partitionShardIndexName, -1);
				stmt->newname = newPartitionShardIndexName;

				RenameRelation(stmt);
			}
			break;
		}
	}

	PG_RETURN_VOID();
}


/*
 * CreateFixPartitionConstraintsTaskList goes over all the partitions of a distributed
 * partitioned table, and creates the list of tasks to execute

@ -257,6 +435,199 @@ WorkerFixPartitionConstraintCommandList(Oid relationId, uint64 shardId,
}


/*
 * CreateFixPartitionShardIndexNamesTaskList goes over all the indexes of a distributed
 * partitioned table, and creates the list of tasks to execute
 * worker_fix_partition_shard_index_names UDF on worker nodes.
 *
 * We create parent_table_shard_count tasks,
 * each task with parent_indexes_count x parent_partitions_count query strings.
 */
static List *
CreateFixPartitionShardIndexNamesTaskList(Oid parentRelationId)
{
	List *taskList = NIL;

	/* enumerate the tasks when putting them to the taskList */
	int taskId = 1;

	Relation parentRelation = RelationIdGetRelation(parentRelationId);

	List *parentIndexIdList = RelationGetIndexList(parentRelation);

	/* early exit if the parent relation does not have any indexes */
	if (parentIndexIdList == NIL)
	{
		RelationClose(parentRelation);
		return NIL;
	}

	List *partitionList = PartitionList(parentRelationId);

	/* early exit if the parent relation does not have any partitions */
	if (partitionList == NIL)
	{
		RelationClose(parentRelation);
		return NIL;
	}

	List *parentShardIntervalList = LoadShardIntervalList(parentRelationId);

	/* lock metadata before getting placement lists */
	LockShardListMetadata(parentShardIntervalList, ShareLock);
	Oid partitionId = InvalidOid;
	foreach_oid(partitionId, partitionList)
	{
		List *partitionShardIntervalList = LoadShardIntervalList(partitionId);
		LockShardListMetadata(partitionShardIntervalList, ShareLock);
	}

	ShardInterval *parentShardInterval = NULL;
	foreach_ptr(parentShardInterval, parentShardIntervalList)
	{
		uint64 parentShardId = parentShardInterval->shardId;

		List *queryStringList = WorkerFixPartitionShardIndexNamesCommandList(
			parentShardId, parentIndexIdList);

		Task *task = CitusMakeNode(Task);
		task->jobId = INVALID_JOB_ID;
		task->taskId = taskId++;

		task->taskType = DDL_TASK;
		SetTaskQueryStringList(task, queryStringList);
		task->dependentTaskList = NULL;
		task->replicationModel = REPLICATION_MODEL_INVALID;
		task->anchorShardId = parentShardId;
		task->taskPlacementList = ActiveShardPlacementList(parentShardId);

		taskList = lappend(taskList, task);
	}

	RelationClose(parentRelation);

	return taskList;
}


/*
 * WorkerFixPartitionShardIndexNamesCommandList creates a list of queries that will fix
 * all child index names of parent indexes on given shard of parent partitioned table.
 */
static List *
WorkerFixPartitionShardIndexNamesCommandList(uint64 parentShardId,
											 List *parentIndexIdList)
{
	List *commandList = NIL;
	Oid parentIndexId = InvalidOid;
	foreach_oid(parentIndexId, parentIndexIdList)
	{
		if (!has_subclass(parentIndexId))
		{
			continue;
		}

		/*
		 * Get the qualified name of the corresponding index of given parent index
		 * in the parent shard with given parentShardId
		 */
		char *parentIndexName = get_rel_name(parentIndexId);
		char *parentShardIndexName = pstrdup(parentIndexName);
		AppendShardIdToName(&parentShardIndexName, parentShardId);
		Oid schemaId = get_rel_namespace(parentIndexId);
		char *schemaName = get_namespace_name(schemaId);
		char *qualifiedParentShardIndexName = quote_qualified_identifier(schemaName,
																		 parentShardIndexName);
		List *commands = WorkerFixPartitionShardIndexNamesCommandListForParentShardIndex(
			qualifiedParentShardIndexName, parentIndexId);
		commandList = list_concat(commandList, commands);
	}

	return commandList;
}


/*
 * WorkerFixPartitionShardIndexNamesCommandListForParentShardIndex creates a list of queries that will fix
 * all child index names of given index on shard of parent partitioned table.
 */
static List *
WorkerFixPartitionShardIndexNamesCommandListForParentShardIndex(
	char *qualifiedParentShardIndexName, Oid parentIndexId)
{
	List *commandList = NIL;

	/*
	 * Get the list of all partition indexes that are children of current
	 * index on parent
	 */
	List *partitionIndexIds = find_inheritance_children(parentIndexId,
														ShareRowExclusiveLock);
	Oid partitionIndexId = InvalidOid;
	foreach_oid(partitionIndexId, partitionIndexIds)
	{
		List *commands = WorkerFixPartitionShardIndexNamesCommandListForPartitionIndex(
			partitionIndexId, qualifiedParentShardIndexName);
		commandList = list_concat(commandList, commands);
	}
	return commandList;
}


/*
 * WorkerFixPartitionShardIndexNamesCommandListForPartitionIndex creates a list of queries that will fix
 * all child index names of given index on shard of parent partitioned table, whose table relation is a shard
 * of the partition that is the table relation of given partitionIndexId
 */
static List *
WorkerFixPartitionShardIndexNamesCommandListForPartitionIndex(Oid partitionIndexId,
															  char *
															  qualifiedParentShardIndexName)
{
	List *commandList = NIL;

	/* get info for this partition relation of this index */
	char *partitionIndexName = get_rel_name(partitionIndexId);
	Oid partitionId = IndexGetRelation(partitionIndexId, false);
	char *partitionName = get_rel_name(partitionId);
	char *partitionSchemaName = get_namespace_name(get_rel_namespace(partitionId));
	List *partitionShardIntervalList = LoadShardIntervalList(partitionId);

	ShardInterval *partitionShardInterval = NULL;
	foreach_ptr(partitionShardInterval, partitionShardIntervalList)
	{
		/*
		 * Prepare commands for each shard of current partition
		 * to fix the index name that corresponds to the
		 * current parent index name
		 */
		uint64 partitionShardId = partitionShardInterval->shardId;

		/* get qualified partition shard name */
		char *partitionShardName = pstrdup(partitionName);
		AppendShardIdToName(&partitionShardName, partitionShardId);
		char *qualifiedPartitionShardName = quote_qualified_identifier(
			partitionSchemaName,
			partitionShardName);

		/* generate the new correct index name */
		char *newPartitionShardIndexName = pstrdup(partitionIndexName);
		AppendShardIdToName(&newPartitionShardIndexName, partitionShardId);

		/* create worker_fix_partition_shard_index_names command */
		StringInfo shardQueryString = makeStringInfo();
		appendStringInfo(shardQueryString,
						 "SELECT worker_fix_partition_shard_index_names(%s::regclass, %s, %s)",
						 quote_literal_cstr(qualifiedParentShardIndexName),
						 quote_literal_cstr(qualifiedPartitionShardName),
						 quote_literal_cstr(newPartitionShardIndexName));
		commandList = lappend(commandList, shardQueryString->data);
	}

	return commandList;
}


/*
 * RelationHasConstraint checks if a relation has a constraint with a given name.
 */

@ -183,20 +183,49 @@ lock_shard_resources(PG_FUNCTION_ARGS)
	int shardIdCount = ArrayObjectCount(shardIdArrayObject);
	Datum *shardIdArrayDatum = DeconstructArrayObject(shardIdArrayObject);

	/*
	 * The executor calls this UDF for modification queries. So, any user
	 * who has the rights to modify this table is actually able
	 * to call the UDF.
	 *
	 * So, at this point, we make sure that any malicious user who doesn't
	 * have modification privileges cannot call this UDF.
	 *
	 * Update/Delete/Truncate commands already acquire ExclusiveLock
	 * on the executor. However, for INSERTs, the user might have only
	 * INSERTs granted, so add a special case for it.
	 */
	AclMode aclMask = ACL_UPDATE | ACL_DELETE | ACL_TRUNCATE;
	if (lockMode == RowExclusiveLock)
	{
		aclMask |= ACL_INSERT;
	}

	for (int shardIdIndex = 0; shardIdIndex < shardIdCount; shardIdIndex++)
	{
		int64 shardId = DatumGetInt64(shardIdArrayDatum[shardIdIndex]);

		/*
		 * We don't want random users to block writes. The callers of this
		 * function either operates on all the colocated placements, such
		 * as shard moves, or requires superuser such as adding node.
		 * In other words, the coordinator initiated operations has already
		 * ensured table owner, we are preventing any malicious attempt to
		 * use this function.
		 * We don't want random users to block writes. If the current user
		 * has privileges to modify the shard, then the user can already
		 * acquire the lock. So, we allow.
		 */
		bool missingOk = true;
		EnsureShardOwner(shardId, missingOk);
		Oid relationId = LookupShardRelationFromCatalog(shardId, missingOk);

		if (!OidIsValid(relationId) && missingOk)
		{
			/*
			 * This could happen in two ways. First, a malicious user is trying
			 * to acquire locks on non-existing shards. Second, the metadata has
			 * not been synced (or not yet visible) to this node. In the second
			 * case, there is no point in locking the shards because no other
			 * transaction can be accessing the table.
			 */
			continue;
		}

		EnsureTablePermissions(relationId, aclMask);

		LockShardResource(shardId, lockMode);
	}
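
A hedged sketch of the permission model this enforces, assuming the SQL-level signature lock_shard_resources(lock_mode int, shard_id bigint[]); the role, table, and shard id are hypothetical, and 3 stands for RowExclusiveLock in PostgreSQL's lock-level numbering:

-- a role holding only INSERT on the distributed table can still take the
-- shard lock, per the ACL_INSERT special case above
GRANT INSERT ON events TO app_user;
SET ROLE app_user;
SELECT lock_shard_resources(3, ARRAY[102008]);
RESET ROLE;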

@ -297,7 +297,7 @@ FindShardIntervalIndex(Datum searchedValue, CitusTableCacheEntry *cacheEntry)
	ShardInterval **shardIntervalCache = cacheEntry->sortedShardIntervalArray;
	int shardCount = cacheEntry->shardIntervalArrayLength;
	FmgrInfo *compareFunction = cacheEntry->shardIntervalCompareFunction;
	bool useBinarySearch = (IsCitusTableTypeCacheEntry(cacheEntry, HASH_DISTRIBUTED) ||
	bool useBinarySearch = (!IsCitusTableTypeCacheEntry(cacheEntry, HASH_DISTRIBUTED) ||
							!cacheEntry->hasUniformHashDistribution);
	int shardIndex = INVALID_SHARD_INDEX;

@ -61,7 +61,7 @@ WrapCreateOrReplace(const char *sql)
 * have this functionality or where their implementation is not sufficient.
 *
 * Besides checking if an object of said name exists it tries to compare the object to be
 * created with the one in the local catalog. If there is a difference the on in the local
 * created with the one in the local catalog. If there is a difference the one in the local
 * catalog will be renamed after which the statement can be executed on this worker to
 * create the object.
 *

@ -38,7 +38,7 @@
#undef HAVE_LIBCURL

/* Define to 1 if you have the `lz4' library (-llz4). */
#undef HAVE_LIBLZ4
#undef HAVE_CITUS_LIBLZ4

/* Define to 1 if you have the `zstd' library (-lzstd). */
#undef HAVE_LIBZSTD

@ -25,7 +25,7 @@
#undef HAVE_LIBCURL

/* Define to 1 if you have the `liblz4' library (-llz4). */
#undef HAVE_LIBLZ4
#undef HAVE_CITUS_LIBLZ4

/* Define to 1 if you have the `libzstd' library (-lzstd). */
#undef HAVE_LIBZSTD

@ -16,6 +16,7 @@
#include "distributed/transaction_management.h"
#include "distributed/remote_transaction.h"
#include "lib/ilist.h"
#include "pg_config.h"
#include "portability/instr_time.h"
#include "utils/guc.h"
#include "utils/hsearch.h"

@ -264,5 +265,7 @@ extern void MarkConnectionConnected(MultiConnection *connection);
extern double MillisecondsPassedSince(instr_time moment);
extern long MillisecondsToTimeout(instr_time start, long msAfterStart);

#if PG_VERSION_NUM < 140000
extern void WarmUpConnParamsHash(void);
#endif
#endif /* CONNECTION_MANAGMENT_H */

@ -29,6 +29,7 @@ extern void SetTaskQueryStringList(Task *task, List *queryStringList);
extern char * TaskQueryString(Task *task);
extern char * TaskQueryStringAtIndex(Task *task, int index);
extern int GetTaskQueryType(Task *task);
extern void AddInsertAliasIfNeeded(Query *query);


#endif /* DEPARSE_SHARD_QUERY_H */

@ -291,7 +291,7 @@ extern bool GetNodeDiskSpaceStatsForConnection(MultiConnection *connection,
											   uint64 *availableBytes,
											   uint64 *totalBytes);
extern void ExecuteQueryViaSPI(char *query, int SPIOK);
extern void EnsureSequenceTypeSupported(Oid seqOid, Oid seqTypId);
extern void EnsureSequenceTypeSupported(Oid seqOid, Oid attributeTypeId);
extern void AlterSequenceType(Oid seqOid, Oid typeOid);
extern void MarkSequenceListDistributedAndPropagateDependencies(List *sequenceList);
extern void MarkSequenceDistributedAndPropagateDependencies(Oid sequenceOid);

@ -1 +1,5 @@
test: upgrade_basic_after upgrade_columnar_after upgrade_type_after upgrade_ref2ref_after upgrade_distributed_function_after upgrade_rebalance_strategy_after upgrade_list_citus_objects
test: upgrade_basic_after upgrade_type_after upgrade_ref2ref_after upgrade_distributed_function_after upgrade_rebalance_strategy_after upgrade_list_citus_objects

# This attempts dropping citus extension (and rollbacks), so please do
# not run in parallel with any other tests.
test: upgrade_columnar_after

@ -2,7 +2,10 @@
test: multi_test_helpers multi_test_helpers_superuser
test: multi_test_catalog_views
test: upgrade_basic_before
test: upgrade_columnar_before
test: upgrade_ref2ref_before
test: upgrade_type_before
test: upgrade_distributed_function_before upgrade_rebalance_strategy_before

# upgrade_columnar_before renames public schema to citus_schema, so let's
# run this test as the last one.
test: upgrade_columnar_before

@ -246,3 +246,6 @@ s/TRIM\(BOTH FROM value\)/btrim\(value\)/g
s/pg14\.idx.*/pg14\.xxxxx/g

s/CREATE TABLESPACE test_tablespace LOCATION.*/CREATE TABLESPACE test_tablespace LOCATION XXXX/g

# columnar log for var correlation
s/(.*absolute correlation \()([0,1]\.[0-9]+)(\) of var attribute [0-9]+ is smaller than.*)/\1X\.YZ\3/g

@ -712,6 +712,19 @@ select array_agg(val order by valf) from aggdata;
 {0,NULL,2,3,5,2,4,NULL,NULL,8,NULL}
(1 row)

-- test by using some other node types as arguments to agg
select key, percentile_cont((key - (key > 4)::int) / 10.0) within group(order by val) from aggdata group by key;
 key | percentile_cont
---------------------------------------------------------------------
   1 |               2
   2 |             2.4
   3 |               4
   5 |
   6 |
   7 |               8
   9 |               0
(7 rows)

-- Test TransformSubqueryNode
select * FROM (
    SELECT key, mode() within group (order by floor(agg1.val/2)) m from aggdata agg1

@ -932,5 +945,100 @@ SELECT square_func(5), a, count(a) FROM t1 GROUP BY a;
ERROR:  function aggregate_support.square_func(integer) does not exist
HINT:  No function matches the given name and argument types. You might need to add explicit type casts.
CONTEXT:  while executing command on localhost:xxxxx
-- Test the cases where the worker agg exec. returns no tuples.
CREATE TABLE dist_table (dist_col int, agg_col numeric);
SELECT create_distributed_table('dist_table', 'dist_col');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

CREATE TABLE ref_table (int_col int);
SELECT create_reference_table('ref_table');
 create_reference_table
---------------------------------------------------------------------

(1 row)

SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col)
FROM dist_table
LEFT JOIN ref_table ON TRUE;
 percentile_disc
---------------------------------------------------------------------

(1 row)

SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col)
FROM (SELECT *, random() FROM dist_table) a;
 percentile_disc
---------------------------------------------------------------------

(1 row)

SELECT PERCENTILE_DISC((2 > random())::int::numeric / 10) WITHIN GROUP (ORDER BY agg_col)
FROM dist_table
LEFT JOIN ref_table ON TRUE;
 percentile_disc
---------------------------------------------------------------------

(1 row)

SELECT SUM(COALESCE(agg_col, 3))
FROM dist_table
LEFT JOIN ref_table ON TRUE;
 sum
---------------------------------------------------------------------

(1 row)

SELECT AVG(COALESCE(agg_col, 10))
FROM dist_table
LEFT JOIN ref_table ON TRUE;
 avg
---------------------------------------------------------------------

(1 row)

insert into dist_table values (2, 11.2), (3, NULL), (6, 3.22), (3, 4.23), (5, 5.25), (4, 63.4), (75, NULL), (80, NULL), (96, NULL), (8, 1078), (0, 1.19);
-- run the same queries after loading some data
SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col)
FROM dist_table
LEFT JOIN ref_table ON TRUE;
 percentile_disc
---------------------------------------------------------------------
            3.22
(1 row)

SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col)
FROM (SELECT *, random() FROM dist_table) a;
 percentile_disc
---------------------------------------------------------------------
            3.22
(1 row)

SELECT PERCENTILE_DISC((2 > random())::int::numeric / 10) WITHIN GROUP (ORDER BY agg_col)
FROM dist_table
LEFT JOIN ref_table ON TRUE;
 percentile_disc
---------------------------------------------------------------------
            1.19
(1 row)

SELECT floor(SUM(COALESCE(agg_col, 3)))
FROM dist_table
LEFT JOIN ref_table ON TRUE;
 floor
---------------------------------------------------------------------
  1178
(1 row)

SELECT floor(AVG(COALESCE(agg_col, 10)))
FROM dist_table
LEFT JOIN ref_table ON TRUE;
 floor
---------------------------------------------------------------------
   109
(1 row)

set client_min_messages to error;
drop schema aggregate_support cascade;
@@ -645,7 +645,7 @@ alter table coltest add column x5 int default (random()*20000)::int;
analyze coltest;
-- test that expressions on whole-row references are not pushed down
select * from coltest where coltest = (1,1,1,1);
NOTICE: columnar planner: cannot push down clause: var is whole-row reference
NOTICE: columnar planner: cannot push down clause: var is whole-row reference or system column
NOTICE: columnar planner: adding CustomScan path for coltest
DETAIL: unparameterized; 0 clauses pushed down
 id | x1 | x2 | x3 | x5

@@ -655,7 +655,7 @@ DETAIL: unparameterized; 0 clauses pushed down
-- test that expressions on uncorrelated attributes are not pushed down
set columnar.qual_pushdown_correlation to default;
select * from coltest where x5 = 23484;
NOTICE: columnar planner: cannot push down clause: var attribute 5 is uncorrelated
NOTICE: columnar planner: cannot push down clause: absolute correlation (X.YZ) of var attribute 5 is smaller than the value configured in "columnar.qual_pushdown_correlation_threshold" (0.900)
NOTICE: columnar planner: adding CustomScan path for coltest
DETAIL: unparameterized; 0 clauses pushed down
 id | x1 | x2 | x3 | x5
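The second NOTICE above is gated by a GUC: quals on a column whose physical-order correlation statistic falls below columnar.qual_pushdown_correlation_threshold (0.900 by default, per the message) are not pushed down, since chunk min/max ranges overlap heavily on physically unordered data. A sketch of how to inspect the inputs to that decision (assumes the coltest table above and fresh statistics; whether the planner reads exactly pg_stats.correlation is an assumption):

SHOW columnar.qual_pushdown_correlation_threshold;
-- pg_stats.correlation is the per-column physical-vs-logical ordering statistic
-- that the threshold is compared against:
SELECT attname, correlation FROM pg_stats WHERE tablename = 'coltest';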
@@ -819,3 +819,250 @@ select * from numrange_test natural join numrange_test2 order by nr;

DROP TABLE atest1, atest2, t1, t2, t3, numrange_test, numrange_test2;
set default_table_access_method to default;
set columnar.planner_debug_level to notice;
BEGIN;
SET LOCAL columnar.stripe_row_limit = 2000;
SET LOCAL columnar.chunk_group_row_limit = 1000;
create table pushdown_test (a int, b int) using columnar;
insert into pushdown_test values (generate_series(1, 200000));
COMMIT;
SET columnar.max_custom_scan_paths TO 50;
SET columnar.qual_pushdown_correlation_threshold TO 0.0;
EXPLAIN (analyze on, costs off, timing off, summary off)
SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556;
NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 1 clauses pushed down
 QUERY PLAN
---------------------------------------------------------------------
 Aggregate (actual rows=1 loops=1)
   ->  Custom Scan (ColumnarScan) on pushdown_test (actual rows=2 loops=1)
         Filter: ((a = 204356) OR (a = 104356) OR (a = 76556))
         Rows Removed by Filter: 1998
         Columnar Projected Columns: a
         Columnar Chunk Group Filters: ((a = 204356) OR (a = 104356) OR (a = 76556))
         Columnar Chunk Groups Removed by Filter: 198
(7 rows)

SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556;
NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 1 clauses pushed down
 sum
---------------------------------------------------------------------
 180912
(1 row)

EXPLAIN (analyze on, costs off, timing off, summary off)
SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556;
NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 1 clauses pushed down
 QUERY PLAN
---------------------------------------------------------------------
 Aggregate (actual rows=1 loops=1)
   ->  Custom Scan (ColumnarScan) on pushdown_test (actual rows=3 loops=1)
         Filter: ((a = 194356) OR (a = 104356) OR (a = 76556))
         Rows Removed by Filter: 2997
         Columnar Projected Columns: a
         Columnar Chunk Group Filters: ((a = 194356) OR (a = 104356) OR (a = 76556))
         Columnar Chunk Groups Removed by Filter: 197
(7 rows)

SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556;
NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 1 clauses pushed down
 sum
---------------------------------------------------------------------
 375268
(1 row)

EXPLAIN (analyze on, costs off, timing off, summary off)
SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a > a*-1 + b;
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel
NOTICE: columnar planner: cannot push down clause: all arguments of an OR expression must be pushdownable but one of them was not, due to the reason given above
NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 0 clauses pushed down
 QUERY PLAN
---------------------------------------------------------------------
 Aggregate (actual rows=1 loops=1)
   ->  Custom Scan (ColumnarScan) on pushdown_test (actual rows=0 loops=1)
         Filter: ((a = 204356) OR (a > ((a * '-1'::integer) + b)))
         Rows Removed by Filter: 200000
         Columnar Projected Columns: a, b
(5 rows)

EXPLAIN (analyze on, costs off, timing off, summary off)
SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000);
NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 1 clauses pushed down
 QUERY PLAN
---------------------------------------------------------------------
 Aggregate (actual rows=1 loops=1)
   ->  Custom Scan (ColumnarScan) on pushdown_test (actual rows=38998 loops=1)
         Filter: (((a > 1000) AND (a < 10000)) OR ((a > 20000) AND (a < 50000)))
         Rows Removed by Filter: 2
         Columnar Projected Columns: a
         Columnar Chunk Group Filters: (((a > 1000) AND (a < 10000)) OR ((a > 20000) AND (a < 50000)))
         Columnar Chunk Groups Removed by Filter: 161
(7 rows)

SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000);
NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 1 clauses pushed down
 sum
---------------------------------------------------------------------
 1099459500
(1 row)

EXPLAIN (analyze on, costs off, timing off, summary off)
SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100);
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel
NOTICE: columnar planner: cannot push down clause: none of the arguments were pushdownable, due to the reason(s) given above
NOTICE: columnar planner: cannot push down clause: all arguments of an OR expression must be pushdownable but one of them was not, due to the reason given above
NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 0 clauses pushed down
 QUERY PLAN
---------------------------------------------------------------------
 Aggregate (actual rows=1 loops=1)
   ->  Custom Scan (ColumnarScan) on pushdown_test (actual rows=200000 loops=1)
         Filter: ((((a)::double precision > random()) AND (a < (2 * a))) OR (a > 100))
         Columnar Projected Columns: a
(4 rows)

SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100);
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel
NOTICE: columnar planner: cannot push down clause: none of the arguments were pushdownable, due to the reason(s) given above
NOTICE: columnar planner: cannot push down clause: all arguments of an OR expression must be pushdownable but one of them was not, due to the reason given above
NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 0 clauses pushed down
 sum
---------------------------------------------------------------------
 20000100000
(1 row)

EXPLAIN (analyze on, costs off, timing off, summary off)
SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010);
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel
NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 1 clauses pushed down
 QUERY PLAN
---------------------------------------------------------------------
 Aggregate (actual rows=1 loops=1)
   ->  Custom Scan (ColumnarScan) on pushdown_test (actual rows=3010 loops=1)
         Filter: ((((a)::double precision > random()) AND (a <= 2000)) OR (a > 198990))
         Rows Removed by Filter: 990
         Columnar Projected Columns: a
         Columnar Chunk Group Filters: ((a <= 2000) OR (a > 198990))
         Columnar Chunk Groups Removed by Filter: 196
(7 rows)

SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010);
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel
NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 1 clauses pushed down
 sum
---------------------------------------------------------------------
 203491455
(1 row)

EXPLAIN (analyze on, costs off, timing off, summary off)
SELECT sum(a) FROM pushdown_test where
(
 a > random()
 and
 (
  (a < 200 and a not in (select a from pushdown_test)) or
  (a > 1000 and a < 2000)
 )
)
or
(a > 200000-2010);
NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 0 clauses pushed down
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel
NOTICE: columnar planner: cannot push down clause: must not contain a subplan
NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 1 clauses pushed down
 QUERY PLAN
---------------------------------------------------------------------
 Aggregate (actual rows=1 loops=1)
   ->  Custom Scan (ColumnarScan) on pushdown_test (actual rows=3009 loops=1)
         Filter: ((((a)::double precision > random()) AND (((a < 200) AND (NOT (SubPlan 1))) OR ((a > 1000) AND (a < 2000)))) OR (a > 197990))
         Rows Removed by Filter: 1991
         Columnar Projected Columns: a
         Columnar Chunk Group Filters: (((a < 200) OR ((a > 1000) AND (a < 2000))) OR (a > 197990))
         Columnar Chunk Groups Removed by Filter: 195
         SubPlan 1
           ->  Materialize (actual rows=100 loops=199)
                 ->  Custom Scan (ColumnarScan) on pushdown_test pushdown_test_1 (actual rows=199 loops=1)
                       Columnar Projected Columns: a
(11 rows)

SELECT sum(a) FROM pushdown_test where
(
 a > random()
 and
 (
  (a < 200 and a not in (select a from pushdown_test)) or
  (a > 1000 and a < 2000)
 )
)
or
(a > 200000-2010);
NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 0 clauses pushed down
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel
NOTICE: columnar planner: cannot push down clause: must not contain a subplan
NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 1 clauses pushed down
 sum
---------------------------------------------------------------------
 401479455
(1 row)

create function stable_1(arg int) returns int language plpgsql STRICT IMMUTABLE as
$$ BEGIN RETURN 1+arg; END; $$;
EXPLAIN (analyze on, costs off, timing off, summary off)
SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000));
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel
NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 1 clauses pushed down
 QUERY PLAN
---------------------------------------------------------------------
 Aggregate (actual rows=1 loops=1)
   ->  Custom Scan (ColumnarScan) on pushdown_test (actual rows=0 loops=1)
         Filter: ((a < 6001) AND ((a)::double precision = random()) AND (a < stable_1(a)))
         Rows Removed by Filter: 6000
         Columnar Projected Columns: a
         Columnar Chunk Group Filters: (a < 6001)
         Columnar Chunk Groups Removed by Filter: 194
(7 rows)

SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000));
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel
NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 1 clauses pushed down
 sum
---------------------------------------------------------------------

(1 row)

RESET columnar.max_custom_scan_paths;
RESET columnar.qual_pushdown_correlation_threshold;
RESET columnar.planner_debug_level;
DROP TABLE pushdown_test;
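A note on the chunk-group arithmetic in the plans above: pushdown_test holds 200,000 rows and was loaded with columnar.chunk_group_row_limit = 1000, so it contains 200 chunk groups of 1,000 rows each. A pushed-down filter matching rows in only two groups therefore reports 198 groups removed, and each surviving group still contributes its non-matching rows to "Rows Removed by Filter" (2 x 1000 - 2 = 1998 in the first plan). The arithmetic as a runnable check (plain SQL, nothing Citus-specific):

SELECT 200000 / 1000 AS total_chunk_groups,       -- 200 groups of 1000 rows
       200000 / 1000 - 2 AS removed_when_two_match; -- 198, as in the first plan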
@@ -704,5 +704,90 @@ begin;
insert into uniq select generate_series(1,100);
ERROR: cannot read from index when there is unflushed data in upper transactions
rollback;
-- Show that we nicely ignore index deletion requests made to columnarAM.
--
-- An INSERT command might trigger index deletion if the index already has
-- dead entries for the key we are about to insert.
-- There are two kinds of index deletion:
-- a) simple deletion
-- b) bottom-up deletion (>= pg14)
--
-- Since columnar_index_fetch_tuple never sets all_dead to true, columnarAM
-- doesn't expect to receive simple deletion, as we don't mark any index
-- entries as dead.
-- Otherwise, columnarAM would throw an error for all six test cases below.
--
-- However, since columnarAM doesn't delete any dead entries via simple
-- deletion, postgres might ask for a more comprehensive deletion (bottom-up)
-- at some point when pg >= 14.
-- For this reason, all six test cases below will certainly trigger
-- bottom-up deletion. Show that we gracefully ignore such requests.
CREATE TABLE index_tuple_delete (a int UNIQUE) USING COLUMNAR;
ALTER TABLE index_tuple_delete SET (autovacuum_enabled = false);
BEGIN;
-- i) rollback before flushing
INSERT INTO index_tuple_delete SELECT i FROM generate_series(0,10000)i;
ROLLBACK;
-- index deletion test-1
BEGIN;
INSERT INTO index_tuple_delete SELECT i FROM generate_series(0,10000)i;
ROLLBACK;
COPY index_tuple_delete FROM PROGRAM 'seq 10000';
TRUNCATE index_tuple_delete;
BEGIN;
-- ii) rollback after flushing
INSERT INTO index_tuple_delete SELECT i FROM generate_series(0,10000)i;
SELECT SUM(a) > 0 FROM index_tuple_delete;
 ?column?
---------------------------------------------------------------------
 t
(1 row)

ROLLBACK;
-- index deletion test-2
BEGIN;
INSERT INTO index_tuple_delete SELECT i FROM generate_series(0,10000)i;
ROLLBACK;
COPY index_tuple_delete FROM PROGRAM 'seq 10000';
TRUNCATE index_tuple_delete;
BEGIN;
-- iii) rollback before flushing, use savepoint
SAVEPOINT sp1;
INSERT INTO index_tuple_delete SELECT i FROM generate_series(0,10000)i;
ROLLBACK TO sp1;
-- index deletion test-3
SAVEPOINT sp2;
INSERT INTO index_tuple_delete SELECT i FROM generate_series(0,10000)i;
ROLLBACK TO sp2;
COPY index_tuple_delete FROM PROGRAM 'seq 10000';
ROLLBACK;
-- index deletion test-4
BEGIN;
INSERT INTO index_tuple_delete SELECT i FROM generate_series(0,10000)i;
ROLLBACK;
COPY index_tuple_delete FROM PROGRAM 'seq 10000';
TRUNCATE index_tuple_delete;
BEGIN;
-- iv) rollback after flushing, use savepoint
SAVEPOINT sp1;
INSERT INTO index_tuple_delete SELECT i FROM generate_series(0,10000)i;
SELECT SUM(a) > 0 FROM index_tuple_delete;
 ?column?
---------------------------------------------------------------------
 t
(1 row)

ROLLBACK TO sp1;
-- index deletion test-5
SAVEPOINT sp2;
INSERT INTO index_tuple_delete SELECT i FROM generate_series(0,10000)i;
ROLLBACK TO sp2;
COPY index_tuple_delete FROM PROGRAM 'seq 10000';
ROLLBACK;
-- index deletion test-6
BEGIN;
INSERT INTO index_tuple_delete SELECT i FROM generate_series(0,10000)i;
ROLLBACK;
COPY index_tuple_delete FROM PROGRAM 'seq 10000';
SET client_min_messages TO WARNING;
DROP SCHEMA columnar_indexes CASCADE;
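The shape shared by all six cases above, condensed into a standalone sketch (my reading of the test, not part of it): a rolled-back INSERT leaves entries for keys 0..10000 dead in the unique index, and re-inserting the same keys via COPY makes the btree code ask the table AM to confirm and delete those dead entries, which columnarAM skips rather than erroring on:

CREATE TABLE t (a int UNIQUE) USING columnar;
BEGIN;
INSERT INTO t SELECT generate_series(0, 10000);  -- writes index entries...
ROLLBACK;                                        -- ...then leaves them dead
COPY t FROM PROGRAM 'seq 10000';  -- same keys again: triggers index deletion
                                  -- (bottom-up on pg >= 14), which columnarAM
                                  -- gracefully ignores
DROP TABLE t;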
@@ -291,6 +291,20 @@ BEGIN;
(1 row)

ROLLBACK;
CREATE OR REPLACE FUNCTION test_columnar_storage_write_new_page(relation regclass) RETURNS void
STRICT LANGUAGE c AS 'citus', 'test_columnar_storage_write_new_page';
CREATE TABLE aborted_write (a int, b int) USING columnar;
SELECT test_columnar_storage_write_new_page('aborted_write');
 test_columnar_storage_write_new_page
---------------------------------------------------------------------

(1 row)

SET client_min_messages TO DEBUG4;
INSERT INTO aborted_write VALUES (5);
DEBUG: Flushing Stripe of size 1
DEBUG: overwriting page 2
DETAIL: This can happen after a roll-back.
RESET search_path;
SET client_min_messages TO WARNING;
DROP SCHEMA columnar_insert CASCADE;
@@ -832,6 +832,266 @@ SELECT * FROM test ORDER BY id;
(2 rows)

DROP TABLE test;
-- verify that recreating distributed functions with TABLE params gets propagated to workers
CREATE OR REPLACE FUNCTION func_with_return_table(int)
RETURNS TABLE (date date)
LANGUAGE plpgsql AS $$
BEGIN
    RETURN query SELECT '2011-01-01'::date;
END;
$$;
SELECT create_distributed_function('func_with_return_table(int)');
 create_distributed_function
---------------------------------------------------------------------

(1 row)

CREATE OR REPLACE FUNCTION func_with_return_table(int)
RETURNS TABLE (date date)
LANGUAGE plpgsql AS $$
BEGIN
    RETURN query SELECT '2011-01-02'::date;
END;
$$;
SELECT count(*) FROM
  (SELECT result FROM
    run_command_on_workers($$select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc) from pg_proc where proname = 'func_with_return_table';$$)
    UNION select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc)::text from pg_proc where proname = 'func_with_return_table')
as test;
 count
---------------------------------------------------------------------
 1
(1 row)

-- verify that recreating distributed functions with OUT params gets propagated to workers
CREATE OR REPLACE FUNCTION func_with_out_param(a int, out b int)
RETURNS int
LANGUAGE sql AS $$ select 1; $$;
SELECT create_distributed_function('func_with_out_param(int)');
 create_distributed_function
---------------------------------------------------------------------

(1 row)

SET client_min_messages TO ERROR;
CREATE ROLE r1;
SELECT 1 FROM run_command_on_workers($$CREATE ROLE r1;$$);
 ?column?
---------------------------------------------------------------------
 1
 1
(2 rows)

GRANT EXECUTE ON FUNCTION func_with_out_param TO r1;
SELECT 1 FROM run_command_on_workers($$GRANT EXECUTE ON FUNCTION func_with_out_param TO r1;$$);
 ?column?
---------------------------------------------------------------------
 1
 1
(2 rows)

RESET client_min_messages;
CREATE OR REPLACE FUNCTION func_with_out_param(a int, out b int)
RETURNS int
LANGUAGE sql AS $$ select 2; $$;
SELECT count(*) FROM
  (SELECT result FROM
    run_command_on_workers($$select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc, pg_proc.proowner) from pg_proc where proname = 'func_with_out_param';$$)
    UNION select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc, pg_proc.proowner)::text from pg_proc where proname = 'func_with_out_param')
as test;
 count
---------------------------------------------------------------------
 1
(1 row)

-- verify that recreating distributed functions with INOUT params gets propagated to workers
CREATE OR REPLACE FUNCTION func_with_inout_param(a int, inout b int)
RETURNS int
LANGUAGE sql AS $$ select 1; $$;
-- this should error out
SELECT create_distributed_function('func_with_inout_param(int)');
ERROR: function "func_with_inout_param(int)" does not exist
-- this should work
SELECT create_distributed_function('func_with_inout_param(int,int)');
 create_distributed_function
---------------------------------------------------------------------

(1 row)

CREATE OR REPLACE FUNCTION func_with_inout_param(a int, inout b int)
RETURNS int
LANGUAGE sql AS $$ select 2; $$;
SELECT count(*) FROM
  (SELECT result FROM
    run_command_on_workers($$select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc) from pg_proc where proname = 'func_with_inout_param';$$)
    UNION select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc)::text from pg_proc where proname = 'func_with_inout_param')
as test;
 count
---------------------------------------------------------------------
 1
(1 row)

-- verify that recreating distributed functions with VARIADIC params gets propagated to workers
CREATE OR REPLACE FUNCTION func_with_variadic_param(a int, variadic b int[])
RETURNS int
LANGUAGE sql AS $$ select 1; $$;
-- this should work
SELECT create_distributed_function('func_with_variadic_param(int,int[])');
 create_distributed_function
---------------------------------------------------------------------

(1 row)

CREATE OR REPLACE FUNCTION func_with_variadic_param(a int, variadic b int[])
RETURNS int
LANGUAGE sql AS $$ select 2; $$;
SELECT count(*) FROM
  (SELECT result FROM
    run_command_on_workers($$select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc) from pg_proc where proname = 'func_with_variadic_param';$$)
    UNION select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc)::text from pg_proc where proname = 'func_with_variadic_param')
as test;
 count
---------------------------------------------------------------------
 1
(1 row)

-- verify that recreating distributed functions returning setof records gets propagated to workers
CREATE OR REPLACE FUNCTION func_returning_setof_int(IN parm1 date, IN parm2 interval)
RETURNS SETOF integer AS
$BODY$
BEGIN
    RETURN QUERY
    SELECT 1;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;
SELECT create_distributed_function('func_returning_setof_int(date,interval)');
 create_distributed_function
---------------------------------------------------------------------

(1 row)

CREATE OR REPLACE FUNCTION func_returning_setof_int(IN parm1 date, IN parm2 interval)
RETURNS SETOF integer AS
$BODY$
BEGIN
    RETURN QUERY
    SELECT 2;

END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;
SELECT count(*) FROM
  (SELECT result FROM
    run_command_on_workers($$select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc) from pg_proc where proname = 'func_returning_setof_int';$$)
    UNION select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc)::text from pg_proc where proname = 'func_returning_setof_int')
as test;
 count
---------------------------------------------------------------------
 1
(1 row)

-- verify that recreating distributed functions with variadic param returning setof records gets propagated to workers
CREATE OR REPLACE FUNCTION func_returning_setof_int_with_variadic_param(IN parm1 date, VARIADIC parm2 int[])
RETURNS SETOF integer AS
$BODY$
BEGIN
    RETURN QUERY
    SELECT 1;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;
SELECT create_distributed_function('func_returning_setof_int_with_variadic_param(date,int[])');
 create_distributed_function
---------------------------------------------------------------------

(1 row)

CREATE OR REPLACE FUNCTION func_returning_setof_int_with_variadic_param(IN parm1 date, VARIADIC parm2 int[])
RETURNS SETOF integer AS
$BODY$
BEGIN
    RETURN QUERY
    SELECT 2;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;
SELECT count(*) FROM
  (SELECT result FROM
    run_command_on_workers($$select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc) from pg_proc where proname = 'func_returning_setof_int_with_variadic_param';$$)
    UNION select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc)::text from pg_proc where proname = 'func_returning_setof_int_with_variadic_param')
as test;
 count
---------------------------------------------------------------------
 1
(1 row)

-- verify that recreating distributed procedures with out params gets propagated to workers
CREATE OR REPLACE PROCEDURE proc_with_variadic_param(IN parm1 date, VARIADIC parm2 int[])
LANGUAGE SQL
AS $$
    SELECT 1;
$$;
-- this should error out
SELECT create_distributed_function('proc_with_variadic_param(date)');
ERROR: function "proc_with_variadic_param(date)" does not exist
-- this should work
SELECT create_distributed_function('proc_with_variadic_param(date,int[])');
 create_distributed_function
---------------------------------------------------------------------

(1 row)

CREATE OR REPLACE PROCEDURE proc_with_variadic_param(IN parm1 date, VARIADIC parm2 int[])
LANGUAGE SQL
AS $$
    SELECT 2;
$$;
SELECT count(*) FROM
  (SELECT result FROM
    run_command_on_workers($$select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc) from pg_proc where proname = 'proc_with_variadic_param';$$)
    UNION select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc)::text from pg_proc where proname = 'proc_with_variadic_param')
as test;
 count
---------------------------------------------------------------------
 1
(1 row)

-- verify that recreating distributed procedures with INOUT param gets propagated to workers
CREATE OR REPLACE PROCEDURE proc_with_inout_param(IN parm1 date, INOUT parm2 int)
LANGUAGE SQL
AS $$
    SELECT 1;
$$;
-- this should error out
SELECT create_distributed_function('proc_with_inout_param(date)');
ERROR: function "proc_with_inout_param(date)" does not exist
-- this should work
SELECT create_distributed_function('proc_with_inout_param(date,int)');
 create_distributed_function
---------------------------------------------------------------------

(1 row)

CREATE OR REPLACE PROCEDURE proc_with_inout_param(IN parm1 date, INOUT parm2 int)
LANGUAGE SQL
AS $$
    SELECT 2;
$$;
SELECT count(*) FROM
  (SELECT result FROM
    run_command_on_workers($$select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc) from pg_proc where proname = 'proc_with_inout_param';$$)
    UNION select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc)::text from pg_proc where proname = 'proc_with_inout_param')
as test;
 count
---------------------------------------------------------------------
 1
(1 row)

SET client_min_messages TO error; -- suppress cascading objects dropping
DROP SCHEMA function_tests CASCADE;
DROP SCHEMA function_tests2 CASCADE;
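All of the checks above rely on the same idiom: project an identifying row from pg_proc on every worker via run_command_on_workers, UNION it with the coordinator's own row, and assert that the set collapses to a single element, which proves the replaced definition reached every node. The general form as a sketch ('my_func' is a placeholder for any distributed function name):

SELECT count(*) = 1 AS all_nodes_agree FROM (
    SELECT result FROM run_command_on_workers(
        $$SELECT row(pronargs, proargtypes, prosrc) FROM pg_proc
          WHERE proname = 'my_func'$$)
    UNION
    SELECT row(pronargs, proargtypes, prosrc)::text FROM pg_proc
    WHERE proname = 'my_func'
) AS nodes;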
@@ -139,6 +139,27 @@ SELECT worker_create_or_replace_object('CREATE AGGREGATE proc_conflict.existing_
 f
(1 row)

-- test worker_create_or_replace_object with a function that returns table
CREATE OR REPLACE FUNCTION func_with_return_table(int)
RETURNS TABLE (date date)
LANGUAGE plpgsql AS $$
BEGIN
    RETURN query SELECT '2011-01-01'::date;
END;
$$;
SELECT worker_create_or_replace_object('CREATE OR REPLACE FUNCTION func_with_return_table(int) RETURNS TABLE (date date) LANGUAGE plpgsql AS $$ BEGIN RETURN query SELECT ''2011-01-01''::date; END; $$;');
 worker_create_or_replace_object
---------------------------------------------------------------------
 t
(1 row)

-- verify that a backup function is created
SELECT COUNT(*)=2 FROM pg_proc WHERE proname LIKE 'func_with_return_table%';
 ?column?
---------------------------------------------------------------------
 t
(1 row)

-- hide cascades
SET client_min_messages TO error;
DROP SCHEMA proc_conflict CASCADE;
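What the COUNT(*)=2 assertion checks: when worker_create_or_replace_object receives a definition that differs from the existing object, it moves the current function out of the way under a backup name before creating the incoming one, so pg_proc ends up with two rows sharing the name prefix. Listing them makes the effect visible (a sketch, run right after the call above):

SELECT proname FROM pg_proc
WHERE proname LIKE 'func_with_return_table%'
ORDER BY proname;  -- the original plus its renamed backup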
@@ -0,0 +1,54 @@
Parsed test spec with 2 sessions

starting permutation: s1-begin s1-drop-table s2-fix-partition-shard-index-names s1-commit
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-begin:
    BEGIN;

step s1-drop-table:
    DROP TABLE dist_partitioned_table;

step s2-fix-partition-shard-index-names:
    SET client_min_messages TO NOTICE;
    SELECT fix_partition_shard_index_names('dist_partitioned_table'::regclass);
 <waiting ...>
step s1-commit:
    COMMIT;

step s2-fix-partition-shard-index-names: <... completed>
s2: NOTICE: relation with OID XXXX does not exist, skipping
fix_partition_shard_index_names
---------------------------------------------------------------------

(1 row)


starting permutation: s2-begin s2-fix-partition-shard-index-names s1-drop-table s2-commit
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s2-begin:
    BEGIN;

step s2-fix-partition-shard-index-names:
    SET client_min_messages TO NOTICE;
    SELECT fix_partition_shard_index_names('dist_partitioned_table'::regclass);

fix_partition_shard_index_names
---------------------------------------------------------------------

(1 row)

step s1-drop-table:
    DROP TABLE dist_partitioned_table;
 <waiting ...>
step s2-commit:
    COMMIT;

step s1-drop-table: <... completed>
@@ -3014,5 +3014,18 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Seq Scan on distributed_table_1_570032 distributed_table_xxx (actual rows=1 loops=1)
CREATE TYPE multi_explain.int_wrapper_type AS (int_field int);
CREATE TABLE tbl (a int, b multi_explain.int_wrapper_type);
SELECT create_distributed_table('tbl', 'a');

EXPLAIN :default_analyze_flags SELECT * FROM tbl;
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
  Task Count: 2
  Tuple data received from nodes: 0 bytes
  Tasks Shown: One of 2
  ->  Task
        Tuple data received from node: 0 bytes
        Node: host=localhost port=xxxxx dbname=regression
        ->  Seq Scan on tbl_570036 tbl (actual rows=0 loops=1)
SET client_min_messages TO ERROR;
DROP SCHEMA multi_explain CASCADE;
@@ -750,6 +750,41 @@ SELECT * FROM multi_extension.print_extension_changes();
 | view public.citus_tables
(2 rows)

-- don't print "HINT: ..." lines, so the current lib version stays hidden
\set VERBOSITY terse
CREATE TABLE columnar_table(a INT, b INT) USING columnar;
SET citus.enable_version_checks TO ON;
-- all should throw an error due to version mismatch
VACUUM FULL columnar_table;
ERROR: loaded Citus library version differs from installed extension version
INSERT INTO columnar_table SELECT i FROM generate_series(1, 10) i;
ERROR: loaded Citus library version differs from installed extension version
VACUUM columnar_table;
WARNING: loaded Citus library version differs from installed extension version
TRUNCATE columnar_table;
ERROR: loaded Citus library version differs from installed extension version
DROP TABLE columnar_table;
ERROR: loaded Citus library version differs from installed extension version
CREATE INDEX ON columnar_table (a);
ERROR: loaded Citus library version differs from installed extension version
SELECT alter_columnar_table_set('columnar_table', compression => 'pglz');
ERROR: loaded Citus library version differs from installed extension version
SELECT alter_columnar_table_reset('columnar_table');
ERROR: loaded Citus library version differs from installed extension version
INSERT INTO columnar_table SELECT * FROM columnar_table;
ERROR: loaded Citus library version differs from installed extension version
SELECT 1 FROM columnar_table; -- columnar custom scan
ERROR: loaded Citus library version differs from installed extension version
SET columnar.enable_custom_scan TO OFF;
SELECT 1 FROM columnar_table; -- seq scan
ERROR: loaded Citus library version differs from installed extension version
CREATE TABLE new_columnar_table (a int) USING columnar;
ERROR: loaded Citus library version differs from installed extension version
-- do cleanup for the rest of the tests
SET citus.enable_version_checks TO OFF;
DROP TABLE columnar_table;
RESET columnar.enable_custom_scan;
\set VERBOSITY default
-- Test downgrade to 10.0-4 from 10.1-1
ALTER EXTENSION citus UPDATE TO '10.1-1';
ALTER EXTENSION citus UPDATE TO '10.0-4';
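The mismatch being detected above compares two version sources: the shared library loaded into the server process and the extension schema version that ALTER EXTENSION moves. A sketch of how to inspect both on a live node, plus the GUC that turns a mismatch into the errors shown:

SELECT citus_version();                                       -- loaded library build
SELECT extversion FROM pg_extension WHERE extname = 'citus';  -- installed schema version
SHOW citus.enable_version_checks;  -- ON makes a mismatch an ERROR, as above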
@@ -813,12 +848,110 @@ SELECT * FROM multi_extension.print_extension_changes();
 | function worker_nextval(regclass) integer
(16 rows)

-- Test downgrade to 10.2-1 from 10.2-2
ALTER EXTENSION citus UPDATE TO '10.2-2';
ALTER EXTENSION citus UPDATE TO '10.2-1';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM multi_extension.print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
(0 rows)

-- Snapshot of state at 10.2-2
ALTER EXTENSION citus UPDATE TO '10.2-2';
SELECT * FROM multi_extension.print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
(0 rows)

-- Test downgrade to 10.2-2 from 10.2-3
ALTER EXTENSION citus UPDATE TO '10.2-3';
ALTER EXTENSION citus UPDATE TO '10.2-2';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM multi_extension.print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
(0 rows)

-- Snapshot of state at 10.2-3
ALTER EXTENSION citus UPDATE TO '10.2-3';
SELECT * FROM multi_extension.print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
(0 rows)

-- Test downgrade to 10.2-3 from 10.2-4
ALTER EXTENSION citus UPDATE TO '10.2-4';
ALTER EXTENSION citus UPDATE TO '10.2-3';
-- Make sure that we don't delete pg_depend entries added in
-- columnar--10.2-3--10.2-4.sql when downgrading to 10.2-3.
SELECT COUNT(*)=10
FROM pg_depend
WHERE classid = 'pg_am'::regclass::oid AND
      objid = (select oid from pg_am where amname = 'columnar') AND
      objsubid = 0 AND
      refclassid = 'pg_class'::regclass::oid AND
      refobjsubid = 0 AND
      deptype = 'n';
 ?column?
---------------------------------------------------------------------
 t
(1 row)

-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM multi_extension.print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
(0 rows)

-- Snapshot of state at 10.2-4
ALTER EXTENSION citus UPDATE TO '10.2-4';
SELECT * FROM multi_extension.print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
 | function citus_internal.columnar_ensure_am_depends_catalog() void
 | function fix_all_partition_shard_index_names() SETOF regclass
 | function fix_partition_shard_index_names(regclass) void
 | function worker_fix_partition_shard_index_names(regclass,text,text) void
(4 rows)

-- Make sure that we defined dependencies from all rel objects (tables,
-- indexes, sequences ..) to columnar table access method ...
SELECT pg_class.oid INTO columnar_schema_members
FROM pg_class, pg_namespace
WHERE pg_namespace.oid=pg_class.relnamespace AND
      pg_namespace.nspname='columnar';
SELECT refobjid INTO columnar_schema_members_pg_depend
FROM pg_depend
WHERE classid = 'pg_am'::regclass::oid AND
      objid = (select oid from pg_am where amname = 'columnar') AND
      objsubid = 0 AND
      refclassid = 'pg_class'::regclass::oid AND
      refobjsubid = 0 AND
      deptype = 'n';
-- ... , so this should be empty,
(TABLE columnar_schema_members EXCEPT TABLE columnar_schema_members_pg_depend)
UNION
(TABLE columnar_schema_members_pg_depend EXCEPT TABLE columnar_schema_members);
 oid
---------------------------------------------------------------------
(0 rows)

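The query above is the standard SQL idiom for a symmetric difference: (A EXCEPT B) UNION (B EXCEPT A) is empty exactly when A and B hold the same rows, here proving that the pg_depend entries and the columnar schema's relations match one-to-one. A tiny self-contained illustration:

(SELECT x FROM (VALUES (1), (2)) AS a(x)
 EXCEPT
 SELECT x FROM (VALUES (2), (3)) AS b(x))
UNION
(SELECT x FROM (VALUES (2), (3)) AS b(x)
 EXCEPT
 SELECT x FROM (VALUES (1), (2)) AS a(x));
-- returns 1 and 3, the rows the two sets do not share;
-- an empty result means the sets are identical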
-- ... , and both columnar_schema_members_pg_depend & columnar_schema_members
-- should have 10 entries.
SELECT COUNT(*)=10 FROM columnar_schema_members_pg_depend;
 ?column?
---------------------------------------------------------------------
 t
(1 row)

DROP TABLE columnar_schema_members, columnar_schema_members_pg_depend;
DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
-- show running version
SHOW citus.version;
 citus.version
---------------------------------------------------------------------
 10.2devel
 10.2.5
(1 row)

-- ensure no unexpected objects were created outside pg_catalog
@ -0,0 +1,638 @@
|
|||
---------------------------------------------------------------------
|
||||
-- multi_fix_partition_shard_index_names
|
||||
-- check the following two issues
|
||||
-- https://github.com/citusdata/citus/issues/4962
|
||||
-- https://github.com/citusdata/citus/issues/5138
|
||||
---------------------------------------------------------------------
|
||||
SET citus.next_shard_id TO 910000;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE SCHEMA fix_idx_names;
|
||||
SET search_path TO fix_idx_names, public;
|
||||
-- NULL input should automatically return NULL since
|
||||
-- fix_partition_shard_index_names is strict
|
||||
-- same for worker_fix_partition_shard_index_names
|
||||
SELECT fix_partition_shard_index_names(NULL);
|
||||
fix_partition_shard_index_names
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT worker_fix_partition_shard_index_names(NULL, NULL, NULL);
|
||||
worker_fix_partition_shard_index_names
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
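-- For reference (a sketch, not part of the original test): strictness is
-- recorded in the catalog as pg_proc.proisstrict, so both functions should
-- show true in a query like this:
SELECT proname, proisstrict
FROM pg_proc
WHERE proname IN ('fix_partition_shard_index_names',
                  'worker_fix_partition_shard_index_names');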
||||
-- fix_partition_shard_index_names cannot be called for distributed
|
||||
-- tables that are not partitioned
|
||||
CREATE TABLE not_partitioned(id int);
|
||||
SELECT create_distributed_table('not_partitioned', 'id');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT fix_partition_shard_index_names('not_partitioned'::regclass);
|
||||
ERROR: Fixing shard index names is only applicable to partitioned tables, and "not_partitioned" is not a partitioned table
|
||||
-- fix_partition_shard_index_names cannot be called for partitioned
|
||||
-- tables that are not distributed
|
||||
CREATE TABLE not_distributed(created_at timestamptz) PARTITION BY RANGE (created_at);
|
||||
SELECT fix_partition_shard_index_names('not_distributed'::regclass);
|
||||
ERROR: fix_partition_shard_index_names can only be called for distributed partitioned tables
|
||||
-- test with proper table
|
||||
CREATE TABLE dist_partitioned_table (dist_col int, another_col int, partition_col timestamp) PARTITION BY RANGE (partition_col);
|
||||
SELECT create_distributed_table('dist_partitioned_table', 'dist_col');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- create a partition with a long name and another with a short name
|
||||
CREATE TABLE partition_table_with_very_long_name PARTITION OF dist_partitioned_table FOR VALUES FROM ('2018-01-01') TO ('2019-01-01');
|
||||
CREATE TABLE p PARTITION OF dist_partitioned_table FOR VALUES FROM ('2019-01-01') TO ('2020-01-01');
|
||||
-- create an index on parent table
|
||||
-- we will see that it doesn't matter whether we name the index on parent or not
|
||||
-- indexes auto-generated on partitions will not use this name
|
||||
CREATE INDEX short ON dist_partitioned_table USING btree (another_col, partition_col);
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table | short
|
||||
p | p_another_col_partition_col_idx
|
||||
partition_table_with_very_long_name | partition_table_with_very_long_na_another_col_partition_col_idx
|
||||
(3 rows)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
-- Note that the shell table from above, partition_table_with_very_long_name,
|
||||
-- and its shard partition_table_with_very_long_name_910008
|
||||
-- have the same index name: partition_table_with_very_long_na_another_col_partition_col_idx
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table_910004 | short_910004
|
||||
dist_partitioned_table_910006 | short_910006
|
||||
p_910012 | p_910012_another_col_partition_col_idx
|
||||
p_910014 | p_910014_another_col_partition_col_idx
|
||||
partition_table_with_very_long_name_910008 | partition_table_with_very_long_na_another_col_partition_col_idx
|
||||
partition_table_with_very_long_name_910010 | partition_table_with_very_long_n_another_col_partition_col_idx1
|
||||
(6 rows)
|
||||
|
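-- For reference (a sketch, not part of the original test): such a clash can
-- be spotted generically by looking for index names that appear more than
-- once in the schema:
SELECT indexname, count(*)
FROM pg_indexes
WHERE schemaname = 'fix_idx_names'
GROUP BY indexname
HAVING count(*) > 1;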
||||
\c - - - :master_port
|
||||
-- this should fail because of the name clash explained above
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
ERROR: relation "partition_table_with_very_long_na_another_col_partition_col_idx" already exists
|
||||
CONTEXT: while executing command on localhost:xxxxx
|
||||
-- let's fix the problematic table
|
||||
SET search_path TO fix_idx_names, public;
|
||||
SELECT fix_partition_shard_index_names('dist_partitioned_table'::regclass);
|
||||
fix_partition_shard_index_names
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
-- shard id has been appended to all index names which didn't end in shard id
|
||||
-- this is in line with Citus's convention for naming shard indexes: always append the shard id at the end
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table_910004 | short_910004
|
||||
dist_partitioned_table_910006 | short_910006
|
||||
p_910012 | p_another_col_partition_col_idx_910012
|
||||
p_910014 | p_another_col_partition_col_idx_910014
|
||||
partition_table_with_very_long_name_910008 | partition_table_with_very_long_na_another_col_p_dd884a3b_910008
|
||||
partition_table_with_very_long_name_910010 | partition_table_with_very_long_na_another_col_p_dd884a3b_910010
|
||||
(6 rows)
|
||||
|
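-- For reference (a sketch, not part of the original test): after the fix,
-- every shard index name ends in its shard id, so a check for names that do
-- not end in a digit should come back empty on this worker:
SELECT indexname
FROM pg_indexes
WHERE schemaname = 'fix_idx_names' AND indexname !~ '[0-9]$';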
||||
\c - - - :master_port
|
||||
SET search_path TO fix_idx_names, public;
|
||||
-- this should now work
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
start_metadata_sync_to_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- if we run this command again, the names will not change anymore since the shard id is already appended to them
|
||||
SELECT fix_partition_shard_index_names('dist_partitioned_table'::regclass);
|
||||
fix_partition_shard_index_names
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT fix_all_partition_shard_index_names();
|
||||
fix_all_partition_shard_index_names
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table | short
|
||||
dist_partitioned_table_910004 | short_910004
|
||||
dist_partitioned_table_910006 | short_910006
|
||||
p | p_another_col_partition_col_idx
|
||||
p_910012 | p_another_col_partition_col_idx_910012
|
||||
p_910014 | p_another_col_partition_col_idx_910014
|
||||
partition_table_with_very_long_name | partition_table_with_very_long_na_another_col_partition_col_idx
|
||||
partition_table_with_very_long_name_910008 | partition_table_with_very_long_na_another_col_p_dd884a3b_910008
|
||||
partition_table_with_very_long_name_910010 | partition_table_with_very_long_na_another_col_p_dd884a3b_910010
|
||||
(9 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
SET search_path TO fix_idx_names, public;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.next_shard_id TO 910020;
|
||||
-- if we explicitly create an index on the partition-to-be table, Citus handles the naming
|
||||
-- hence we would have no broken index names
|
||||
CREATE TABLE another_partition_table_with_very_long_name (dist_col int, another_col int, partition_col timestamp);
|
||||
SELECT create_distributed_table('another_partition_table_with_very_long_name', 'dist_col');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE INDEX ON another_partition_table_with_very_long_name USING btree (another_col, partition_col);
|
||||
ALTER TABLE dist_partitioned_table ATTACH PARTITION another_partition_table_with_very_long_name FOR VALUES FROM ('2020-01-01') TO ('2021-01-01');
|
||||
-- check it works even if we give a weird index name
|
||||
CREATE TABLE yet_another_partition_table (dist_col int, another_col int, partition_col timestamp);
|
||||
SELECT create_distributed_table('yet_another_partition_table', 'dist_col');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE INDEX "really weird index name !!" ON yet_another_partition_table USING btree (another_col, partition_col);
|
||||
ALTER TABLE dist_partitioned_table ATTACH PARTITION yet_another_partition_table FOR VALUES FROM ('2021-01-01') TO ('2022-01-01');
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
another_partition_table_with_very_long_name | another_partition_table_with_very_another_col_partition_col_idx
|
||||
dist_partitioned_table | short
|
||||
p | p_another_col_partition_col_idx
|
||||
partition_table_with_very_long_name | partition_table_with_very_long_na_another_col_partition_col_idx
|
||||
yet_another_partition_table | really weird index name !!
|
||||
(5 rows)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
-- notice indexes of shards of another_partition_table_with_very_long_name already have shardid appended to the end
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
another_partition_table_with_very_long_name | another_partition_table_with_very_another_col_partition_col_idx
|
||||
another_partition_table_with_very_long_name_910020 | another_partition_table_with_very_another_col_p_a02939b4_910020
|
||||
another_partition_table_with_very_long_name_910022 | another_partition_table_with_very_another_col_p_a02939b4_910022
|
||||
dist_partitioned_table | short
|
||||
dist_partitioned_table_910004 | short_910004
|
||||
dist_partitioned_table_910006 | short_910006
|
||||
p | p_another_col_partition_col_idx
|
||||
p_910012 | p_another_col_partition_col_idx_910012
|
||||
p_910014 | p_another_col_partition_col_idx_910014
|
||||
partition_table_with_very_long_name | partition_table_with_very_long_na_another_col_partition_col_idx
|
||||
partition_table_with_very_long_name_910008 | partition_table_with_very_long_na_another_col_p_dd884a3b_910008
|
||||
partition_table_with_very_long_name_910010 | partition_table_with_very_long_na_another_col_p_dd884a3b_910010
|
||||
yet_another_partition_table | really weird index name !!
|
||||
yet_another_partition_table_910024 | really weird index name !!_910024
|
||||
yet_another_partition_table_910026 | really weird index name !!_910026
|
||||
(15 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
SET search_path TO fix_idx_names, public;
|
||||
-- this command will not change any index names
|
||||
SELECT fix_all_partition_shard_index_names();
|
||||
fix_all_partition_shard_index_names
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
-- names are the same as before
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
another_partition_table_with_very_long_name | another_partition_table_with_very_another_col_partition_col_idx
|
||||
another_partition_table_with_very_long_name_910020 | another_partition_table_with_very_another_col_p_a02939b4_910020
|
||||
another_partition_table_with_very_long_name_910022 | another_partition_table_with_very_another_col_p_a02939b4_910022
|
||||
dist_partitioned_table | short
|
||||
dist_partitioned_table_910004 | short_910004
|
||||
dist_partitioned_table_910006 | short_910006
|
||||
p | p_another_col_partition_col_idx
|
||||
p_910012 | p_another_col_partition_col_idx_910012
|
||||
p_910014 | p_another_col_partition_col_idx_910014
|
||||
partition_table_with_very_long_name | partition_table_with_very_long_na_another_col_partition_col_idx
|
||||
partition_table_with_very_long_name_910008 | partition_table_with_very_long_na_another_col_p_dd884a3b_910008
|
||||
partition_table_with_very_long_name_910010 | partition_table_with_very_long_na_another_col_p_dd884a3b_910010
|
||||
yet_another_partition_table | really weird index name !!
|
||||
yet_another_partition_table_910024 | really weird index name !!_910024
|
||||
yet_another_partition_table_910026 | really weird index name !!_910026
|
||||
(15 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
SET search_path TO fix_idx_names, public;
|
||||
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
NOTICE: dropping metadata on the node (localhost,57637)
|
||||
stop_metadata_sync_to_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP INDEX short;
|
||||
DROP TABLE yet_another_partition_table, another_partition_table_with_very_long_name;
|
||||
-- this will create constraint1 index on parent
|
||||
ALTER TABLE dist_partitioned_table ADD CONSTRAINT constraint1 UNIQUE (dist_col, partition_col);
|
||||
CREATE TABLE fk_table (id int, fk_column timestamp, FOREIGN KEY (id, fk_column) REFERENCES dist_partitioned_table (dist_col, partition_col));
|
||||
-- try creating an index on the columns referenced by the foreign key
|
||||
CREATE INDEX ON dist_partitioned_table USING btree (dist_col, partition_col);
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table | constraint1
|
||||
dist_partitioned_table | dist_partitioned_table_dist_col_partition_col_idx
|
||||
p | p_dist_col_partition_col_idx
|
||||
p | p_dist_col_partition_col_key
|
||||
partition_table_with_very_long_name | partition_table_with_very_long_name_dist_col_partition_col_idx
|
||||
partition_table_with_very_long_name | partition_table_with_very_long_name_dist_col_partition_col_key
|
||||
(6 rows)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
-- index names don't end in shardid for partitions
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table_910004 | constraint1_910004
|
||||
dist_partitioned_table_910004 | dist_partitioned_table_dist_col_partition_col_idx_910004
|
||||
dist_partitioned_table_910006 | constraint1_910006
|
||||
dist_partitioned_table_910006 | dist_partitioned_table_dist_col_partition_col_idx_910006
|
||||
p_910012 | p_910012_dist_col_partition_col_idx
|
||||
p_910012 | p_910012_dist_col_partition_col_key
|
||||
p_910014 | p_910014_dist_col_partition_col_idx
|
||||
p_910014 | p_910014_dist_col_partition_col_key
|
||||
partition_table_with_very_long_name_910008 | partition_table_with_very_long_name__dist_col_partition_col_idx
|
||||
partition_table_with_very_long_name_910008 | partition_table_with_very_long_name__dist_col_partition_col_key
|
||||
partition_table_with_very_long_name_910010 | partition_table_with_very_long_name_dist_col_partition_col_idx1
|
||||
partition_table_with_very_long_name_910010 | partition_table_with_very_long_name_dist_col_partition_col_key1
|
||||
(12 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
SET search_path TO fix_idx_names, public;
|
||||
SELECT fix_all_partition_shard_index_names();
|
||||
fix_all_partition_shard_index_names
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
-- now index names end in shardid
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table_910004 | constraint1_910004
|
||||
dist_partitioned_table_910004 | dist_partitioned_table_dist_col_partition_col_idx_910004
|
||||
dist_partitioned_table_910006 | constraint1_910006
|
||||
dist_partitioned_table_910006 | dist_partitioned_table_dist_col_partition_col_idx_910006
|
||||
p_910012 | p_dist_col_partition_col_idx_910012
|
||||
p_910012 | p_dist_col_partition_col_key_910012
|
||||
p_910014 | p_dist_col_partition_col_idx_910014
|
||||
p_910014 | p_dist_col_partition_col_key_910014
|
||||
partition_table_with_very_long_name_910008 | partition_table_with_very_long_name_dist_col_pa_781a5400_910008
|
||||
partition_table_with_very_long_name_910008 | partition_table_with_very_long_name_dist_col_pa_ef25fb77_910008
|
||||
partition_table_with_very_long_name_910010 | partition_table_with_very_long_name_dist_col_pa_781a5400_910010
|
||||
partition_table_with_very_long_name_910010 | partition_table_with_very_long_name_dist_col_pa_ef25fb77_910010
|
||||
(12 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
SET search_path TO fix_idx_names, public;
|
||||
ALTER TABLE dist_partitioned_table DROP CONSTRAINT constraint1 CASCADE;
|
||||
NOTICE: drop cascades to constraint fk_table_id_fk_column_fkey on table fk_table
|
||||
DROP INDEX dist_partitioned_table_dist_col_partition_col_idx;
|
||||
-- try with index on only parent
|
||||
-- such a parent-only index is marked invalid until every partition has a matching index attached
|
||||
-- also try with hash method, not btree
|
||||
CREATE INDEX short_parent ON ONLY dist_partitioned_table USING hash (dist_col);
|
||||
-- only another_partition will have the index on dist_col inherited from short_parent
|
||||
-- hence short_parent will still be invalid
|
||||
CREATE TABLE another_partition (dist_col int, another_col int, partition_col timestamp);
|
||||
ALTER TABLE dist_partitioned_table ATTACH PARTITION another_partition FOR VALUES FROM ('2017-01-01') TO ('2018-01-01');
|
||||
SELECT c.relname AS indexname
|
||||
FROM pg_catalog.pg_class c, pg_catalog.pg_namespace n, pg_catalog.pg_index i
|
||||
WHERE (i.indisvalid = false) AND i.indexrelid = c.oid AND c.relnamespace = n.oid AND n.nspname = 'fix_idx_names';
|
||||
indexname
|
||||
---------------------------------------------------------------------
|
||||
short_parent
|
||||
(1 row)
|
||||
|
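-- For reference, a parent-only index becomes valid once every partition has
-- a matching index attached; a sketch with a hypothetical index name, not
-- executed here since it would change the state this test relies on:
--   CREATE INDEX p_dist_col_hash ON p USING hash (dist_col);
--   ALTER INDEX short_parent ATTACH PARTITION p_dist_col_hash;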
||||
-- try with index on only partition
|
||||
CREATE INDEX short_child ON ONLY p USING hash (dist_col);
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
another_partition | another_partition_dist_col_idx
|
||||
dist_partitioned_table | short_parent
|
||||
p | short_child
|
||||
(3 rows)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
-- index names are already correct except for inherited index for another_partition
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
another_partition_361176 | another_partition_361176_dist_col_idx
|
||||
another_partition_361178 | another_partition_361178_dist_col_idx
|
||||
dist_partitioned_table_910004 | short_parent_910004
|
||||
dist_partitioned_table_910006 | short_parent_910006
|
||||
p_910012 | short_child_910012
|
||||
p_910014 | short_child_910014
|
||||
(6 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
SET search_path TO fix_idx_names, public;
|
||||
-- this will fix inherited index for another_partition
|
||||
SELECT fix_partition_shard_index_names('dist_partitioned_table'::regclass);
|
||||
fix_partition_shard_index_names
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- this will error out because p is not partitioned; rather, it is a partition
|
||||
SELECT fix_partition_shard_index_names('p'::regclass);
|
||||
ERROR: Fixing shard index names is only applicable to partitioned tables, and "p" is not a partitioned table
|
||||
\c - - - :worker_1_port
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
another_partition_361176 | another_partition_dist_col_idx_361176
|
||||
another_partition_361178 | another_partition_dist_col_idx_361178
|
||||
dist_partitioned_table_910004 | short_parent_910004
|
||||
dist_partitioned_table_910006 | short_parent_910006
|
||||
p_910012 | short_child_910012
|
||||
p_910014 | short_child_910014
|
||||
(6 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
SET search_path TO fix_idx_names, public;
|
||||
DROP INDEX short_parent;
|
||||
DROP INDEX short_child;
|
||||
DROP TABLE another_partition;
|
||||
-- expression indexes have the same problem with naming
|
||||
CREATE INDEX expression_index ON dist_partitioned_table ((dist_col || ' ' || another_col));
|
||||
-- try with statistics on index
|
||||
CREATE INDEX statistics_on_index on dist_partitioned_table ((dist_col+another_col), (dist_col-another_col));
|
||||
ALTER INDEX statistics_on_index ALTER COLUMN 1 SET STATISTICS 3737;
|
||||
ALTER INDEX statistics_on_index ALTER COLUMN 2 SET STATISTICS 3737;
|
||||
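-- For reference (a sketch, not part of the original test): the per-column
-- statistics targets are stored in pg_attribute for the index's expression
-- columns, so both should now show 3737:
SELECT attnum, attstattarget
FROM pg_attribute
WHERE attrelid = 'statistics_on_index'::regclass
ORDER BY attnum;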
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table | expression_index
|
||||
dist_partitioned_table | statistics_on_index
|
||||
p | p_expr_expr1_idx
|
||||
p | p_expr_idx
|
||||
partition_table_with_very_long_name | partition_table_with_very_long_name_expr_expr1_idx
|
||||
partition_table_with_very_long_name | partition_table_with_very_long_name_expr_idx
|
||||
(6 rows)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table_910004 | expression_index_910004
|
||||
dist_partitioned_table_910004 | statistics_on_index_910004
|
||||
dist_partitioned_table_910006 | expression_index_910006
|
||||
dist_partitioned_table_910006 | statistics_on_index_910006
|
||||
p_910012 | p_910012_expr_expr1_idx
|
||||
p_910012 | p_910012_expr_idx
|
||||
p_910014 | p_910014_expr_expr1_idx
|
||||
p_910014 | p_910014_expr_idx
|
||||
partition_table_with_very_long_name_910008 | partition_table_with_very_long_name_910008_expr_expr1_idx
|
||||
partition_table_with_very_long_name_910008 | partition_table_with_very_long_name_910008_expr_idx
|
||||
partition_table_with_very_long_name_910010 | partition_table_with_very_long_name_910010_expr_expr1_idx
|
||||
partition_table_with_very_long_name_910010 | partition_table_with_very_long_name_910010_expr_idx
|
||||
(12 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
SET search_path TO fix_idx_names, public;
|
||||
SELECT fix_partition_shard_index_names('dist_partitioned_table'::regclass);
|
||||
fix_partition_shard_index_names
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table_910004 | expression_index_910004
|
||||
dist_partitioned_table_910004 | statistics_on_index_910004
|
||||
dist_partitioned_table_910006 | expression_index_910006
|
||||
dist_partitioned_table_910006 | statistics_on_index_910006
|
||||
p_910012 | p_expr_expr1_idx_910012
|
||||
p_910012 | p_expr_idx_910012
|
||||
p_910014 | p_expr_expr1_idx_910014
|
||||
p_910014 | p_expr_idx_910014
|
||||
partition_table_with_very_long_name_910008 | partition_table_with_very_long_name_expr_expr1_idx_910008
|
||||
partition_table_with_very_long_name_910008 | partition_table_with_very_long_name_expr_idx_910008
|
||||
partition_table_with_very_long_name_910010 | partition_table_with_very_long_name_expr_expr1_idx_910010
|
||||
partition_table_with_very_long_name_910010 | partition_table_with_very_long_name_expr_idx_910010
|
||||
(12 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
SET search_path TO fix_idx_names, public;
|
||||
-- try with a table with no partitions
|
||||
ALTER TABLE dist_partitioned_table DETACH PARTITION p;
|
||||
ALTER TABLE dist_partitioned_table DETACH PARTITION partition_table_with_very_long_name;
|
||||
DROP TABLE p;
|
||||
DROP TABLE partition_table_with_very_long_name;
|
||||
-- still dist_partitioned_table has indexes
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table | expression_index
|
||||
dist_partitioned_table | statistics_on_index
|
||||
(2 rows)
|
||||
|
||||
-- this does nothing
|
||||
SELECT fix_partition_shard_index_names('dist_partitioned_table'::regclass);
|
||||
fix_partition_shard_index_names
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table_910004 | expression_index_910004
|
||||
dist_partitioned_table_910004 | statistics_on_index_910004
|
||||
dist_partitioned_table_910006 | expression_index_910006
|
||||
dist_partitioned_table_910006 | statistics_on_index_910006
|
||||
(4 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
SET search_path TO fix_idx_names, public;
|
||||
DROP TABLE dist_partitioned_table;
|
||||
-- add test with replication factor = 2
|
||||
SET citus.shard_replication_factor TO 2;
|
||||
SET citus.next_shard_id TO 910050;
|
||||
CREATE TABLE dist_partitioned_table (dist_col int, another_col int, partition_col timestamp) PARTITION BY RANGE (partition_col);
|
||||
SELECT create_distributed_table('dist_partitioned_table', 'dist_col');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- create a partition with a long name
|
||||
CREATE TABLE partition_table_with_very_long_name PARTITION OF dist_partitioned_table FOR VALUES FROM ('2018-01-01') TO ('2019-01-01');
|
||||
-- create an index on parent table
|
||||
CREATE INDEX index_rep_factor_2 ON dist_partitioned_table USING btree (another_col, partition_col);
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table | index_rep_factor_2
|
||||
partition_table_with_very_long_name | partition_table_with_very_long_na_another_col_partition_col_idx
|
||||
(2 rows)
|
||||
|
||||
\c - - - :worker_2_port
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table_910050 | index_rep_factor_2_910050
|
||||
dist_partitioned_table_910051 | index_rep_factor_2_910051
|
||||
dist_partitioned_table_910052 | index_rep_factor_2_910052
|
||||
dist_partitioned_table_910053 | index_rep_factor_2_910053
|
||||
partition_table_with_very_long_name_910054 | partition_table_with_very_long_na_another_col_partition_col_idx
|
||||
partition_table_with_very_long_name_910055 | partition_table_with_very_long_n_another_col_partition_col_idx1
|
||||
partition_table_with_very_long_name_910056 | partition_table_with_very_long_n_another_col_partition_col_idx2
|
||||
partition_table_with_very_long_name_910057 | partition_table_with_very_long_n_another_col_partition_col_idx3
|
||||
(8 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
-- let's fix the problematic table
|
||||
SET search_path TO fix_idx_names, public;
|
||||
SELECT fix_partition_shard_index_names('dist_partitioned_table'::regclass);
|
||||
fix_partition_shard_index_names
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_2_port
|
||||
-- shard id has been appended to all index names which didn't end in shard id
|
||||
-- this is in line with Citus's convention for naming shard indexes: always append the shard id at the end
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table_910050 | index_rep_factor_2_910050
|
||||
dist_partitioned_table_910051 | index_rep_factor_2_910051
|
||||
dist_partitioned_table_910052 | index_rep_factor_2_910052
|
||||
dist_partitioned_table_910053 | index_rep_factor_2_910053
|
||||
partition_table_with_very_long_name_910054 | partition_table_with_very_long_na_another_col_p_dd884a3b_910054
|
||||
partition_table_with_very_long_name_910055 | partition_table_with_very_long_na_another_col_p_dd884a3b_910055
|
||||
partition_table_with_very_long_name_910056 | partition_table_with_very_long_na_another_col_p_dd884a3b_910056
|
||||
partition_table_with_very_long_name_910057 | partition_table_with_very_long_na_another_col_p_dd884a3b_910057
|
||||
(8 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
SET search_path TO fix_idx_names, public;
|
||||
-- test with role that is not superuser
|
||||
SET client_min_messages TO warning;
|
||||
SET citus.enable_ddl_propagation TO off;
|
||||
CREATE USER user1;
|
||||
RESET client_min_messages;
|
||||
RESET citus.enable_ddl_propagation;
|
||||
SET ROLE user1;
|
||||
SELECT fix_partition_shard_index_names('fix_idx_names.dist_partitioned_table'::regclass);
|
||||
ERROR: permission denied for schema fix_idx_names
|
||||
RESET ROLE;
|
||||
SET search_path TO fix_idx_names, public;
|
||||
DROP TABLE dist_partitioned_table;
|
||||
-- also, we cannot do any further operations (e.g. rename) on the indexes of partitions because
|
||||
-- the index names on shards of partitions have been generated by Postgres, not Citus
|
||||
-- it doesn't matter here whether the partition name is long or short
|
||||
-- replicate the scenario from above, but this time with one shard so that this test isn't flaky
|
||||
SET citus.shard_count TO 1;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.next_shard_id TO 910030;
|
||||
CREATE TABLE dist_partitioned_table (dist_col int, another_col int, partition_col timestamp) PARTITION BY RANGE (partition_col);
|
||||
SELECT create_distributed_table('dist_partitioned_table', 'dist_col');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE partition_table_with_very_long_name PARTITION OF dist_partitioned_table FOR VALUES FROM ('2018-01-01') TO ('2019-01-01');
|
||||
CREATE TABLE p PARTITION OF dist_partitioned_table FOR VALUES FROM ('2019-01-01') TO ('2020-01-01');
|
||||
CREATE INDEX short ON dist_partitioned_table USING btree (another_col, partition_col);
|
||||
-- rename shouldn't work
|
||||
ALTER INDEX partition_table_with_very_long_na_another_col_partition_col_idx RENAME TO partition_table_with_very_long_name_idx;
|
||||
ERROR: relation "fix_idx_names.partition_table_with_very_long_na_another_col_p_dd884a3b_910031" does not exist
|
||||
CONTEXT: while executing command on localhost:xxxxx
|
||||
-- we currently can't drop an index on a detached partition
|
||||
-- https://github.com/citusdata/citus/issues/5138
|
||||
ALTER TABLE dist_partitioned_table DETACH PARTITION p;
|
||||
DROP INDEX p_another_col_partition_col_idx;
|
||||
ERROR: index "p_another_col_partition_col_idx_910032" does not exist
|
||||
CONTEXT: while executing command on localhost:xxxxx
|
||||
-- let's reattach and retry after fixing index names
|
||||
ALTER TABLE dist_partitioned_table ATTACH PARTITION p FOR VALUES FROM ('2019-01-01') TO ('2020-01-01');
|
||||
\c - - - :worker_1_port
|
||||
-- check the current broken index names
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table_910030 | short_910030
|
||||
p_910032 | p_910032_another_col_partition_col_idx
|
||||
partition_table_with_very_long_name_910031 | partition_table_with_very_long_na_another_col_partition_col_idx
|
||||
(3 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
SET search_path TO fix_idx_names, public;
|
||||
-- fix index names
|
||||
SELECT fix_all_partition_shard_index_names();
|
||||
fix_all_partition_shard_index_names
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
-- check the fixed index names
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table_910030 | short_910030
|
||||
p_910032 | p_another_col_partition_col_idx_910032
|
||||
partition_table_with_very_long_name_910031 | partition_table_with_very_long_na_another_col_p_dd884a3b_910031
|
||||
(3 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
SET search_path TO fix_idx_names, public;
|
||||
-- should now work
|
||||
ALTER INDEX partition_table_with_very_long_na_another_col_partition_col_idx RENAME TO partition_table_with_very_long_name_idx;
|
||||
-- now we can drop the index on the detached partition
|
||||
ALTER TABLE dist_partitioned_table DETACH PARTITION p;
|
||||
DROP INDEX p_another_col_partition_col_idx;
|
||||
\c - - - :worker_1_port
|
||||
-- check that indexes have been renamed
|
||||
-- and that index on p has been dropped (it won't appear)
|
||||
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
|
||||
tablename | indexname
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table_910030 | short_910030
|
||||
partition_table_with_very_long_name_910031 | partition_table_with_very_long_name_idx_910031
|
||||
(2 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
SET search_path TO fix_idx_names, public;
|
||||
DROP SCHEMA fix_idx_names CASCADE;
|
||||
NOTICE: drop cascades to 5 other objects
|
||||
DETAIL: drop cascades to table not_partitioned
|
||||
drop cascades to table not_distributed
|
||||
drop cascades to table fk_table
|
||||
drop cascades to table dist_partitioned_table
|
||||
drop cascades to table p
|
||||
SELECT run_command_on_workers($$ DROP SCHEMA IF EXISTS fix_idx_names CASCADE $$);
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"DROP SCHEMA")
|
||||
(localhost,57638,t,"DROP SCHEMA")
|
||||
(2 rows)
|
||||
|
|
@ -7,6 +7,7 @@
|
|||
-- CREATE TEST TABLES
|
||||
--
|
||||
CREATE SCHEMA multi_index_statements;
|
||||
CREATE SCHEMA multi_index_statements_2;
|
||||
SET search_path TO multi_index_statements;
|
||||
SET citus.next_shard_id TO 102080;
|
||||
CREATE TABLE index_test_range(a int, b int, c int);
|
||||
|
@ -75,6 +76,34 @@ CREATE UNIQUE INDEX index_test_hash_index_a_b_partial ON index_test_hash(a,b) WH
|
|||
CREATE UNIQUE INDEX index_test_range_index_a_b_partial ON index_test_range(a,b) WHERE c IS NOT NULL;
|
||||
CREATE UNIQUE INDEX index_test_hash_index_a_b_c ON index_test_hash(a) INCLUDE (b,c);
|
||||
RESET client_min_messages;
|
||||
-- Verify that we can create expression indexes and that they are robust to different schemas
|
||||
CREATE OR REPLACE FUNCTION value_plus_one(a int)
|
||||
RETURNS int IMMUTABLE AS $$
|
||||
BEGIN
|
||||
RETURN a + 1;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
SELECT create_distributed_function('value_plus_one(int)');
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE OR REPLACE FUNCTION multi_index_statements_2.value_plus_one(a int)
|
||||
RETURNS int IMMUTABLE AS $$
|
||||
BEGIN
|
||||
RETURN a + 1;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
SELECT create_distributed_function('multi_index_statements_2.value_plus_one(int)');
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE INDEX ON index_test_hash ((value_plus_one(b)));
|
||||
CREATE INDEX ON index_test_hash ((multi_index_statements.value_plus_one(b)));
|
||||
CREATE INDEX ON index_test_hash ((multi_index_statements_2.value_plus_one(b)));
|
||||
-- Verify that we handle if not exists statements correctly
|
||||
CREATE INDEX lineitem_orderkey_index on public.lineitem(l_orderkey);
|
||||
ERROR: relation "lineitem_orderkey_index" already exists
|
||||
|
@ -104,6 +133,9 @@ DROP TABLE local_table;
|
|||
SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname;
|
||||
schemaname | tablename | indexname | tablespace | indexdef
|
||||
---------------------------------------------------------------------
|
||||
multi_index_statements | index_test_hash | index_test_hash_expr_idx | | CREATE INDEX index_test_hash_expr_idx ON multi_index_statements.index_test_hash USING btree (value_plus_one(b))
|
||||
multi_index_statements | index_test_hash | index_test_hash_expr_idx1 | | CREATE INDEX index_test_hash_expr_idx1 ON multi_index_statements.index_test_hash USING btree (value_plus_one(b))
|
||||
multi_index_statements | index_test_hash | index_test_hash_expr_idx2 | | CREATE INDEX index_test_hash_expr_idx2 ON multi_index_statements.index_test_hash USING btree (multi_index_statements_2.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash | index_test_hash_index_a | | CREATE UNIQUE INDEX index_test_hash_index_a ON multi_index_statements.index_test_hash USING btree (a)
|
||||
multi_index_statements | index_test_hash | index_test_hash_index_a_b | | CREATE UNIQUE INDEX index_test_hash_index_a_b ON multi_index_statements.index_test_hash USING btree (a, b)
|
||||
multi_index_statements | index_test_hash | index_test_hash_index_a_b_c | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c ON multi_index_statements.index_test_hash USING btree (a) INCLUDE (b, c)
|
||||
|
@ -120,7 +152,7 @@ SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_t
|
|||
public | lineitem | lineitem_partkey_desc_index | | CREATE INDEX lineitem_partkey_desc_index ON public.lineitem USING btree (l_partkey DESC)
|
||||
public | lineitem | lineitem_pkey | | CREATE UNIQUE INDEX lineitem_pkey ON public.lineitem USING btree (l_orderkey, l_linenumber)
|
||||
public | lineitem | lineitem_time_index | | CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate)
|
||||
-(16 rows)
+(19 rows)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1);
|
||||
|
@ -132,7 +164,7 @@ SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class
|
|||
SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash%';
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
-    32
+    56
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_range%';
|
||||
|
@ -186,6 +218,9 @@ SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_t
|
|||
schemaname | tablename | indexname | tablespace | indexdef
|
||||
---------------------------------------------------------------------
|
||||
multi_index_statements | index_test_hash | index_test_hash_a_idx | | CREATE UNIQUE INDEX index_test_hash_a_idx ON multi_index_statements.index_test_hash USING btree (a)
|
||||
multi_index_statements | index_test_hash | index_test_hash_expr_idx | | CREATE INDEX index_test_hash_expr_idx ON multi_index_statements.index_test_hash USING btree (value_plus_one(b))
|
||||
multi_index_statements | index_test_hash | index_test_hash_expr_idx1 | | CREATE INDEX index_test_hash_expr_idx1 ON multi_index_statements.index_test_hash USING btree (value_plus_one(b))
|
||||
multi_index_statements | index_test_hash | index_test_hash_expr_idx2 | | CREATE INDEX index_test_hash_expr_idx2 ON multi_index_statements.index_test_hash USING btree (multi_index_statements_2.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash | index_test_hash_index_a | | CREATE UNIQUE INDEX index_test_hash_index_a ON multi_index_statements.index_test_hash USING btree (a)
|
||||
multi_index_statements | index_test_hash | index_test_hash_index_a_b | | CREATE UNIQUE INDEX index_test_hash_index_a_b ON multi_index_statements.index_test_hash USING btree (a, b)
|
||||
multi_index_statements | index_test_hash | index_test_hash_index_a_b_c | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c ON multi_index_statements.index_test_hash USING btree (a) INCLUDE (b, c)
|
||||
|
@ -204,7 +239,7 @@ SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_t
|
|||
public | lineitem | lineitem_partkey_desc_index | | CREATE INDEX lineitem_partkey_desc_index ON public.lineitem USING btree (l_partkey DESC)
|
||||
public | lineitem | lineitem_pkey | | CREATE UNIQUE INDEX lineitem_pkey ON public.lineitem USING btree (l_orderkey, l_linenumber)
|
||||
public | lineitem | lineitem_time_index | | CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate)
|
||||
-(19 rows)
+(22 rows)
|
||||
|
||||
--
|
||||
-- REINDEX
|
||||
|
@ -258,11 +293,14 @@ SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (
|
|||
(2 rows)
|
||||
|
||||
SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname;
|
||||
schemaname | tablename | indexname | tablespace | indexdef
|
||||
---------------------------------------------------------------------
|
||||
multi_index_statements | index_test_hash | index_test_hash_a_idx | | CREATE UNIQUE INDEX index_test_hash_a_idx ON multi_index_statements.index_test_hash USING btree (a)
|
||||
multi_index_statements | index_test_hash | index_test_hash_expr_idx | | CREATE INDEX index_test_hash_expr_idx ON multi_index_statements.index_test_hash USING btree (value_plus_one(b))
|
||||
multi_index_statements | index_test_hash | index_test_hash_expr_idx1 | | CREATE INDEX index_test_hash_expr_idx1 ON multi_index_statements.index_test_hash USING btree (value_plus_one(b))
|
||||
multi_index_statements | index_test_hash | index_test_hash_expr_idx2 | | CREATE INDEX index_test_hash_expr_idx2 ON multi_index_statements.index_test_hash USING btree (multi_index_statements_2.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash | index_test_hash_index_a_b_c | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c ON multi_index_statements.index_test_hash USING btree (a) INCLUDE (b, c)
|
||||
-(2 rows)
+(5 rows)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%' ORDER BY 1,2;
|
||||
|
@ -273,7 +311,7 @@ SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (
|
|||
(2 rows)
|
||||
|
||||
SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname;
|
||||
schemaname | tablename | indexname | tablespace | indexdef
|
||||
---------------------------------------------------------------------
|
||||
multi_index_statements | index_test_hash_102082 | index_test_hash_a_idx_102082 | | CREATE UNIQUE INDEX index_test_hash_a_idx_102082 ON multi_index_statements.index_test_hash_102082 USING btree (a)
|
||||
multi_index_statements | index_test_hash_102083 | index_test_hash_a_idx_102083 | | CREATE UNIQUE INDEX index_test_hash_a_idx_102083 ON multi_index_statements.index_test_hash_102083 USING btree (a)
|
||||
|
@ -283,6 +321,30 @@ SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname;
|
|||
multi_index_statements | index_test_hash_102087 | index_test_hash_a_idx_102087 | | CREATE UNIQUE INDEX index_test_hash_a_idx_102087 ON multi_index_statements.index_test_hash_102087 USING btree (a)
|
||||
multi_index_statements | index_test_hash_102088 | index_test_hash_a_idx_102088 | | CREATE UNIQUE INDEX index_test_hash_a_idx_102088 ON multi_index_statements.index_test_hash_102088 USING btree (a)
|
||||
multi_index_statements | index_test_hash_102089 | index_test_hash_a_idx_102089 | | CREATE UNIQUE INDEX index_test_hash_a_idx_102089 ON multi_index_statements.index_test_hash_102089 USING btree (a)
|
||||
multi_index_statements | index_test_hash_102082 | index_test_hash_expr_idx1_102082 | | CREATE INDEX index_test_hash_expr_idx1_102082 ON multi_index_statements.index_test_hash_102082 USING btree (multi_index_statements.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102083 | index_test_hash_expr_idx1_102083 | | CREATE INDEX index_test_hash_expr_idx1_102083 ON multi_index_statements.index_test_hash_102083 USING btree (multi_index_statements.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102084 | index_test_hash_expr_idx1_102084 | | CREATE INDEX index_test_hash_expr_idx1_102084 ON multi_index_statements.index_test_hash_102084 USING btree (multi_index_statements.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102085 | index_test_hash_expr_idx1_102085 | | CREATE INDEX index_test_hash_expr_idx1_102085 ON multi_index_statements.index_test_hash_102085 USING btree (multi_index_statements.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102086 | index_test_hash_expr_idx1_102086 | | CREATE INDEX index_test_hash_expr_idx1_102086 ON multi_index_statements.index_test_hash_102086 USING btree (multi_index_statements.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102087 | index_test_hash_expr_idx1_102087 | | CREATE INDEX index_test_hash_expr_idx1_102087 ON multi_index_statements.index_test_hash_102087 USING btree (multi_index_statements.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102088 | index_test_hash_expr_idx1_102088 | | CREATE INDEX index_test_hash_expr_idx1_102088 ON multi_index_statements.index_test_hash_102088 USING btree (multi_index_statements.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102089 | index_test_hash_expr_idx1_102089 | | CREATE INDEX index_test_hash_expr_idx1_102089 ON multi_index_statements.index_test_hash_102089 USING btree (multi_index_statements.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102082 | index_test_hash_expr_idx2_102082 | | CREATE INDEX index_test_hash_expr_idx2_102082 ON multi_index_statements.index_test_hash_102082 USING btree (multi_index_statements_2.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102083 | index_test_hash_expr_idx2_102083 | | CREATE INDEX index_test_hash_expr_idx2_102083 ON multi_index_statements.index_test_hash_102083 USING btree (multi_index_statements_2.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102084 | index_test_hash_expr_idx2_102084 | | CREATE INDEX index_test_hash_expr_idx2_102084 ON multi_index_statements.index_test_hash_102084 USING btree (multi_index_statements_2.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102085 | index_test_hash_expr_idx2_102085 | | CREATE INDEX index_test_hash_expr_idx2_102085 ON multi_index_statements.index_test_hash_102085 USING btree (multi_index_statements_2.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102086 | index_test_hash_expr_idx2_102086 | | CREATE INDEX index_test_hash_expr_idx2_102086 ON multi_index_statements.index_test_hash_102086 USING btree (multi_index_statements_2.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102087 | index_test_hash_expr_idx2_102087 | | CREATE INDEX index_test_hash_expr_idx2_102087 ON multi_index_statements.index_test_hash_102087 USING btree (multi_index_statements_2.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102088 | index_test_hash_expr_idx2_102088 | | CREATE INDEX index_test_hash_expr_idx2_102088 ON multi_index_statements.index_test_hash_102088 USING btree (multi_index_statements_2.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102089 | index_test_hash_expr_idx2_102089 | | CREATE INDEX index_test_hash_expr_idx2_102089 ON multi_index_statements.index_test_hash_102089 USING btree (multi_index_statements_2.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102082 | index_test_hash_expr_idx_102082 | | CREATE INDEX index_test_hash_expr_idx_102082 ON multi_index_statements.index_test_hash_102082 USING btree (multi_index_statements.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102083 | index_test_hash_expr_idx_102083 | | CREATE INDEX index_test_hash_expr_idx_102083 ON multi_index_statements.index_test_hash_102083 USING btree (multi_index_statements.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102084 | index_test_hash_expr_idx_102084 | | CREATE INDEX index_test_hash_expr_idx_102084 ON multi_index_statements.index_test_hash_102084 USING btree (multi_index_statements.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102085 | index_test_hash_expr_idx_102085 | | CREATE INDEX index_test_hash_expr_idx_102085 ON multi_index_statements.index_test_hash_102085 USING btree (multi_index_statements.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102086 | index_test_hash_expr_idx_102086 | | CREATE INDEX index_test_hash_expr_idx_102086 ON multi_index_statements.index_test_hash_102086 USING btree (multi_index_statements.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102087 | index_test_hash_expr_idx_102087 | | CREATE INDEX index_test_hash_expr_idx_102087 ON multi_index_statements.index_test_hash_102087 USING btree (multi_index_statements.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102088 | index_test_hash_expr_idx_102088 | | CREATE INDEX index_test_hash_expr_idx_102088 ON multi_index_statements.index_test_hash_102088 USING btree (multi_index_statements.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102089 | index_test_hash_expr_idx_102089 | | CREATE INDEX index_test_hash_expr_idx_102089 ON multi_index_statements.index_test_hash_102089 USING btree (multi_index_statements.value_plus_one(b))
|
||||
multi_index_statements | index_test_hash_102082 | index_test_hash_index_a_b_c_102082 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102082 ON multi_index_statements.index_test_hash_102082 USING btree (a) INCLUDE (b, c)
|
||||
multi_index_statements | index_test_hash_102083 | index_test_hash_index_a_b_c_102083 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102083 ON multi_index_statements.index_test_hash_102083 USING btree (a) INCLUDE (b, c)
|
||||
multi_index_statements | index_test_hash_102084 | index_test_hash_index_a_b_c_102084 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102084 ON multi_index_statements.index_test_hash_102084 USING btree (a) INCLUDE (b, c)
|
||||
|
@ -291,7 +353,7 @@ SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname;
|
|||
multi_index_statements | index_test_hash_102087 | index_test_hash_index_a_b_c_102087 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102087 ON multi_index_statements.index_test_hash_102087 USING btree (a) INCLUDE (b, c)
|
||||
multi_index_statements | index_test_hash_102088 | index_test_hash_index_a_b_c_102088 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102088 ON multi_index_statements.index_test_hash_102088 USING btree (a) INCLUDE (b, c)
|
||||
multi_index_statements | index_test_hash_102089 | index_test_hash_index_a_b_c_102089 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102089 ON multi_index_statements.index_test_hash_102089 USING btree (a) INCLUDE (b, c)
|
||||
-(16 rows)
+(40 rows)
|
||||
|
||||
-- create index that will conflict with master operations
|
||||
CREATE INDEX CONCURRENTLY ith_b_idx_102089 ON multi_index_statements.index_test_hash_102089(b);
|
||||
|
@ -614,3 +676,4 @@ SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::
|
|||
-- final clean up
|
||||
DROP INDEX CONCURRENTLY IF EXISTS ith_b_idx;
|
||||
DROP SCHEMA multi_index_statements CASCADE;
|
||||
DROP SCHEMA multi_index_statements_2 CASCADE;
|
||||
|
|
|
@ -265,19 +265,26 @@ SELECT * FROM columnar.stripe;
|
|||
|
||||
-- alter a columnar setting
|
||||
SET columnar.chunk_group_row_limit = 1050;
|
||||
DO $proc$
|
||||
BEGIN
|
||||
IF substring(current_Setting('server_version'), '\d+')::int >= 12 THEN
|
||||
EXECUTE $$
|
||||
-- create columnar table
|
||||
CREATE TABLE columnar_table (a int) USING columnar;
|
||||
-- alter a columnar table that is created by that unprivileged user
|
||||
SELECT alter_columnar_table_set('columnar_table', chunk_group_row_limit => 2000);
|
||||
-- and drop it
|
||||
DROP TABLE columnar_table;
|
||||
$$;
|
||||
END IF;
|
||||
END$proc$;
|
||||
-- create columnar table
|
||||
CREATE TABLE columnar_table (a int) USING columnar;
|
||||
-- alter a columnar table that is created by that unprivileged user
|
||||
SELECT alter_columnar_table_set('columnar_table', chunk_group_row_limit => 2000);
|
||||
alter_columnar_table_set
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
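-- For reference (a sketch, not part of the original test; assumes the
-- columnar.options catalog that alter_columnar_table_set writes to):
SELECT chunk_group_row_limit
FROM columnar.options
WHERE regclass = 'columnar_table'::regclass;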
||||
-- insert some data and read
|
||||
INSERT INTO columnar_table VALUES (1), (1);
|
||||
SELECT * FROM columnar_table;
|
||||
a
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
1
|
||||
(2 rows)
|
||||
|
||||
-- and drop it
|
||||
DROP TABLE columnar_table;
|
||||
-- cannot modify columnar metadata table as unprivileged user
|
||||
INSERT INTO columnar.stripe VALUES(99);
|
||||
ERROR: permission denied for table stripe
|
||||
|
@ -286,6 +293,9 @@ ERROR: permission denied for table stripe
|
|||
-- (since citus extension has a dependency to it)
|
||||
DROP TABLE columnar.chunk;
|
||||
ERROR: must be owner of table chunk
|
||||
-- cannot read columnar.chunk since it could expose chunk min/max values
|
||||
SELECT * FROM columnar.chunk;
|
||||
ERROR: permission denied for table chunk
|
||||
-- test whether a read-only user can read from citus_tables view
|
||||
SELECT distribution_column FROM citus_tables WHERE table_name = 'test'::regclass;
|
||||
distribution_column
|
||||
|
|
|
@ -1028,6 +1028,46 @@ CREATE TABLE partitioned_users_table_2009 PARTITION OF partitioned_users_table F
|
|||
CREATE TABLE partitioned_events_table_2009 PARTITION OF partitioned_events_table FOR VALUES FROM ('2017-01-01') TO ('2018-01-01');
|
||||
INSERT INTO partitioned_events_table SELECT * FROM events_table;
|
||||
INSERT INTO partitioned_users_table_2009 SELECT * FROM users_table;
|
||||
-- test that distributed partitions are indeed colocated with the parent table
|
||||
CREATE TABLE sensors(measureid integer, eventdatetime date, measure_data jsonb, PRIMARY KEY (measureid, eventdatetime, measure_data))
|
||||
PARTITION BY RANGE(eventdatetime);
|
||||
CREATE TABLE sensors_old PARTITION OF sensors FOR VALUES FROM ('2000-01-01') TO ('2020-01-01');
|
||||
CREATE TABLE sensors_2020_01_01 PARTITION OF sensors FOR VALUES FROM ('2020-01-01') TO ('2020-02-01');
|
||||
CREATE TABLE sensors_new PARTITION OF sensors DEFAULT;
|
||||
SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(DISTINCT colocationid) FROM pg_dist_partition
|
||||
WHERE logicalrelid IN ('sensors'::regclass, 'sensors_old'::regclass, 'sensors_2020_01_01'::regclass, 'sensors_new'::regclass);
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE local_sensors(measureid integer, eventdatetime date, measure_data jsonb, PRIMARY KEY (measureid, eventdatetime, measure_data))
|
||||
PARTITION BY RANGE(eventdatetime);
|
||||
CREATE TABLE local_sensors_old PARTITION OF local_sensors FOR VALUES FROM ('2000-01-01') TO ('2020-01-01');
|
||||
CREATE TABLE local_sensors_2020_01_01 PARTITION OF local_sensors FOR VALUES FROM ('2020-01-01') TO ('2020-02-01');
|
||||
CREATE TABLE local_sensors_new PARTITION OF local_sensors DEFAULT;
|
||||
SELECT create_distributed_table('local_sensors', 'measureid', colocate_with:='sensors');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(DISTINCT colocationid) FROM pg_dist_partition
|
||||
WHERE logicalrelid IN ('sensors'::regclass, 'sensors_old'::regclass, 'sensors_2020_01_01'::regclass, 'sensors_new'::regclass,
|
||||
'local_sensors'::regclass, 'local_sensors_old'::regclass, 'local_sensors_2020_01_01'::regclass, 'local_sensors_new'::regclass);
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
DROP TABLE sensors;
|
||||
DROP TABLE local_sensors;
|
||||
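The two create_distributed_table() calls above exercise the common colocate_with values; a sketch of the options, plus one way to inspect the resulting group (column names as in the citus_tables view used earlier in these diffs):

    SELECT create_distributed_table('sensors', 'measureid', colocate_with := 'none');          -- start a new group
    SELECT create_distributed_table('local_sensors', 'measureid', colocate_with := 'sensors'); -- share sensors' group
    -- 'default' (the default) joins a group matching shard count and distribution column type
    SELECT table_name, colocation_id FROM citus_tables;
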
--
-- Complex JOINs, subqueries, UNIONs etc...
--

@ -1298,7 +1338,7 @@ INSERT INTO multi_column_partitioning VALUES(1, 1);
INSERT INTO multi_column_partitioning_0_0_10_0 VALUES(5, -5);
-- test INSERT to multi-column partitioned table where no suitable partition exists
INSERT INTO multi_column_partitioning VALUES(10, 1);
ERROR: no partition of relation "multi_column_partitioning_1660101" found for row
ERROR: no partition of relation "multi_column_partitioning_1660133" found for row
DETAIL: Partition key of the failing row contains (c1, c2) = (10, 1).
CONTEXT: while executing command on localhost:xxxxx
-- test with MINVALUE/MAXVALUE

@ -1308,7 +1348,7 @@ INSERT INTO multi_column_partitioning VALUES(11, -11);
INSERT INTO multi_column_partitioning_10_max_20_min VALUES(19, -19);
-- test INSERT to multi-column partitioned table where no suitable partition exists
INSERT INTO multi_column_partitioning VALUES(20, -20);
ERROR: no partition of relation "multi_column_partitioning_1660101" found for row
ERROR: no partition of relation "multi_column_partitioning_1660133" found for row
DETAIL: Partition key of the failing row contains (c1, c2) = (20, -20).
CONTEXT: while executing command on localhost:xxxxx
-- see data is loaded to multi-column partitioned table

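Multi-column range bounds compare lexicographically, which is why (10, 1) and (20, -20) fall between the partitions defined around these hunks; a sketch of a bound that would accept both rows (the partition name is illustrative):

    CREATE TABLE multi_column_partitioning_10_20
        PARTITION OF multi_column_partitioning
        FOR VALUES FROM (10, MINVALUE) TO (20, MAXVALUE);
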
@ -3872,11 +3912,84 @@ CALL drop_old_time_partitions('non_partitioned_table', now());
ERROR: non_partitioned_table is not partitioned
CONTEXT: PL/pgSQL function drop_old_time_partitions(regclass,timestamp with time zone) line XX at RAISE
DROP TABLE non_partitioned_table;
-- https://github.com/citusdata/citus/issues/4962
SET citus.shard_replication_factor TO 1;
CREATE TABLE part_table_with_very_long_name (
    dist_col integer,
    long_named_integer_col integer,
    long_named_part_col timestamp
) PARTITION BY RANGE (long_named_part_col);
CREATE TABLE part_table_with_long_long_long_long_name
    PARTITION OF part_table_with_very_long_name
    FOR VALUES FROM ('2010-01-01') TO ('2015-01-01');
SELECT create_distributed_table('part_table_with_very_long_name', 'dist_col');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

CREATE INDEX ON part_table_with_very_long_name
    USING btree (long_named_integer_col, long_named_part_col);
-- shouldn't work
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
ERROR: relation "part_table_with_long_long_lon_long_named_integer_col_long_n_idx" already exists
CONTEXT: while executing command on localhost:xxxxx
\c - - - :worker_1_port
SELECT tablename, indexname FROM pg_indexes
WHERE schemaname = 'partitioning_schema' AND tablename ilike '%part_table_with_%' ORDER BY 1, 2;
 tablename | indexname
---------------------------------------------------------------------
 part_table_with_long_long_long_long_name_361172 | part_table_with_long_long_lon_long_named_integer_col_long_n_idx
 part_table_with_long_long_long_long_name_361174 | part_table_with_long_long_lon_long_named_integer_col_long__idx1
 part_table_with_very_long_name_361168 | part_table_with_very_long_nam_long_named_intege_73d4b078_361168
 part_table_with_very_long_name_361170 | part_table_with_very_long_nam_long_named_intege_73d4b078_361170
(4 rows)

\c - - - :master_port
SET citus.shard_replication_factor TO 1;
SET search_path = partitioning_schema;
-- fix problematic table
SELECT fix_partition_shard_index_names('part_table_with_very_long_name'::regclass);
 fix_partition_shard_index_names
---------------------------------------------------------------------

(1 row)

-- should work
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
 start_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)

\c - - - :worker_1_port
-- check that indexes are renamed
SELECT tablename, indexname FROM pg_indexes
WHERE schemaname = 'partitioning_schema' AND tablename ilike '%part_table_with_%' ORDER BY 1, 2;
 tablename | indexname
---------------------------------------------------------------------
 part_table_with_long_long_long_long_name | part_table_with_long_long_lon_long_named_integer_col_long_n_idx
 part_table_with_long_long_long_long_name_361172 | part_table_with_long_long_lon_long_named_intege_f9175544_361172
 part_table_with_long_long_long_long_name_361174 | part_table_with_long_long_lon_long_named_intege_f9175544_361174
 part_table_with_very_long_name | part_table_with_very_long_nam_long_named_integer_col_long_n_idx
 part_table_with_very_long_name_361168 | part_table_with_very_long_nam_long_named_intege_73d4b078_361168
 part_table_with_very_long_name_361170 | part_table_with_very_long_nam_long_named_intege_73d4b078_361170
(6 rows)

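fix_partition_shard_index_names() repairs one partitioned table at a time; Citus 10.2 also ships a wrapper that walks every distributed partitioned table (assuming a 10.2+ coordinator):

    SELECT fix_all_partition_shard_index_names();
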
\c - - - :master_port
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
NOTICE: dropping metadata on the node (localhost,57637)
 stop_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)

DROP SCHEMA partitioning_schema CASCADE;
NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to table "schema-test"
drop cascades to table another_distributed_table
drop cascades to table distributed_parent_table
NOTICE: drop cascades to 4 other objects
DETAIL: drop cascades to table partitioning_schema."schema-test"
drop cascades to table partitioning_schema.another_distributed_table
drop cascades to table partitioning_schema.distributed_parent_table
drop cascades to table partitioning_schema.part_table_with_very_long_name
RESET search_path;
DROP TABLE IF EXISTS
    partitioning_hash_test,

@ -25,18 +25,177 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
SET client_min_messages TO ERROR;
SET citus.enable_ddl_propagation TO OFF;
CREATE USER regular_mx_user WITH LOGIN;
SELECT 1 FROM run_command_on_workers($$CREATE USER regular_mx_user WITH LOGIN;$$);
 ?column?
---------------------------------------------------------------------
 1
 1
(2 rows)

GRANT ALL ON SCHEMA "Mx Regular User" TO regular_mx_user;
-- create another table owned by the super user (e.g., current user of the session)
-- and GRANT access to the user
CREATE SCHEMA "Mx Super User";
SELECT 1 FROM run_command_on_workers($$CREATE SCHEMA "Mx Super User";$$);
 ?column?
---------------------------------------------------------------------
 1
 1
(2 rows)

SET citus.next_shard_id TO 2980000;
SET search_path TO "Mx Super User";
CREATE TABLE super_user_owned_regular_user_granted (a int PRIMARY KEY, b int);
SELECT create_reference_table ('"Mx Super User".super_user_owned_regular_user_granted');
 create_reference_table
---------------------------------------------------------------------

(1 row)

-- show that this table is owned by super user
SELECT
    rolsuper
FROM
    pg_roles
WHERE oid
    IN
    (SELECT relowner FROM pg_class WHERE oid = '"Mx Super User".super_user_owned_regular_user_granted'::regclass);
 rolsuper
---------------------------------------------------------------------
 t
(1 row)

-- make sure that granting produces the same output for both community and enterprise
SET client_min_messages TO ERROR;
GRANT USAGE ON SCHEMA "Mx Super User" TO regular_mx_user;
GRANT INSERT ON TABLE super_user_owned_regular_user_granted TO regular_mx_user;
SELECT 1 FROM run_command_on_workers($$GRANT USAGE ON SCHEMA "Mx Super User" TO regular_mx_user;$$);
 ?column?
---------------------------------------------------------------------
 1
 1
(2 rows)

SELECT 1 FROM run_command_on_workers($$GRANT INSERT ON TABLE "Mx Super User".super_user_owned_regular_user_granted TO regular_mx_user;$$);
 ?column?
---------------------------------------------------------------------
 1
 1
(2 rows)

SELECT 1 FROM run_command_on_placements('super_user_owned_regular_user_granted', $$GRANT INSERT ON TABLE %s TO regular_mx_user;$$);
 ?column?
---------------------------------------------------------------------
 1
 1
 1
(3 rows)

-- now that the GRANT is given, the regular user should be able to
-- INSERT into the table
\c - regular_mx_user - :master_port
SET search_path TO "Mx Super User";
COPY super_user_owned_regular_user_granted FROM STDIN WITH CSV;
-- however, this specific user doesn't have UPDATE/UPSERT/DELETE/TRUNCATE
-- permission, so should fail
INSERT INTO super_user_owned_regular_user_granted VALUES (1, 1), (2, 1) ON CONFLICT (a) DO NOTHING;
ERROR: permission denied for table super_user_owned_regular_user_granted
TRUNCATE super_user_owned_regular_user_granted;
ERROR: permission denied for table super_user_owned_regular_user_granted
CONTEXT: while executing command on localhost:xxxxx
DELETE FROM super_user_owned_regular_user_granted;
ERROR: permission denied for table super_user_owned_regular_user_granted
UPDATE super_user_owned_regular_user_granted SET a = 1;
ERROR: permission denied for table super_user_owned_regular_user_granted
-- AccessExclusiveLock == 8 is strictly forbidden for any user
SELECT lock_shard_resources(8, ARRAY[2980000]);
ERROR: unsupported lockmode 8
-- ExclusiveLock == 7 is forbidden for this user
-- as the user only has INSERT rights
SELECT lock_shard_resources(7, ARRAY[2980000]);
ERROR: permission denied for table super_user_owned_regular_user_granted
-- but should be able to acquire RowExclusiveLock
BEGIN;
SELECT count(*) > 0 as acquired_lock from pg_locks where pid = pg_backend_pid() AND locktype = 'advisory';
 acquired_lock
---------------------------------------------------------------------
 f
(1 row)

SELECT lock_shard_resources(3, ARRAY[2980000]);
 lock_shard_resources
---------------------------------------------------------------------

(1 row)

SELECT count(*) > 0 as acquired_lock from pg_locks where pid = pg_backend_pid() AND locktype = 'advisory';
 acquired_lock
---------------------------------------------------------------------
 t
(1 row)

COMMIT;
-- acquiring locks on non-existing shards is not meaningful, but we still do not throw an error
-- as we might be in the middle of metadata syncing; we just do not acquire the locks
BEGIN;
SELECT count(*) > 0 as acquired_lock from pg_locks where pid = pg_backend_pid() AND locktype = 'advisory';
 acquired_lock
---------------------------------------------------------------------
 f
(1 row)

SELECT lock_shard_resources(3, ARRAY[123456871]);
 lock_shard_resources
---------------------------------------------------------------------

(1 row)

SELECT count(*) > 0 as acquired_lock from pg_locks where pid = pg_backend_pid() AND locktype = 'advisory';
 acquired_lock
---------------------------------------------------------------------
 f
(1 row)

COMMIT;
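lock_shard_resources(lockmode, shard_ids) takes PostgreSQL's numeric lock levels, which is how the 3, 7 and 8 used in this test map to names:

    -- 1 AccessShareLock            5 ShareLock
    -- 2 RowShareLock               6 ShareRowExclusiveLock
    -- 3 RowExclusiveLock           7 ExclusiveLock
    -- 4 ShareUpdateExclusiveLock   8 AccessExclusiveLock (always rejected above)
    SELECT lock_shard_resources(3, ARRAY[2980000]);  -- RowExclusiveLock; INSERT rights suffice
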
\c - postgres - :master_port;
SET search_path TO "Mx Super User";
SET client_min_messages TO ERROR;
-- now allow users to do UPDATE on the tables
GRANT UPDATE ON TABLE super_user_owned_regular_user_granted TO regular_mx_user;
SELECT 1 FROM run_command_on_workers($$GRANT UPDATE ON TABLE "Mx Super User".super_user_owned_regular_user_granted TO regular_mx_user;$$);
 ?column?
---------------------------------------------------------------------
 1
 1
(2 rows)

SELECT 1 FROM run_command_on_placements('super_user_owned_regular_user_granted', $$GRANT UPDATE ON TABLE %s TO regular_mx_user;$$);
 ?column?
---------------------------------------------------------------------
 1
 1
 1
(3 rows)

\c - regular_mx_user - :master_port
SET search_path TO "Mx Super User";
UPDATE super_user_owned_regular_user_granted SET b = 1;
-- AccessExclusiveLock == 8 is strictly forbidden for any user
-- even after UPDATE is allowed
SELECT lock_shard_resources(8, ARRAY[2980000]);
ERROR: unsupported lockmode 8
\c - postgres - :master_port;
SET client_min_messages TO ERROR;
DROP SCHEMA "Mx Super User" CASCADE;
\c - postgres - :worker_1_port;
SET client_min_messages TO ERROR;
SET citus.enable_ddl_propagation TO OFF;
CREATE SCHEMA "Mx Regular User";
CREATE USER regular_mx_user WITH LOGIN;
GRANT ALL ON SCHEMA "Mx Regular User" TO regular_mx_user;
\c - postgres - :worker_2_port;
SET client_min_messages TO ERROR;
SET citus.enable_ddl_propagation TO OFF;
CREATE SCHEMA "Mx Regular User";
CREATE USER regular_mx_user WITH LOGIN;
GRANT ALL ON SCHEMA "Mx Regular User" TO regular_mx_user;
-- now connect with that user
\c - regular_mx_user - :master_port

@ -169,9 +169,8 @@ SELECT create_distributed_table('test_table', 'a');

(1 row)

-- we currently don't support this
-- operator class options are supported
CREATE INDEX test_table_index ON test_table USING gist (b tsvector_ops(siglen = 100));
ERROR: citus currently doesn't support operator class parameters in indexes
-- testing WAL
CREATE TABLE test_wal(a int, b int);
-- test WAL without ANALYZE, this should raise an error

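Operator class parameters such as siglen are plain PostgreSQL 13+ syntax; the comment pair above tracks Citus moving from rejecting them to propagating them. A local-table sketch of the same kind of index (table and index names are illustrative):

    CREATE TABLE plain_docs(a int, b tsvector);
    CREATE INDEX plain_docs_idx ON plain_docs USING gist (b tsvector_ops(siglen = 100));
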
@ -766,5 +766,54 @@ select * from nummultirange_test natural join nummultirange_test2 order by nmr;
 {[1.1,2.2)}
(7 rows)

-- verify that recreating distributed procedures with OUT param gets propagated to workers
CREATE OR REPLACE PROCEDURE proc_with_out_param(IN parm1 date, OUT parm2 int)
LANGUAGE SQL
AS $$
SELECT 1;
$$;
-- this should error out
SELECT create_distributed_function('proc_with_out_param(date,int)');
ERROR: function "proc_with_out_param(date,int)" does not exist
-- this should work
SELECT create_distributed_function('proc_with_out_param(date)');
 create_distributed_function
---------------------------------------------------------------------

(1 row)

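The error/success pair above shows the procedure being addressed by its input parameters only, while the PG14-style OUT parameter is returned from CALL; passing NULL as the OUT placeholder is the usual pattern:

    CALL proc_with_out_param('2021-01-01', NULL);  -- parm2 comes back in the result row
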
SET client_min_messages TO ERROR;
CREATE ROLE r1;
SELECT 1 FROM run_command_on_workers($$CREATE ROLE r1;$$);
 ?column?
---------------------------------------------------------------------
 1
 1
(2 rows)

GRANT EXECUTE ON PROCEDURE proc_with_out_param TO r1;
SELECT 1 FROM run_command_on_workers($$GRANT EXECUTE ON PROCEDURE proc_with_out_param TO r1;$$);
 ?column?
---------------------------------------------------------------------
 1
 1
(2 rows)

RESET client_min_messages;
CREATE OR REPLACE PROCEDURE proc_with_out_param(IN parm1 date, OUT parm2 int)
LANGUAGE SQL
AS $$
SELECT 2;
$$;
SELECT count(*) FROM
    (SELECT result FROM
     run_command_on_workers($$select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc, pg_proc.proowner) from pg_proc where proname = 'proc_with_out_param';$$)
     UNION select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc, pg_proc.proowner)::text from pg_proc where proname = 'proc_with_out_param')
    as test;
 count
---------------------------------------------------------------------
 1
(1 row)

set client_min_messages to error;
drop schema pg14 cascade;

@ -0,0 +1,119 @@
CREATE SCHEMA sequences_with_different_types;
SET search_path TO sequences_with_different_types;
SELECT start_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE noderole = 'primary';
 start_metadata_sync_to_node
---------------------------------------------------------------------


(2 rows)

CREATE TYPE two_big_ints AS (a bigint, b bigint);
-- by default, sequences get bigint type
CREATE SEQUENCE bigint_sequence_1;
CREATE SEQUENCE bigint_sequence_2 START 10000;
CREATE SEQUENCE bigint_sequence_3 INCREMENT 10;
CREATE SEQUENCE bigint_sequence_4 MINVALUE 1000000;
CREATE SEQUENCE bigint_sequence_5;
CREATE SEQUENCE bigint_sequence_8;
CREATE TABLE table_1
(
    user_id bigint,
    user_code_1 text DEFAULT (('CD'::text || lpad(nextval('bigint_sequence_1'::regclass)::text, 10, '0'::text))),
    user_code_2 text DEFAULT nextval('bigint_sequence_2'::regclass)::text,
    user_code_3 text DEFAULT (nextval('bigint_sequence_3'::regclass) + 1000)::text,
    user_code_4 float DEFAULT nextval('bigint_sequence_4'::regclass),
    user_code_5 two_big_ints DEFAULT (nextval('bigint_sequence_5'::regclass), nextval('bigint_sequence_5'::regclass)),
    user_code_8 jsonb DEFAULT to_jsonb('test'::text) || to_jsonb(nextval('bigint_sequence_8'::regclass))
);
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('table_1', 'user_id');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO table_1 VALUES (1, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT), (2, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT) RETURNING *;
 user_id | user_code_1 | user_code_2 | user_code_3 | user_code_4 | user_code_5 | user_code_8
---------------------------------------------------------------------
 1 | CD0000000001 | 10000 | 1001 | 1000000 | (1,2) | ["test", 1]
 2 | CD0000000002 | 10001 | 1011 | 1000001 | (3,4) | ["test", 2]
(2 rows)

\c - - - :worker_1_port
SET search_path TO sequences_with_different_types;
INSERT INTO table_1 VALUES (3, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT), (4, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT) RETURNING *;
 user_id | user_code_1 | user_code_2 | user_code_3 | user_code_4 | user_code_5 | user_code_8
---------------------------------------------------------------------
 3 | CD3940649673 | 3940649673949185 | 3940649673950185 | 3.94064967394918e+15 | (3940649673949185,3940649673949186) | ["test", 3940649673949185]
 4 | CD3940649673 | 3940649673949186 | 3940649673950195 | 3.94064967394919e+15 | (3940649673949187,3940649673949188) | ["test", 3940649673949186]
(2 rows)

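The jump from CD0000000002 to CD3940649673 is Citus's per-node sequence ranges: on metadata-synced workers a bigint sequence restarts at an offset whose high 16 bits encode the node group, leaving 48 bits for local values. The worker output above is consistent with node group 14:

    SELECT 14 * 281474976710656::bigint + 1;  -- 14 * 2^48 + 1 = 3940649673949185
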
\c - - - :master_port
SET search_path TO sequences_with_different_types;
CREATE SEQUENCE bigint_sequence_6;
CREATE TABLE table_2
(
    user_id bigint,
    user_code OID DEFAULT nextval('bigint_sequence_6'::regclass)
);
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('table_2', 'user_id');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

-- on the coordinator, the sequence starts from 0
INSERT INTO table_2 VALUES (1, DEFAULT) RETURNING *;
 user_id | user_code
---------------------------------------------------------------------
 1 | 1
(1 row)

\c - - - :worker_1_port
SET search_path TO sequences_with_different_types;
-- this fails because on the workers the start value of the sequence
-- is greater than the largest value of an oid
INSERT INTO table_2 VALUES (1, DEFAULT) RETURNING *;
ERROR: OID out of range
\c - - - :master_port
SET search_path TO sequences_with_different_types;
CREATE SEQUENCE bigint_sequence_7;
CREATE TABLE table_3
(
    user_id bigint,
    user_code boolean DEFAULT ((nextval('bigint_sequence_7'::regclass)%2)::int)::boolean
);
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('table_3', 'user_id');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO table_3 VALUES (1, DEFAULT), (2, DEFAULT) RETURNING *;
 user_id | user_code
---------------------------------------------------------------------
 1 | t
 2 | f
(2 rows)

\c - - - :worker_1_port
SET search_path TO sequences_with_different_types;
INSERT INTO table_3 VALUES (3, DEFAULT), (4, DEFAULT) RETURNING *;
 user_id | user_code
---------------------------------------------------------------------
 3 | t
 4 | f
(2 rows)

\c - - - :master_port
SET client_min_messages TO WARNING;
DROP SCHEMA sequences_with_different_types CASCADE;
SELECT stop_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE noderole = 'primary';
 stop_metadata_sync_to_node
---------------------------------------------------------------------


(2 rows)

@ -1,5 +1,5 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_eleven
SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven
\gset
\if :server_version_above_eleven
\else

@ -104,7 +104,7 @@ SELECT * FROM matview ORDER BY a;
SELECT * FROM columnar.options WHERE regclass = 'test_options_1'::regclass;
 regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression
---------------------------------------------------------------------
 test_options_1 | 1000 | 5000 | 3 | pglz
 test_options_1 | 1000 | 5000 | 3 | pglz
(1 row)

VACUUM VERBOSE test_options_1;

@ -124,7 +124,7 @@ SELECT count(*), sum(a), sum(b) FROM test_options_1;
SELECT * FROM columnar.options WHERE regclass = 'test_options_2'::regclass;
 regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression
---------------------------------------------------------------------
 test_options_2 | 2000 | 6000 | 13 | none
 test_options_2 | 2000 | 6000 | 13 | none
(1 row)

VACUUM VERBOSE test_options_2;

@ -141,3 +141,218 @@ SELECT count(*), sum(a), sum(b) FROM test_options_2;
 20000 | 100010000 | 65015
(1 row)

BEGIN;
INSERT INTO less_common_data_types_table (dist_key,col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col30, col32, col33, col34, col35, col36, col37, col38)
VALUES (5,ARRAY[1], ARRAY[ARRAY[0,0,0]], ARRAY[ARRAY[ARRAY[0,0,0]]], ARRAY['1'], ARRAY[ARRAY['0','0','0']], ARRAY[ARRAY[ARRAY['0','0','0']]], '1', ARRAY[b'1'], ARRAY[ARRAY[b'0',b'0',b'0']], ARRAY[ARRAY[ARRAY[b'0',b'0',b'0']]], '11101',ARRAY[b'1'], ARRAY[ARRAY[b'01',b'01',b'01']], ARRAY[ARRAY[ARRAY[b'011',b'110',b'0000']]], '\xb4a8e04c0b', ARRAY['\xb4a8e04c0b'::BYTEA], ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA, '\xb4a8e04c0b'::BYTEA, '\xb4a8e04c0b'::BYTEA]], ARRAY[ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]]], '1', ARRAY[TRUE], ARRAY[ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[ARRAY[1::boolean,TRUE,FALSE]]], INET '192.168.1/24', ARRAY[INET '192.168.1.1'], ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']]],MACADDR '08:00:2b:01:02:03', ARRAY[MACADDR '08:00:2b:01:02:03'], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']], ARRAY[ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']]], 690, ARRAY[1.1], ARRAY[ARRAY[0,0.111,0.15]], ARRAY[ARRAY[ARRAY[0,0,0]]], test_jsonb(), ARRAY[test_jsonb()], ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]], ARRAY[ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]]]),
(6,ARRAY[1,2,3], ARRAY[ARRAY[1,2,3], ARRAY[5,6,7]], ARRAY[ARRAY[ARRAY[1,2,3]], ARRAY[ARRAY[5,6,7]], ARRAY[ARRAY[1,2,3]], ARRAY[ARRAY[5,6,7]]], ARRAY['1','2','3'], ARRAY[ARRAY['1','2','3'], ARRAY['5','6','7']], ARRAY[ARRAY[ARRAY['1','2','3']], ARRAY[ARRAY['5','6','7']], ARRAY[ARRAY['1','2','3']], ARRAY[ARRAY['5','6','7']]], '0', ARRAY[b'1',b'0',b'0'], ARRAY[ARRAY[b'1',b'1',b'0'], ARRAY[b'0',b'0',b'1']], ARRAY[ARRAY[ARRAY[b'1',b'1',b'1']], ARRAY[ARRAY[b'1','0','0']], ARRAY[ARRAY[b'1','1','1']], ARRAY[ARRAY[b'0','0','0']]], '00010', ARRAY[b'11',b'10',b'01'], ARRAY[ARRAY[b'11',b'010',b'101'], ARRAY[b'101',b'01111',b'1000001']], ARRAY[ARRAY[ARRAY[b'10000',b'111111',b'1101010101']], ARRAY[ARRAY[b'1101010','0','1']], ARRAY[ARRAY[b'1','1','11111111']], ARRAY[ARRAY[b'0000000','0','0']]], '\xb4a8e04c0b', ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA], ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA], ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]], ARRAY[ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]], ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]], ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]], ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]]], 'true', ARRAY[1::boolean,TRUE,FALSE], ARRAY[ARRAY[1::boolean,TRUE,FALSE], ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[1::boolean,TRUE,FALSE]]],'0.0.0.0/32', ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24'], ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']]], '0800.2b01.0203', ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203'], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']], ARRAY[ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']]], 0.99, ARRAY[1.1,2.22,3.33], ARRAY[ARRAY[1.55,2.66,3.88], ARRAY[11.5,10101.6,7111.1]], ARRAY[ARRAY[ARRAY[1,2,3]], ARRAY[ARRAY[5,6,7]], ARRAY[ARRAY[1.1,2.1,3]], ARRAY[ARRAY[5.0,6.0,7.0]]],test_jsonb(), ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()], ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()], ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]], ARRAY[ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]], ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]], ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]], ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]]]);
-- insert the same data with RETURNING
INSERT INTO less_common_data_types_table (dist_key,col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col30, col32, col33, col34, col35, col36, col37, col38)
VALUES (7,ARRAY[1], ARRAY[ARRAY[0,0,0]], ARRAY[ARRAY[ARRAY[0,0,0]]], ARRAY['1'], ARRAY[ARRAY['0','0','0']], ARRAY[ARRAY[ARRAY['0','0','0']]], '1', ARRAY[b'1'], ARRAY[ARRAY[b'0',b'0',b'0']], ARRAY[ARRAY[ARRAY[b'0',b'0',b'0']]], '11101',ARRAY[b'1'], ARRAY[ARRAY[b'01',b'01',b'01']], ARRAY[ARRAY[ARRAY[b'011',b'110',b'0000']]], '\xb4a8e04c0b', ARRAY['\xb4a8e04c0b'::BYTEA], ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA, '\xb4a8e04c0b'::BYTEA, '\xb4a8e04c0b'::BYTEA]], ARRAY[ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]]], '1', ARRAY[TRUE], ARRAY[ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[ARRAY[1::boolean,TRUE,FALSE]]], INET '192.168.1/24', ARRAY[INET '192.168.1.1'], ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']]],MACADDR '08:00:2b:01:02:03', ARRAY[MACADDR '08:00:2b:01:02:03'], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']], ARRAY[ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']]], 690, ARRAY[1.1], ARRAY[ARRAY[0,0.111,0.15]], ARRAY[ARRAY[ARRAY[0,0,0]]], test_jsonb(), ARRAY[test_jsonb()], ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]], ARRAY[ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]]]),
(8,ARRAY[1,2,3], ARRAY[ARRAY[1,2,3], ARRAY[5,6,7]], ARRAY[ARRAY[ARRAY[1,2,3]], ARRAY[ARRAY[5,6,7]], ARRAY[ARRAY[1,2,3]], ARRAY[ARRAY[5,6,7]]], ARRAY['1','2','3'], ARRAY[ARRAY['1','2','3'], ARRAY['5','6','7']], ARRAY[ARRAY[ARRAY['1','2','3']], ARRAY[ARRAY['5','6','7']], ARRAY[ARRAY['1','2','3']], ARRAY[ARRAY['5','6','7']]], '0', ARRAY[b'1',b'0',b'0'], ARRAY[ARRAY[b'1',b'1',b'0'], ARRAY[b'0',b'0',b'1']], ARRAY[ARRAY[ARRAY[b'1',b'1',b'1']], ARRAY[ARRAY[b'1','0','0']], ARRAY[ARRAY[b'1','1','1']], ARRAY[ARRAY[b'0','0','0']]], '00010', ARRAY[b'11',b'10',b'01'], ARRAY[ARRAY[b'11',b'010',b'101'], ARRAY[b'101',b'01111',b'1000001']], ARRAY[ARRAY[ARRAY[b'10000',b'111111',b'1101010101']], ARRAY[ARRAY[b'1101010','0','1']], ARRAY[ARRAY[b'1','1','11111111']], ARRAY[ARRAY[b'0000000','0','0']]], '\xb4a8e04c0b', ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA], ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA], ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]], ARRAY[ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]], ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]], ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]], ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]]], 'true', ARRAY[1::boolean,TRUE,FALSE], ARRAY[ARRAY[1::boolean,TRUE,FALSE], ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[1::boolean,TRUE,FALSE]]],'0.0.0.0/32', ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24'], ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']]], '0800.2b01.0203', ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203'], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']], ARRAY[ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']]], 0.99, ARRAY[1.1,2.22,3.33], ARRAY[ARRAY[1.55,2.66,3.88], ARRAY[11.5,10101.6,7111.1]], ARRAY[ARRAY[ARRAY[1,2,3]], ARRAY[ARRAY[5,6,7]], ARRAY[ARRAY[1.1,2.1,3]], ARRAY[ARRAY[5.0,6.0,7.0]]],test_jsonb(), ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()], ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()], ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]], ARRAY[ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]], ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]], ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]], ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]]])
RETURNING *;
dist_key | col1 | col2 | col3 | col4 | col5 | col6 | col70 | col7 | col8 | col9 | col10 | col11 | col12 | col13 | col14 | col15 | col16 | col17 | col18 | col19 | col20 | col21 | col22 | col23 | col24 | col25 | col26 | col27 | col28 | col29 | col30 | col32 | col33 | col34 | col35 | col36 | col37 | col38
---------------------------------------------------------------------
7 | {1} | {{0,0,0}} | {{{0,0,0}}} | {1} | {{0,0,0}} | {{{0,0,0}}} | 1 | {1} | {{0,0,0}} | {{{0,0,0}}} | 11101 | {1} | {{01,01,01}} | {{{011,110,0000}}} | \xb4a8e04c0b | {"\\xb4a8e04c0b"} | {{"\\xb4a8e04c0b","\\xb4a8e04c0b","\\xb4a8e04c0b"}} | {{{"\\xb4a8e04c0b","\\x18a232a678","\\x38b2697632"}}} | t | {t} | {{t,t,f}} | {{{t,t,f}}} | 192.168.1.0/24 | {192.168.1.1} | {{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}} | {{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}}} | 08:00:2b:01:02:03 | {08:00:2b:01:02:03} | {{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}} | {{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}}} | 690 | {1.1} | {{0,0.111,0.15}} | {{{0,0,0}}} | {"test_json": "test"} | {"{\"test_json\": \"test\"}"} | {{"{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}"}} | {{{"{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}"}}}
8 | {1,2,3} | {{1,2,3},{5,6,7}} | {{{1,2,3}},{{5,6,7}},{{1,2,3}},{{5,6,7}}} | {1,2,3} | {{1,2,3},{5,6,7}} | {{{1,2,3}},{{5,6,7}},{{1,2,3}},{{5,6,7}}} | 0 | {1,0,0} | {{1,1,0},{0,0,1}} | {{{1,1,1}},{{1,0,0}},{{1,1,1}},{{0,0,0}}} | 00010 | {11,10,01} | {{11,010,101},{101,01111,1000001}} | {{{10000,111111,1101010101}},{{1101010,0,1}},{{1,1,11111111}},{{0000000,0,0}}} | \xb4a8e04c0b | {"\\xb4a8e04c0b","\\x18a232a678","\\x38b2697632"} | {{"\\xb4a8e04c0b","\\x18a232a678","\\x38b2697632"},{"\\xb4a8e04c0b","\\x18a232a678","\\x38b2697632"}} | {{{"\\xb4a8e04c0b","\\x18a232a678","\\x38b2697632"}},{{"\\xb4a8e04c0b","\\x18a232a678","\\x38b2697632"}},{{"\\xb4a8e04c0b","\\x18a232a678","\\x38b2697632"}},{{"\\xb4a8e04c0b","\\x18a232a678","\\x38b2697632"}}} | t | {t,t,f} | {{t,t,f},{t,t,f}} | {{{t,t,f}},{{t,t,f}},{{t,t,f}},{{t,t,f}}} | 0.0.0.0 | {0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24} | {{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}} | {{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}},{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}},{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}},{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}}} | 08:00:2b:01:02:03 | {08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03} | {{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}} | {{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}},{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}},{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}},{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}}} | 0.99 | {1.1,2.22,3.33} | {{1.55,2.66,3.88},{11.5,10101.6,7111.1}} | {{{1,2,3}},{{5,6,7}},{{1.1,2.1,3}},{{5.0,6.0,7.0}}} | {"test_json": "test"} | {"{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}"} | {{"{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}"},{"{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}"}} | {{{"{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}"}},{{"{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}"}},{{"{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}"}},{{"{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}"}}}
(2 rows)

ROLLBACK;
-- count DISTINCT w/wout dist key
SELECT count(DISTINCT(col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col32, col33, col34, col35, col36, col37, col38))
FROM
    less_common_data_types_table
ORDER BY 1 DESC;
 count
---------------------------------------------------------------------
 2
(1 row)

SELECT count(DISTINCT(dist_key, col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col32, col33, col34, col35, col36, col37, col38))
FROM
    less_common_data_types_table
ORDER BY 1 DESC;
 count
---------------------------------------------------------------------
 4
(1 row)

-- some batch loads via INSERT .. SELECT
INSERT INTO less_common_data_types_table SELECT * FROM less_common_data_types_table;
ERROR: duplicate key value violates unique constraint "unique_index_on_columnar"
DETAIL: Key (dist_key, col1)=(1, {1}) already exists.
-- a query that could use the index, but doesn't, since chunk group filtering is cheaper
SELECT count(*) FROM less_common_data_types_table WHERE dist_key = 1 AND col1 = ARRAY[1];
 count
---------------------------------------------------------------------
 1
(1 row)

-- make sure that we test index scan
set columnar.enable_custom_scan to 'off';
set enable_seqscan to off;
set seq_page_cost TO 10000000;
EXPLAIN (costs off, timing off, summary off, analyze on)
SELECT count(*) FROM less_common_data_types_table WHERE dist_key = 1 AND col1 = ARRAY[1];
 QUERY PLAN
---------------------------------------------------------------------
 Aggregate (actual rows=1 loops=1)
   ->  Index Scan using non_unique_index_on_columnar on less_common_data_types_table (actual rows=1 loops=1)
         Index Cond: ((dist_key = 1) AND (col1 = '{1}'::integer[]))
(3 rows)

-- make sure that we re-enable columnar scan
RESET columnar.enable_custom_scan;
RESET enable_seqscan;
RESET seq_page_cost;
-- violate (a) PRIMARY KEY
INSERT INTO columnar_with_constraints (c1) VALUES (1), (1);
ERROR: duplicate key value violates unique constraint "columnar_with_constraints_pkey"
DETAIL: Key (c1)=(1) already exists.
-- violate (b) UNIQUE KEY
INSERT INTO columnar_with_constraints (c1, c2) VALUES (1, 1), (2, 1);
ERROR: duplicate key value violates unique constraint "columnar_with_constraints_pkey"
DETAIL: Key (c1)=(1) already exists.
-- violate (c) EXCLUDE CONSTRAINTS
INSERT INTO columnar_with_constraints (c1, c3) VALUES (1, 1), (2, 1);
ERROR: duplicate key value violates unique constraint "columnar_with_constraints_pkey"
DETAIL: Key (c1)=(1) already exists.
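The DDL for columnar_with_constraints sits outside this hunk; a hypothetical shape consistent with the three labelled violations (the reported pkey message suggests a row with c1 = 1 already existed before this hunk):

    CREATE TABLE columnar_with_constraints (
        c1 int PRIMARY KEY,              -- (a)
        c2 int UNIQUE,                   -- (b)
        c3 int,
        EXCLUDE USING btree (c3 WITH =)  -- (c)
    ) USING columnar;
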
-- finally, insert two ROWs
BEGIN;
INSERT INTO columnar_with_constraints (c1, c2, c3) VALUES (30, 40, 50), (60, 70, 80);
ROLLBACK;
-- make sure that we can re-create the tables & load some data
BEGIN;
CREATE TABLE test_retains_data_like (LIKE test_retains_data) USING columnar;
INSERT INTO test_retains_data_like SELECT * FROM test_retains_data_like;
CREATE TABLE less_common_data_types_table_like (LIKE less_common_data_types_table INCLUDING INDEXES) USING columnar;
INSERT INTO less_common_data_types_table_like SELECT * FROM less_common_data_types_table;
CREATE TABLE columnar_with_constraints_like (LIKE columnar_with_constraints INCLUDING CONSTRAINTS) USING columnar;
INSERT INTO columnar_with_constraints_like SELECT * FROM columnar_with_constraints_like;
INSERT INTO text_data (value) SELECT generate_random_string(1024 * 10) FROM generate_series(0,10);
SELECT count(DISTINCT value) FROM text_data;
 count
---------------------------------------------------------------------
 22
(1 row)

-- make sure that serial is preserved
-- since we run "after schedule" twice and "rollback" wouldn't undo
-- sequence changes, it can be 22 or 33, not a different value
SELECT max(id) in (22, 33) FROM text_data;
 ?column?
---------------------------------------------------------------------
 t
(1 row)

-- since we run "after schedule" twice, rollback the transaction
-- to avoid getting "table already exists" errors
ROLLBACK;
BEGIN;
-- Show that we can still drop the extension after upgrading
SET client_min_messages TO WARNING;
-- Drop extension might cascade to columnar.options before dropping a
-- columnar table. In that case, we were getting the error below when opening
-- columnar.options to delete records for the columnar table that we are
-- about to drop: "ERROR: could not open relation with OID 0".
--
-- I somehow reproduced this bug easily when upgrading pg, which is why
-- the test is added to this file.
--
-- TODO: Need to uncomment following line after fixing
-- https://github.com/citusdata/citus/issues/5483.
-- DROP EXTENSION citus CASCADE;
ROLLBACK;
-- Make sure that we define dependencies from all rel objects (tables,
-- indexes, sequences ..) to columnar table access method.
--
-- Given that this test file is run both before and after pg upgrade, the
-- first run will test that for columnar--10.2-3--10.2-4.sql script, and the
-- second run will test the same for citus_finish_pg_upgrade(), for the post
-- pg-upgrade scenario.
SELECT pg_class.oid INTO columnar_schema_members
FROM pg_class, pg_namespace
WHERE pg_namespace.oid=pg_class.relnamespace AND
      pg_namespace.nspname='columnar';
SELECT refobjid INTO columnar_schema_members_pg_depend
FROM pg_depend
WHERE classid = 'pg_am'::regclass::oid AND
      objid = (select oid from pg_am where amname = 'columnar') AND
      objsubid = 0 AND
      refclassid = 'pg_class'::regclass::oid AND
      refobjsubid = 0 AND
      deptype = 'n';
-- ... , so this should be empty,
(TABLE columnar_schema_members EXCEPT TABLE columnar_schema_members_pg_depend)
UNION
(TABLE columnar_schema_members_pg_depend EXCEPT TABLE columnar_schema_members);
 oid
---------------------------------------------------------------------
(0 rows)

-- ... , and both columnar_schema_members_pg_depend & columnar_schema_members
-- should have 10 entries.
SELECT COUNT(*)=10 FROM columnar_schema_members_pg_depend;
 ?column?
---------------------------------------------------------------------
 t
(1 row)

DROP TABLE columnar_schema_members, columnar_schema_members_pg_depend;
-- Check the same for workers too.
SELECT run_command_on_workers(
$$
SELECT pg_class.oid INTO columnar_schema_members
FROM pg_class, pg_namespace
WHERE pg_namespace.oid=pg_class.relnamespace AND
      pg_namespace.nspname='columnar';
SELECT refobjid INTO columnar_schema_members_pg_depend
FROM pg_depend
WHERE classid = 'pg_am'::regclass::oid AND
      objid = (select oid from pg_am where amname = 'columnar') AND
      objsubid = 0 AND
      refclassid = 'pg_class'::regclass::oid AND
      refobjsubid = 0 AND
      deptype = 'n';
$$
);
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57636,t,"SELECT 10")
 (localhost,57637,t,"SELECT 10")
(2 rows)

SELECT run_command_on_workers(
$$
(TABLE columnar_schema_members EXCEPT TABLE columnar_schema_members_pg_depend)
UNION
(TABLE columnar_schema_members_pg_depend EXCEPT TABLE columnar_schema_members);
$$
);
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57636,t,"")
 (localhost,57637,t,"")
(2 rows)

SELECT run_command_on_workers(
$$
SELECT COUNT(*)=10 FROM columnar_schema_members_pg_depend;
$$
);
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57636,t,t)
 (localhost,57637,t,t)
(2 rows)

SELECT run_command_on_workers(
$$
DROP TABLE columnar_schema_members, columnar_schema_members_pg_depend;
$$
);
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57636,t,"DROP TABLE")
 (localhost,57637,t,"DROP TABLE")
(2 rows)

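To see which ten relations carry the dependency, the same pg_depend filter can be resolved to names; a sketch (works on the coordinator or a worker):

    SELECT refobjid::regclass AS columnar_schema_member
    FROM pg_depend
    WHERE classid = 'pg_am'::regclass
      AND objid = (SELECT oid FROM pg_am WHERE amname = 'columnar')
      AND refclassid = 'pg_class'::regclass
      AND deptype = 'n'
    ORDER BY 1;
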
@ -5,6 +5,33 @@ SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_ele
\else
\q
\endif
-- Test if relying on topological sort of the objects, not their names, works
-- fine when re-creating objects during pg_upgrade.
ALTER SCHEMA public RENAME TO citus_schema;
SET search_path TO citus_schema;
-- As mentioned in https://github.com/citusdata/citus/issues/5447, it
-- is essential to already have public schema to be able to use
-- citus_prepare_pg_upgrade. Until fixing that bug, let's create public
-- schema here on all nodes.
CREATE SCHEMA IF NOT EXISTS public;
-- Do "SELECT 1" to hide port numbers
SELECT 1 FROM run_command_on_workers($$CREATE SCHEMA IF NOT EXISTS public$$);
 ?column?
---------------------------------------------------------------------
 1
 1
(2 rows)

-- create a columnar table within citus_schema
CREATE TABLE new_columnar_table (
    col_1 character varying(100),
    col_2 date,
    col_3 character varying(100),
    col_4 date
) USING columnar;
INSERT INTO new_columnar_table
SELECT ('1', '1999-01-01'::timestamp, '1', '1999-01-01'::timestamp)
FROM generate_series(1, 1000);
CREATE SCHEMA upgrade_columnar;
SET search_path TO upgrade_columnar, public;
CREATE TYPE compfoo AS (f1 int, f2 text);

@ -147,3 +174,207 @@ SELECT alter_columnar_table_set('test_options_2', compression_level => 13);
(1 row)

INSERT INTO test_options_2 SELECT i, floor(i/2000) FROM generate_series(1, 10000) i;
Create or replace function test_jsonb() returns jsonb as
$$
begin
    return '{"test_json": "test"}';
end;
$$ language plpgsql;
CREATE TABLE less_common_data_types_table
(
    dist_key bigint,
    col1 int[], col2 int[][], col3 int [][][],
    col4 varchar[], col5 varchar[][], col6 varchar [][][],
    col70 bit, col7 bit[], col8 bit[][], col9 bit [][][],
    col10 bit varying(10),
    col11 bit varying(10)[], col12 bit varying(10)[][], col13 bit varying(10)[][][],
    col14 bytea, col15 bytea[], col16 bytea[][], col17 bytea[][][],
    col18 boolean, col19 boolean[], col20 boolean[][], col21 boolean[][][],
    col22 inet, col23 inet[], col24 inet[][], col25 inet[][][],
    col26 macaddr, col27 macaddr[], col28 macaddr[][], col29 macaddr[][][],
    col30 numeric, col32 numeric[], col33 numeric[][], col34 numeric[][][],
    col35 jsonb, col36 jsonb[], col37 jsonb[][], col38 jsonb[][][]
) USING COLUMNAR;
CREATE UNIQUE INDEX unique_index_on_columnar ON less_common_data_types_table(dist_key, col1);
CREATE INDEX non_unique_index_on_columnar ON less_common_data_types_table(dist_key, col1);
INSERT INTO less_common_data_types_table (dist_key,col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col30, col32, col33, col34, col35, col36, col37, col38)
VALUES (1,ARRAY[1], ARRAY[ARRAY[0,0,0]], ARRAY[ARRAY[ARRAY[0,0,0]]], ARRAY['1'], ARRAY[ARRAY['0','0','0']], ARRAY[ARRAY[ARRAY['0','0','0']]], '1', ARRAY[b'1'], ARRAY[ARRAY[b'0',b'0',b'0']], ARRAY[ARRAY[ARRAY[b'0',b'0',b'0']]], '11101',ARRAY[b'1'], ARRAY[ARRAY[b'01',b'01',b'01']], ARRAY[ARRAY[ARRAY[b'011',b'110',b'0000']]], '\xb4a8e04c0b', ARRAY['\xb4a8e04c0b'::BYTEA], ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA, '\xb4a8e04c0b'::BYTEA, '\xb4a8e04c0b'::BYTEA]], ARRAY[ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]]], '1', ARRAY[TRUE], ARRAY[ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[ARRAY[1::boolean,TRUE,FALSE]]], INET '192.168.1/24', ARRAY[INET '192.168.1.1'], ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']]],MACADDR '08:00:2b:01:02:03', ARRAY[MACADDR '08:00:2b:01:02:03'], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']], ARRAY[ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']]], 690, ARRAY[1.1], ARRAY[ARRAY[0,0.111,0.15]], ARRAY[ARRAY[ARRAY[0,0,0]]], test_jsonb(), ARRAY[test_jsonb()], ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]], ARRAY[ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]]]),
(2,ARRAY[1,2,3], ARRAY[ARRAY[1,2,3], ARRAY[5,6,7]], ARRAY[ARRAY[ARRAY[1,2,3]], ARRAY[ARRAY[5,6,7]], ARRAY[ARRAY[1,2,3]], ARRAY[ARRAY[5,6,7]]], ARRAY['1','2','3'], ARRAY[ARRAY['1','2','3'], ARRAY['5','6','7']], ARRAY[ARRAY[ARRAY['1','2','3']], ARRAY[ARRAY['5','6','7']], ARRAY[ARRAY['1','2','3']], ARRAY[ARRAY['5','6','7']]], '0', ARRAY[b'1',b'0',b'0'], ARRAY[ARRAY[b'1',b'1',b'0'], ARRAY[b'0',b'0',b'1']], ARRAY[ARRAY[ARRAY[b'1',b'1',b'1']], ARRAY[ARRAY[b'1','0','0']], ARRAY[ARRAY[b'1','1','1']], ARRAY[ARRAY[b'0','0','0']]], '00010', ARRAY[b'11',b'10',b'01'], ARRAY[ARRAY[b'11',b'010',b'101'], ARRAY[b'101',b'01111',b'1000001']], ARRAY[ARRAY[ARRAY[b'10000',b'111111',b'1101010101']], ARRAY[ARRAY[b'1101010','0','1']], ARRAY[ARRAY[b'1','1','11111111']], ARRAY[ARRAY[b'0000000','0','0']]], '\xb4a8e04c0b', ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA], ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA], ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]], ARRAY[ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]], ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]], ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]], ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]]], 'true', ARRAY[1::boolean,TRUE,FALSE], ARRAY[ARRAY[1::boolean,TRUE,FALSE], ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[1::boolean,TRUE,FALSE]]],'0.0.0.0/32', ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24'], ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']]], '0800.2b01.0203', ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203'], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']], ARRAY[ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']]], 0.99, ARRAY[1.1,2.22,3.33], ARRAY[ARRAY[1.55,2.66,3.88], ARRAY[11.5,10101.6,7111.1]], ARRAY[ARRAY[ARRAY[1,2,3]], ARRAY[ARRAY[5,6,7]], ARRAY[ARRAY[1.1,2.1,3]], ARRAY[ARRAY[5.0,6.0,7.0]]],test_jsonb(), ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()], ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()], ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]], ARRAY[ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]], ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]], ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]], ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]]]);
-- insert the same data with RETURNING
INSERT INTO less_common_data_types_table (dist_key,col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col30, col32, col33, col34, col35, col36, col37, col38)
VALUES (3,ARRAY[1], ARRAY[ARRAY[0,0,0]], ARRAY[ARRAY[ARRAY[0,0,0]]], ARRAY['1'], ARRAY[ARRAY['0','0','0']], ARRAY[ARRAY[ARRAY['0','0','0']]], '1', ARRAY[b'1'], ARRAY[ARRAY[b'0',b'0',b'0']], ARRAY[ARRAY[ARRAY[b'0',b'0',b'0']]], '11101',ARRAY[b'1'], ARRAY[ARRAY[b'01',b'01',b'01']], ARRAY[ARRAY[ARRAY[b'011',b'110',b'0000']]], '\xb4a8e04c0b', ARRAY['\xb4a8e04c0b'::BYTEA], ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA, '\xb4a8e04c0b'::BYTEA, '\xb4a8e04c0b'::BYTEA]], ARRAY[ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]]], '1', ARRAY[TRUE], ARRAY[ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[ARRAY[1::boolean,TRUE,FALSE]]], INET '192.168.1/24', ARRAY[INET '192.168.1.1'], ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']]],MACADDR '08:00:2b:01:02:03', ARRAY[MACADDR '08:00:2b:01:02:03'], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']], ARRAY[ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']]], 690, ARRAY[1.1], ARRAY[ARRAY[0,0.111,0.15]], ARRAY[ARRAY[ARRAY[0,0,0]]], test_jsonb(), ARRAY[test_jsonb()], ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]], ARRAY[ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]]]),
(4,ARRAY[1,2,3], ARRAY[ARRAY[1,2,3], ARRAY[5,6,7]], ARRAY[ARRAY[ARRAY[1,2,3]], ARRAY[ARRAY[5,6,7]], ARRAY[ARRAY[1,2,3]], ARRAY[ARRAY[5,6,7]]], ARRAY['1','2','3'], ARRAY[ARRAY['1','2','3'], ARRAY['5','6','7']], ARRAY[ARRAY[ARRAY['1','2','3']], ARRAY[ARRAY['5','6','7']], ARRAY[ARRAY['1','2','3']], ARRAY[ARRAY['5','6','7']]], '0', ARRAY[b'1',b'0',b'0'], ARRAY[ARRAY[b'1',b'1',b'0'], ARRAY[b'0',b'0',b'1']], ARRAY[ARRAY[ARRAY[b'1',b'1',b'1']], ARRAY[ARRAY[b'1','0','0']], ARRAY[ARRAY[b'1','1','1']], ARRAY[ARRAY[b'0','0','0']]], '00010', ARRAY[b'11',b'10',b'01'], ARRAY[ARRAY[b'11',b'010',b'101'], ARRAY[b'101',b'01111',b'1000001']], ARRAY[ARRAY[ARRAY[b'10000',b'111111',b'1101010101']], ARRAY[ARRAY[b'1101010','0','1']], ARRAY[ARRAY[b'1','1','11111111']], ARRAY[ARRAY[b'0000000','0','0']]], '\xb4a8e04c0b', ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA], ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA], ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]], ARRAY[ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]], ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]], ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]], ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]]], 'true', ARRAY[1::boolean,TRUE,FALSE], ARRAY[ARRAY[1::boolean,TRUE,FALSE], ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[1::boolean,TRUE,FALSE]]],'0.0.0.0/32', ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24'], ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']]], '0800.2b01.0203', ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203'], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']], ARRAY[ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203', '08002b010203']]], 0.99, ARRAY[1.1,2.22,3.33], ARRAY[ARRAY[1.55,2.66,3.88], ARRAY[11.5,10101.6,7111.1]], ARRAY[ARRAY[ARRAY[1,2,3]], ARRAY[ARRAY[5,6,7]], ARRAY[ARRAY[1.1,2.1,3]], ARRAY[ARRAY[5.0,6.0,7.0]]],test_jsonb(), ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()], ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()], ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]], ARRAY[ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]], ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]], ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]], ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]]])
RETURNING *;
dist_key | col1 | col2 | col3 | col4 | col5 | col6 | col70 | col7 | col8 | col9 | col10 | col11 | col12 | col13 | col14 | col15 | col16 | col17 | col18 | col19 | col20 | col21 | col22 | col23 | col24 | col25 | col26 | col27 | col28 | col29 | col30 | col32 | col33 | col34 | col35 | col36 | col37 | col38
---------------------------------------------------------------------
3 | {1} | {{0,0,0}} | {{{0,0,0}}} | {1} | {{0,0,0}} | {{{0,0,0}}} | 1 | {1} | {{0,0,0}} | {{{0,0,0}}} | 11101 | {1} | {{01,01,01}} | {{{011,110,0000}}} | \xb4a8e04c0b | {"\\xb4a8e04c0b"} | {{"\\xb4a8e04c0b","\\xb4a8e04c0b","\\xb4a8e04c0b"}} | {{{"\\xb4a8e04c0b","\\x18a232a678","\\x38b2697632"}}} | t | {t} | {{t,t,f}} | {{{t,t,f}}} | 192.168.1.0/24 | {192.168.1.1} | {{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}} | {{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}}} | 08:00:2b:01:02:03 | {08:00:2b:01:02:03} | {{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}} | {{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}}} | 690 | {1.1} | {{0,0.111,0.15}} | {{{0,0,0}}} | {"test_json": "test"} | {"{\"test_json\": \"test\"}"} | {{"{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}"}} | {{{"{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}"}}}
4 | {1,2,3} | {{1,2,3},{5,6,7}} | {{{1,2,3}},{{5,6,7}},{{1,2,3}},{{5,6,7}}} | {1,2,3} | {{1,2,3},{5,6,7}} | {{{1,2,3}},{{5,6,7}},{{1,2,3}},{{5,6,7}}} | 0 | {1,0,0} | {{1,1,0},{0,0,1}} | {{{1,1,1}},{{1,0,0}},{{1,1,1}},{{0,0,0}}} | 00010 | {11,10,01} | {{11,010,101},{101,01111,1000001}} | {{{10000,111111,1101010101}},{{1101010,0,1}},{{1,1,11111111}},{{0000000,0,0}}} | \xb4a8e04c0b | {"\\xb4a8e04c0b","\\x18a232a678","\\x38b2697632"} | {{"\\xb4a8e04c0b","\\x18a232a678","\\x38b2697632"},{"\\xb4a8e04c0b","\\x18a232a678","\\x38b2697632"}} | {{{"\\xb4a8e04c0b","\\x18a232a678","\\x38b2697632"}},{{"\\xb4a8e04c0b","\\x18a232a678","\\x38b2697632"}},{{"\\xb4a8e04c0b","\\x18a232a678","\\x38b2697632"}},{{"\\xb4a8e04c0b","\\x18a232a678","\\x38b2697632"}}} | t | {t,t,f} | {{t,t,f},{t,t,f}} | {{{t,t,f}},{{t,t,f}},{{t,t,f}},{{t,t,f}}} | 0.0.0.0 | {0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24} | {{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}} | {{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}},{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}},{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}},{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}}} | 08:00:2b:01:02:03 | {08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03} | {{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}} | {{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}},{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}},{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}},{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}}} | 0.99 | {1.1,2.22,3.33} | {{1.55,2.66,3.88},{11.5,10101.6,7111.1}} | {{{1,2,3}},{{5,6,7}},{{1.1,2.1,3}},{{5.0,6.0,7.0}}} | {"test_json": "test"} | {"{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}"} | {{"{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}"},{"{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}"}} | {{{"{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}"}},{{"{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}"}},{{"{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}"}},{{"{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}","{\"test_json\": \"test\"}"}}}
(2 rows)

-- GROUP BY w/wout the dist key
SELECT
count(*)
FROM
less_common_data_types_table
GROUP BY
col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col32, col33, col34, col35, col36, col37, col38;
count
---------------------------------------------------------------------
2
2
(2 rows)

SELECT
count(*)
FROM
less_common_data_types_table
GROUP BY
dist_key, col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col32, col33, col34, col35, col36, col37, col38;
count
---------------------------------------------------------------------
1
1
1
1
(4 rows)

-- window function w/wout distribution key
SELECT
count(*) OVER (PARTITION BY col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col32, col33, col34, col35, col36, col37, col38)
FROM
less_common_data_types_table;
count
---------------------------------------------------------------------
2
2
2
2
(4 rows)

SELECT
count(*) OVER (PARTITION BY dist_key, col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col32, col33, col34, col35, col36, col37, col38)
FROM
less_common_data_types_table;
count
---------------------------------------------------------------------
1
1
1
1
(4 rows)

-- DISTINCT w/wout distribution key
-- there seems to be an issue with SELECT DISTINCT ROW on PG14,
-- so we add an alternative output that gives an error; this should
-- be removed once the issue is fixed in PG14.
SELECT DISTINCT(col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col32, col33, col34, col35, col36, col37, col38)
FROM
less_common_data_types_table
ORDER BY 1 DESC;
row
---------------------------------------------------------------------
("{1,2,3}","{{1,2,3},{5,6,7}}","{{{1,2,3}},{{5,6,7}},{{1,2,3}},{{5,6,7}}}","{1,2,3}","{{1,2,3},{5,6,7}}","{{{1,2,3}},{{5,6,7}},{{1,2,3}},{{5,6,7}}}",0,"{1,0,0}","{{1,1,0},{0,0,1}}","{{{1,1,1}},{{1,0,0}},{{1,1,1}},{{0,0,0}}}",00010,"{11,10,01}","{{11,010,101},{101,01111,1000001}}","{{{10000,111111,1101010101}},{{1101010,0,1}},{{1,1,11111111}},{{0000000,0,0}}}","\\xb4a8e04c0b","{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""}","{{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""},{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""}}","{{{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""}},{{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""}},{{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""}},{{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""}}}",t,"{t,t,f}","{{t,t,f},{t,t,f}}","{{{t,t,f}},{{t,t,f}},{{t,t,f}},{{t,t,f}}}",0.0.0.0,"{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}","{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}}","{{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}},{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}},{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}},{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}}}",08:00:2b:01:02:03,"{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}","{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}}","{{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}},{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}},{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}},{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}}}","{1.1,2.22,3.33}","{{1.55,2.66,3.88},{11.5,10101.6,7111.1}}","{{{1,2,3}},{{5,6,7}},{{1.1,2.1,3}},{{5.0,6.0,7.0}}}","{""test_json"": ""test""}","{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}","{{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""},{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}}","{{{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}},{{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}},{{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}},{{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}}}")
({1},"{{0,0,0}}","{{{0,0,0}}}",{1},"{{0,0,0}}","{{{0,0,0}}}",1,{1},"{{0,0,0}}","{{{0,0,0}}}",11101,{1},"{{01,01,01}}","{{{011,110,0000}}}","\\xb4a8e04c0b","{""\\\\xb4a8e04c0b""}","{{""\\\\xb4a8e04c0b"",""\\\\xb4a8e04c0b"",""\\\\xb4a8e04c0b""}}","{{{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""}}}",t,{t},"{{t,t,f}}","{{{t,t,f}}}",192.168.1.0/24,{192.168.1.1},"{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}}","{{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}}}",08:00:2b:01:02:03,{08:00:2b:01:02:03},"{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}}","{{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}}}",{1.1},"{{0,0.111,0.15}}","{{{0,0,0}}}","{""test_json"": ""test""}","{""{\\""test_json\\"": \\""test\\""}""}","{{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}}","{{{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}}}")
(2 rows)

SELECT DISTINCT(dist_key, col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col32, col33, col34, col35, col36, col37, col38)
FROM
less_common_data_types_table
ORDER BY 1 DESC;
row
---------------------------------------------------------------------
(4,"{1,2,3}","{{1,2,3},{5,6,7}}","{{{1,2,3}},{{5,6,7}},{{1,2,3}},{{5,6,7}}}","{1,2,3}","{{1,2,3},{5,6,7}}","{{{1,2,3}},{{5,6,7}},{{1,2,3}},{{5,6,7}}}",0,"{1,0,0}","{{1,1,0},{0,0,1}}","{{{1,1,1}},{{1,0,0}},{{1,1,1}},{{0,0,0}}}",00010,"{11,10,01}","{{11,010,101},{101,01111,1000001}}","{{{10000,111111,1101010101}},{{1101010,0,1}},{{1,1,11111111}},{{0000000,0,0}}}","\\xb4a8e04c0b","{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""}","{{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""},{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""}}","{{{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""}},{{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""}},{{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""}},{{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""}}}",t,"{t,t,f}","{{t,t,f},{t,t,f}}","{{{t,t,f}},{{t,t,f}},{{t,t,f}},{{t,t,f}}}",0.0.0.0,"{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}","{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}}","{{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}},{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}},{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}},{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}}}",08:00:2b:01:02:03,"{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}","{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}}","{{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}},{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}},{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}},{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}}}","{1.1,2.22,3.33}","{{1.55,2.66,3.88},{11.5,10101.6,7111.1}}","{{{1,2,3}},{{5,6,7}},{{1.1,2.1,3}},{{5.0,6.0,7.0}}}","{""test_json"": ""test""}","{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}","{{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""},{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}}","{{{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}},{{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}},{{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}},{{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}}}")
(3,{1},"{{0,0,0}}","{{{0,0,0}}}",{1},"{{0,0,0}}","{{{0,0,0}}}",1,{1},"{{0,0,0}}","{{{0,0,0}}}",11101,{1},"{{01,01,01}}","{{{011,110,0000}}}","\\xb4a8e04c0b","{""\\\\xb4a8e04c0b""}","{{""\\\\xb4a8e04c0b"",""\\\\xb4a8e04c0b"",""\\\\xb4a8e04c0b""}}","{{{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""}}}",t,{t},"{{t,t,f}}","{{{t,t,f}}}",192.168.1.0/24,{192.168.1.1},"{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}}","{{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}}}",08:00:2b:01:02:03,{08:00:2b:01:02:03},"{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}}","{{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}}}",{1.1},"{{0,0.111,0.15}}","{{{0,0,0}}}","{""test_json"": ""test""}","{""{\\""test_json\\"": \\""test\\""}""}","{{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}}","{{{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}}}")
(2,"{1,2,3}","{{1,2,3},{5,6,7}}","{{{1,2,3}},{{5,6,7}},{{1,2,3}},{{5,6,7}}}","{1,2,3}","{{1,2,3},{5,6,7}}","{{{1,2,3}},{{5,6,7}},{{1,2,3}},{{5,6,7}}}",0,"{1,0,0}","{{1,1,0},{0,0,1}}","{{{1,1,1}},{{1,0,0}},{{1,1,1}},{{0,0,0}}}",00010,"{11,10,01}","{{11,010,101},{101,01111,1000001}}","{{{10000,111111,1101010101}},{{1101010,0,1}},{{1,1,11111111}},{{0000000,0,0}}}","\\xb4a8e04c0b","{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""}","{{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""},{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""}}","{{{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""}},{{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""}},{{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""}},{{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""}}}",t,"{t,t,f}","{{t,t,f},{t,t,f}}","{{{t,t,f}},{{t,t,f}},{{t,t,f}},{{t,t,f}}}",0.0.0.0,"{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}","{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}}","{{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}},{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}},{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}},{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}}}",08:00:2b:01:02:03,"{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}","{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}}","{{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}},{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}},{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}},{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}}}","{1.1,2.22,3.33}","{{1.55,2.66,3.88},{11.5,10101.6,7111.1}}","{{{1,2,3}},{{5,6,7}},{{1.1,2.1,3}},{{5.0,6.0,7.0}}}","{""test_json"": ""test""}","{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}","{{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""},{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}}","{{{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}},{{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}},{{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}},{{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}}}")
(1,{1},"{{0,0,0}}","{{{0,0,0}}}",{1},"{{0,0,0}}","{{{0,0,0}}}",1,{1},"{{0,0,0}}","{{{0,0,0}}}",11101,{1},"{{01,01,01}}","{{{011,110,0000}}}","\\xb4a8e04c0b","{""\\\\xb4a8e04c0b""}","{{""\\\\xb4a8e04c0b"",""\\\\xb4a8e04c0b"",""\\\\xb4a8e04c0b""}}","{{{""\\\\xb4a8e04c0b"",""\\\\x18a232a678"",""\\\\x38b2697632""}}}",t,{t},"{{t,t,f}}","{{{t,t,f}}}",192.168.1.0/24,{192.168.1.1},"{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}}","{{{0.0.0.0,0.0.0.0,::ffff:255.240.0.1,192.168.1.0/24}}}",08:00:2b:01:02:03,{08:00:2b:01:02:03},"{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}}","{{{08:00:2b:01:02:03,08:00:2b:01:02:03,08:00:2b:01:02:03}}}",{1.1},"{{0,0.111,0.15}}","{{{0,0,0}}}","{""test_json"": ""test""}","{""{\\""test_json\\"": \\""test\\""}""}","{{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}}","{{{""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}"",""{\\""test_json\\"": \\""test\\""}""}}}")
(4 rows)

-- count DISTINCT w/wout dist key
SELECT count(DISTINCT(col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col32, col33, col34, col35, col36, col37, col38))
FROM
less_common_data_types_table
ORDER BY 1 DESC;
count
---------------------------------------------------------------------
2
(1 row)

SELECT count(DISTINCT(dist_key, col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col32, col33, col34, col35, col36, col37, col38))
FROM
less_common_data_types_table
ORDER BY 1 DESC;
count
---------------------------------------------------------------------
4
(1 row)

-- a query that could use the index, but doesn't, since chunk group filtering is cheaper
SELECT count(*) FROM less_common_data_types_table WHERE dist_key = 1 AND col1 = ARRAY[1];
count
---------------------------------------------------------------------
1
(1 row)

-- make sure that we test index scan
set columnar.enable_custom_scan to 'off';
set enable_seqscan to off;
set seq_page_cost TO 10000000;
EXPLAIN (costs off, timing off, summary off, analyze on)
SELECT count(*) FROM less_common_data_types_table WHERE dist_key = 1 AND col1 = ARRAY[1];
QUERY PLAN
---------------------------------------------------------------------
Aggregate (actual rows=1 loops=1)
  ->  Index Scan using non_unique_index_on_columnar on less_common_data_types_table (actual rows=1 loops=1)
        Index Cond: ((dist_key = 1) AND (col1 = '{1}'::integer[]))
(3 rows)

-- make sure that we re-enable columnar scan
RESET columnar.enable_custom_scan;
RESET enable_seqscan;
RESET seq_page_cost;
-- Create a columnar table with
-- (a) PRIMARY KEY on c1
-- (b) UNIQUE KEY on c2
-- (c) EXCLUDE CONSTRAINT on c3
CREATE TABLE columnar_with_constraints (c1 INT PRIMARY KEY,
c2 INT UNIQUE,
c3 INT, EXCLUDE USING btree (c3 WITH =))
USING columnar;
-- violate (a) PRIMARY KEY
INSERT INTO columnar_with_constraints (c1) VALUES (1), (1);
ERROR: duplicate key value violates unique constraint "columnar_with_constraints_pkey"
DETAIL: Key (c1)=(1) already exists.
-- violate (b) UNIQUE KEY
INSERT INTO columnar_with_constraints (c1, c2) VALUES (1, 1), (2, 1);
ERROR: duplicate key value violates unique constraint "columnar_with_constraints_c2_key"
DETAIL: Key (c2)=(1) already exists.
-- violate (c) EXCLUDE CONSTRAINT
INSERT INTO columnar_with_constraints (c1, c3) VALUES (1, 1), (2, 1);
ERROR: conflicting key value violates exclusion constraint "columnar_with_constraints_c3_excl"
DETAIL: Key (c3)=(1) conflicts with existing key (c3)=(1).
-- finally, insert a ROW
INSERT INTO columnar_with_constraints (c1, c2, c3) VALUES (1, 2, 3);
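-- A hedged aside (not part of the original test): all three constraints above
-- are enforced through ordinary btree indexes even though the table's storage
-- is columnar. One way to list the backing indexes is the standard pg_indexes
-- view:
SELECT indexname FROM pg_indexes WHERE tablename = 'columnar_with_constraints' ORDER BY 1;
-- This should list columnar_with_constraints_c2_key,
-- columnar_with_constraints_c3_excl and columnar_with_constraints_pkey,
-- matching the constraint names in the error messages above.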
-- some data with a TOASTable column and large values
CREATE OR REPLACE FUNCTION generate_random_string(
  length INTEGER,
  characters TEXT default 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
) RETURNS TEXT AS
$$
DECLARE
  result TEXT := '';
BEGIN
  FOR __ IN 1..length LOOP
    result := result || substr(characters, floor(random() * length(characters))::int + 1, 1);
  END LOOP;
  RETURN result;
END;
$$ LANGUAGE plpgsql;
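-- A hedged note (not part of the original test): each call builds a string of
-- `length` characters drawn uniformly at random from `characters`, so two 10KB
-- values colliding is vanishingly unlikely; that is why count(DISTINCT value)
-- below is expected to equal the number of inserted rows (11, for
-- generate_series(0,10)).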
-- create table and load data
CREATE TABLE text_data (id SERIAL, value TEXT) USING COLUMNAR;
INSERT INTO text_data (value) SELECT generate_random_string(1024 * 10) FROM generate_series(0,10);
select count(DISTINCT value) from text_data;
count
---------------------------------------------------------------------
11
(1 row)
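-- A hedged aside (not part of the original test): each value is 1024 * 10 =
-- 10240 characters, well above the ~2KB point at which a heap table would
-- normally TOAST such values, which is what makes this a meaningful
-- large-varlena test for columnar storage. A quick sanity check on the data:
SELECT min(length(value)) AS min_len, max(length(value)) AS max_len FROM text_data;
-- expected: min_len = max_len = 10240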
@ -57,6 +57,7 @@ ORDER BY 1;
function citus_extradata_container(internal)
function citus_finish_pg_upgrade()
function citus_get_active_worker_nodes()
function citus_internal.columnar_ensure_am_depends_catalog()
function citus_internal.downgrade_columnar_storage(regclass)
function citus_internal.find_groupid_for_node(text,integer)
function citus_internal.pg_dist_node_trigger_func()

@ -123,6 +124,8 @@ ORDER BY 1;
function dump_global_wait_edges()
function dump_local_wait_edges()
function fetch_intermediate_results(text[],text,integer)
function fix_all_partition_shard_index_names()
function fix_partition_shard_index_names(regclass)
function fix_pre_citus10_partitioned_table_constraint_names()
function fix_pre_citus10_partitioned_table_constraint_names(regclass)
function get_all_active_transactions()

@ -202,6 +205,7 @@ ORDER BY 1;
function worker_drop_distributed_table(text)
function worker_fetch_foreign_file(text,text,bigint,text[],integer[])
function worker_fetch_partition_file(bigint,integer,integer,integer,text,integer)
function worker_fix_partition_shard_index_names(regclass, text, text)
function worker_fix_pre_citus10_partitioned_table_constraint_names(regclass,bigint,text)
function worker_hash("any")
function worker_hash_partition_table(bigint,integer,text,text,oid,anyarray)

@ -250,13 +254,13 @@ ORDER BY 1;
type noderole
view citus_dist_stat_activity
view citus_lock_waits
view citus_schema.citus_tables
view citus_shard_indexes_on_worker
view citus_shards
view citus_shards_on_worker
view citus_stat_statements
view citus_tables
view citus_worker_stat_activity
view pg_dist_shard_placement
view time_partitions
(242 rows)
(246 rows)
@ -1,247 +0,0 @@
-- print version above 11 (e.g. 12 and above)
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 11 AS version_above_eleven;
version_above_eleven
---------------------------------------------------------------------
f
(1 row)

-- list all postgres objects belonging to the citus extension
SELECT pg_catalog.pg_describe_object(classid, objid, 0) AS description
FROM pg_catalog.pg_depend, pg_catalog.pg_extension e
WHERE refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass
AND refobjid = e.oid
AND deptype = 'e'
AND e.extname='citus'
ORDER BY 1;
description
---------------------------------------------------------------------
event trigger citus_cascade_to_partition
function alter_distributed_table(regclass,text,integer,text,boolean)
function alter_old_partitions_set_access_method(regclass,timestamp with time zone,name)
function alter_role_if_exists(text,text)
function alter_table_set_access_method(regclass,text)
function any_value(anyelement)
function any_value_agg(anyelement,anyelement)
function array_cat_agg(anyarray)
function assign_distributed_transaction_id(integer,bigint,timestamp with time zone)
function authinfo_valid(text)
function broadcast_intermediate_result(text,text)
function check_distributed_deadlocks()
function citus_activate_node(text,integer)
function citus_add_inactive_node(text,integer,integer,noderole,name)
function citus_add_local_table_to_metadata(regclass,boolean)
function citus_add_node(text,integer,integer,noderole,name)
function citus_add_rebalance_strategy(name,regproc,regproc,regproc,real,real,real)
function citus_add_secondary_node(text,integer,text,integer,name)
function citus_blocking_pids(integer)
function citus_conninfo_cache_invalidate()
function citus_copy_shard_placement(bigint,text,integer,text,integer,boolean,citus.shard_transfer_mode)
function citus_create_restore_point(text)
function citus_disable_node(text,integer)
function citus_dist_local_group_cache_invalidate()
function citus_dist_node_cache_invalidate()
function citus_dist_object_cache_invalidate()
function citus_dist_partition_cache_invalidate()
function citus_dist_placement_cache_invalidate()
function citus_dist_shard_cache_invalidate()
function citus_dist_stat_activity()
function citus_drain_node(text,integer,citus.shard_transfer_mode,name)
function citus_drop_all_shards(regclass,text,text)
function citus_drop_trigger()
function citus_executor_name(integer)
function citus_extradata_container(internal)
function citus_finish_pg_upgrade()
function citus_get_active_worker_nodes()
function citus_internal.downgrade_columnar_storage(regclass)
function citus_internal.find_groupid_for_node(text,integer)
function citus_internal.pg_dist_node_trigger_func()
function citus_internal.pg_dist_rebalance_strategy_enterprise_check()
function citus_internal.pg_dist_rebalance_strategy_trigger_func()
function citus_internal.pg_dist_shard_placement_trigger_func()
function citus_internal.refresh_isolation_tester_prepared_statement()
function citus_internal.replace_isolation_tester_func()
function citus_internal.restore_isolation_tester_func()
function citus_internal.upgrade_columnar_storage(regclass)
function citus_isolation_test_session_is_blocked(integer,integer[])
function citus_json_concatenate(json,json)
function citus_json_concatenate_final(json)
function citus_jsonb_concatenate(jsonb,jsonb)
function citus_jsonb_concatenate_final(jsonb)
function citus_move_shard_placement(bigint,text,integer,text,integer,citus.shard_transfer_mode)
function citus_node_capacity_1(integer)
function citus_prepare_pg_upgrade()
function citus_query_stats()
function citus_relation_size(regclass)
function citus_remote_connection_stats()
function citus_remove_node(text,integer)
function citus_server_id()
function citus_set_coordinator_host(text,integer,noderole,name)
function citus_set_default_rebalance_strategy(text)
function citus_set_node_property(text,integer,text,boolean)
function citus_shard_allowed_on_node_true(bigint,integer)
function citus_shard_cost_1(bigint)
function citus_shard_cost_by_disk_size(bigint)
function citus_shard_sizes()
function citus_stat_statements()
function citus_stat_statements_reset()
function citus_table_is_visible(oid)
function citus_table_size(regclass)
function citus_text_send_as_jsonb(text)
function citus_total_relation_size(regclass,boolean)
function citus_truncate_trigger()
function citus_unmark_object_distributed(oid,oid,integer)
function citus_update_node(integer,text,integer,boolean,integer)
function citus_update_shard_statistics(bigint)
function citus_update_table_statistics(regclass)
function citus_validate_rebalance_strategy_functions(regproc,regproc,regproc)
function citus_version()
function citus_worker_stat_activity()
function column_name_to_column(regclass,text)
function column_to_column_name(regclass,text)
function coord_combine_agg(oid,cstring,anyelement)
function coord_combine_agg_ffunc(internal,oid,cstring,anyelement)
function coord_combine_agg_sfunc(internal,oid,cstring,anyelement)
function create_distributed_function(regprocedure,text,text)
function create_distributed_table(regclass,text,citus.distribution_type,text,integer)
function create_intermediate_result(text,text)
function create_reference_table(regclass)
function create_time_partitions(regclass,interval,timestamp with time zone,timestamp with time zone)
function distributed_tables_colocated(regclass,regclass)
function drop_old_time_partitions(regclass,timestamp with time zone)
function dump_global_wait_edges()
function dump_local_wait_edges()
function fetch_intermediate_results(text[],text,integer)
function fix_pre_citus10_partitioned_table_constraint_names()
function fix_pre_citus10_partitioned_table_constraint_names(regclass)
function get_all_active_transactions()
function get_colocated_shard_array(bigint)
function get_colocated_table_array(regclass)
function get_current_transaction_id()
function get_global_active_transactions()
function get_missing_time_partition_ranges(regclass,interval,timestamp with time zone,timestamp with time zone)
function get_rebalance_progress()
function get_rebalance_table_shards_plan(regclass,real,integer,bigint[],boolean,name,real)
function get_shard_id_for_distribution_column(regclass,"any")
function isolate_tenant_to_new_shard(regclass,"any",text)
function json_cat_agg(json)
function jsonb_cat_agg(jsonb)
function lock_relation_if_exists(text,text)
function lock_shard_metadata(integer,bigint[])
function lock_shard_resources(integer,bigint[])
function master_activate_node(text,integer)
function master_add_inactive_node(text,integer,integer,noderole,name)
function master_add_node(text,integer,integer,noderole,name)
function master_add_secondary_node(text,integer,text,integer,name)
function master_append_table_to_shard(bigint,text,text,integer)
function master_apply_delete_command(text)
function master_copy_shard_placement(bigint,text,integer,text,integer,boolean,citus.shard_transfer_mode)
function master_create_empty_shard(text)
function master_disable_node(text,integer)
function master_drain_node(text,integer,citus.shard_transfer_mode,name)
function master_get_active_worker_nodes()
function master_get_new_placementid()
function master_get_new_shardid()
function master_get_table_ddl_events(text)
function master_get_table_metadata(text)
function master_move_shard_placement(bigint,text,integer,text,integer,citus.shard_transfer_mode)
function master_remove_distributed_table_metadata_from_workers(regclass,text,text)
function master_remove_node(text,integer)
function master_remove_partition_metadata(regclass,text,text)
function master_run_on_worker(text[],integer[],text[],boolean)
function master_set_node_property(text,integer,text,boolean)
function master_unmark_object_distributed(oid,oid,integer)
function master_update_node(integer,text,integer,boolean,integer)
function master_update_shard_statistics(bigint)
function master_update_table_statistics(regclass)
function notify_constraint_dropped()
function poolinfo_valid(text)
function read_intermediate_result(text,citus_copy_format)
function read_intermediate_results(text[],citus_copy_format)
function rebalance_table_shards(regclass,real,integer,bigint[],citus.shard_transfer_mode,boolean,name)
function recover_prepared_transactions()
function relation_is_a_known_shard(regclass)
function remove_local_tables_from_metadata()
function replicate_reference_tables()
function replicate_table_shards(regclass,integer,integer,bigint[],citus.shard_transfer_mode)
function role_exists(name)
function run_command_on_colocated_placements(regclass,regclass,text,boolean)
function run_command_on_placements(regclass,text,boolean)
function run_command_on_shards(regclass,text,boolean)
function run_command_on_workers(text,boolean)
function shard_name(regclass,bigint)
function start_metadata_sync_to_node(text,integer)
function stop_metadata_sync_to_node(text,integer)
function time_partition_range(regclass)
function truncate_local_data_after_distributing_table(regclass)
function undistribute_table(regclass,boolean)
function update_distributed_table_colocation(regclass,text)
function worker_append_table_to_shard(text,text,text,integer)
function worker_apply_inter_shard_ddl_command(bigint,text,bigint,text,text)
function worker_apply_sequence_command(text)
function worker_apply_sequence_command(text,regtype)
function worker_apply_shard_ddl_command(bigint,text)
function worker_apply_shard_ddl_command(bigint,text,text)
function worker_change_sequence_dependency(regclass,regclass,regclass)
function worker_cleanup_job_schema_cache()
function worker_create_or_alter_role(text,text,text)
function worker_create_or_replace_object(text)
function worker_create_schema(bigint,text)
function worker_create_truncate_trigger(regclass)
function worker_drop_distributed_table(text)
function worker_fetch_foreign_file(text,text,bigint,text[],integer[])
function worker_fetch_partition_file(bigint,integer,integer,integer,text,integer)
function worker_fix_pre_citus10_partitioned_table_constraint_names(regclass,bigint,text)
function worker_hash("any")
function worker_hash_partition_table(bigint,integer,text,text,oid,anyarray)
function worker_last_saved_explain_analyze()
function worker_merge_files_into_table(bigint,integer,text[],text[])
function worker_partial_agg(oid,anyelement)
function worker_partial_agg_ffunc(internal)
function worker_partial_agg_sfunc(internal,oid,anyelement)
function worker_partition_query_result(text,text,integer,citus.distribution_type,text[],text[],boolean)
function worker_range_partition_table(bigint,integer,text,text,oid,anyarray)
function worker_record_sequence_dependency(regclass,regclass,name)
function worker_repartition_cleanup(bigint)
function worker_save_query_explain_analyze(text,jsonb)
schema citus
schema citus_internal
schema columnar
sequence columnar.storageid_seq
sequence pg_dist_colocationid_seq
sequence pg_dist_groupid_seq
sequence pg_dist_node_nodeid_seq
sequence pg_dist_placement_placementid_seq
sequence pg_dist_shardid_seq
table citus.pg_dist_object
table columnar.chunk
table columnar.chunk_group
table columnar.options
table columnar.stripe
table pg_dist_authinfo
table pg_dist_colocation
table pg_dist_local_group
table pg_dist_node
table pg_dist_node_metadata
table pg_dist_partition
table pg_dist_placement
table pg_dist_poolinfo
table pg_dist_rebalance_strategy
table pg_dist_shard
table pg_dist_transaction
type citus.distribution_type
type citus.shard_transfer_mode
type citus_copy_format
type noderole
view citus_dist_stat_activity
view citus_lock_waits
view citus_shard_indexes_on_worker
view citus_shards
view citus_shards_on_worker
view citus_stat_statements
view citus_tables
view citus_worker_stat_activity
view pg_dist_shard_placement
view time_partitions
(227 rows)
@ -66,6 +66,7 @@ test: shared_connection_waits
test: isolation_cancellation
test: isolation_undistribute_table
test: isolation_citus_update_table_statistics
test: isolation_fix_partition_shard_index_names

# Rebalancer
test: isolation_blocking_move_single_shard_commands
@ -67,6 +67,7 @@ test: ensure_no_intermediate_data_leak
# ----------
test: multi_partitioning_utils multi_partitioning partitioning_issue_3970 replicated_partitioned_table
test: drop_partitioned_table
test: multi_fix_partition_shard_index_names
test: partition_wise_join

# ----------
@ -51,7 +51,7 @@ test: subqueries_deep subquery_view subquery_partitioning subqueries_not_support
test: subquery_in_targetlist subquery_in_where subquery_complex_target_list
test: subquery_prepared_statements
test: non_colocated_leaf_subquery_joins non_colocated_subquery_joins non_colocated_join_order
test: cte_inline recursive_view_local_table values
test: cte_inline recursive_view_local_table values sequences_with_different_types
test: pg13 pg12
# run pg14 sequentially as it syncs metadata
test: pg14
@ -0,0 +1,48 @@
setup
{
    CREATE TABLE dist_partitioned_table(a INT, created_at timestamptz) PARTITION BY RANGE (created_at);
    SELECT create_distributed_table('dist_partitioned_table', 'a');
}

teardown
{
    DROP TABLE IF EXISTS dist_partitioned_table;
}

session "s1"

step "s1-begin"
{
    BEGIN;
}

step "s1-drop-table"
{
    DROP TABLE dist_partitioned_table;
}

step "s1-commit"
{
    COMMIT;
}

session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-fix-partition-shard-index-names"
{
    SET client_min_messages TO NOTICE;
    SELECT fix_partition_shard_index_names('dist_partitioned_table'::regclass);
}

step "s2-commit"
{
    COMMIT;
}

permutation "s1-begin" "s1-drop-table" "s2-fix-partition-shard-index-names" "s1-commit"
permutation "s2-begin" "s2-fix-partition-shard-index-names" "s1-drop-table" "s2-commit"
@ -376,6 +376,9 @@ select percentile_cont(0.5) within group(order by valf) from aggdata;
select key, percentile_cont(key/10.0) within group(order by val) from aggdata group by key;
select array_agg(val order by valf) from aggdata;

-- test by using some other node types as arguments to agg
select key, percentile_cont((key - (key > 4)::int) / 10.0) within group(order by val) from aggdata group by key;

-- Test TransformSubqueryNode

select * FROM (
@ -479,6 +482,54 @@ SELECT square_func(5), a FROM t1 GROUP BY a;
-- the expression will be pushed down.
SELECT square_func(5), a, count(a) FROM t1 GROUP BY a;

-- Test the cases where the worker agg exec. returns no tuples.

CREATE TABLE dist_table (dist_col int, agg_col numeric);
SELECT create_distributed_table('dist_table', 'dist_col');

CREATE TABLE ref_table (int_col int);
SELECT create_reference_table('ref_table');

SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col)
FROM dist_table
LEFT JOIN ref_table ON TRUE;

SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col)
FROM (SELECT *, random() FROM dist_table) a;

SELECT PERCENTILE_DISC((2 > random())::int::numeric / 10) WITHIN GROUP (ORDER BY agg_col)
FROM dist_table
LEFT JOIN ref_table ON TRUE;

SELECT SUM(COALESCE(agg_col, 3))
FROM dist_table
LEFT JOIN ref_table ON TRUE;

SELECT AVG(COALESCE(agg_col, 10))
FROM dist_table
LEFT JOIN ref_table ON TRUE;

insert into dist_table values (2, 11.2), (3, NULL), (6, 3.22), (3, 4.23), (5, 5.25), (4, 63.4), (75, NULL), (80, NULL), (96, NULL), (8, 1078), (0, 1.19);

-- run the same queries after loading some data
SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col)
FROM dist_table
LEFT JOIN ref_table ON TRUE;

SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col)
FROM (SELECT *, random() FROM dist_table) a;

SELECT PERCENTILE_DISC((2 > random())::int::numeric / 10) WITHIN GROUP (ORDER BY agg_col)
FROM dist_table
LEFT JOIN ref_table ON TRUE;

SELECT floor(SUM(COALESCE(agg_col, 3)))
FROM dist_table
LEFT JOIN ref_table ON TRUE;

SELECT floor(AVG(COALESCE(agg_col, 10)))
FROM dist_table
LEFT JOIN ref_table ON TRUE;

set client_min_messages to error;
drop schema aggregate_support cascade;
@ -374,3 +374,74 @@ select * from numrange_test natural join numrange_test2 order by nr;
DROP TABLE atest1, atest2, t1, t2, t3, numrange_test, numrange_test2;

set default_table_access_method to default;

set columnar.planner_debug_level to notice;

BEGIN;
SET LOCAL columnar.stripe_row_limit = 2000;
SET LOCAL columnar.chunk_group_row_limit = 1000;
create table pushdown_test (a int, b int) using columnar;
insert into pushdown_test values (generate_series(1, 200000));
COMMIT;

SET columnar.max_custom_scan_paths TO 50;
SET columnar.qual_pushdown_correlation_threshold TO 0.0;
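-- A hedged note (not part of the original test): as I read these settings,
-- columnar.max_custom_scan_paths caps how many alternative custom scan paths
-- the columnar planner generates for different pushdown clause combinations,
-- and setting columnar.qual_pushdown_correlation_threshold to 0.0 makes it
-- attempt chunk group filtering for every candidate qual, regardless of the
-- column's correlation statistics.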

EXPLAIN (analyze on, costs off, timing off, summary off)
SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556;
SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556;

EXPLAIN (analyze on, costs off, timing off, summary off)
SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556;
SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556;

EXPLAIN (analyze on, costs off, timing off, summary off)
SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a > a*-1 + b;

EXPLAIN (analyze on, costs off, timing off, summary off)
SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000);
SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000);

EXPLAIN (analyze on, costs off, timing off, summary off)
SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100);
SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100);

EXPLAIN (analyze on, costs off, timing off, summary off)
SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010);
SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010);

EXPLAIN (analyze on, costs off, timing off, summary off)
SELECT sum(a) FROM pushdown_test where
(
    a > random()
    and
    (
        (a < 200 and a not in (select a from pushdown_test)) or
        (a > 1000 and a < 2000)
    )
)
or
(a > 200000-2010);
SELECT sum(a) FROM pushdown_test where
(
    a > random()
    and
    (
        (a < 200 and a not in (select a from pushdown_test)) or
        (a > 1000 and a < 2000)
    )
)
or
(a > 200000-2010);

create function stable_1(arg int) returns int language plpgsql STRICT IMMUTABLE as
$$ BEGIN RETURN 1+arg; END; $$;
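-- A hedged note (not part of the original test): despite its name, stable_1 is
-- declared IMMUTABLE, so a call with a constant argument such as stable_1(6000)
-- can be folded at plan time and considered for chunk group filtering, while
-- stable_1(a) still has to be evaluated row by row.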

EXPLAIN (analyze on, costs off, timing off, summary off)
SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000));
SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000));

RESET columnar.max_custom_scan_paths;
RESET columnar.qual_pushdown_correlation_threshold;
RESET columnar.planner_debug_level;
DROP TABLE pushdown_test;
@ -546,5 +546,94 @@ begin;
insert into uniq select generate_series(1,100);
rollback;

-- Show that we nicely ignore index deletion requests made to columnarAM.
--
-- An INSERT command might trigger index deletion if the index already has
-- dead entries for the key we are about to insert.
-- There are two kinds of index deletion:
--   a) simple deletion
--   b) bottom-up deletion (>= pg14)
--
-- Since columnar_index_fetch_tuple never sets all_dead to true, columnarAM
-- doesn't expect to receive simple deletion requests, as we never mark any
-- index entries as dead.
-- Otherwise, columnarAM would throw an error for all six test cases below.
--
-- However, since columnarAM doesn't delete any dead entries via simple
-- deletion, postgres may eventually ask for a more comprehensive (bottom-up)
-- deletion on PG >= 14.
-- For this reason, all six test cases below will trigger bottom-up deletion.
-- Show that we gracefully ignore such requests.
CREATE TABLE index_tuple_delete (a int UNIQUE) USING COLUMNAR;
ALTER TABLE index_tuple_delete SET (autovacuum_enabled = false);
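-- A hedged note (not part of the original test): autovacuum is disabled so
-- that vacuum cannot clean up dead index entries behind the test's back; the
-- rolled-back inserts below then deterministically leave dead entries in the
-- unique index, which is what drives later inserts to issue the bottom-up
-- deletion requests that columnarAM is expected to ignore.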

BEGIN;
-- i) rollback before flushing
INSERT INTO index_tuple_delete SELECT i FROM generate_series(0,10000)i;
ROLLBACK;

-- index deletion test-1
BEGIN;
INSERT INTO index_tuple_delete SELECT i FROM generate_series(0,10000)i;
ROLLBACK;
COPY index_tuple_delete FROM PROGRAM 'seq 10000';

TRUNCATE index_tuple_delete;

BEGIN;
-- ii) rollback after flushing
INSERT INTO index_tuple_delete SELECT i FROM generate_series(0,10000)i;
SELECT SUM(a) > 0 FROM index_tuple_delete;
ROLLBACK;

-- index deletion test-2
BEGIN;
INSERT INTO index_tuple_delete SELECT i FROM generate_series(0,10000)i;
ROLLBACK;
COPY index_tuple_delete FROM PROGRAM 'seq 10000';

TRUNCATE index_tuple_delete;

BEGIN;
-- iii) rollback before flushing, use savepoint
SAVEPOINT sp1;
INSERT INTO index_tuple_delete SELECT i FROM generate_series(0,10000)i;
ROLLBACK TO sp1;

-- index deletion test-3
SAVEPOINT sp2;
INSERT INTO index_tuple_delete SELECT i FROM generate_series(0,10000)i;
ROLLBACK TO sp2;
COPY index_tuple_delete FROM PROGRAM 'seq 10000';
ROLLBACK;

-- index deletion test-4
BEGIN;
INSERT INTO index_tuple_delete SELECT i FROM generate_series(0,10000)i;
ROLLBACK;
COPY index_tuple_delete FROM PROGRAM 'seq 10000';

TRUNCATE index_tuple_delete;

BEGIN;
-- iv) rollback after flushing, use savepoint
SAVEPOINT sp1;
INSERT INTO index_tuple_delete SELECT i FROM generate_series(0,10000)i;
SELECT SUM(a) > 0 FROM index_tuple_delete;
ROLLBACK TO sp1;

-- index deletion test-5
SAVEPOINT sp2;
INSERT INTO index_tuple_delete SELECT i FROM generate_series(0,10000)i;
ROLLBACK TO sp2;
COPY index_tuple_delete FROM PROGRAM 'seq 10000';
ROLLBACK;

-- index deletion test-6
BEGIN;
INSERT INTO index_tuple_delete SELECT i FROM generate_series(0,10000)i;
ROLLBACK;
COPY index_tuple_delete FROM PROGRAM 'seq 10000';

SET client_min_messages TO WARNING;
DROP SCHEMA columnar_indexes CASCADE;
@ -199,6 +199,16 @@ BEGIN;
SELECT a FROM flush_create_index WHERE a=5;
ROLLBACK;

CREATE OR REPLACE FUNCTION test_columnar_storage_write_new_page(relation regclass) RETURNS void
STRICT LANGUAGE c AS 'citus', 'test_columnar_storage_write_new_page';

CREATE TABLE aborted_write (a int, b int) USING columnar;

SELECT test_columnar_storage_write_new_page('aborted_write');

SET client_min_messages TO DEBUG4;
INSERT INTO aborted_write VALUES (5);
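-- A hedged note (not part of the original test):
-- test_columnar_storage_write_new_page appears to simulate a page write whose
-- transaction never committed; raising client_min_messages to DEBUG4 should
-- make the storage layer's handling of that aborted write visible when the
-- INSERT above touches the same page.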

RESET search_path;
SET client_min_messages TO WARNING;
DROP SCHEMA columnar_insert CASCADE;
@ -479,6 +479,204 @@ SELECT * FROM test ORDER BY id;

DROP TABLE test;

-- verify that recreating distributed functions with TABLE params gets propagated to workers
CREATE OR REPLACE FUNCTION func_with_return_table(int)
RETURNS TABLE (date date)
LANGUAGE plpgsql AS $$
BEGIN
RETURN query SELECT '2011-01-01'::date;
END;
$$;

SELECT create_distributed_function('func_with_return_table(int)');

CREATE OR REPLACE FUNCTION func_with_return_table(int)
RETURNS TABLE (date date)
LANGUAGE plpgsql AS $$
BEGIN
RETURN query SELECT '2011-01-02'::date;
END;
$$;

SELECT count(*) FROM
(SELECT result FROM
run_command_on_workers($$select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc) from pg_proc where proname = 'func_with_return_table';$$)
UNION select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc)::text from pg_proc where proname = 'func_with_return_table')
as test;
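-- A hedged note (not part of the original test): run_command_on_workers
-- returns one row per worker containing the query's textual result, and the
-- UNION with the coordinator's own row collapses duplicates; a count of 1
-- therefore means every worker ended up with exactly the function definition
-- the coordinator has after the CREATE OR REPLACE above. The same trick is
-- used by the remaining checks below.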

-- verify that recreating distributed functions with OUT params gets propagated to workers
CREATE OR REPLACE FUNCTION func_with_out_param(a int, out b int)
RETURNS int
LANGUAGE sql AS $$ select 1; $$;

SELECT create_distributed_function('func_with_out_param(int)');

SET client_min_messages TO ERROR;
CREATE ROLE r1;
SELECT 1 FROM run_command_on_workers($$CREATE ROLE r1;$$);
GRANT EXECUTE ON FUNCTION func_with_out_param TO r1;
SELECT 1 FROM run_command_on_workers($$GRANT EXECUTE ON FUNCTION func_with_out_param TO r1;$$);
RESET client_min_messages;

CREATE OR REPLACE FUNCTION func_with_out_param(a int, out b int)
RETURNS int
LANGUAGE sql AS $$ select 2; $$;

SELECT count(*) FROM
(SELECT result FROM
run_command_on_workers($$select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc, pg_proc.proowner) from pg_proc where proname = 'func_with_out_param';$$)
UNION select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc, pg_proc.proowner)::text from pg_proc where proname = 'func_with_out_param')
as test;

-- verify that recreating distributed functions with INOUT params gets propagated to workers
CREATE OR REPLACE FUNCTION func_with_inout_param(a int, inout b int)
RETURNS int
LANGUAGE sql AS $$ select 1; $$;

-- this should error out
SELECT create_distributed_function('func_with_inout_param(int)');
-- this should work
SELECT create_distributed_function('func_with_inout_param(int,int)');
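-- Aside (illustration, not in the original test): OUT parameters are excluded
-- from a function's identity signature while INOUT parameters are included,
-- which is why func_with_out_param resolves as (int) but func_with_inout_param
-- must be referenced as (int,int).
SELECT proname, pg_get_function_identity_arguments(oid)
FROM pg_proc
WHERE proname IN ('func_with_out_param', 'func_with_inout_param');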

CREATE OR REPLACE FUNCTION func_with_inout_param(a int, inout b int)
RETURNS int
LANGUAGE sql AS $$ select 2; $$;

SELECT count(*) FROM
(SELECT result FROM
run_command_on_workers($$select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc) from pg_proc where proname = 'func_with_inout_param';$$)
UNION select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc)::text from pg_proc where proname = 'func_with_inout_param')
as test;

-- verify that recreating distributed functions with VARIADIC params gets propagated to workers
CREATE OR REPLACE FUNCTION func_with_variadic_param(a int, variadic b int[])
RETURNS int
LANGUAGE sql AS $$ select 1; $$;

-- this should work
SELECT create_distributed_function('func_with_variadic_param(int,int[])');

CREATE OR REPLACE FUNCTION func_with_variadic_param(a int, variadic b int[])
RETURNS int
LANGUAGE sql AS $$ select 2; $$;

SELECT count(*) FROM
(SELECT result FROM
run_command_on_workers($$select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc) from pg_proc where proname = 'func_with_variadic_param';$$)
UNION select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc)::text from pg_proc where proname = 'func_with_variadic_param')
as test;

-- verify that recreating distributed functions returning setof records gets propagated to workers
CREATE OR REPLACE FUNCTION func_returning_setof_int(IN parm1 date, IN parm2 interval)
RETURNS SETOF integer AS
$BODY$
BEGIN
RETURN QUERY
SELECT 1;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;

SELECT create_distributed_function('func_returning_setof_int(date,interval)');

CREATE OR REPLACE FUNCTION func_returning_setof_int(IN parm1 date, IN parm2 interval)
RETURNS SETOF integer AS
$BODY$
BEGIN
RETURN QUERY
SELECT 2;

END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;

SELECT count(*) FROM
(SELECT result FROM
run_command_on_workers($$select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc) from pg_proc where proname = 'func_returning_setof_int';$$)
UNION select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc)::text from pg_proc where proname = 'func_returning_setof_int')
as test;

-- verify that recreating distributed functions with variadic param returning setof records gets propagated to workers
CREATE OR REPLACE FUNCTION func_returning_setof_int_with_variadic_param(IN parm1 date, VARIADIC parm2 int[])
RETURNS SETOF integer AS
$BODY$
BEGIN
RETURN QUERY
SELECT 1;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;

SELECT create_distributed_function('func_returning_setof_int_with_variadic_param(date,int[])');

CREATE OR REPLACE FUNCTION func_returning_setof_int_with_variadic_param(IN parm1 date, VARIADIC parm2 int[])
RETURNS SETOF integer AS
$BODY$
BEGIN
RETURN QUERY
SELECT 2;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;

SELECT count(*) FROM
(SELECT result FROM
run_command_on_workers($$select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc) from pg_proc where proname = 'func_returning_setof_int_with_variadic_param';$$)
UNION select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc)::text from pg_proc where proname = 'func_returning_setof_int_with_variadic_param')
as test;

-- verify that recreating distributed procedures with VARIADIC params gets propagated to workers
CREATE OR REPLACE PROCEDURE proc_with_variadic_param(IN parm1 date, VARIADIC parm2 int[])
LANGUAGE SQL
AS $$
SELECT 1;
$$;

-- this should error out
SELECT create_distributed_function('proc_with_variadic_param(date)');
-- this should work
SELECT create_distributed_function('proc_with_variadic_param(date,int[])');

CREATE OR REPLACE PROCEDURE proc_with_variadic_param(IN parm1 date, VARIADIC parm2 int[])
LANGUAGE SQL
AS $$
SELECT 2;
$$;

SELECT count(*) FROM
(SELECT result FROM
run_command_on_workers($$select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc) from pg_proc where proname = 'proc_with_variadic_param';$$)
UNION select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc)::text from pg_proc where proname = 'proc_with_variadic_param')
as test;

-- verify that recreating distributed procedures with INOUT param gets propagated to workers
CREATE OR REPLACE PROCEDURE proc_with_inout_param(IN parm1 date, INOUT parm2 int)
LANGUAGE SQL
AS $$
SELECT 1;
$$;

-- this should error out
SELECT create_distributed_function('proc_with_inout_param(date)');
-- this should work
SELECT create_distributed_function('proc_with_inout_param(date,int)');

CREATE OR REPLACE PROCEDURE proc_with_inout_param(IN parm1 date, INOUT parm2 int)
LANGUAGE SQL
AS $$
SELECT 2;
$$;

SELECT count(*) FROM
(SELECT result FROM
run_command_on_workers($$select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc) from pg_proc where proname = 'proc_with_inout_param';$$)
UNION select row(pg_proc.pronargs, pg_proc.proargtypes, pg_proc.prosrc)::text from pg_proc where proname = 'proc_with_inout_param')
as test;

SET client_min_messages TO error; -- suppress cascading objects dropping
DROP SCHEMA function_tests CASCADE;
DROP SCHEMA function_tests2 CASCADE;

@ -115,6 +115,20 @@ $$ LANGUAGE plpgsql STRICT IMMUTABLE;
SELECT worker_create_or_replace_object('CREATE AGGREGATE proc_conflict.existing_agg(integer) (STYPE = integer,SFUNC = proc_conflict.existing_func2)');
SELECT worker_create_or_replace_object('CREATE AGGREGATE proc_conflict.existing_agg(integer) (STYPE = integer,SFUNC = proc_conflict.existing_func2)');

-- test worker_create_or_replace_object with a function that returns table
CREATE OR REPLACE FUNCTION func_with_return_table(int)
RETURNS TABLE (date date)
LANGUAGE plpgsql AS $$
BEGIN
RETURN query SELECT '2011-01-01'::date;
END;
$$;

SELECT worker_create_or_replace_object('CREATE OR REPLACE FUNCTION func_with_return_table(int) RETURNS TABLE (date date) LANGUAGE plpgsql AS $$ BEGIN RETURN query SELECT ''2011-01-01''::date; END; $$;');

-- verify that a backup function is created
SELECT COUNT(*)=2 FROM pg_proc WHERE proname LIKE 'func_with_return_table%';
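-- Aside (assumption, not in the original test): the second pg_proc entry is
-- presumably the old definition renamed out of the way by
-- worker_create_or_replace_object; listing both makes the backup visible:
SELECT proname FROM pg_proc WHERE proname LIKE 'func_with_return_table%' ORDER BY 1;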

-- hide cascades
SET client_min_messages TO error;
DROP SCHEMA proc_conflict CASCADE;

@ -1084,5 +1084,11 @@ EXPLAIN :default_analyze_flags SELECT FROM (SELECT * FROM reference_table) subqu
PREPARE dummy_prep_stmt(int) AS SELECT FROM distributed_table_1;
EXPLAIN :default_analyze_flags EXECUTE dummy_prep_stmt(50);
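-- Aside (assumption, not in this hunk): :default_analyze_flags is a psql
-- variable defined earlier in the file, presumably something like
-- \set default_analyze_flags '(ANALYZE on, COSTS off, TIMING off, SUMMARY off)'
-- so the EXPLAINs above execute the statement and print a stable plan.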

CREATE TYPE multi_explain.int_wrapper_type AS (int_field int);
CREATE TABLE tbl (a int, b multi_explain.int_wrapper_type);
SELECT create_distributed_table('tbl', 'a');

EXPLAIN :default_analyze_flags SELECT * FROM tbl;

SET client_min_messages TO ERROR;
DROP SCHEMA multi_explain CASCADE;

@ -297,6 +297,35 @@ ALTER EXTENSION citus UPDATE TO '9.5-1';
ALTER EXTENSION citus UPDATE TO '10.0-4';
SELECT * FROM multi_extension.print_extension_changes();

-- do not print "HINT: ", to hide the current lib version
\set VERBOSITY terse
CREATE TABLE columnar_table(a INT, b INT) USING columnar;
SET citus.enable_version_checks TO ON;

-- all should throw an error due to version mismatch
VACUUM FULL columnar_table;
INSERT INTO columnar_table SELECT i FROM generate_series(1, 10) i;
VACUUM columnar_table;
TRUNCATE columnar_table;
DROP TABLE columnar_table;
CREATE INDEX ON columnar_table (a);
SELECT alter_columnar_table_set('columnar_table', compression => 'pglz');
SELECT alter_columnar_table_reset('columnar_table');
INSERT INTO columnar_table SELECT * FROM columnar_table;

SELECT 1 FROM columnar_table; -- columnar custom scan

SET columnar.enable_custom_scan TO OFF;
SELECT 1 FROM columnar_table; -- seq scan
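-- Aside (illustration, not in the original test): the difference between the
-- two scans can be made visible in the plan output.
EXPLAIN (COSTS OFF) SELECT 1 FROM columnar_table;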

CREATE TABLE new_columnar_table (a int) USING columnar;

-- do cleanup for the rest of the tests
SET citus.enable_version_checks TO OFF;
DROP TABLE columnar_table;
RESET columnar.enable_custom_scan;
\set VERBOSITY default

-- Test downgrade to 10.0-4 from 10.1-1
ALTER EXTENSION citus UPDATE TO '10.1-1';
ALTER EXTENSION citus UPDATE TO '10.0-4';

@ -317,6 +346,74 @@ SELECT * FROM multi_extension.print_extension_changes();
ALTER EXTENSION citus UPDATE TO '10.2-1';
SELECT * FROM multi_extension.print_extension_changes();

-- Test downgrade to 10.2-1 from 10.2-2
ALTER EXTENSION citus UPDATE TO '10.2-2';
ALTER EXTENSION citus UPDATE TO '10.2-1';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM multi_extension.print_extension_changes();

-- Snapshot of state at 10.2-2
ALTER EXTENSION citus UPDATE TO '10.2-2';
SELECT * FROM multi_extension.print_extension_changes();

-- Test downgrade to 10.2-2 from 10.2-3
ALTER EXTENSION citus UPDATE TO '10.2-3';
ALTER EXTENSION citus UPDATE TO '10.2-2';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM multi_extension.print_extension_changes();

-- Snapshot of state at 10.2-3
ALTER EXTENSION citus UPDATE TO '10.2-3';
SELECT * FROM multi_extension.print_extension_changes();

-- Test downgrade to 10.2-3 from 10.2-4
ALTER EXTENSION citus UPDATE TO '10.2-4';
ALTER EXTENSION citus UPDATE TO '10.2-3';

-- Make sure that we don't delete pg_depend entries added in
-- columnar--10.2-3--10.2-4.sql when downgrading to 10.2-3.
SELECT COUNT(*)=10
FROM pg_depend
WHERE classid = 'pg_am'::regclass::oid AND
objid = (select oid from pg_am where amname = 'columnar') AND
objsubid = 0 AND
refclassid = 'pg_class'::regclass::oid AND
refobjsubid = 0 AND
deptype = 'n';

-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM multi_extension.print_extension_changes();

-- Snapshot of state at 10.2-4
ALTER EXTENSION citus UPDATE TO '10.2-4';
SELECT * FROM multi_extension.print_extension_changes();

-- Make sure that we defined dependencies from all rel objects (tables,
-- indexes, sequences ..) to columnar table access method ...
SELECT pg_class.oid INTO columnar_schema_members
FROM pg_class, pg_namespace
WHERE pg_namespace.oid=pg_class.relnamespace AND
pg_namespace.nspname='columnar';
SELECT refobjid INTO columnar_schema_members_pg_depend
FROM pg_depend
WHERE classid = 'pg_am'::regclass::oid AND
objid = (select oid from pg_am where amname = 'columnar') AND
objsubid = 0 AND
refclassid = 'pg_class'::regclass::oid AND
refobjsubid = 0 AND
deptype = 'n';

-- ... , so this should be empty,
(TABLE columnar_schema_members EXCEPT TABLE columnar_schema_members_pg_depend)
UNION
(TABLE columnar_schema_members_pg_depend EXCEPT TABLE columnar_schema_members);
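-- Aside (not in the original test): (A EXCEPT B) UNION (B EXCEPT A) is the
-- symmetric difference of two sets; an empty result proves that the columnar
-- schema members and the pg_depend entries are exactly the same set of OIDs.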

-- ... , and both columnar_schema_members_pg_depend & columnar_schema_members
-- should have 10 entries.
SELECT COUNT(*)=10 FROM columnar_schema_members_pg_depend;

DROP TABLE columnar_schema_members, columnar_schema_members_pg_depend;

DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;

-- show running version

@ -0,0 +1,321 @@
----------------------------------------------------
-- multi_fix_partition_shard_index_names
-- check the following two issues
-- https://github.com/citusdata/citus/issues/4962
-- https://github.com/citusdata/citus/issues/5138
----------------------------------------------------
SET citus.next_shard_id TO 910000;
SET citus.shard_replication_factor TO 1;
CREATE SCHEMA fix_idx_names;
SET search_path TO fix_idx_names, public;

-- NULL input should automatically return NULL since
-- fix_partition_shard_index_names is strict
-- same for worker_fix_partition_shard_index_names
SELECT fix_partition_shard_index_names(NULL);
SELECT worker_fix_partition_shard_index_names(NULL, NULL, NULL);
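-- Aside (illustration, not in the original test): for a STRICT function,
-- PostgreSQL short-circuits the call to NULL without invoking the function
-- body at all, which is why both calls above return NULL instead of erroring.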

-- fix_partition_shard_index_names cannot be called for distributed
-- and not partitioned tables
CREATE TABLE not_partitioned(id int);
SELECT create_distributed_table('not_partitioned', 'id');
SELECT fix_partition_shard_index_names('not_partitioned'::regclass);

-- fix_partition_shard_index_names cannot be called for partitioned
-- and not distributed tables
CREATE TABLE not_distributed(created_at timestamptz) PARTITION BY RANGE (created_at);
SELECT fix_partition_shard_index_names('not_distributed'::regclass);

-- test with proper table
CREATE TABLE dist_partitioned_table (dist_col int, another_col int, partition_col timestamp) PARTITION BY RANGE (partition_col);
SELECT create_distributed_table('dist_partitioned_table', 'dist_col');

-- create a partition with a long name and another with a short name
CREATE TABLE partition_table_with_very_long_name PARTITION OF dist_partitioned_table FOR VALUES FROM ('2018-01-01') TO ('2019-01-01');
CREATE TABLE p PARTITION OF dist_partitioned_table FOR VALUES FROM ('2019-01-01') TO ('2020-01-01');

-- create an index on the parent table
-- we will see that it doesn't matter whether we name the index on the parent or not
-- indexes auto-generated on partitions will not use this name
CREATE INDEX short ON dist_partitioned_table USING btree (another_col, partition_col);

SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :worker_1_port
-- Note that the shell table from above, partition_table_with_very_long_name,
-- and its shard partition_table_with_very_long_name_910008
-- have the same index name: partition_table_with_very_long_na_another_col_partition_col_idx
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :master_port
-- this should fail because of the name clash explained above
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);

-- let's fix the problematic table
SET search_path TO fix_idx_names, public;
SELECT fix_partition_shard_index_names('dist_partitioned_table'::regclass);

\c - - - :worker_1_port
-- the shard id has been appended to all index names which didn't end in a shard id
-- this goes in line with Citus's way of naming indexes of shards: always append the shardid to the end
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :master_port
SET search_path TO fix_idx_names, public;

-- this should now work
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);

-- if we run this command again, the names will not change anymore since the shardid is appended to them
SELECT fix_partition_shard_index_names('dist_partitioned_table'::regclass);
SELECT fix_all_partition_shard_index_names();

\c - - - :worker_1_port
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :master_port
SET search_path TO fix_idx_names, public;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 910020;

-- if we explicitly create an index on the partition-to-be table, Citus handles the naming
-- hence we would have no broken index names
CREATE TABLE another_partition_table_with_very_long_name (dist_col int, another_col int, partition_col timestamp);
SELECT create_distributed_table('another_partition_table_with_very_long_name', 'dist_col');
CREATE INDEX ON another_partition_table_with_very_long_name USING btree (another_col, partition_col);
ALTER TABLE dist_partitioned_table ATTACH PARTITION another_partition_table_with_very_long_name FOR VALUES FROM ('2020-01-01') TO ('2021-01-01');

-- check it works even if we give a weird index name
CREATE TABLE yet_another_partition_table (dist_col int, another_col int, partition_col timestamp);
SELECT create_distributed_table('yet_another_partition_table', 'dist_col');
CREATE INDEX "really weird index name !!" ON yet_another_partition_table USING btree (another_col, partition_col);
ALTER TABLE dist_partitioned_table ATTACH PARTITION yet_another_partition_table FOR VALUES FROM ('2021-01-01') TO ('2022-01-01');
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :worker_1_port
-- notice indexes of shards of another_partition_table_with_very_long_name already have the shardid appended to the end
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :master_port
SET search_path TO fix_idx_names, public;
-- this command would not do anything
SELECT fix_all_partition_shard_index_names();

\c - - - :worker_1_port
-- names are the same as before
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :master_port
SET search_path TO fix_idx_names, public;
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);

DROP INDEX short;
DROP TABLE yet_another_partition_table, another_partition_table_with_very_long_name;
-- this will create a constraint1 index on the parent
ALTER TABLE dist_partitioned_table ADD CONSTRAINT constraint1 UNIQUE (dist_col, partition_col);
CREATE TABLE fk_table (id int, fk_column timestamp, FOREIGN KEY (id, fk_column) REFERENCES dist_partitioned_table (dist_col, partition_col));

-- try creating an index on the foreign key columns
CREATE INDEX ON dist_partitioned_table USING btree (dist_col, partition_col);

SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
\c - - - :worker_1_port
-- index names don't end in a shardid for partitions
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :master_port
SET search_path TO fix_idx_names, public;
SELECT fix_all_partition_shard_index_names();

\c - - - :worker_1_port
-- now index names end in a shardid
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :master_port
SET search_path TO fix_idx_names, public;

ALTER TABLE dist_partitioned_table DROP CONSTRAINT constraint1 CASCADE;
DROP INDEX dist_partitioned_table_dist_col_partition_col_idx;

-- try with an index on only the parent
-- this is also an invalid index
-- also try with the hash method, not btree
CREATE INDEX short_parent ON ONLY dist_partitioned_table USING hash (dist_col);
-- only another_partition will have the index on dist_col inherited from short_parent
-- hence short_parent will still be invalid
CREATE TABLE another_partition (dist_col int, another_col int, partition_col timestamp);
ALTER TABLE dist_partitioned_table ATTACH PARTITION another_partition FOR VALUES FROM ('2017-01-01') TO ('2018-01-01');

SELECT c.relname AS indexname
FROM pg_catalog.pg_class c, pg_catalog.pg_namespace n, pg_catalog.pg_index i
WHERE (i.indisvalid = false) AND i.indexrelid = c.oid AND c.relnamespace = n.oid AND n.nspname = 'fix_idx_names';

-- try with an index on only the partition
CREATE INDEX short_child ON ONLY p USING hash (dist_col);
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :worker_1_port
-- index names are already correct, except for the inherited index for another_partition
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :master_port
SET search_path TO fix_idx_names, public;
-- this will fix the inherited index for another_partition
SELECT fix_partition_shard_index_names('dist_partitioned_table'::regclass);
-- this will error out because p is not partitioned, it is rather a partition
SELECT fix_partition_shard_index_names('p'::regclass);

\c - - - :worker_1_port
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :master_port
SET search_path TO fix_idx_names, public;

DROP INDEX short_parent;
DROP INDEX short_child;
DROP TABLE another_partition;

-- expression indexes have the same problem with naming
CREATE INDEX expression_index ON dist_partitioned_table ((dist_col || ' ' || another_col));
-- try with statistics on the index
CREATE INDEX statistics_on_index on dist_partitioned_table ((dist_col+another_col), (dist_col-another_col));
ALTER INDEX statistics_on_index ALTER COLUMN 1 SET STATISTICS 3737;
ALTER INDEX statistics_on_index ALTER COLUMN 2 SET STATISTICS 3737;

SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :worker_1_port
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :master_port
SET search_path TO fix_idx_names, public;
SELECT fix_partition_shard_index_names('dist_partitioned_table'::regclass);

\c - - - :worker_1_port
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :master_port
SET search_path TO fix_idx_names, public;

-- try with a table with no partitions
ALTER TABLE dist_partitioned_table DETACH PARTITION p;
ALTER TABLE dist_partitioned_table DETACH PARTITION partition_table_with_very_long_name;
DROP TABLE p;
DROP TABLE partition_table_with_very_long_name;

-- dist_partitioned_table still has indexes
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

-- this does nothing
SELECT fix_partition_shard_index_names('dist_partitioned_table'::regclass);

\c - - - :worker_1_port
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :master_port
SET search_path TO fix_idx_names, public;
DROP TABLE dist_partitioned_table;

-- add a test with replication factor = 2
SET citus.shard_replication_factor TO 2;
SET citus.next_shard_id TO 910050;

CREATE TABLE dist_partitioned_table (dist_col int, another_col int, partition_col timestamp) PARTITION BY RANGE (partition_col);
SELECT create_distributed_table('dist_partitioned_table', 'dist_col');

-- create a partition with a long name
CREATE TABLE partition_table_with_very_long_name PARTITION OF dist_partitioned_table FOR VALUES FROM ('2018-01-01') TO ('2019-01-01');

-- create an index on the parent table
CREATE INDEX index_rep_factor_2 ON dist_partitioned_table USING btree (another_col, partition_col);

SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :worker_2_port
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :master_port
-- let's fix the problematic table
SET search_path TO fix_idx_names, public;
SELECT fix_partition_shard_index_names('dist_partitioned_table'::regclass);

\c - - - :worker_2_port
-- the shard id has been appended to all index names which didn't end in a shard id
-- this goes in line with Citus's way of naming indexes of shards: always append the shardid to the end
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :master_port
SET search_path TO fix_idx_names, public;

-- test with a role that is not superuser
SET client_min_messages TO warning;
SET citus.enable_ddl_propagation TO off;
CREATE USER user1;
RESET client_min_messages;
RESET citus.enable_ddl_propagation;

SET ROLE user1;
SELECT fix_partition_shard_index_names('fix_idx_names.dist_partitioned_table'::regclass);

RESET ROLE;
SET search_path TO fix_idx_names, public;
DROP TABLE dist_partitioned_table;

-- also, we cannot do any further operations (e.g. rename) on the indexes of partitions because
-- the index names on shards of partitions have been generated by Postgres, not Citus
-- it doesn't matter here whether the partition name is long or short

-- replicate the scenario from above, but this time with one shard so that this test isn't flaky
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 910030;

CREATE TABLE dist_partitioned_table (dist_col int, another_col int, partition_col timestamp) PARTITION BY RANGE (partition_col);
SELECT create_distributed_table('dist_partitioned_table', 'dist_col');
CREATE TABLE partition_table_with_very_long_name PARTITION OF dist_partitioned_table FOR VALUES FROM ('2018-01-01') TO ('2019-01-01');
CREATE TABLE p PARTITION OF dist_partitioned_table FOR VALUES FROM ('2019-01-01') TO ('2020-01-01');
CREATE INDEX short ON dist_partitioned_table USING btree (another_col, partition_col);

-- rename shouldn't work
ALTER INDEX partition_table_with_very_long_na_another_col_partition_col_idx RENAME TO partition_table_with_very_long_name_idx;

-- we currently can't drop the index on a detached partition
-- https://github.com/citusdata/citus/issues/5138
ALTER TABLE dist_partitioned_table DETACH PARTITION p;
DROP INDEX p_another_col_partition_col_idx;

-- let's reattach and retry after fixing the index names
ALTER TABLE dist_partitioned_table ATTACH PARTITION p FOR VALUES FROM ('2019-01-01') TO ('2020-01-01');

\c - - - :worker_1_port
-- check the current broken index names
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :master_port
SET search_path TO fix_idx_names, public;
-- fix the index names
SELECT fix_all_partition_shard_index_names();

\c - - - :worker_1_port
-- check the fixed index names
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :master_port
SET search_path TO fix_idx_names, public;
-- should now work
ALTER INDEX partition_table_with_very_long_na_another_col_partition_col_idx RENAME TO partition_table_with_very_long_name_idx;

-- now we can drop the index on the detached partition
ALTER TABLE dist_partitioned_table DETACH PARTITION p;
DROP INDEX p_another_col_partition_col_idx;

\c - - - :worker_1_port
-- check that the indexes have been renamed
-- and that the index on p has been dropped (it won't appear)
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;

\c - - - :master_port
SET search_path TO fix_idx_names, public;

DROP SCHEMA fix_idx_names CASCADE;
SELECT run_command_on_workers($$ DROP SCHEMA IF EXISTS fix_idx_names CASCADE $$);
@ -10,6 +10,7 @@
--

CREATE SCHEMA multi_index_statements;
CREATE SCHEMA multi_index_statements_2;
SET search_path TO multi_index_statements;

SET citus.next_shard_id TO 102080;

@ -55,6 +56,28 @@ CREATE UNIQUE INDEX index_test_range_index_a_b_partial ON index_test_range(a,b)
CREATE UNIQUE INDEX index_test_hash_index_a_b_c ON index_test_hash(a) INCLUDE (b,c);
RESET client_min_messages;


-- Verify that we can create expression indexes and be robust to different schemas
CREATE OR REPLACE FUNCTION value_plus_one(a int)
RETURNS int IMMUTABLE AS $$
BEGIN
RETURN a + 1;
END;
$$ LANGUAGE plpgsql;
SELECT create_distributed_function('value_plus_one(int)');

CREATE OR REPLACE FUNCTION multi_index_statements_2.value_plus_one(a int)
RETURNS int IMMUTABLE AS $$
BEGIN
RETURN a + 1;
END;
$$ LANGUAGE plpgsql;
SELECT create_distributed_function('multi_index_statements_2.value_plus_one(int)');

CREATE INDEX ON index_test_hash ((value_plus_one(b)));
CREATE INDEX ON index_test_hash ((multi_index_statements.value_plus_one(b)));
CREATE INDEX ON index_test_hash ((multi_index_statements_2.value_plus_one(b)));

-- Verify that we handle if not exists statements correctly
CREATE INDEX lineitem_orderkey_index on public.lineitem(l_orderkey);
CREATE INDEX IF NOT EXISTS lineitem_orderkey_index on public.lineitem(l_orderkey);

@ -403,3 +426,4 @@ SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::
DROP INDEX CONCURRENTLY IF EXISTS ith_b_idx;

DROP SCHEMA multi_index_statements CASCADE;
DROP SCHEMA multi_index_statements_2 CASCADE;

@ -162,19 +162,15 @@ SELECT * FROM columnar.stripe;
-- alter a columnar setting
SET columnar.chunk_group_row_limit = 1050;

DO $proc$
BEGIN
IF substring(current_Setting('server_version'), '\d+')::int >= 12 THEN
EXECUTE $$
-- create columnar table
CREATE TABLE columnar_table (a int) USING columnar;
-- alter a columnar table that is created by that unprivileged user
SELECT alter_columnar_table_set('columnar_table', chunk_group_row_limit => 2000);
-- and drop it
DROP TABLE columnar_table;
$$;
END IF;
END$proc$;
-- create columnar table
CREATE TABLE columnar_table (a int) USING columnar;
-- alter a columnar table that is created by that unprivileged user
SELECT alter_columnar_table_set('columnar_table', chunk_group_row_limit => 2000);
-- insert some data and read
INSERT INTO columnar_table VALUES (1), (1);
SELECT * FROM columnar_table;
-- and drop it
DROP TABLE columnar_table;

-- cannot modify columnar metadata table as unprivileged user
INSERT INTO columnar.stripe VALUES(99);

@ -183,6 +179,8 @@ INSERT INTO columnar.stripe VALUES(99);
-- (since the citus extension has a dependency on it)
DROP TABLE columnar.chunk;

-- cannot read columnar.chunk since it could expose chunk min/max values
SELECT * FROM columnar.chunk;

-- test whether a read-only user can read from the citus_tables view
SELECT distribution_column FROM citus_tables WHERE table_name = 'test'::regclass;

@ -638,6 +638,35 @@ CREATE TABLE partitioned_events_table_2009 PARTITION OF partitioned_events_table
INSERT INTO partitioned_events_table SELECT * FROM events_table;
INSERT INTO partitioned_users_table_2009 SELECT * FROM users_table;

-- test distributed partitions are indeed colocated with the parent table
CREATE TABLE sensors(measureid integer, eventdatetime date, measure_data jsonb, PRIMARY KEY (measureid, eventdatetime, measure_data))
PARTITION BY RANGE(eventdatetime);

CREATE TABLE sensors_old PARTITION OF sensors FOR VALUES FROM ('2000-01-01') TO ('2020-01-01');
CREATE TABLE sensors_2020_01_01 PARTITION OF sensors FOR VALUES FROM ('2020-01-01') TO ('2020-02-01');
CREATE TABLE sensors_new PARTITION OF sensors DEFAULT;

SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none');

SELECT count(DISTINCT colocationid) FROM pg_dist_partition
WHERE logicalrelid IN ('sensors'::regclass, 'sensors_old'::regclass, 'sensors_2020_01_01'::regclass, 'sensors_new'::regclass);
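-- Aside (not in the original test): a result of 1 means the parent table and
-- all of its partitions landed in a single colocation group.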

CREATE TABLE local_sensors(measureid integer, eventdatetime date, measure_data jsonb, PRIMARY KEY (measureid, eventdatetime, measure_data))
PARTITION BY RANGE(eventdatetime);

CREATE TABLE local_sensors_old PARTITION OF local_sensors FOR VALUES FROM ('2000-01-01') TO ('2020-01-01');
CREATE TABLE local_sensors_2020_01_01 PARTITION OF local_sensors FOR VALUES FROM ('2020-01-01') TO ('2020-02-01');
CREATE TABLE local_sensors_new PARTITION OF local_sensors DEFAULT;

SELECT create_distributed_table('local_sensors', 'measureid', colocate_with:='sensors');

SELECT count(DISTINCT colocationid) FROM pg_dist_partition
WHERE logicalrelid IN ('sensors'::regclass, 'sensors_old'::regclass, 'sensors_2020_01_01'::regclass, 'sensors_new'::regclass,
'local_sensors'::regclass, 'local_sensors_old'::regclass, 'local_sensors_2020_01_01'::regclass, 'local_sensors_new'::regclass);

DROP TABLE sensors;
DROP TABLE local_sensors;

--
-- Complex JOINs, subqueries, UNIONs etc...
--
@ -1857,6 +1886,46 @@ SELECT create_time_partitions('non_partitioned_table', INTERVAL '1 month', now()
CALL drop_old_time_partitions('non_partitioned_table', now());
DROP TABLE non_partitioned_table;

-- https://github.com/citusdata/citus/issues/4962
SET citus.shard_replication_factor TO 1;
CREATE TABLE part_table_with_very_long_name (
dist_col integer,
long_named_integer_col integer,
long_named_part_col timestamp
) PARTITION BY RANGE (long_named_part_col);

CREATE TABLE part_table_with_long_long_long_long_name
PARTITION OF part_table_with_very_long_name
FOR VALUES FROM ('2010-01-01') TO ('2015-01-01');

SELECT create_distributed_table('part_table_with_very_long_name', 'dist_col');

CREATE INDEX ON part_table_with_very_long_name
USING btree (long_named_integer_col, long_named_part_col);

-- shouldn't work
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);

\c - - - :worker_1_port
SELECT tablename, indexname FROM pg_indexes
WHERE schemaname = 'partitioning_schema' AND tablename ilike '%part_table_with_%' ORDER BY 1, 2;

\c - - - :master_port
SET citus.shard_replication_factor TO 1;
SET search_path = partitioning_schema;
-- fix problematic table
SELECT fix_partition_shard_index_names('part_table_with_very_long_name'::regclass);
-- should work
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);

\c - - - :worker_1_port
-- check that indexes are renamed
SELECT tablename, indexname FROM pg_indexes
WHERE schemaname = 'partitioning_schema' AND tablename ilike '%part_table_with_%' ORDER BY 1, 2;

\c - - - :master_port
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);

DROP SCHEMA partitioning_schema CASCADE;
RESET search_path;
DROP TABLE IF EXISTS

@ -12,20 +12,107 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
SET client_min_messages TO ERROR;
SET citus.enable_ddl_propagation TO OFF;
CREATE USER regular_mx_user WITH LOGIN;
SELECT 1 FROM run_command_on_workers($$CREATE USER regular_mx_user WITH LOGIN;$$);
GRANT ALL ON SCHEMA "Mx Regular User" TO regular_mx_user;

-- create another table owned by the super user (e.g., current user of the session)
-- and GRANT access to the user
CREATE SCHEMA "Mx Super User";
SELECT 1 FROM run_command_on_workers($$CREATE SCHEMA "Mx Super User";$$);
SET citus.next_shard_id TO 2980000;
SET search_path TO "Mx Super User";
CREATE TABLE super_user_owned_regular_user_granted (a int PRIMARY KEY, b int);
SELECT create_reference_table ('"Mx Super User".super_user_owned_regular_user_granted');

-- show that this table is owned by the super user
SELECT
rolsuper
FROM
pg_roles
WHERE oid
IN
(SELECT relowner FROM pg_class WHERE oid = '"Mx Super User".super_user_owned_regular_user_granted'::regclass);

-- make sure that granting produces the same output for both community and enterprise
SET client_min_messages TO ERROR;
GRANT USAGE ON SCHEMA "Mx Super User" TO regular_mx_user;
GRANT INSERT ON TABLE super_user_owned_regular_user_granted TO regular_mx_user;

SELECT 1 FROM run_command_on_workers($$GRANT USAGE ON SCHEMA "Mx Super User" TO regular_mx_user;$$);
SELECT 1 FROM run_command_on_workers($$GRANT INSERT ON TABLE "Mx Super User".super_user_owned_regular_user_granted TO regular_mx_user;$$);
SELECT 1 FROM run_command_on_placements('super_user_owned_regular_user_granted', $$GRANT INSERT ON TABLE %s TO regular_mx_user;$$);
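-- Aside (not in the original test): run_command_on_placements substitutes %s
-- with the fully qualified shard name of each placement, so the GRANT also
-- reaches every individual shard, not just the distributed shell table.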

-- now that the GRANT is given, the regular user should be able to
-- INSERT into the table
\c - regular_mx_user - :master_port
SET search_path TO "Mx Super User";
COPY super_user_owned_regular_user_granted FROM STDIN WITH CSV;
1,1
2,1
\.

-- however, this specific user doesn't have UPDATE/UPSERT/DELETE/TRUNCATE
-- permissions, so these should fail
INSERT INTO super_user_owned_regular_user_granted VALUES (1, 1), (2, 1) ON CONFLICT (a) DO NOTHING;
TRUNCATE super_user_owned_regular_user_granted;
DELETE FROM super_user_owned_regular_user_granted;
UPDATE super_user_owned_regular_user_granted SET a = 1;
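-- Aside (not in the original test): only INSERT was granted above; each of
-- these four statements needs an additional table privilege, so all of them
-- are expected to be rejected.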

-- AccessExclusiveLock == 8 is strictly forbidden for any user
SELECT lock_shard_resources(8, ARRAY[2980000]);

-- ExclusiveLock == 7 is forbidden for this user
-- as it only has INSERT rights
SELECT lock_shard_resources(7, ARRAY[2980000]);
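-- Aside (reference, not in the original test): the numeric argument corresponds
-- to the standard PostgreSQL lock levels:
-- 1 AccessShareLock, 2 RowShareLock, 3 RowExclusiveLock,
-- 4 ShareUpdateExclusiveLock, 5 ShareLock, 6 ShareRowExclusiveLock,
-- 7 ExclusiveLock, 8 AccessExclusiveLock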

-- but the user should be able to acquire RowExclusiveLock
BEGIN;
SELECT count(*) > 0 as acquired_lock from pg_locks where pid = pg_backend_pid() AND locktype = 'advisory';
SELECT lock_shard_resources(3, ARRAY[2980000]);
SELECT count(*) > 0 as acquired_lock from pg_locks where pid = pg_backend_pid() AND locktype = 'advisory';
COMMIT;

-- acquiring locks on non-existing shards is not meaningful, but we still do not throw an error as we might be in the middle
-- of metadata syncing; we just do not acquire the locks
BEGIN;
SELECT count(*) > 0 as acquired_lock from pg_locks where pid = pg_backend_pid() AND locktype = 'advisory';
SELECT lock_shard_resources(3, ARRAY[123456871]);
SELECT count(*) > 0 as acquired_lock from pg_locks where pid = pg_backend_pid() AND locktype = 'advisory';
COMMIT;


\c - postgres - :master_port;
SET search_path TO "Mx Super User";
SET client_min_messages TO ERROR;

-- now allow users to do UPDATE on the tables
GRANT UPDATE ON TABLE super_user_owned_regular_user_granted TO regular_mx_user;
SELECT 1 FROM run_command_on_workers($$GRANT UPDATE ON TABLE "Mx Super User".super_user_owned_regular_user_granted TO regular_mx_user;$$);
SELECT 1 FROM run_command_on_placements('super_user_owned_regular_user_granted', $$GRANT UPDATE ON TABLE %s TO regular_mx_user;$$);

\c - regular_mx_user - :master_port
SET search_path TO "Mx Super User";

UPDATE super_user_owned_regular_user_granted SET b = 1;

-- AccessExclusiveLock == 8 is strictly forbidden for any user
-- even after UPDATE is allowed
SELECT lock_shard_resources(8, ARRAY[2980000]);

\c - postgres - :master_port;
SET client_min_messages TO ERROR;
DROP SCHEMA "Mx Super User" CASCADE;

\c - postgres - :worker_1_port;
SET client_min_messages TO ERROR;
SET citus.enable_ddl_propagation TO OFF;
CREATE SCHEMA "Mx Regular User";
CREATE USER regular_mx_user WITH LOGIN;
GRANT ALL ON SCHEMA "Mx Regular User" TO regular_mx_user;

\c - postgres - :worker_2_port;
SET client_min_messages TO ERROR;
SET citus.enable_ddl_propagation TO OFF;
CREATE SCHEMA "Mx Regular User";
CREATE USER regular_mx_user WITH LOGIN;
GRANT ALL ON SCHEMA "Mx Regular User" TO regular_mx_user;

-- now connect with that user

@ -94,7 +94,7 @@ SELECT create_distributed_table('my_table', 'a');

CREATE TABLE test_table(a int, b tsvector);
SELECT create_distributed_table('test_table', 'a');
-- we currently don't support this
-- operator class options are supported
CREATE INDEX test_table_index ON test_table USING gist (b tsvector_ops(siglen = 100));

-- testing WAL