Merge branch 'master' into component_governance_config

pull/4283/head
SaitTalhaNisanci 2020-11-19 13:16:01 +03:00 committed by GitHub
commit 3dca29a4c3
180 changed files with 28715 additions and 2421 deletions

@@ -199,6 +199,37 @@ jobs:
flags: 'test_11,follower-cluster'
- store_artifacts:
path: '/tmp/core_dumps'
test-11_check-columnar:
docker:
- image: 'citus/exttester:11.9'
working_directory: /home/circleci/project
steps:
- checkout
- attach_workspace:
at: .
- run:
name: 'Install and Test (check-columnar)'
command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-columnar'
no_output_timeout: 2m
- codecov/upload:
flags: 'test_11,columnar'
test-11_check-columnar-isolation:
docker:
- image: 'citus/exttester:11.9'
working_directory: /home/circleci/project
steps:
- checkout
- attach_workspace:
at: .
- run:
name: 'Install and Test (check-columnar-isolation)'
command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-columnar-isolation'
no_output_timeout: 2m
- codecov/upload:
flags: 'test_11,columnar,isolation'
test-11_check-failure:
docker:
- image: 'citus/failtester:11.9'
@@ -337,6 +368,36 @@ jobs:
- store_artifacts:
path: '/tmp/core_dumps'
test-12_check-columnar:
docker:
- image: 'citus/exttester:12.4'
working_directory: /home/circleci/project
steps:
- checkout
- attach_workspace:
at: .
- run:
name: 'Install and Test (check-columnar)'
command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-columnar'
no_output_timeout: 2m
- codecov/upload:
flags: 'test_12,columnar'
test-12_check-columnar-isolation:
docker:
- image: 'citus/exttester:12.4'
working_directory: /home/circleci/project
steps:
- checkout
- attach_workspace:
at: .
- run:
name: 'Install and Test (check-columnar-isolation)'
command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-columnar-isolation'
no_output_timeout: 2m
- codecov/upload:
flags: 'test_12,columnar,isolation'
test-12_check-failure:
docker:
- image: 'citus/failtester:12.4'
@@ -473,6 +534,36 @@ jobs:
- store_artifacts:
path: '/tmp/core_dumps'
test-13_check-columnar:
docker:
- image: 'citus/exttester:13.0'
working_directory: /home/circleci/project
steps:
- checkout
- attach_workspace:
at: .
- run:
name: 'Install and Test (check-columnar)'
command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-columnar'
no_output_timeout: 2m
- codecov/upload:
flags: 'test_13,columnar'
test-13_check-columnar-isolation:
docker:
- image: 'citus/exttester:13.0'
working_directory: /home/circleci/project
steps:
- checkout
- attach_workspace:
at: .
- run:
name: 'Install and Test (check-columnar-isolation)'
command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-columnar-isolation'
no_output_timeout: 2m
- codecov/upload:
flags: 'test_13,columnar,isolation'
test-13_check-failure:
docker:
- image: 'citus/failtester:13.0'
@@ -556,6 +647,10 @@ workflows:
requires: [build-11]
- test-11_check-follower-cluster:
requires: [build-11]
- test-11_check-columnar:
requires: [build-11]
- test-11_check-columnar-isolation:
requires: [build-11]
- test-11_check-failure:
requires: [build-11]
@@ -571,6 +666,10 @@ workflows:
requires: [build-12]
- test-12_check-follower-cluster:
requires: [build-12]
- test-12_check-columnar:
requires: [build-12]
- test-12_check-columnar-isolation:
requires: [build-12]
- test-12_check-failure:
requires: [build-12]
@@ -586,6 +685,10 @@ workflows:
requires: [build-13]
- test-13_check-follower-cluster:
requires: [build-13]
- test-13_check-columnar:
requires: [build-13]
- test-13_check-columnar-isolation:
requires: [build-13]
- test-13_check-failure:
requires: [build-13]


@@ -1,3 +1,95 @@
### citus v9.5.0 (November 10, 2020) ###
* Adds support for PostgreSQL 13
* Removes the task-tracker executor
* Introduces citus local tables
* Introduces `undistribute_table` UDF to convert tables back to postgres tables
* Adds support for `EXPLAIN (ANALYZE) EXECUTE` and `EXPLAIN EXECUTE`
* Adds support for `EXPLAIN (ANALYZE, WAL)` for PG13
* Sorts the output of `EXPLAIN (ANALYZE)` by execution duration
* Adds support for CREATE TABLE ... USING table_access_method
* Adds support for `WITH TIES` option in SELECT and INSERT SELECT queries
* Avoids taking multi-shard locks on workers
* Enforces `citus.max_shared_pool_size` config in COPY queries
* Enables custom aggregates with multiple parameters to be executed on workers
* Enforces `citus.max_intermediate_result_size` in local execution
* Improves cost estimation of INSERT SELECT plans
* Introduces delegation of procedures that read from reference tables
* Prevents pull-push execution for simple pushdownable subqueries
* Improves error message when creating a foreign key to a local table
* Makes `citus_prepare_pg_upgrade` idempotent by dropping transition tables
* Disallows `ON TRUE` outer joins with reference & distributed tables when
reference table is outer relation to avoid incorrect results
* Disallows field indirection in INSERT/UPDATE queries to avoid incorrect
results
* Disallows volatile functions in UPDATE subqueries to avoid incorrect results
* Fixes CREATE INDEX CONCURRENTLY crash with local execution
* Fixes `citus_finish_pg_upgrade` to drop all backup tables
* Fixes a bug that caused failures when a `RECURSIVE VIEW` joined a reference table
* Fixes DROP SEQUENCE failures when metadata syncing is enabled
* Fixes a bug that caused CREATE TABLE with CHECK constraint to fail
* Fixes a bug that could cause VACUUM to deadlock
* Fixes master_update_node failure when no background worker slots are available
* Fixes a bug that caused replica identity to not be propagated on shard repair
* Fixes a bug that could cause crashes after connection timeouts
* Fixes a bug that could cause crashes with certain compile flags
* Fixes a bug that could cause deadlocks on CREATE INDEX
* Fixes a bug with genetic query optimization in outer joins
* Fixes a crash when aggregating empty tables
* Fixes a crash with inserting domain constrained composite types
* Fixes a crash with multi-row & router INSERTs in local execution
* Fixes a possibility of doing temporary file cleanup more than once
* Fixes incorrect setting of join related fields
* Fixes memory issues around deparsing index commands
* Fixes reference table access tracking for sequential execution
* Fixes removal of a single node with only reference tables
* Fixes sending commands to coordinator when it is added as a worker
* Fixes write queries with const expressions and COLLATE in various places
* Fixes wrong cancellation message about distributed deadlock
### citus v9.4.2 (October 21, 2020) ###
* Fixes a bug that could lead to multiple maintenance daemons


@@ -92,5 +92,7 @@ endif
override CPPFLAGS := @CPPFLAGS@ @CITUS_CPPFLAGS@ -I '${citus_abs_top_srcdir}/src/include' -I'${citus_top_builddir}/src/include' $(CPPFLAGS)
override LDFLAGS += @LDFLAGS@ @CITUS_LDFLAGS@
HAS_TABLEAM:=@HAS_TABLEAM@
# optional file with user defined, additional, rules
-include ${citus_abs_srcdir}/src/Makefile.custom


@@ -4,7 +4,7 @@ set -euo pipefail
# shellcheck disable=SC1091
source ci/ci_helpers.sh
-for udf_dir in src/backend/distributed/sql/udfs/*; do
+for udf_dir in src/backend/distributed/sql/udfs/* src/backend/columnar/sql/udfs/*; do
# We want to find the last snapshotted sql file, to make sure it's the same
# as "latest.sql". This is done by:
# 1. Getting the filenames in the UDF directory (using find instead of ls, to keep shellcheck happy)

configure (vendored)

@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for Citus 9.5devel.
+# Generated by GNU Autoconf 2.69 for Citus 10.0devel.
#
#
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -579,8 +579,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='Citus'
PACKAGE_TARNAME='citus'
-PACKAGE_VERSION='9.5devel'
+PACKAGE_VERSION='10.0devel'
-PACKAGE_STRING='Citus 9.5devel'
+PACKAGE_STRING='Citus 10.0devel'
PACKAGE_BUGREPORT=''
PACKAGE_URL=''
@@ -622,6 +622,7 @@ ac_includes_default="\
ac_subst_vars='LTLIBOBJS
LIBOBJS
HAS_TABLEAM
HAS_DOTGIT
POSTGRES_BUILDDIR
POSTGRES_SRCDIR
@@ -1242,7 +1243,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
-\`configure' configures Citus 9.5devel to adapt to many kinds of systems.
+\`configure' configures Citus 10.0devel to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1303,7 +1304,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
-short | recursive ) echo "Configuration of Citus 9.5devel:";;
+short | recursive ) echo "Configuration of Citus 10.0devel:";;
esac
cat <<\_ACEOF
@@ -1403,7 +1404,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
-Citus configure 9.5devel
+Citus configure 10.0devel
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
@@ -1886,7 +1887,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
-It was created by Citus $as_me 9.5devel, which was
+It was created by Citus $as_me 10.0devel, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@@ -4468,6 +4469,16 @@ cat >>confdefs.h <<_ACEOF
_ACEOF
if test "$version_num" != '11'; then
HAS_TABLEAM=yes
$as_echo "#define HAS_TABLEAM 1" >>confdefs.h
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: postgres version does not support table access methods" >&5
$as_echo "$as_me: postgres version does not support table access methods" >&6;}
fi;
# Check if git is installed, when installed the gitref of the checkout will be baked in the application
# Extract the first word of "git", so it can be a program name with args.
set dummy git; ac_word=$2
@@ -4543,6 +4554,8 @@ POSTGRES_BUILDDIR="$POSTGRES_BUILDDIR"
HAS_DOTGIT="$HAS_DOTGIT"
HAS_TABLEAM="$HAS_TABLEAM"
ac_config_files="$ac_config_files Makefile.global"
@@ -5055,7 +5068,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
-This file was extended by Citus $as_me 9.5devel, which was
+This file was extended by Citus $as_me 10.0devel, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -5117,7 +5130,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
-Citus config.status 9.5devel
+Citus config.status 10.0devel
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"


@@ -5,7 +5,7 @@
# everyone needing autoconf installed, the resulting files are checked
# into the SCM.
-AC_INIT([Citus], [9.5devel])
+AC_INIT([Citus], [10.0devel])
AC_COPYRIGHT([Copyright (c) Citus Data, Inc.])
# we'll need sed and awk for some of the version commands
@@ -212,6 +212,13 @@ PGAC_ARG_REQ(with, reports-hostname, [HOSTNAME],
AC_DEFINE_UNQUOTED(REPORTS_BASE_URL, "$REPORTS_BASE_URL",
[Base URL for statistics collection and update checks])
if test "$version_num" != '11'; then
HAS_TABLEAM=yes
AC_DEFINE([HAS_TABLEAM], 1, [Define to 1 to build with table access method support, pg12 and up])
else
AC_MSG_NOTICE([postgres version does not support table access methods])
fi;
# Check if git is installed, when installed the gitref of the checkout will be baked in the application
AC_PATH_PROG(GIT_BIN, git)
AC_CHECK_FILE(.git,[HAS_DOTGIT=yes], [HAS_DOTGIT=])
@@ -222,6 +229,7 @@ AC_SUBST(CITUS_LDFLAGS, "$LIBS $CITUS_LDFLAGS")
AC_SUBST(POSTGRES_SRCDIR, "$POSTGRES_SRCDIR")
AC_SUBST(POSTGRES_BUILDDIR, "$POSTGRES_BUILDDIR")
AC_SUBST(HAS_DOTGIT, "$HAS_DOTGIT")
AC_SUBST(HAS_TABLEAM, "$HAS_TABLEAM")
AC_CONFIG_FILES([Makefile.global])
AC_CONFIG_HEADERS([src/include/citus_config.h] [src/include/citus_version.h])
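The HAS_TABLEAM substitution and define wired up above gate all table-access-method code in the new columnar sources. A minimal sketch of the consumption pattern, mirroring the guard actually used in cstore_customscan.c below:

    #include "citus_version.h"

    #if HAS_TABLEAM
    /* code that requires the table access method API (PostgreSQL 12+) */
    #endif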

src/backend/columnar/.gitattributes (vendored, new file)

@@ -0,0 +1,26 @@
* whitespace=space-before-tab,trailing-space
*.[chly] whitespace=space-before-tab,trailing-space,indent-with-non-tab,tabwidth=4
*.dsl whitespace=space-before-tab,trailing-space,tab-in-indent
*.patch -whitespace
*.pl whitespace=space-before-tab,trailing-space,tabwidth=4
*.po whitespace=space-before-tab,trailing-space,tab-in-indent,-blank-at-eof
*.sgml whitespace=space-before-tab,trailing-space,tab-in-indent,-blank-at-eol
*.x[ms]l whitespace=space-before-tab,trailing-space,tab-in-indent
# Avoid confusing ASCII underlines with leftover merge conflict markers
README conflict-marker-size=32
README.* conflict-marker-size=32
# Certain data files that contain special whitespace, and other special cases
*.data -whitespace
# Test output files that contain extra whitespace
*.out -whitespace
src/test/regress/output/*.source -whitespace
# These files are maintained or generated elsewhere. We take them as is.
configure -whitespace
# all C files (implementation and header) use our style...
*.[ch] citus-style

src/backend/columnar/.gitignore (vendored, new file)

@@ -0,0 +1,68 @@
# =====
# = C =
# =====
# Object files
*.o
*.ko
*.obj
*.elf
*.bc
# Libraries
*.lib
*.a
# Shared objects (inc. Windows DLLs)
*.dll
*.so
*.so.*
*.dylib
# Executables
*.exe
*.app
*.i*86
*.x86_64
*.hex
# ========
# = Gcov =
# ========
# gcc coverage testing tool files
*.gcno
*.gcda
*.gcov
# ====================
# = Project-Specific =
# ====================
/data/*.cstore
/data/*.footer
/sql/*block_filtering.sql
/sql/*copyto.sql
/sql/*create.sql
/sql/*data_types.sql
/sql/*load.sql
/expected/*block_filtering.out
/expected/*copyto.out
/expected/*create.out
/expected/*data_types.out
/expected/*load.out
/results/*
/.deps/*
/regression.diffs
/regression.out
.vscode
*.pb-c.*
# ignore files that could be created by circleci automation
files.lst
install-*.tar
install-*/


@@ -0,0 +1,101 @@
/*-------------------------------------------------------------------------
*
* cstore.c
*
* This file contains...
*
* Copyright (c) 2016, Citus Data, Inc.
*
* $Id$
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include <sys/stat.h>
#include <unistd.h>
#include "miscadmin.h"
#include "utils/guc.h"
#include "utils/rel.h"
#include "columnar/cstore.h"
/* Default values for option parameters */
#define DEFAULT_COMPRESSION_TYPE COMPRESSION_NONE
#define DEFAULT_STRIPE_ROW_COUNT 150000
#define DEFAULT_BLOCK_ROW_COUNT 10000
int cstore_compression = DEFAULT_COMPRESSION_TYPE;
int cstore_stripe_row_count = DEFAULT_STRIPE_ROW_COUNT;
int cstore_block_row_count = DEFAULT_BLOCK_ROW_COUNT;
static const struct config_enum_entry cstore_compression_options[] =
{
{ "none", COMPRESSION_NONE, false },
{ "pglz", COMPRESSION_PG_LZ, false },
{ NULL, 0, false }
};
void
cstore_init()
{
DefineCustomEnumVariable("cstore.compression",
"Compression type for cstore.",
NULL,
&cstore_compression,
DEFAULT_COMPRESSION_TYPE,
cstore_compression_options,
PGC_USERSET,
0,
NULL,
NULL,
NULL);
DefineCustomIntVariable("cstore.stripe_row_count",
"Maximum number of tuples per stripe.",
NULL,
&cstore_stripe_row_count,
DEFAULT_STRIPE_ROW_COUNT,
STRIPE_ROW_COUNT_MINIMUM,
STRIPE_ROW_COUNT_MAXIMUM,
PGC_USERSET,
0,
NULL,
NULL,
NULL);
DefineCustomIntVariable("cstore.block_row_count",
"Maximum number of rows per block.",
NULL,
&cstore_block_row_count,
DEFAULT_BLOCK_ROW_COUNT,
BLOCK_ROW_COUNT_MINIMUM,
BLOCK_ROW_COUNT_MAXIMUM,
PGC_USERSET,
0,
NULL,
NULL,
NULL);
}
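/*
 * Usage sketch (illustrative, not taken from this file): all three GUCs
 * above are registered as PGC_USERSET, so once the extension is loaded a
 * session can tune them directly, e.g.:
 *
 *   SET cstore.compression TO 'pglz';
 *   SET cstore.stripe_row_count TO 100000;
 */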
/* ParseCompressionType converts a string to a compression type. */
CompressionType
ParseCompressionType(const char *compressionTypeString)
{
CompressionType compressionType = COMPRESSION_TYPE_INVALID;
Assert(compressionTypeString != NULL);
if (strncmp(compressionTypeString, COMPRESSION_STRING_NONE, NAMEDATALEN) == 0)
{
compressionType = COMPRESSION_NONE;
}
else if (strncmp(compressionTypeString, COMPRESSION_STRING_PG_LZ, NAMEDATALEN) == 0)
{
compressionType = COMPRESSION_PG_LZ;
}
return compressionType;
}
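/*
 * ParseCompressionType and CompressionTypeStr (the latter defined in
 * cstore_compression.c below) form an inverse pair; a minimal sketch with a
 * hypothetical caller:
 *
 *   CompressionType type = ParseCompressionType("pglz");  -> COMPRESSION_PG_LZ
 *   char *name = CompressionTypeStr(type);                -> "pglz"
 *
 * Unrecognized strings yield COMPRESSION_TYPE_INVALID, which callers are
 * expected to check.
 */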


@@ -0,0 +1,196 @@
/*-------------------------------------------------------------------------
*
* cstore_compression.c
*
* This file contains compression/decompression functions definitions
* used in cstore_fdw.
*
* Copyright (c) 2016, Citus Data, Inc.
*
* $Id$
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#if PG_VERSION_NUM >= 90500
#include "common/pg_lzcompress.h"
#else
#include "utils/pg_lzcompress.h"
#endif
#include "columnar/cstore.h"
#if PG_VERSION_NUM >= 90500
/*
* The information at the start of the compressed data. This description is
* taken from pg_lzcompress in pre-9.5 versions of PostgreSQL.
*/
typedef struct CStoreCompressHeader
{
int32 vl_len_; /* varlena header (do not touch directly!) */
int32 rawsize;
} CStoreCompressHeader;
/*
* Utilities for manipulation of header information for compressed data
*/
#define CSTORE_COMPRESS_HDRSZ ((int32) sizeof(CStoreCompressHeader))
#define CSTORE_COMPRESS_RAWSIZE(ptr) (((CStoreCompressHeader *) (ptr))->rawsize)
#define CSTORE_COMPRESS_RAWDATA(ptr) (((char *) (ptr)) + CSTORE_COMPRESS_HDRSZ)
#define CSTORE_COMPRESS_SET_RAWSIZE(ptr, len) (((CStoreCompressHeader *) (ptr))->rawsize = \
(len))
#else
#define CSTORE_COMPRESS_HDRSZ (0)
#define CSTORE_COMPRESS_RAWSIZE(ptr) (PGLZ_RAW_SIZE((PGLZ_Header *) (ptr)))
#define CSTORE_COMPRESS_RAWDATA(ptr) (((PGLZ_Header *) (ptr)))
#define CSTORE_COMPRESS_SET_RAWSIZE(ptr, len) (((CStoreCompressHeader *) (ptr))->rawsize = \
(len))
#endif
/*
* CompressBuffer compresses the given buffer with the given compression
* type; outputBuffer is enlarged to contain the compressed data. The
* function returns true if compression succeeded and false otherwise;
* outputBuffer is valid only if the function returns true.
*/
bool
CompressBuffer(StringInfo inputBuffer, StringInfo outputBuffer,
CompressionType compressionType)
{
uint64 maximumLength = PGLZ_MAX_OUTPUT(inputBuffer->len) + CSTORE_COMPRESS_HDRSZ;
bool compressionResult = false;
#if PG_VERSION_NUM >= 90500
int32 compressedByteCount = 0;
#endif
if (compressionType != COMPRESSION_PG_LZ)
{
return false;
}
resetStringInfo(outputBuffer);
enlargeStringInfo(outputBuffer, maximumLength);
#if PG_VERSION_NUM >= 90500
compressedByteCount = pglz_compress((const char *) inputBuffer->data,
inputBuffer->len,
CSTORE_COMPRESS_RAWDATA(outputBuffer->data),
PGLZ_strategy_always);
if (compressedByteCount >= 0)
{
CSTORE_COMPRESS_SET_RAWSIZE(outputBuffer->data, inputBuffer->len);
SET_VARSIZE_COMPRESSED(outputBuffer->data,
compressedByteCount + CSTORE_COMPRESS_HDRSZ);
compressionResult = true;
}
#else
compressionResult = pglz_compress(inputBuffer->data, inputBuffer->len,
CSTORE_COMPRESS_RAWDATA(outputBuffer->data),
PGLZ_strategy_always);
#endif
if (compressionResult)
{
outputBuffer->len = VARSIZE(outputBuffer->data);
}
return compressionResult;
}
/*
* DecompressBuffer decompresses the given buffer with the given compression
* type. This function returns the buffer as-is when no compression is applied.
*/
StringInfo
DecompressBuffer(StringInfo buffer, CompressionType compressionType)
{
StringInfo decompressedBuffer = NULL;
Assert(compressionType == COMPRESSION_NONE || compressionType == COMPRESSION_PG_LZ);
if (compressionType == COMPRESSION_NONE)
{
/* in case of no compression, return buffer */
decompressedBuffer = buffer;
}
else if (compressionType == COMPRESSION_PG_LZ)
{
uint32 compressedDataSize = VARSIZE(buffer->data) - CSTORE_COMPRESS_HDRSZ;
uint32 decompressedDataSize = CSTORE_COMPRESS_RAWSIZE(buffer->data);
char *decompressedData = NULL;
#if PG_VERSION_NUM >= 90500
int32 decompressedByteCount = 0;
#endif
if (compressedDataSize + CSTORE_COMPRESS_HDRSZ != buffer->len)
{
ereport(ERROR, (errmsg("cannot decompress the buffer"),
errdetail("Expected %u bytes, but received %u bytes",
compressedDataSize, buffer->len)));
}
decompressedData = palloc0(decompressedDataSize);
#if PG_VERSION_NUM >= 90500
#if PG_VERSION_NUM >= 120000
decompressedByteCount = pglz_decompress(CSTORE_COMPRESS_RAWDATA(buffer->data),
compressedDataSize, decompressedData,
decompressedDataSize, true);
#else
decompressedByteCount = pglz_decompress(CSTORE_COMPRESS_RAWDATA(buffer->data),
compressedDataSize, decompressedData,
decompressedDataSize);
#endif
if (decompressedByteCount < 0)
{
ereport(ERROR, (errmsg("cannot decompress the buffer"),
errdetail("compressed data is corrupted")));
}
#else
pglz_decompress((PGLZ_Header *) buffer->data, decompressedData);
#endif
decompressedBuffer = palloc0(sizeof(StringInfoData));
decompressedBuffer->data = decompressedData;
decompressedBuffer->len = decompressedDataSize;
decompressedBuffer->maxlen = decompressedDataSize;
}
return decompressedBuffer;
}
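/*
 * A minimal round-trip sketch using the two functions above (hypothetical
 * caller; assumes palloc'd StringInfos in a live memory context):
 *
 *   StringInfo raw = makeStringInfo();
 *   appendStringInfoString(raw, "hello columnar");
 *   StringInfo compressed = makeStringInfo();
 *   if (CompressBuffer(raw, compressed, COMPRESSION_PG_LZ))
 *   {
 *       StringInfo restored = DecompressBuffer(compressed, COMPRESSION_PG_LZ);
 *       Assert(restored->len == raw->len);
 *   }
 *
 * When CompressBuffer returns false (incompressible input or a compression
 * type other than pglz), callers keep using the raw buffer instead.
 */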
/*
* CompressionTypeStr returns string representation of a compression type.
*/
char *
CompressionTypeStr(CompressionType type)
{
switch (type)
{
case COMPRESSION_NONE:
{
return "none";
}
case COMPRESSION_PG_LZ:
{
return "pglz";
}
default:
return "unknown";
}
}


@@ -0,0 +1,429 @@
/*-------------------------------------------------------------------------
*
* cstore_customscan.c
*
* This file contains the implementation of a postgres custom scan that
* we use to push down the projections into the table access methods.
*
* $Id$
*
*-------------------------------------------------------------------------
*/
#include "citus_version.h"
#if HAS_TABLEAM
#include "postgres.h"
#include "access/skey.h"
#include "nodes/extensible.h"
#include "nodes/pg_list.h"
#include "nodes/plannodes.h"
#include "optimizer/optimizer.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/restrictinfo.h"
#include "utils/relcache.h"
#include "columnar/cstore.h"
#include "columnar/cstore_customscan.h"
#include "columnar/cstore_tableam.h"
typedef struct CStoreScanPath
{
CustomPath custom_path;
/* place for local state during planning */
} CStoreScanPath;
typedef struct CStoreScanScan
{
CustomScan custom_scan;
/* place for local state during execution */
} CStoreScanScan;
typedef struct CStoreScanState
{
CustomScanState custom_scanstate;
List *qual;
} CStoreScanState;
static void CStoreSetRelPathlistHook(PlannerInfo *root, RelOptInfo *rel, Index rti,
RangeTblEntry *rte);
static Path * CreateCStoreScanPath(RelOptInfo *rel, RangeTblEntry *rte);
static Cost CStoreScanCost(RangeTblEntry *rte);
static Plan * CStoreScanPath_PlanCustomPath(PlannerInfo *root,
RelOptInfo *rel,
struct CustomPath *best_path,
List *tlist,
List *clauses,
List *custom_plans);
static Node * CStoreScan_CreateCustomScanState(CustomScan *cscan);
static void CStoreScan_BeginCustomScan(CustomScanState *node, EState *estate, int eflags);
static TupleTableSlot * CStoreScan_ExecCustomScan(CustomScanState *node);
static void CStoreScan_EndCustomScan(CustomScanState *node);
static void CStoreScan_ReScanCustomScan(CustomScanState *node);
/* saved hook value in case of unload */
static set_rel_pathlist_hook_type PreviousSetRelPathlistHook = NULL;
static bool EnableCStoreCustomScan = true;
const struct CustomPathMethods CStoreScanPathMethods = {
.CustomName = "CStoreScan",
.PlanCustomPath = CStoreScanPath_PlanCustomPath,
};
const struct CustomScanMethods CStoreScanScanMethods = {
.CustomName = "CStoreScan",
.CreateCustomScanState = CStoreScan_CreateCustomScanState,
};
const struct CustomExecMethods CStoreExecuteMethods = {
.CustomName = "CStoreScan",
.BeginCustomScan = CStoreScan_BeginCustomScan,
.ExecCustomScan = CStoreScan_ExecCustomScan,
.EndCustomScan = CStoreScan_EndCustomScan,
.ReScanCustomScan = CStoreScan_ReScanCustomScan,
.ExplainCustomScan = NULL,
};
/*
* cstore_customscan_init installs the hook required to intercept the postgres planner and
* provide extra paths for cstore tables
*/
void
cstore_customscan_init()
{
PreviousSetRelPathlistHook = set_rel_pathlist_hook;
set_rel_pathlist_hook = CStoreSetRelPathlistHook;
/* register customscan specific GUC's */
DefineCustomBoolVariable(
"cstore.enable_custom_scan",
gettext_noop("Enables the use of a custom scan to push projections and quals "
"into the storage layer"),
NULL,
&EnableCStoreCustomScan,
true,
PGC_USERSET,
GUC_NO_SHOW_ALL,
NULL, NULL, NULL);
}
static void
clear_paths(RelOptInfo *rel)
{
rel->pathlist = NULL;
rel->partial_pathlist = NULL;
rel->cheapest_startup_path = NULL;
rel->cheapest_total_path = NULL;
rel->cheapest_unique_path = NULL;
}
static void
CStoreSetRelPathlistHook(PlannerInfo *root, RelOptInfo *rel, Index rti,
RangeTblEntry *rte)
{
/* call into previous hook if assigned */
if (PreviousSetRelPathlistHook)
{
PreviousSetRelPathlistHook(root, rel, rti, rte);
}
if (!EnableCStoreCustomScan)
{
/* custom scans are disabled, use normal table access method api instead */
return;
}
if (!OidIsValid(rte->relid) || rte->rtekind != RTE_RELATION)
{
/* some calls to the pathlist hook don't have a valid relation set. Do nothing */
return;
}
/*
* Here we want to inspect if this relation pathlist hook is accessing a cstore table.
* If that is the case we want to insert an extra path that pushes down the projection
* into the scan of the table to minimize the data read.
*/
Relation relation = RelationIdGetRelation(rte->relid);
if (relation->rd_tableam == GetCstoreTableAmRoutine())
{
Path *customPath = CreateCStoreScanPath(rel, rte);
ereport(DEBUG1, (errmsg("pathlist hook for cstore table am")));
/* we propose a new path that will be the only path for scanning this relation */
clear_paths(rel);
add_path(rel, customPath);
}
RelationClose(relation);
}
static Path *
CreateCStoreScanPath(RelOptInfo *rel, RangeTblEntry *rte)
{
CStoreScanPath *cspath = (CStoreScanPath *) newNode(sizeof(CStoreScanPath),
T_CustomPath);
/*
* populate custom path information
*/
CustomPath *cpath = &cspath->custom_path;
cpath->methods = &CStoreScanPathMethods;
/*
* populate generic path information
*/
Path *path = &cpath->path;
path->pathtype = T_CustomScan;
path->parent = rel;
path->pathtarget = rel->reltarget;
/*
* Add cost estimates for a cstore table scan, row count is the rows estimated by
* postgres' planner.
*/
path->rows = rel->rows;
path->startup_cost = 0;
path->total_cost = path->startup_cost + CStoreScanCost(rte);
return (Path *) cspath;
}
/*
* CStoreScanCost calculates the cost of scanning the cstore table. The cost is estimated
* by using all stripe metadata to estimate based on the columns to read how many pages
* need to be read.
*/
static Cost
CStoreScanCost(RangeTblEntry *rte)
{
Relation rel = RelationIdGetRelation(rte->relid);
DataFileMetadata *metadata = ReadDataFileMetadata(rel->rd_node.relNode, false);
uint32 maxColumnCount = 0;
uint64 totalStripeSize = 0;
ListCell *stripeMetadataCell = NULL;
RelationClose(rel);
rel = NULL;
foreach(stripeMetadataCell, metadata->stripeMetadataList)
{
StripeMetadata *stripeMetadata = (StripeMetadata *) lfirst(stripeMetadataCell);
totalStripeSize += stripeMetadata->dataLength;
maxColumnCount = Max(maxColumnCount, stripeMetadata->columnCount);
}
{
Bitmapset *attr_needed = rte->selectedCols;
double numberOfColumnsRead = bms_num_members(attr_needed);
double selectionRatio = numberOfColumnsRead / (double) maxColumnCount;
Cost scanCost = (double) totalStripeSize / BLCKSZ * selectionRatio;
return scanCost;
}
}
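/*
 * Worked example with illustrative numbers: 80 MB of stripe data at the
 * standard BLCKSZ of 8192 spans 10240 pages; reading 2 of 10 columns gives
 * a selectionRatio of 0.2, so the estimated cost is 10240 * 0.2 = 2048
 * page reads.
 */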
static Plan *
CStoreScanPath_PlanCustomPath(PlannerInfo *root,
RelOptInfo *rel,
struct CustomPath *best_path,
List *tlist,
List *clauses,
List *custom_plans)
{
CStoreScanScan *plan = (CStoreScanScan *) newNode(sizeof(CStoreScanScan),
T_CustomScan);
CustomScan *cscan = &plan->custom_scan;
cscan->methods = &CStoreScanScanMethods;
/* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
clauses = extract_actual_clauses(clauses, false);
cscan->scan.plan.targetlist = list_copy(tlist);
cscan->scan.plan.qual = clauses;
cscan->scan.scanrelid = best_path->path.parent->relid;
return (Plan *) plan;
}
static Node *
CStoreScan_CreateCustomScanState(CustomScan *cscan)
{
CStoreScanState *cstorescanstate = (CStoreScanState *) newNode(
sizeof(CStoreScanState), T_CustomScanState);
CustomScanState *cscanstate = &cstorescanstate->custom_scanstate;
cscanstate->methods = &CStoreExecuteMethods;
cstorescanstate->qual = cscan->scan.plan.qual;
return (Node *) cscanstate;
}
static void
CStoreScan_BeginCustomScan(CustomScanState *cscanstate, EState *estate, int eflags)
{
/* scan slot is already initialized */
}
static Bitmapset *
CStoreAttrNeeded(ScanState *ss)
{
TupleTableSlot *slot = ss->ss_ScanTupleSlot;
int natts = slot->tts_tupleDescriptor->natts;
Bitmapset *attr_needed = NULL;
Plan *plan = ss->ps.plan;
int flags = PVC_RECURSE_AGGREGATES |
PVC_RECURSE_WINDOWFUNCS | PVC_RECURSE_PLACEHOLDERS;
List *vars = list_concat(pull_var_clause((Node *) plan->targetlist, flags),
pull_var_clause((Node *) plan->qual, flags));
ListCell *lc;
foreach(lc, vars)
{
Var *var = lfirst(lc);
if (var->varattno == 0)
{
elog(DEBUG1, "Need attribute: all");
/* all attributes are required, we don't need to add more so break */
attr_needed = bms_add_range(attr_needed, 0, natts - 1);
break;
}
elog(DEBUG1, "Need attribute: %d", var->varattno);
attr_needed = bms_add_member(attr_needed, var->varattno - 1);
}
return attr_needed;
}
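/*
 * For intuition, with a hypothetical query "SELECT a FROM t WHERE c > 0":
 * the target list pulls Var a and the qual pulls Var c, so attr_needed
 * becomes {0, 2} (attribute numbers shifted down by one); a whole-row Var
 * (varattno == 0) instead marks every attribute as needed.
 */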
static TupleTableSlot *
CStoreScanNext(CStoreScanState *cstorescanstate)
{
CustomScanState *node = (CustomScanState *) cstorescanstate;
/*
* get information from the estate and scan state
*/
TableScanDesc scandesc = node->ss.ss_currentScanDesc;
EState *estate = node->ss.ps.state;
ScanDirection direction = estate->es_direction;
TupleTableSlot *slot = node->ss.ss_ScanTupleSlot;
if (scandesc == NULL)
{
/* the cstore access method does not use the flags, they are specific to heap */
uint32 flags = 0;
Bitmapset *attr_needed = CStoreAttrNeeded(&node->ss);
/*
* We reach here if the scan is not parallel, or if we're serially
* executing a scan that was planned to be parallel.
*/
scandesc = cstore_beginscan_extended(node->ss.ss_currentRelation,
estate->es_snapshot,
0, NULL, NULL, flags, attr_needed,
cstorescanstate->qual);
bms_free(attr_needed);
node->ss.ss_currentScanDesc = scandesc;
}
/*
* get the next tuple from the table
*/
if (table_scan_getnextslot(scandesc, direction, slot))
{
return slot;
}
return NULL;
}
/*
* CStoreScanRecheck -- access method routine to recheck a tuple in EvalPlanQual
*/
static bool
CStoreScanRecheck(CStoreScanState *node, TupleTableSlot *slot)
{
return true;
}
static TupleTableSlot *
CStoreScan_ExecCustomScan(CustomScanState *node)
{
return ExecScan(&node->ss,
(ExecScanAccessMtd) CStoreScanNext,
(ExecScanRecheckMtd) CStoreScanRecheck);
}
static void
CStoreScan_EndCustomScan(CustomScanState *node)
{
/*
* get information from node
*/
TableScanDesc scanDesc = node->ss.ss_currentScanDesc;
/*
* Free the exprcontext
*/
ExecFreeExprContext(&node->ss.ps);
/*
* clean out the tuple table
*/
if (node->ss.ps.ps_ResultTupleSlot)
{
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
}
ExecClearTuple(node->ss.ss_ScanTupleSlot);
/*
* close heap scan
*/
if (scanDesc != NULL)
{
table_endscan(scanDesc);
}
}
static void
CStoreScan_ReScanCustomScan(CustomScanState *node)
{
TableScanDesc scanDesc = node->ss.ss_currentScanDesc;
if (scanDesc != NULL)
{
table_rescan(node->ss.ss_currentScanDesc, NULL);
}
}
#endif /* HAS_TABLEAM */

File diff suppressed because it is too large


@@ -0,0 +1,998 @@
/*-------------------------------------------------------------------------
*
* cstore_metadata_tables.c
*
* Copyright (c) Citus Data, Inc.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "safe_lib.h"
#include "columnar/cstore.h"
#include "columnar/cstore_version_compat.h"
#include <sys/stat.h>
#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/nbtree.h"
#include "access/xact.h"
#include "catalog/indexing.h"
#include "catalog/pg_namespace.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_type.h"
#include "catalog/namespace.h"
#include "commands/defrem.h"
#include "commands/trigger.h"
#include "executor/executor.h"
#include "executor/spi.h"
#include "miscadmin.h"
#include "nodes/execnodes.h"
#include "lib/stringinfo.h"
#include "port.h"
#include "storage/fd.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"
#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/memutils.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
typedef struct
{
Relation rel;
EState *estate;
} ModifyState;
static void InsertStripeMetadataRow(Oid relfilenode, StripeMetadata *stripe);
static void GetHighestUsedAddressAndId(Oid relfilenode,
uint64 *highestUsedAddress,
uint64 *highestUsedId);
static List * ReadDataFileStripeList(Oid relfilenode, Snapshot snapshot);
static Oid CStoreStripesRelationId(void);
static Oid CStoreStripesIndexRelationId(void);
static Oid CStoreDataFilesRelationId(void);
static Oid CStoreDataFilesIndexRelationId(void);
static Oid CStoreSkipNodesRelationId(void);
static Oid CStoreSkipNodesIndexRelationId(void);
static Oid CStoreNamespaceId(void);
static bool ReadCStoreDataFiles(Oid relfilenode, DataFileMetadata *metadata);
static ModifyState * StartModifyRelation(Relation rel);
static void InsertTupleAndEnforceConstraints(ModifyState *state, Datum *values,
bool *nulls);
static void DeleteTupleAndEnforceConstraints(ModifyState *state, HeapTuple heapTuple);
static void FinishModifyRelation(ModifyState *state);
static EState * create_estate_for_relation(Relation rel);
static bytea * DatumToBytea(Datum value, Form_pg_attribute attrForm);
static Datum ByteaToDatum(bytea *bytes, Form_pg_attribute attrForm);
/* constants for cstore_data_files */
#define Natts_cstore_data_files 6
#define Anum_cstore_data_files_relfilenode 1
#define Anum_cstore_data_files_block_row_count 2
#define Anum_cstore_data_files_stripe_row_count 3
#define Anum_cstore_data_files_compression 4
#define Anum_cstore_data_files_version_major 5
#define Anum_cstore_data_files_version_minor 6
/* ----------------
* cstore.cstore_data_files definition.
* ----------------
*/
typedef struct FormData_cstore_data_files
{
Oid relfilenode;
int32 block_row_count;
int32 stripe_row_count;
NameData compression;
int64 version_major;
int64 version_minor;
#ifdef CATALOG_VARLEN /* variable-length fields start here */
#endif
} FormData_cstore_data_files;
typedef FormData_cstore_data_files *Form_cstore_data_files;
/* constants for cstore_stripe */
#define Natts_cstore_stripes 8
#define Anum_cstore_stripes_relfilenode 1
#define Anum_cstore_stripes_stripe 2
#define Anum_cstore_stripes_file_offset 3
#define Anum_cstore_stripes_data_length 4
#define Anum_cstore_stripes_column_count 5
#define Anum_cstore_stripes_block_count 6
#define Anum_cstore_stripes_block_row_count 7
#define Anum_cstore_stripes_row_count 8
/* constants for cstore_skipnodes */
#define Natts_cstore_skipnodes 12
#define Anum_cstore_skipnodes_relfilenode 1
#define Anum_cstore_skipnodes_stripe 2
#define Anum_cstore_skipnodes_attr 3
#define Anum_cstore_skipnodes_block 4
#define Anum_cstore_skipnodes_row_count 5
#define Anum_cstore_skipnodes_minimum_value 6
#define Anum_cstore_skipnodes_maximum_value 7
#define Anum_cstore_skipnodes_value_stream_offset 8
#define Anum_cstore_skipnodes_value_stream_length 9
#define Anum_cstore_skipnodes_exists_stream_offset 10
#define Anum_cstore_skipnodes_exists_stream_length 11
#define Anum_cstore_skipnodes_value_compression_type 12
/*
* InitCStoreDataFileMetadata adds a record for the given relfilenode
* in cstore_data_files.
*/
void
InitCStoreDataFileMetadata(Oid relfilenode, int blockRowCount, int stripeRowCount,
CompressionType compression)
{
NameData compressionName = { 0 };
bool nulls[Natts_cstore_data_files] = { 0 };
Datum values[Natts_cstore_data_files] = {
ObjectIdGetDatum(relfilenode),
Int32GetDatum(blockRowCount),
Int32GetDatum(stripeRowCount),
0, /* to be filled below */
Int32GetDatum(CSTORE_VERSION_MAJOR),
Int32GetDatum(CSTORE_VERSION_MINOR)
};
namestrcpy(&compressionName, CompressionTypeStr(compression));
values[Anum_cstore_data_files_compression - 1] = NameGetDatum(&compressionName);
DeleteDataFileMetadataRowIfExists(relfilenode);
Oid cstoreDataFilesOid = CStoreDataFilesRelationId();
Relation cstoreDataFiles = heap_open(cstoreDataFilesOid, RowExclusiveLock);
ModifyState *modifyState = StartModifyRelation(cstoreDataFiles);
InsertTupleAndEnforceConstraints(modifyState, values, nulls);
FinishModifyRelation(modifyState);
CommandCounterIncrement();
heap_close(cstoreDataFiles, NoLock);
}
void
UpdateCStoreDataFileMetadata(Oid relfilenode, int blockRowCount, int stripeRowCount,
CompressionType compression)
{
const int scanKeyCount = 1;
ScanKeyData scanKey[1];
bool indexOK = true;
Datum values[Natts_cstore_data_files] = { 0 };
bool isnull[Natts_cstore_data_files] = { 0 };
bool replace[Natts_cstore_data_files] = { 0 };
bool changed = false;
Relation cstoreDataFiles = heap_open(CStoreDataFilesRelationId(), RowExclusiveLock);
TupleDesc tupleDescriptor = RelationGetDescr(cstoreDataFiles);
ScanKeyInit(&scanKey[0], Anum_cstore_data_files_relfilenode, BTEqualStrategyNumber,
F_INT8EQ, ObjectIdGetDatum(relfilenode));
SysScanDesc scanDescriptor = systable_beginscan(cstoreDataFiles,
CStoreDataFilesIndexRelationId(),
indexOK,
NULL, scanKeyCount, scanKey);
HeapTuple heapTuple = systable_getnext(scanDescriptor);
if (heapTuple == NULL)
{
ereport(ERROR, (errmsg("relfilenode %d doesn't belong to a cstore table",
relfilenode)));
}
Form_cstore_data_files metadata = (Form_cstore_data_files) GETSTRUCT(heapTuple);
if (metadata->block_row_count != blockRowCount)
{
values[Anum_cstore_data_files_block_row_count - 1] = Int32GetDatum(blockRowCount);
isnull[Anum_cstore_data_files_block_row_count - 1] = false;
replace[Anum_cstore_data_files_block_row_count - 1] = true;
changed = true;
}
if (metadata->stripe_row_count != stripeRowCount)
{
values[Anum_cstore_data_files_stripe_row_count - 1] = Int32GetDatum(
stripeRowCount);
isnull[Anum_cstore_data_files_stripe_row_count - 1] = false;
replace[Anum_cstore_data_files_stripe_row_count - 1] = true;
changed = true;
}
if (ParseCompressionType(NameStr(metadata->compression)) != compression)
{
Name compressionName = palloc0(sizeof(NameData));
namestrcpy(compressionName, CompressionTypeStr(compression));
values[Anum_cstore_data_files_compression - 1] = NameGetDatum(compressionName);
isnull[Anum_cstore_data_files_compression - 1] = false;
replace[Anum_cstore_data_files_compression - 1] = true;
changed = true;
}
if (changed)
{
heapTuple = heap_modify_tuple(heapTuple, tupleDescriptor, values, isnull,
replace);
CatalogTupleUpdate(cstoreDataFiles, &heapTuple->t_self, heapTuple);
CommandCounterIncrement();
}
systable_endscan(scanDescriptor);
heap_close(cstoreDataFiles, NoLock);
}
/*
* SaveStripeSkipList saves StripeSkipList for a given stripe as rows
* of cstore_skipnodes.
*/
void
SaveStripeSkipList(Oid relfilenode, uint64 stripe, StripeSkipList *stripeSkipList,
TupleDesc tupleDescriptor)
{
uint32 columnIndex = 0;
uint32 blockIndex = 0;
uint32 columnCount = stripeSkipList->columnCount;
Oid cstoreSkipNodesOid = CStoreSkipNodesRelationId();
Relation cstoreSkipNodes = heap_open(cstoreSkipNodesOid, RowExclusiveLock);
ModifyState *modifyState = StartModifyRelation(cstoreSkipNodes);
for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
{
for (blockIndex = 0; blockIndex < stripeSkipList->blockCount; blockIndex++)
{
ColumnBlockSkipNode *skipNode =
&stripeSkipList->blockSkipNodeArray[columnIndex][blockIndex];
Datum values[Natts_cstore_skipnodes] = {
ObjectIdGetDatum(relfilenode),
Int64GetDatum(stripe),
Int32GetDatum(columnIndex + 1),
Int32GetDatum(blockIndex),
Int64GetDatum(skipNode->rowCount),
0, /* to be filled below */
0, /* to be filled below */
Int64GetDatum(skipNode->valueBlockOffset),
Int64GetDatum(skipNode->valueLength),
Int64GetDatum(skipNode->existsBlockOffset),
Int64GetDatum(skipNode->existsLength),
Int32GetDatum(skipNode->valueCompressionType)
};
bool nulls[Natts_cstore_skipnodes] = { false };
if (skipNode->hasMinMax)
{
values[Anum_cstore_skipnodes_minimum_value - 1] =
PointerGetDatum(DatumToBytea(skipNode->minimumValue,
&tupleDescriptor->attrs[columnIndex]));
values[Anum_cstore_skipnodes_maximum_value - 1] =
PointerGetDatum(DatumToBytea(skipNode->maximumValue,
&tupleDescriptor->attrs[columnIndex]));
}
else
{
nulls[Anum_cstore_skipnodes_minimum_value - 1] = true;
nulls[Anum_cstore_skipnodes_maximum_value - 1] = true;
}
InsertTupleAndEnforceConstraints(modifyState, values, nulls);
}
}
FinishModifyRelation(modifyState);
heap_close(cstoreSkipNodes, NoLock);
CommandCounterIncrement();
}
/*
* ReadStripeSkipList fetches StripeSkipList for a given stripe.
*/
StripeSkipList *
ReadStripeSkipList(Oid relfilenode, uint64 stripe, TupleDesc tupleDescriptor,
uint32 blockCount)
{
int32 columnIndex = 0;
HeapTuple heapTuple = NULL;
uint32 columnCount = tupleDescriptor->natts;
ScanKeyData scanKey[2];
Oid cstoreSkipNodesOid = CStoreSkipNodesRelationId();
Relation cstoreSkipNodes = heap_open(cstoreSkipNodesOid, AccessShareLock);
Relation index = index_open(CStoreSkipNodesIndexRelationId(), AccessShareLock);
ScanKeyInit(&scanKey[0], Anum_cstore_skipnodes_relfilenode,
BTEqualStrategyNumber, F_OIDEQ, Int32GetDatum(relfilenode));
ScanKeyInit(&scanKey[1], Anum_cstore_skipnodes_stripe,
BTEqualStrategyNumber, F_OIDEQ, Int32GetDatum(stripe));
SysScanDesc scanDescriptor = systable_beginscan_ordered(cstoreSkipNodes, index, NULL,
2, scanKey);
StripeSkipList *skipList = palloc0(sizeof(StripeSkipList));
skipList->blockCount = blockCount;
skipList->columnCount = columnCount;
skipList->blockSkipNodeArray = palloc0(columnCount * sizeof(ColumnBlockSkipNode *));
for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
{
skipList->blockSkipNodeArray[columnIndex] =
palloc0(blockCount * sizeof(ColumnBlockSkipNode));
}
while (HeapTupleIsValid(heapTuple = systable_getnext(scanDescriptor)))
{
Datum datumArray[Natts_cstore_skipnodes];
bool isNullArray[Natts_cstore_skipnodes];
heap_deform_tuple(heapTuple, RelationGetDescr(cstoreSkipNodes), datumArray,
isNullArray);
int32 attr = DatumGetInt32(datumArray[Anum_cstore_skipnodes_attr - 1]);
int32 blockIndex = DatumGetInt32(datumArray[Anum_cstore_skipnodes_block - 1]);
if (attr <= 0 || attr > columnCount)
{
ereport(ERROR, (errmsg("invalid stripe skipnode entry"),
errdetail("Attribute number out of range: %d", attr)));
}
if (blockIndex < 0 || blockIndex >= blockCount)
{
ereport(ERROR, (errmsg("invalid stripe skipnode entry"),
errdetail("Block number out of range: %d", blockIndex)));
}
columnIndex = attr - 1;
ColumnBlockSkipNode *skipNode =
&skipList->blockSkipNodeArray[columnIndex][blockIndex];
skipNode->rowCount = DatumGetInt64(datumArray[Anum_cstore_skipnodes_row_count -
1]);
skipNode->valueBlockOffset =
DatumGetInt64(datumArray[Anum_cstore_skipnodes_value_stream_offset - 1]);
skipNode->valueLength =
DatumGetInt64(datumArray[Anum_cstore_skipnodes_value_stream_length - 1]);
skipNode->existsBlockOffset =
DatumGetInt64(datumArray[Anum_cstore_skipnodes_exists_stream_offset - 1]);
skipNode->existsLength =
DatumGetInt64(datumArray[Anum_cstore_skipnodes_exists_stream_length - 1]);
skipNode->valueCompressionType =
DatumGetInt32(datumArray[Anum_cstore_skipnodes_value_compression_type - 1]);
if (isNullArray[Anum_cstore_skipnodes_minimum_value - 1] ||
isNullArray[Anum_cstore_skipnodes_maximum_value - 1])
{
skipNode->hasMinMax = false;
}
else
{
bytea *minValue = DatumGetByteaP(
datumArray[Anum_cstore_skipnodes_minimum_value - 1]);
bytea *maxValue = DatumGetByteaP(
datumArray[Anum_cstore_skipnodes_maximum_value - 1]);
skipNode->minimumValue =
ByteaToDatum(minValue, &tupleDescriptor->attrs[columnIndex]);
skipNode->maximumValue =
ByteaToDatum(maxValue, &tupleDescriptor->attrs[columnIndex]);
skipNode->hasMinMax = true;
}
}
systable_endscan_ordered(scanDescriptor);
index_close(index, NoLock);
heap_close(cstoreSkipNodes, NoLock);
return skipList;
}
/*
* InsertStripeMetadataRow adds a row to cstore_stripes.
*/
static void
InsertStripeMetadataRow(Oid relfilenode, StripeMetadata *stripe)
{
bool nulls[Natts_cstore_stripes] = { 0 };
Datum values[Natts_cstore_stripes] = {
ObjectIdGetDatum(relfilenode),
Int64GetDatum(stripe->id),
Int64GetDatum(stripe->fileOffset),
Int64GetDatum(stripe->dataLength),
Int32GetDatum(stripe->columnCount),
Int32GetDatum(stripe->blockCount),
Int32GetDatum(stripe->blockRowCount),
Int64GetDatum(stripe->rowCount)
};
Oid cstoreStripesOid = CStoreStripesRelationId();
Relation cstoreStripes = heap_open(cstoreStripesOid, RowExclusiveLock);
ModifyState *modifyState = StartModifyRelation(cstoreStripes);
InsertTupleAndEnforceConstraints(modifyState, values, nulls);
FinishModifyRelation(modifyState);
CommandCounterIncrement();
heap_close(cstoreStripes, NoLock);
}
/*
* ReadDataFileMetadata constructs DataFileMetadata for a given relfilenode by reading
* from cstore_data_files and cstore_stripes.
*/
DataFileMetadata *
ReadDataFileMetadata(Oid relfilenode, bool missingOk)
{
DataFileMetadata *datafileMetadata = palloc0(sizeof(DataFileMetadata));
bool found = ReadCStoreDataFiles(relfilenode, datafileMetadata);
if (!found)
{
if (!missingOk)
{
ereport(ERROR, (errmsg("Relfilenode %d doesn't belong to a cstore table.",
relfilenode)));
}
else
{
return NULL;
}
}
datafileMetadata->stripeMetadataList =
ReadDataFileStripeList(relfilenode, GetTransactionSnapshot());
return datafileMetadata;
}
/*
* GetHighestUsedAddress returns the highest used address for the given
* relfilenode across all active and inactive transactions.
*/
uint64
GetHighestUsedAddress(Oid relfilenode)
{
uint64 highestUsedAddress = 0;
uint64 highestUsedId = 0;
GetHighestUsedAddressAndId(relfilenode, &highestUsedAddress, &highestUsedId);
return highestUsedAddress;
}
/*
* GetHighestUsedAddressAndId returns the highest used address and id for
* the given relfilenode across all active and inactive transactions.
*/
static void
GetHighestUsedAddressAndId(Oid relfilenode,
uint64 *highestUsedAddress,
uint64 *highestUsedId)
{
ListCell *stripeMetadataCell = NULL;
SnapshotData SnapshotDirty;
InitDirtySnapshot(SnapshotDirty);
List *stripeMetadataList = ReadDataFileStripeList(relfilenode, &SnapshotDirty);
*highestUsedId = 0;
*highestUsedAddress = 0;
foreach(stripeMetadataCell, stripeMetadataList)
{
StripeMetadata *stripe = lfirst(stripeMetadataCell);
uint64 lastByte = stripe->fileOffset + stripe->dataLength - 1;
*highestUsedAddress = Max(*highestUsedAddress, lastByte);
*highestUsedId = Max(*highestUsedId, stripe->id);
}
}
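/*
 * Example with illustrative numbers: stripes (fileOffset 0, dataLength 1000,
 * id 1) and (fileOffset 1000, dataLength 500, id 2) yield
 * highestUsedAddress = 1000 + 500 - 1 = 1499 and highestUsedId = 2.
 */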
/*
* ReserveStripe reserves a stripe of the given size for the given relation,
* and inserts it into cstore_stripes. It is guaranteed that concurrent
* writes won't overwrite the returned stripe.
*/
StripeMetadata
ReserveStripe(Relation rel, uint64 sizeBytes,
uint64 rowCount, uint64 columnCount,
uint64 blockCount, uint64 blockRowCount)
{
StripeMetadata stripe = { 0 };
uint64 currLogicalHigh = 0;
uint64 highestId = 0;
/*
* We take ShareUpdateExclusiveLock here, so two space
* reservations conflict, space reservation <-> vacuum
* conflict, but space reservation doesn't conflict with
* reads & writes.
*/
LockRelation(rel, ShareUpdateExclusiveLock);
Oid relfilenode = rel->rd_node.relNode;
GetHighestUsedAddressAndId(relfilenode, &currLogicalHigh, &highestId);
SmgrAddr currSmgrHigh = logical_to_smgr(currLogicalHigh);
SmgrAddr resSmgrStart = next_block_start(currSmgrHigh);
uint64 resLogicalStart = smgr_to_logical(resSmgrStart);
uint64 resLogicalEnd = resLogicalStart + sizeBytes - 1;
SmgrAddr resSmgrEnd = logical_to_smgr(resLogicalEnd);
RelationOpenSmgr(rel);
uint64 nblocks = smgrnblocks(rel->rd_smgr, MAIN_FORKNUM);
while (resSmgrEnd.blockno >= nblocks)
{
Buffer newBuffer = ReadBuffer(rel, P_NEW);
ReleaseBuffer(newBuffer);
nblocks = smgrnblocks(rel->rd_smgr, MAIN_FORKNUM);
}
RelationCloseSmgr(rel);
stripe.fileOffset = resLogicalStart;
stripe.dataLength = sizeBytes;
stripe.blockCount = blockCount;
stripe.blockRowCount = blockRowCount;
stripe.columnCount = columnCount;
stripe.rowCount = rowCount;
stripe.id = highestId + 1;
InsertStripeMetadataRow(relfilenode, &stripe);
UnlockRelation(rel, ShareUpdateExclusiveLock);
return stripe;
}
/*
* ReadDataFileStripeList reads the stripe list for a given relfilenode
* in the given snapshot.
*/
static List *
ReadDataFileStripeList(Oid relfilenode, Snapshot snapshot)
{
List *stripeMetadataList = NIL;
ScanKeyData scanKey[1];
HeapTuple heapTuple;
ScanKeyInit(&scanKey[0], Anum_cstore_stripes_relfilenode,
BTEqualStrategyNumber, F_OIDEQ, Int32GetDatum(relfilenode));
Oid cstoreStripesOid = CStoreStripesRelationId();
Relation cstoreStripes = heap_open(cstoreStripesOid, AccessShareLock);
Relation index = index_open(CStoreStripesIndexRelationId(), AccessShareLock);
TupleDesc tupleDescriptor = RelationGetDescr(cstoreStripes);
SysScanDesc scanDescriptor = systable_beginscan_ordered(cstoreStripes, index,
snapshot, 1,
scanKey);
while (HeapTupleIsValid(heapTuple = systable_getnext(scanDescriptor)))
{
Datum datumArray[Natts_cstore_stripes];
bool isNullArray[Natts_cstore_stripes];
heap_deform_tuple(heapTuple, tupleDescriptor, datumArray, isNullArray);
StripeMetadata *stripeMetadata = palloc0(sizeof(StripeMetadata));
stripeMetadata->id = DatumGetInt64(datumArray[Anum_cstore_stripes_stripe - 1]);
stripeMetadata->fileOffset = DatumGetInt64(
datumArray[Anum_cstore_stripes_file_offset - 1]);
stripeMetadata->dataLength = DatumGetInt64(
datumArray[Anum_cstore_stripes_data_length - 1]);
stripeMetadata->columnCount = DatumGetInt32(
datumArray[Anum_cstore_stripes_column_count - 1]);
stripeMetadata->blockCount = DatumGetInt32(
datumArray[Anum_cstore_stripes_block_count - 1]);
stripeMetadata->blockRowCount = DatumGetInt32(
datumArray[Anum_cstore_stripes_block_row_count - 1]);
stripeMetadata->rowCount = DatumGetInt64(
datumArray[Anum_cstore_stripes_row_count - 1]);
stripeMetadataList = lappend(stripeMetadataList, stripeMetadata);
}
systable_endscan_ordered(scanDescriptor);
index_close(index, NoLock);
heap_close(cstoreStripes, NoLock);
return stripeMetadataList;
}
/*
* ReadCStoreDataFiles reads corresponding record from cstore_data_files. Returns
* false if table was not found in cstore_data_files.
*/
static bool
ReadCStoreDataFiles(Oid relfilenode, DataFileMetadata *metadata)
{
bool found = false;
ScanKeyData scanKey[1];
ScanKeyInit(&scanKey[0], Anum_cstore_data_files_relfilenode,
BTEqualStrategyNumber, F_OIDEQ, Int32GetDatum(relfilenode));
Oid cstoreDataFilesOid = CStoreDataFilesRelationId();
Relation cstoreDataFiles = try_relation_open(cstoreDataFilesOid, AccessShareLock);
if (cstoreDataFiles == NULL)
{
/*
* Extension has been dropped. This can be called while
* dropping extension or database via ObjectAccess().
*/
return false;
}
Relation index = try_relation_open(CStoreDataFilesIndexRelationId(), AccessShareLock);
if (index == NULL)
{
heap_close(cstoreDataFiles, NoLock);
/* extension has been dropped */
return false;
}
TupleDesc tupleDescriptor = RelationGetDescr(cstoreDataFiles);
SysScanDesc scanDescriptor = systable_beginscan_ordered(cstoreDataFiles, index, NULL,
1, scanKey);
HeapTuple heapTuple = systable_getnext(scanDescriptor);
if (HeapTupleIsValid(heapTuple))
{
Datum datumArray[Natts_cstore_data_files];
bool isNullArray[Natts_cstore_data_files];
heap_deform_tuple(heapTuple, tupleDescriptor, datumArray, isNullArray);
if (metadata)
{
metadata->blockRowCount = DatumGetInt32(
datumArray[Anum_cstore_data_files_block_row_count - 1]);
metadata->stripeRowCount = DatumGetInt32(
datumArray[Anum_cstore_data_files_stripe_row_count - 1]);
Name compressionName = DatumGetName(
datumArray[Anum_cstore_data_files_compression - 1]);
metadata->compression = ParseCompressionType(NameStr(*compressionName));
}
found = true;
}
systable_endscan_ordered(scanDescriptor);
index_close(index, NoLock);
heap_close(cstoreDataFiles, NoLock);
return found;
}
/*
* DeleteDataFileMetadataRowIfExists removes the row with given relfilenode from cstore_stripes.
*/
void
DeleteDataFileMetadataRowIfExists(Oid relfilenode)
{
ScanKeyData scanKey[1];
/*
* During a restore for binary upgrade, metadata tables and indexes may or
* may not exist.
*/
if (IsBinaryUpgrade)
{
return;
}
ScanKeyInit(&scanKey[0], Anum_cstore_data_files_relfilenode,
BTEqualStrategyNumber, F_OIDEQ, Int32GetDatum(relfilenode));
Oid cstoreDataFilesOid = CStoreDataFilesRelationId();
Relation cstoreDataFiles = try_relation_open(cstoreDataFilesOid, AccessShareLock);
if (cstoreDataFiles == NULL)
{
/* extension has been dropped */
return;
}
Relation index = index_open(CStoreDataFilesIndexRelationId(), AccessShareLock);
SysScanDesc scanDescriptor = systable_beginscan_ordered(cstoreDataFiles, index, NULL,
1, scanKey);
HeapTuple heapTuple = systable_getnext(scanDescriptor);
if (HeapTupleIsValid(heapTuple))
{
ModifyState *modifyState = StartModifyRelation(cstoreDataFiles);
DeleteTupleAndEnforceConstraints(modifyState, heapTuple);
FinishModifyRelation(modifyState);
}
systable_endscan_ordered(scanDescriptor);
index_close(index, NoLock);
heap_close(cstoreDataFiles, NoLock);
}
/*
* StartModifyRelation allocates resources for modifications.
*/
static ModifyState *
StartModifyRelation(Relation rel)
{
EState *estate = create_estate_for_relation(rel);
/* ExecSimpleRelationInsert, ... require caller to open indexes */
ExecOpenIndices(estate->es_result_relation_info, false);
ModifyState *modifyState = palloc(sizeof(ModifyState));
modifyState->rel = rel;
modifyState->estate = estate;
return modifyState;
}
/*
* InsertTupleAndEnforceConstraints inserts a tuple into a relation and makes
* sure constraints are enforced and indexes are updated.
*/
static void
InsertTupleAndEnforceConstraints(ModifyState *state, Datum *values, bool *nulls)
{
TupleDesc tupleDescriptor = RelationGetDescr(state->rel);
HeapTuple tuple = heap_form_tuple(tupleDescriptor, values, nulls);
#if PG_VERSION_NUM >= 120000
TupleTableSlot *slot = ExecInitExtraTupleSlot(state->estate, tupleDescriptor,
&TTSOpsHeapTuple);
ExecStoreHeapTuple(tuple, slot, false);
#else
TupleTableSlot *slot = ExecInitExtraTupleSlot(state->estate, tupleDescriptor);
ExecStoreTuple(tuple, slot, InvalidBuffer, false);
#endif
/* use ExecSimpleRelationInsert to enforce constraints */
ExecSimpleRelationInsert(state->estate, slot);
}
/*
* DeleteTupleAndEnforceConstraints deletes a tuple from a relation and
* makes sure constraints (e.g. FK constraints) are enforced.
*/
static void
DeleteTupleAndEnforceConstraints(ModifyState *state, HeapTuple heapTuple)
{
EState *estate = state->estate;
ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
ItemPointer tid = &(heapTuple->t_self);
simple_heap_delete(state->rel, tid);
/* execute AFTER ROW DELETE Triggers to enforce constraints */
ExecARDeleteTriggers(estate, resultRelInfo, tid, NULL, NULL);
}
/*
* FinishModifyRelation cleans up resources after modifications are done.
*/
static void
FinishModifyRelation(ModifyState *state)
{
ExecCloseIndices(state->estate->es_result_relation_info);
AfterTriggerEndQuery(state->estate);
ExecCleanUpTriggerState(state->estate);
ExecResetTupleTable(state->estate->es_tupleTable, false);
FreeExecutorState(state->estate);
}
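/*
 * Illustration (not part of this diff): StartModifyRelation,
 * InsertTupleAndEnforceConstraints and FinishModifyRelation together form a
 * small API for modifying the metadata tables. A minimal usage sketch,
 * assuming the caller has already opened the target table and built matching
 * values[] / nulls[] arrays for its tuple descriptor:
 */
static void
ExampleInsertMetadataRow(Relation metadataTable, Datum *values, bool *nulls)
{
ModifyState *modifyState = StartModifyRelation(metadataTable);
InsertTupleAndEnforceConstraints(modifyState, values, nulls);
FinishModifyRelation(modifyState);
/* make the new row visible to later scans in this command (access/xact.h) */
CommandCounterIncrement();
}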
/*
* Based on similar functions in
* postgres/src/backend/replication/logical/worker.c and in copy.c.
*
* Prepares executor state for evaluation of constraint expressions,
* indexes and triggers.
*/
static EState *
create_estate_for_relation(Relation rel)
{
ResultRelInfo *resultRelInfo;
EState *estate = CreateExecutorState();
RangeTblEntry *rte = makeNode(RangeTblEntry);
rte->rtekind = RTE_RELATION;
rte->relid = RelationGetRelid(rel);
rte->relkind = rel->rd_rel->relkind;
#if PG_VERSION_NUM >= 120000
rte->rellockmode = AccessShareLock;
ExecInitRangeTable(estate, list_make1(rte));
#endif
resultRelInfo = makeNode(ResultRelInfo);
InitResultRelInfo(resultRelInfo, rel, 1, NULL, 0);
estate->es_result_relations = resultRelInfo;
estate->es_num_result_relations = 1;
estate->es_result_relation_info = resultRelInfo;
estate->es_output_cid = GetCurrentCommandId(true);
#if PG_VERSION_NUM < 120000
/* Triggers might need a slot */
if (resultRelInfo->ri_TrigDesc)
{
estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate, NULL);
}
#endif
/* Prepare to catch AFTER triggers. */
AfterTriggerBeginQuery();
return estate;
}
/*
* DatumToBytea serializes a datum into a bytea value.
*/
static bytea *
DatumToBytea(Datum value, Form_pg_attribute attrForm)
{
int datumLength = att_addlength_datum(0, attrForm->attlen, value);
bytea *result = palloc0(datumLength + VARHDRSZ);
SET_VARSIZE(result, datumLength + VARHDRSZ);
if (attrForm->attlen > 0)
{
if (attrForm->attbyval)
{
store_att_byval(VARDATA(result), value, attrForm->attlen);
}
else
{
memcpy_s(VARDATA(result), datumLength + VARHDRSZ,
DatumGetPointer(value), attrForm->attlen);
}
}
else
{
memcpy_s(VARDATA(result), datumLength + VARHDRSZ,
DatumGetPointer(value), datumLength);
}
return result;
}
/*
* ByteaToDatum deserializes a value which was previously serialized using
* DatumToBytea.
*/
static Datum
ByteaToDatum(bytea *bytes, Form_pg_attribute attrForm)
{
/*
* We copy the data so that the result of this function remains valid
* even after the input bytea is freed.
*/
char *binaryDataCopy = palloc0(VARSIZE_ANY_EXHDR(bytes));
memcpy_s(binaryDataCopy, VARSIZE_ANY_EXHDR(bytes),
VARDATA_ANY(bytes), VARSIZE_ANY_EXHDR(bytes));
return fetch_att(binaryDataCopy, attrForm->attbyval, attrForm->attlen);
}
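/*
 * Illustration (not part of this diff): DatumToBytea and ByteaToDatum are
 * exact inverses for a given attribute form. A round-trip sketch, assuming
 * attrForm describes a pass-by-value attribute with attlen == 8 (e.g. a
 * bigint column):
 */
static void
ExampleDatumRoundTrip(Form_pg_attribute attrForm)
{
Datum original = Int64GetDatum(42);
bytea *serialized = DatumToBytea(original, attrForm);
Datum restored = ByteaToDatum(serialized, attrForm);
Assert(DatumGetInt64(restored) == DatumGetInt64(original));
}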
/*
* CStoreStripesRelationId returns relation id of cstore_stripes.
* TODO: should we cache this, as Citus does?
*/
static Oid
CStoreStripesRelationId(void)
{
return get_relname_relid("cstore_stripes", CStoreNamespaceId());
}
/*
* CStoreStripesIndexRelationId returns relation id of cstore_stripes_pkey.
* TODO: should we cache this, as Citus does?
*/
static Oid
CStoreStripesIndexRelationId(void)
{
return get_relname_relid("cstore_stripes_pkey", CStoreNamespaceId());
}
/*
* CStoreDataFilesRelationId returns relation id of cstore_data_files.
* TODO: should we cache this, as Citus does?
*/
static Oid
CStoreDataFilesRelationId(void)
{
return get_relname_relid("cstore_data_files", CStoreNamespaceId());
}
/*
* CStoreDataFilesIndexRelationId returns relation id of cstore_data_files_pkey.
* TODO: should we cache this, as Citus does?
*/
static Oid
CStoreDataFilesIndexRelationId(void)
{
return get_relname_relid("cstore_data_files_pkey", CStoreNamespaceId());
}
/*
* CStoreSkipNodesRelationId returns relation id of cstore_skipnodes.
* TODO: should we cache this, as Citus does?
*/
static Oid
CStoreSkipNodesRelationId(void)
{
return get_relname_relid("cstore_skipnodes", CStoreNamespaceId());
}
/*
* CStoreSkipNodesIndexRelationId returns relation id of cstore_skipnodes_pkey.
* TODO: should we cache this, as Citus does?
*/
static Oid
CStoreSkipNodesIndexRelationId(void)
{
return get_relname_relid("cstore_skipnodes_pkey", CStoreNamespaceId());
}
/*
* CStoreNamespaceId returns the namespace id of the schema in which we store
* cstore-related tables.
*/
static Oid
CStoreNamespaceId(void)
{
return get_namespace_oid("cstore", false);
}

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,769 @@
/*-------------------------------------------------------------------------
*
* cstore_writer.c
*
* This file contains function definitions for writing cstore files. This
* includes the logic for writing file level metadata, writing row stripes,
* and calculating block skip nodes.
*
* Copyright (c) 2016, Citus Data, Inc.
*
* $Id$
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "safe_lib.h"
#include "access/heapam.h"
#include "access/nbtree.h"
#include "catalog/pg_am.h"
#include "miscadmin.h"
#include "storage/fd.h"
#include "storage/smgr.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/relfilenodemap.h"
#include "columnar/cstore.h"
#include "columnar/cstore_version_compat.h"
static StripeBuffers * CreateEmptyStripeBuffers(uint32 stripeMaxRowCount,
uint32 blockRowCount,
uint32 columnCount);
static StripeSkipList * CreateEmptyStripeSkipList(uint32 stripeMaxRowCount,
uint32 blockRowCount,
uint32 columnCount);
static void FlushStripe(TableWriteState *writeState);
static StringInfo SerializeBoolArray(bool *boolArray, uint32 boolArrayLength);
static void SerializeSingleDatum(StringInfo datumBuffer, Datum datum,
bool datumTypeByValue, int datumTypeLength,
char datumTypeAlign);
static void SerializeBlockData(TableWriteState *writeState, uint32 blockIndex,
uint32 rowCount);
static void UpdateBlockSkipNodeMinMax(ColumnBlockSkipNode *blockSkipNode,
Datum columnValue, bool columnTypeByValue,
int columnTypeLength, Oid columnCollation,
FmgrInfo *comparisonFunction);
static Datum DatumCopy(Datum datum, bool datumTypeByValue, int datumTypeLength);
static StringInfo CopyStringInfo(StringInfo sourceString);
/*
* CStoreBeginWrite initializes a cstore data load operation and returns a table
* handle. This handle should be used for adding the row values and finishing the
* data load operation. If the cstore footer file already exists, we read the
* footer and then seek to just after the last stripe, where the new stripes
* will be added.
*/
TableWriteState *
CStoreBeginWrite(RelFileNode relfilenode,
CompressionType compressionType,
uint64 stripeMaxRowCount, uint32 blockRowCount,
TupleDesc tupleDescriptor)
{
uint32 columnIndex = 0;
/* get comparison function pointers for each of the columns */
uint32 columnCount = tupleDescriptor->natts;
FmgrInfo **comparisonFunctionArray = palloc0(columnCount * sizeof(FmgrInfo *));
for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
{
FmgrInfo *comparisonFunction = NULL;
FormData_pg_attribute *attributeForm = TupleDescAttr(tupleDescriptor,
columnIndex);
if (!attributeForm->attisdropped)
{
Oid typeId = attributeForm->atttypid;
comparisonFunction = GetFunctionInfoOrNull(typeId, BTREE_AM_OID,
BTORDER_PROC);
}
comparisonFunctionArray[columnIndex] = comparisonFunction;
}
/*
* We allocate all stripe specific data in the stripeWriteContext, and
* reset this memory context once we have flushed the stripe to the file.
* This is to avoid memory leaks.
*/
MemoryContext stripeWriteContext = AllocSetContextCreate(CurrentMemoryContext,
"Stripe Write Memory Context",
ALLOCSET_DEFAULT_SIZES);
bool *columnMaskArray = palloc(columnCount * sizeof(bool));
memset(columnMaskArray, true, columnCount);
BlockData *blockData = CreateEmptyBlockData(columnCount, columnMaskArray,
blockRowCount);
TableWriteState *writeState = palloc0(sizeof(TableWriteState));
writeState->relfilenode = relfilenode;
writeState->compressionType = compressionType;
writeState->stripeMaxRowCount = stripeMaxRowCount;
writeState->blockRowCount = blockRowCount;
writeState->tupleDescriptor = CreateTupleDescCopy(tupleDescriptor);
writeState->comparisonFunctionArray = comparisonFunctionArray;
writeState->stripeBuffers = NULL;
writeState->stripeSkipList = NULL;
writeState->stripeWriteContext = stripeWriteContext;
writeState->blockData = blockData;
writeState->compressionBuffer = NULL;
return writeState;
}
/*
* CStoreWriteRow adds a row to the cstore file. If the stripe is not initialized,
* we create structures to hold stripe data and its skip list. We then serialize
* and append data to the serialized value buffer for each of the columns and
* update the corresponding skip nodes. The whole block data is compressed at
* every blockRowCount-th insertion. Finally, if the row count exceeds
* stripeMaxRowCount, we flush the stripe and add its metadata to the table footer.
*/
void
CStoreWriteRow(TableWriteState *writeState, Datum *columnValues, bool *columnNulls)
{
uint32 columnIndex = 0;
StripeBuffers *stripeBuffers = writeState->stripeBuffers;
StripeSkipList *stripeSkipList = writeState->stripeSkipList;
uint32 columnCount = writeState->tupleDescriptor->natts;
const uint32 blockRowCount = writeState->blockRowCount;
BlockData *blockData = writeState->blockData;
MemoryContext oldContext = MemoryContextSwitchTo(writeState->stripeWriteContext);
if (stripeBuffers == NULL)
{
stripeBuffers = CreateEmptyStripeBuffers(writeState->stripeMaxRowCount,
blockRowCount, columnCount);
stripeSkipList = CreateEmptyStripeSkipList(writeState->stripeMaxRowCount,
blockRowCount, columnCount);
writeState->stripeBuffers = stripeBuffers;
writeState->stripeSkipList = stripeSkipList;
writeState->compressionBuffer = makeStringInfo();
/*
* serializedValueBuffer lives in stripe write memory context so it needs to be
* initialized when the stripe is created.
*/
for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
{
blockData->valueBufferArray[columnIndex] = makeStringInfo();
}
}
uint32 blockIndex = stripeBuffers->rowCount / blockRowCount;
uint32 blockRowIndex = stripeBuffers->rowCount % blockRowCount;
for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
{
ColumnBlockSkipNode **blockSkipNodeArray = stripeSkipList->blockSkipNodeArray;
ColumnBlockSkipNode *blockSkipNode =
&blockSkipNodeArray[columnIndex][blockIndex];
if (columnNulls[columnIndex])
{
blockData->existsArray[columnIndex][blockRowIndex] = false;
}
else
{
FmgrInfo *comparisonFunction =
writeState->comparisonFunctionArray[columnIndex];
Form_pg_attribute attributeForm =
TupleDescAttr(writeState->tupleDescriptor, columnIndex);
bool columnTypeByValue = attributeForm->attbyval;
int columnTypeLength = attributeForm->attlen;
Oid columnCollation = attributeForm->attcollation;
char columnTypeAlign = attributeForm->attalign;
blockData->existsArray[columnIndex][blockRowIndex] = true;
SerializeSingleDatum(blockData->valueBufferArray[columnIndex],
columnValues[columnIndex], columnTypeByValue,
columnTypeLength, columnTypeAlign);
UpdateBlockSkipNodeMinMax(blockSkipNode, columnValues[columnIndex],
columnTypeByValue, columnTypeLength,
columnCollation, comparisonFunction);
}
blockSkipNode->rowCount++;
}
stripeSkipList->blockCount = blockIndex + 1;
/* if the last row of the block was inserted, serialize the block */
if (blockRowIndex == blockRowCount - 1)
{
SerializeBlockData(writeState, blockIndex, blockRowCount);
}
stripeBuffers->rowCount++;
if (stripeBuffers->rowCount >= writeState->stripeMaxRowCount)
{
CStoreFlushPendingWrites(writeState);
}
MemoryContextSwitchTo(oldContext);
}
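/*
 * Illustration (not part of this diff): the intended call sequence for the
 * write API above. The stripe and block row counts here are hypothetical
 * values; callers normally take them from the table's cstore options.
 */
static void
ExampleDataLoad(Relation rel, TupleDesc tupleDescriptor,
Datum *columnValues, bool *columnNulls, uint64 rowCount)
{
TableWriteState *writeState = CStoreBeginWrite(rel->rd_node,
COMPRESSION_PG_LZ,
150000, 10000,
tupleDescriptor);
for (uint64 rowIndex = 0; rowIndex < rowCount; rowIndex++)
{
/* in a real load, columnValues/columnNulls change per row */
CStoreWriteRow(writeState, columnValues, columnNulls);
}
CStoreEndWrite(writeState);
}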
/*
* CStoreEndWrite finishes a cstore data load operation. If we have an unflushed
* stripe, we flush it. Then, we sync and close the cstore data file. Last, we
* flush the footer to a temporary file, and atomically rename this temporary
* file to the original footer file.
*/
void
CStoreEndWrite(TableWriteState *writeState)
{
CStoreFlushPendingWrites(writeState);
MemoryContextDelete(writeState->stripeWriteContext);
pfree(writeState->comparisonFunctionArray);
FreeBlockData(writeState->blockData);
pfree(writeState);
}
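/*
 * CStoreFlushPendingWrites flushes the current stripe, if any, so that all
 * pending writes reach disk.
 */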
void
CStoreFlushPendingWrites(TableWriteState *writeState)
{
StripeBuffers *stripeBuffers = writeState->stripeBuffers;
if (stripeBuffers != NULL)
{
MemoryContext oldContext = MemoryContextSwitchTo(writeState->stripeWriteContext);
FlushStripe(writeState);
/* set stripe data and skip list to NULL so they are recreated next time */
writeState->stripeBuffers = NULL;
writeState->stripeSkipList = NULL;
MemoryContextSwitchTo(oldContext);
}
}
/*
* CreateEmptyStripeBuffers allocates an empty StripeBuffers structure with the given
* column count.
*/
static StripeBuffers *
CreateEmptyStripeBuffers(uint32 stripeMaxRowCount, uint32 blockRowCount,
uint32 columnCount)
{
uint32 columnIndex = 0;
uint32 maxBlockCount = (stripeMaxRowCount / blockRowCount) + 1;
ColumnBuffers **columnBuffersArray = palloc0(columnCount * sizeof(ColumnBuffers *));
for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
{
uint32 blockIndex = 0;
ColumnBlockBuffers **blockBuffersArray =
palloc0(maxBlockCount * sizeof(ColumnBlockBuffers *));
for (blockIndex = 0; blockIndex < maxBlockCount; blockIndex++)
{
blockBuffersArray[blockIndex] = palloc0(sizeof(ColumnBlockBuffers));
blockBuffersArray[blockIndex]->existsBuffer = NULL;
blockBuffersArray[blockIndex]->valueBuffer = NULL;
blockBuffersArray[blockIndex]->valueCompressionType = COMPRESSION_NONE;
}
columnBuffersArray[columnIndex] = palloc0(sizeof(ColumnBuffers));
columnBuffersArray[columnIndex]->blockBuffersArray = blockBuffersArray;
}
StripeBuffers *stripeBuffers = palloc0(sizeof(StripeBuffers));
stripeBuffers->columnBuffersArray = columnBuffersArray;
stripeBuffers->columnCount = columnCount;
stripeBuffers->rowCount = 0;
return stripeBuffers;
}
/*
* CreateEmptyStripeSkipList allocates an empty StripeSkipList structure with
* the given column count. This structure has enough blocks to hold statistics
* for stripeMaxRowCount rows.
*/
static StripeSkipList *
CreateEmptyStripeSkipList(uint32 stripeMaxRowCount, uint32 blockRowCount,
uint32 columnCount)
{
uint32 columnIndex = 0;
uint32 maxBlockCount = (stripeMaxRowCount / blockRowCount) + 1;
ColumnBlockSkipNode **blockSkipNodeArray =
palloc0(columnCount * sizeof(ColumnBlockSkipNode *));
for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
{
blockSkipNodeArray[columnIndex] =
palloc0(maxBlockCount * sizeof(ColumnBlockSkipNode));
}
StripeSkipList *stripeSkipList = palloc0(sizeof(StripeSkipList));
stripeSkipList->columnCount = columnCount;
stripeSkipList->blockCount = 0;
stripeSkipList->blockSkipNodeArray = blockSkipNodeArray;
return stripeSkipList;
}
static void
WriteToSmgr(Relation rel, uint64 logicalOffset, char *data, uint32 dataLength)
{
uint64 remaining = dataLength;
Buffer buffer;
while (remaining > 0)
{
SmgrAddr addr = logical_to_smgr(logicalOffset);
RelationOpenSmgr(rel);
BlockNumber nblocks = smgrnblocks(rel->rd_smgr, MAIN_FORKNUM);
Assert(addr.blockno < nblocks);
(void) nblocks; /* keep compiler quiet */
RelationCloseSmgr(rel);
buffer = ReadBuffer(rel, addr.blockno);
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
Page page = BufferGetPage(buffer);
PageHeader phdr = (PageHeader) page;
if (PageIsNew(page))
{
PageInit(page, BLCKSZ, 0);
}
/*
* After a transaction has been rolled back, we might be
* over-writing the rolled-back write, so phdr->pd_lower can be
* different from addr.offset.
*
* We reset pd_lower to discard the rolled-back write.
*/
if (phdr->pd_lower > addr.offset)
{
ereport(DEBUG1, (errmsg("over-writing page %u", addr.blockno),
errdetail("This can happen after a roll-back.")));
phdr->pd_lower = addr.offset;
}
Assert(phdr->pd_lower == addr.offset);
START_CRIT_SECTION();
uint64 to_write = Min(phdr->pd_upper - phdr->pd_lower, remaining);
memcpy_s(page + phdr->pd_lower, phdr->pd_upper - phdr->pd_lower, data, to_write);
phdr->pd_lower += to_write;
MarkBufferDirty(buffer);
if (RelationNeedsWAL(rel))
{
XLogBeginInsert();
/*
* Since cstore will mostly write whole pages, we force the transmission of
* the whole image in the buffer.
*/
XLogRegisterBuffer(0, buffer, REGBUF_FORCE_IMAGE);
XLogRecPtr recptr = XLogInsert(RM_GENERIC_ID, 0);
PageSetLSN(page, recptr);
}
END_CRIT_SECTION();
UnlockReleaseBuffer(buffer);
data += to_write;
remaining -= to_write;
logicalOffset += to_write;
}
}
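/*
 * Illustration (not part of this diff): one plausible shape of
 * logical_to_smgr(), which maps a logical byte offset in the columnar data
 * to a (block number, in-page offset) pair. This sketch assumes every page
 * is packed from SizeOfPageHeaderData up to BLCKSZ; the real definition
 * lives in cstore.h and may differ.
 */
static inline SmgrAddr
LogicalToSmgrSketch(uint64 logicalOffset)
{
uint64 usableBytesPerPage = BLCKSZ - SizeOfPageHeaderData;
SmgrAddr addr;
addr.blockno = logicalOffset / usableBytesPerPage;
addr.offset = SizeOfPageHeaderData + (logicalOffset % usableBytesPerPage);
return addr;
}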
/*
* FlushStripe flushes current stripe data into the file. The function first ensures
* the last data block for each column is properly serialized and compressed. Then,
* the function creates the skip list and footer buffers. Finally, the function
* flushes the skip list, data, and footer buffers to the file.
*/
static void
FlushStripe(TableWriteState *writeState)
{
StripeMetadata stripeMetadata = { 0 };
uint32 columnIndex = 0;
uint32 blockIndex = 0;
StripeBuffers *stripeBuffers = writeState->stripeBuffers;
StripeSkipList *stripeSkipList = writeState->stripeSkipList;
ColumnBlockSkipNode **columnSkipNodeArray = stripeSkipList->blockSkipNodeArray;
TupleDesc tupleDescriptor = writeState->tupleDescriptor;
uint32 columnCount = tupleDescriptor->natts;
uint32 blockCount = stripeSkipList->blockCount;
uint32 blockRowCount = writeState->blockRowCount;
uint32 lastBlockIndex = stripeBuffers->rowCount / blockRowCount;
uint32 lastBlockRowCount = stripeBuffers->rowCount % blockRowCount;
uint64 stripeSize = 0;
uint64 stripeRowCount = 0;
Oid relationId = RelidByRelfilenode(writeState->relfilenode.spcNode,
writeState->relfilenode.relNode);
Relation relation = relation_open(relationId, NoLock);
/*
* Check if the last block needs serialization; it was not yet serialized
* if it was not full (i.e. lastBlockRowCount > 0).
*/
if (lastBlockRowCount > 0)
{
SerializeBlockData(writeState, lastBlockIndex, lastBlockRowCount);
}
/* update buffer sizes in stripe skip list */
for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
{
ColumnBlockSkipNode *blockSkipNodeArray = columnSkipNodeArray[columnIndex];
ColumnBuffers *columnBuffers = stripeBuffers->columnBuffersArray[columnIndex];
for (blockIndex = 0; blockIndex < blockCount; blockIndex++)
{
ColumnBlockBuffers *blockBuffers =
columnBuffers->blockBuffersArray[blockIndex];
uint64 existsBufferSize = blockBuffers->existsBuffer->len;
ColumnBlockSkipNode *blockSkipNode = &blockSkipNodeArray[blockIndex];
blockSkipNode->existsBlockOffset = stripeSize;
blockSkipNode->existsLength = existsBufferSize;
stripeSize += existsBufferSize;
}
for (blockIndex = 0; blockIndex < blockCount; blockIndex++)
{
ColumnBlockBuffers *blockBuffers =
columnBuffers->blockBuffersArray[blockIndex];
uint64 valueBufferSize = blockBuffers->valueBuffer->len;
CompressionType valueCompressionType = blockBuffers->valueCompressionType;
ColumnBlockSkipNode *blockSkipNode = &blockSkipNodeArray[blockIndex];
blockSkipNode->valueBlockOffset = stripeSize;
blockSkipNode->valueLength = valueBufferSize;
blockSkipNode->valueCompressionType = valueCompressionType;
stripeSize += valueBufferSize;
}
}
for (blockIndex = 0; blockIndex < blockCount; blockIndex++)
{
stripeRowCount +=
stripeSkipList->blockSkipNodeArray[0][blockIndex].rowCount;
}
stripeMetadata = ReserveStripe(relation, stripeSize,
stripeRowCount, columnCount, blockCount,
blockRowCount);
uint64 currentFileOffset = stripeMetadata.fileOffset;
/*
* Each stripe has only one section:
* Data section, in which we store data for each column continuously.
* We store data for each column in blocks. For each block, we store
* two buffers: an "exists" buffer and a "value" buffer. The "exists"
* buffer tells which values are not NULL; the "value" buffer contains
* the non-NULL values. For each column, we first store all "exists" buffers,
* and then all "value" buffers.
*/
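/*
 * For example (an illustrative sketch, not from the source), a stripe with
 * two columns a and b and two blocks per column is laid out as:
 *
 *   a.exists[0] a.exists[1] a.value[0] a.value[1]
 *   b.exists[0] b.exists[1] b.value[0] b.value[1]
 */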
/* flush the data buffers */
for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
{
ColumnBuffers *columnBuffers = stripeBuffers->columnBuffersArray[columnIndex];
for (blockIndex = 0; blockIndex < stripeSkipList->blockCount; blockIndex++)
{
ColumnBlockBuffers *blockBuffers =
columnBuffers->blockBuffersArray[blockIndex];
StringInfo existsBuffer = blockBuffers->existsBuffer;
WriteToSmgr(relation, currentFileOffset,
existsBuffer->data, existsBuffer->len);
currentFileOffset += existsBuffer->len;
}
for (blockIndex = 0; blockIndex < stripeSkipList->blockCount; blockIndex++)
{
ColumnBlockBuffers *blockBuffers =
columnBuffers->blockBuffersArray[blockIndex];
StringInfo valueBuffer = blockBuffers->valueBuffer;
WriteToSmgr(relation, currentFileOffset,
valueBuffer->data, valueBuffer->len);
currentFileOffset += valueBuffer->len;
}
}
/* create skip list and footer buffers */
SaveStripeSkipList(relation->rd_node.relNode,
stripeMetadata.id,
stripeSkipList, tupleDescriptor);
relation_close(relation, NoLock);
}
/*
* SerializeBoolArray serializes the given boolean array and returns the result
* as a StringInfo. This function packs every 8 boolean values into one byte.
*/
static StringInfo
SerializeBoolArray(bool *boolArray, uint32 boolArrayLength)
{
uint32 boolArrayIndex = 0;
uint32 byteCount = (boolArrayLength + 7) / 8;
StringInfo boolArrayBuffer = makeStringInfo();
enlargeStringInfo(boolArrayBuffer, byteCount);
boolArrayBuffer->len = byteCount;
memset(boolArrayBuffer->data, 0, byteCount);
for (boolArrayIndex = 0; boolArrayIndex < boolArrayLength; boolArrayIndex++)
{
if (boolArray[boolArrayIndex])
{
uint32 byteIndex = boolArrayIndex / 8;
uint32 bitIndex = boolArrayIndex % 8;
boolArrayBuffer->data[byteIndex] |= (1 << bitIndex);
}
}
return boolArrayBuffer;
}
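/*
 * Illustration (not part of this diff): the matching read-side decode.
 * Row i's exists bit is bit (i % 8) of byte (i / 8):
 */
static bool
ExampleBoolArrayGet(const char *boolArrayData, uint32 boolArrayIndex)
{
uint32 byteIndex = boolArrayIndex / 8;
uint32 bitIndex = boolArrayIndex % 8;
return (boolArrayData[byteIndex] & (1 << bitIndex)) != 0;
}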
/*
* SerializeSingleDatum serializes the given datum value and appends it to the
* provided string info buffer.
*/
static void
SerializeSingleDatum(StringInfo datumBuffer, Datum datum, bool datumTypeByValue,
int datumTypeLength, char datumTypeAlign)
{
uint32 datumLength = att_addlength_datum(0, datumTypeLength, datum);
uint32 datumLengthAligned = att_align_nominal(datumLength, datumTypeAlign);
enlargeStringInfo(datumBuffer, datumLengthAligned);
char *currentDatumDataPointer = datumBuffer->data + datumBuffer->len;
memset(currentDatumDataPointer, 0, datumLengthAligned);
if (datumTypeLength > 0)
{
if (datumTypeByValue)
{
store_att_byval(currentDatumDataPointer, datum, datumTypeLength);
}
else
{
memcpy_s(currentDatumDataPointer, datumBuffer->maxlen - datumBuffer->len,
DatumGetPointer(datum), datumTypeLength);
}
}
else
{
Assert(!datumTypeByValue);
memcpy_s(currentDatumDataPointer, datumBuffer->maxlen - datumBuffer->len,
DatumGetPointer(datum), datumLength);
}
datumBuffer->len += datumLengthAligned;
}
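/*
 * Illustration (not part of this diff): a read-side counterpart sketch.
 * fetch_att() reinterprets the bytes as a datum, and att_align_nominal()
 * recreates the padding SerializeSingleDatum added, assuming the same
 * attlen / attalign as at write time.
 */
static Datum
ExampleDeserializeSingleDatum(char *buffer, uint64 *offset,
bool datumTypeByValue, int datumTypeLength,
char datumTypeAlign)
{
char *datumData = buffer + *offset;
Datum datum = fetch_att(datumData, datumTypeByValue, datumTypeLength);
uint32 datumLength = att_addlength_datum(0, datumTypeLength, datum);
*offset += att_align_nominal(datumLength, datumTypeAlign);
return datum;
}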
/*
* SerializeBlockData serializes and compresses the block data at the given
* block index with the given compression type, for every column.
*/
static void
SerializeBlockData(TableWriteState *writeState, uint32 blockIndex, uint32 rowCount)
{
uint32 columnIndex = 0;
StripeBuffers *stripeBuffers = writeState->stripeBuffers;
BlockData *blockData = writeState->blockData;
CompressionType requestedCompressionType = writeState->compressionType;
const uint32 columnCount = stripeBuffers->columnCount;
StringInfo compressionBuffer = writeState->compressionBuffer;
/* serialize the exists values; the data values are already serialized */
for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
{
ColumnBuffers *columnBuffers = stripeBuffers->columnBuffersArray[columnIndex];
ColumnBlockBuffers *blockBuffers = columnBuffers->blockBuffersArray[blockIndex];
blockBuffers->existsBuffer =
SerializeBoolArray(blockData->existsArray[columnIndex], rowCount);
}
/*
* Check and compress value buffers. If a value buffer is not compressible,
* keep it uncompressed; either way, store the compression information.
*/
for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
{
ColumnBuffers *columnBuffers = stripeBuffers->columnBuffersArray[columnIndex];
ColumnBlockBuffers *blockBuffers = columnBuffers->blockBuffersArray[blockIndex];
CompressionType actualCompressionType = COMPRESSION_NONE;
StringInfo serializedValueBuffer = blockData->valueBufferArray[columnIndex];
/* the only other supported compression type is pg_lz for now */
Assert(requestedCompressionType == COMPRESSION_NONE ||
requestedCompressionType == COMPRESSION_PG_LZ);
/*
* If serializedValueBuffer can be compressed, update it with the
* compressed data and store the compression type.
*/
bool compressed = CompressBuffer(serializedValueBuffer, compressionBuffer,
requestedCompressionType);
if (compressed)
{
serializedValueBuffer = compressionBuffer;
actualCompressionType = COMPRESSION_PG_LZ;
}
/* store (compressed) value buffer */
blockBuffers->valueCompressionType = actualCompressionType;
blockBuffers->valueBuffer = CopyStringInfo(serializedValueBuffer);
/* valueBuffer needs to be reset for next block's data */
resetStringInfo(blockData->valueBufferArray[columnIndex]);
}
}
/*
* UpdateBlockSkipNodeMinMax takes the given column value, and checks if this
* value falls outside the range of minimum/maximum values of the given column
* block skip node. If it does, the function updates the column block skip node
* accordingly.
*/
static void
UpdateBlockSkipNodeMinMax(ColumnBlockSkipNode *blockSkipNode, Datum columnValue,
bool columnTypeByValue, int columnTypeLength,
Oid columnCollation, FmgrInfo *comparisonFunction)
{
bool hasMinMax = blockSkipNode->hasMinMax;
Datum previousMinimum = blockSkipNode->minimumValue;
Datum previousMaximum = blockSkipNode->maximumValue;
Datum currentMinimum = 0;
Datum currentMaximum = 0;
/* if type doesn't have a comparison function, skip min/max values */
if (comparisonFunction == NULL)
{
return;
}
if (!hasMinMax)
{
currentMinimum = DatumCopy(columnValue, columnTypeByValue, columnTypeLength);
currentMaximum = DatumCopy(columnValue, columnTypeByValue, columnTypeLength);
}
else
{
Datum minimumComparisonDatum = FunctionCall2Coll(comparisonFunction,
columnCollation, columnValue,
previousMinimum);
Datum maximumComparisonDatum = FunctionCall2Coll(comparisonFunction,
columnCollation, columnValue,
previousMaximum);
int minimumComparison = DatumGetInt32(minimumComparisonDatum);
int maximumComparison = DatumGetInt32(maximumComparisonDatum);
if (minimumComparison < 0)
{
currentMinimum = DatumCopy(columnValue, columnTypeByValue, columnTypeLength);
}
else
{
currentMinimum = previousMinimum;
}
if (maximumComparison > 0)
{
currentMaximum = DatumCopy(columnValue, columnTypeByValue, columnTypeLength);
}
else
{
currentMaximum = previousMaximum;
}
}
blockSkipNode->hasMinMax = true;
blockSkipNode->minimumValue = currentMinimum;
blockSkipNode->maximumValue = currentMaximum;
}
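/*
 * Illustration (not part of this diff): how a reader can use the min/max
 * statistics maintained above. A block whose [minimum, maximum] range
 * cannot contain the constant of an equality qual can be skipped without
 * being read; the real block-skipping logic lives in the reader.
 */
static bool
ExampleBlockCanMatchEquality(ColumnBlockSkipNode *blockSkipNode, Datum constant,
Oid columnCollation, FmgrInfo *comparisonFunction)
{
if (!blockSkipNode->hasMinMax || comparisonFunction == NULL)
{
/* no statistics; we cannot rule the block out */
return true;
}
int compareToMinimum = DatumGetInt32(FunctionCall2Coll(comparisonFunction,
columnCollation, constant,
blockSkipNode->minimumValue));
int compareToMaximum = DatumGetInt32(FunctionCall2Coll(comparisonFunction,
columnCollation, constant,
blockSkipNode->maximumValue));
return compareToMinimum >= 0 && compareToMaximum <= 0;
}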
/* Creates a copy of the given datum. */
static Datum
DatumCopy(Datum datum, bool datumTypeByValue, int datumTypeLength)
{
Datum datumCopy = 0;
if (datumTypeByValue)
{
datumCopy = datum;
}
else
{
uint32 datumLength = att_addlength_datum(0, datumTypeLength, datum);
char *datumData = palloc0(datumLength);
memcpy_s(datumData, datumLength, DatumGetPointer(datum), datumLength);
datumCopy = PointerGetDatum(datumData);
}
return datumCopy;
}
/*
* CopyStringInfo creates a deep copy of the given source string, allocating
* only the needed amount of memory.
*/
static StringInfo
CopyStringInfo(StringInfo sourceString)
{
StringInfo targetString = palloc0(sizeof(StringInfoData));
if (sourceString->len > 0)
{
targetString->data = palloc0(sourceString->len);
targetString->len = sourceString->len;
targetString->maxlen = sourceString->len;
memcpy_s(targetString->data, sourceString->len,
sourceString->data, sourceString->len);
}
return targetString;
}
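/*
 * ContainsPendingWrites returns true if the write state has an unflushed
 * stripe with at least one row in it.
 */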
bool
ContainsPendingWrites(TableWriteState *state)
{
return state->stripeBuffers != NULL && state->stripeBuffers->rowCount != 0;
}

@@ -0,0 +1,49 @@
/*-------------------------------------------------------------------------
*
* mod.c
*
* This file contains module-level definitions.
*
* Copyright (c) 2016, Citus Data, Inc.
*
* $Id$
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "fmgr.h"
#include "citus_version.h"
#include "columnar/cstore.h"
#include "columnar/cstore_fdw.h"
#include "columnar/mod.h"
#ifdef HAS_TABLEAM
#include "columnar/cstore_tableam.h"
#endif
void
columnar_init(void)
{
cstore_init();
cstore_fdw_init();
#ifdef HAS_TABLEAM
cstore_tableam_init();
#endif
}
void
columnar_fini(void)
{
cstore_fdw_finish();
#ifdef HAS_TABLEAM
cstore_tableam_finish();
#endif
}

@@ -0,0 +1,107 @@
/* columnar--9.5-1--10.0-1.sql */
CREATE SCHEMA cstore;
SET search_path TO cstore;
CREATE FUNCTION cstore_fdw_handler()
RETURNS fdw_handler
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;
CREATE FUNCTION cstore_fdw_validator(text[], oid)
RETURNS void
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;
CREATE FOREIGN DATA WRAPPER cstore_fdw
HANDLER cstore_fdw_handler
VALIDATOR cstore_fdw_validator;
CREATE FUNCTION cstore_ddl_event_end_trigger()
RETURNS event_trigger
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;
CREATE EVENT TRIGGER cstore_ddl_event_end
ON ddl_command_end
EXECUTE PROCEDURE cstore_ddl_event_end_trigger();
CREATE FUNCTION pg_catalog.cstore_table_size(relation regclass)
RETURNS bigint
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;
CREATE TABLE cstore_data_files (
relfilenode oid NOT NULL,
block_row_count int NOT NULL,
stripe_row_count int NOT NULL,
compression name NOT NULL,
version_major bigint NOT NULL,
version_minor bigint NOT NULL,
PRIMARY KEY (relfilenode)
) WITH (user_catalog_table = true);
COMMENT ON TABLE cstore_data_files IS 'CStore data file wide metadata';
CREATE TABLE cstore_stripes (
relfilenode oid NOT NULL,
stripe bigint NOT NULL,
file_offset bigint NOT NULL,
data_length bigint NOT NULL,
column_count int NOT NULL,
block_count int NOT NULL,
block_row_count int NOT NULL,
row_count bigint NOT NULL,
PRIMARY KEY (relfilenode, stripe),
FOREIGN KEY (relfilenode) REFERENCES cstore_data_files(relfilenode) ON DELETE CASCADE INITIALLY DEFERRED
) WITH (user_catalog_table = true);
COMMENT ON TABLE cstore_stripes IS 'CStore per stripe metadata';
CREATE TABLE cstore_skipnodes (
relfilenode oid NOT NULL,
stripe bigint NOT NULL,
attr int NOT NULL,
block int NOT NULL,
row_count bigint NOT NULL,
minimum_value bytea,
maximum_value bytea,
value_stream_offset bigint NOT NULL,
value_stream_length bigint NOT NULL,
exists_stream_offset bigint NOT NULL,
exists_stream_length bigint NOT NULL,
value_compression_type int NOT NULL,
PRIMARY KEY (relfilenode, stripe, attr, block),
FOREIGN KEY (relfilenode, stripe) REFERENCES cstore_stripes(relfilenode, stripe) ON DELETE CASCADE INITIALLY DEFERRED
) WITH (user_catalog_table = true);
COMMENT ON TABLE cstore_skipnodes IS 'CStore per block metadata';
CREATE VIEW cstore_options AS
SELECT c.oid::regclass regclass,
d.block_row_count,
d.stripe_row_count,
d.compression
FROM pg_class c
JOIN cstore.cstore_data_files d USING(relfilenode);
COMMENT ON VIEW cstore_options IS 'CStore per table settings';
DO $proc$
BEGIN
-- From version 12 and up we have support for table access methods. If installed on
-- PG11 we can't create the objects here; instead we rely on citus_finish_pg_upgrade
-- being called by the user to add the missing objects.
IF substring(current_setting('server_version'), '\d+')::int >= 12 THEN
EXECUTE $$
#include "udfs/cstore_tableam_handler/10.0-1.sql"
#include "udfs/alter_cstore_table_set/10.0-1.sql"
#include "udfs/alter_cstore_table_reset/10.0-1.sql"
$$;
END IF;
END$proc$;
#include "udfs/cstore_ensure_objects_exist/10.0-1.sql"
RESET search_path;

@@ -0,0 +1,47 @@
/* columnar--10.0-1--9.5-1.sql */
SET search_path TO cstore;
DO $proc$
BEGIN
IF substring(current_setting('server_version'), '\d+')::int >= 12 THEN
EXECUTE $$
DROP FUNCTION pg_catalog.alter_cstore_table_reset(
table_name regclass,
block_row_count bool,
stripe_row_count bool,
compression bool);
DROP FUNCTION pg_catalog.alter_cstore_table_set(
table_name regclass,
block_row_count int,
stripe_row_count int,
compression name);
DROP ACCESS METHOD cstore_tableam;
DROP FUNCTION cstore_tableam_handler(internal);
$$;
END IF;
END$proc$;
DROP VIEW cstore_options;
DROP TABLE cstore_skipnodes;
DROP TABLE cstore_stripes;
DROP TABLE cstore_data_files;
DROP FUNCTION pg_catalog.cstore_table_size(relation regclass);
DROP EVENT TRIGGER cstore_ddl_event_end;
DROP FUNCTION cstore_ddl_event_end_trigger();
DROP FOREIGN DATA WRAPPER cstore_fdw;
DROP FUNCTION cstore_fdw_validator(text[], oid);
DROP FUNCTION cstore_fdw_handler();
DROP FUNCTION citus_internal.cstore_ensure_objects_exist();
RESET search_path;
DROP SCHEMA cstore;

@@ -0,0 +1,15 @@
CREATE OR REPLACE FUNCTION pg_catalog.alter_cstore_table_reset(
table_name regclass,
block_row_count bool DEFAULT false,
stripe_row_count bool DEFAULT false,
compression bool DEFAULT false)
RETURNS void
LANGUAGE C
AS 'MODULE_PATHNAME', 'alter_cstore_table_reset';
COMMENT ON FUNCTION pg_catalog.alter_cstore_table_reset(
table_name regclass,
block_row_count bool,
stripe_row_count bool,
compression bool)
IS 'reset one or more options on a cstore table to the system defaults';

@@ -0,0 +1,15 @@
CREATE OR REPLACE FUNCTION pg_catalog.alter_cstore_table_reset(
table_name regclass,
block_row_count bool DEFAULT false,
stripe_row_count bool DEFAULT false,
compression bool DEFAULT false)
RETURNS void
LANGUAGE C
AS 'MODULE_PATHNAME', 'alter_cstore_table_reset';
COMMENT ON FUNCTION pg_catalog.alter_cstore_table_reset(
table_name regclass,
block_row_count bool,
stripe_row_count bool,
compression bool)
IS 'reset one or more options on a cstore table to the system defaults';

@@ -0,0 +1,15 @@
CREATE OR REPLACE FUNCTION pg_catalog.alter_cstore_table_set(
table_name regclass,
block_row_count int DEFAULT NULL,
stripe_row_count int DEFAULT NULL,
compression name DEFAULT NULL)
RETURNS void
LANGUAGE C
AS 'MODULE_PATHNAME', 'alter_cstore_table_set';
COMMENT ON FUNCTION pg_catalog.alter_cstore_table_set(
table_name regclass,
block_row_count int,
stripe_row_count int,
compression name)
IS 'set one or more options on a cstore table, when set to NULL no change is made';

@@ -0,0 +1,15 @@
CREATE OR REPLACE FUNCTION pg_catalog.alter_cstore_table_set(
table_name regclass,
block_row_count int DEFAULT NULL,
stripe_row_count int DEFAULT NULL,
compression name DEFAULT NULL)
RETURNS void
LANGUAGE C
AS 'MODULE_PATHNAME', 'alter_cstore_table_set';
COMMENT ON FUNCTION pg_catalog.alter_cstore_table_set(
table_name regclass,
block_row_count int,
stripe_row_count int,
compression name)
IS 'set one or more options on a cstore table, when set to NULL no change is made';

@@ -0,0 +1,47 @@
-- citus_internal.cstore_ensure_objects_exist is an internal helper function to create
-- missing objects; currently, anything related to the table access methods.
-- Since the API for table access methods is only available in PG12, we can't create
-- these objects when Citus is installed on PG11. Once the database is later upgraded
-- to PG12, the user needs the table access method objects that we couldn't create
-- before.
-- This internal function is called from `citus_finish_pg_upgrade` which the user is
-- required to call after a PG major upgrade.
CREATE OR REPLACE FUNCTION citus_internal.cstore_ensure_objects_exist()
RETURNS void
LANGUAGE plpgsql
SET search_path = pg_catalog
AS $ceoe$
BEGIN
-- When postgres is version 12 or above we need to create the tableam. If the tableam
-- exists, we assume all objects have been created.
IF substring(current_setting('server_version'), '\d+')::int >= 12 THEN
IF NOT EXISTS (SELECT 1 FROM pg_am WHERE amname = 'cstore_tableam') THEN
#include "../cstore_tableam_handler/10.0-1.sql"
#include "../alter_cstore_table_set/10.0-1.sql"
#include "../alter_cstore_table_reset/10.0-1.sql"
-- add the missing objects to the extension
ALTER EXTENSION citus ADD FUNCTION cstore.cstore_tableam_handler(internal);
ALTER EXTENSION citus ADD ACCESS METHOD cstore_tableam;
ALTER EXTENSION citus ADD FUNCTION pg_catalog.alter_cstore_table_set(
table_name regclass,
block_row_count int,
stripe_row_count int,
compression name);
ALTER EXTENSION citus ADD FUNCTION pg_catalog.alter_cstore_table_reset(
table_name regclass,
block_row_count bool,
stripe_row_count bool,
compression bool);
END IF;
END IF;
END;
$ceoe$;
COMMENT ON FUNCTION citus_internal.cstore_ensure_objects_exist()
IS 'internal function to be called by pg_catalog.citus_finish_pg_upgrade responsible for creating the columnar objects';

@@ -0,0 +1,47 @@
-- citus_internal.cstore_ensure_objects_exist is an internal helper function to create
-- missing objects; currently, anything related to the table access methods.
-- Since the API for table access methods is only available in PG12, we can't create
-- these objects when Citus is installed on PG11. Once the database is later upgraded
-- to PG12, the user needs the table access method objects that we couldn't create
-- before.
-- This internal function is called from `citus_finish_pg_upgrade` which the user is
-- required to call after a PG major upgrade.
CREATE OR REPLACE FUNCTION citus_internal.cstore_ensure_objects_exist()
RETURNS void
LANGUAGE plpgsql
SET search_path = pg_catalog
AS $ceoe$
BEGIN
-- When postgres is version 12 or above we need to create the tableam. If the tableam
-- exists, we assume all objects have been created.
IF substring(current_setting('server_version'), '\d+')::int >= 12 THEN
IF NOT EXISTS (SELECT 1 FROM pg_am WHERE amname = 'cstore_tableam') THEN
#include "../cstore_tableam_handler/10.0-1.sql"
#include "../alter_cstore_table_set/10.0-1.sql"
#include "../alter_cstore_table_reset/10.0-1.sql"
-- add the missing objects to the extension
ALTER EXTENSION citus ADD FUNCTION cstore.cstore_tableam_handler(internal);
ALTER EXTENSION citus ADD ACCESS METHOD cstore_tableam;
ALTER EXTENSION citus ADD FUNCTION pg_catalog.alter_cstore_table_set(
table_name regclass,
block_row_count int,
stripe_row_count int,
compression name);
ALTER EXTENSION citus ADD FUNCTION pg_catalog.alter_cstore_table_reset(
table_name regclass,
block_row_count bool,
stripe_row_count bool,
compression bool);
END IF;
END IF;
END;
$ceoe$;
COMMENT ON FUNCTION citus_internal.cstore_ensure_objects_exist()
IS 'internal function to be called by pg_catalog.citus_finish_pg_upgrade responsible for creating the columnar objects';

@@ -0,0 +1,9 @@
CREATE OR REPLACE FUNCTION cstore.cstore_tableam_handler(internal)
RETURNS table_am_handler
LANGUAGE C
AS 'MODULE_PATHNAME', 'cstore_tableam_handler';
COMMENT ON FUNCTION cstore.cstore_tableam_handler(internal)
IS 'internal function returning the handler for cstore tables';
CREATE ACCESS METHOD cstore_tableam TYPE TABLE HANDLER cstore.cstore_tableam_handler;

@@ -0,0 +1,9 @@
CREATE OR REPLACE FUNCTION cstore.cstore_tableam_handler(internal)
RETURNS table_am_handler
LANGUAGE C
AS 'MODULE_PATHNAME', 'cstore_tableam_handler';
COMMENT ON FUNCTION cstore.cstore_tableam_handler(internal)
IS 'internal function returning the handler for cstore tables';
CREATE ACCESS METHOD cstore_tableam TYPE TABLE HANDLER cstore.cstore_tableam_handler;

@@ -0,0 +1,384 @@
#include "citus_version.h"
#if HAS_TABLEAM
#include "postgres.h"
#include <math.h>
#include "miscadmin.h"
#include "access/genam.h"
#include "access/heapam.h"
#include "access/multixact.h"
#include "access/rewriteheap.h"
#include "access/tsmapi.h"
#if PG_VERSION_NUM >= 130000
#include "access/heaptoast.h"
#else
#include "access/tuptoaster.h"
#endif
#include "access/xact.h"
#include "catalog/catalog.h"
#include "catalog/index.h"
#include "catalog/objectaccess.h"
#include "catalog/pg_am.h"
#include "catalog/pg_trigger.h"
#include "catalog/storage.h"
#include "catalog/storage_xlog.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "executor/executor.h"
#include "nodes/makefuncs.h"
#include "optimizer/plancat.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "tcop/utility.h"
#include "utils/builtins.h"
#include "utils/pg_rusage.h"
#include "utils/rel.h"
#include "utils/syscache.h"
#include "columnar/cstore.h"
#include "columnar/cstore_customscan.h"
#include "columnar/cstore_tableam.h"
#include "columnar/cstore_version_compat.h"
/*
* Mapping from relfilenode to WriteStateMapEntry. This keeps write state for
* each relation.
*/
static HTAB *WriteStateMap = NULL;
/* memory context for allocating WriteStateMap & all write states */
static MemoryContext WriteStateContext = NULL;
/*
* Each member of the writeStateStack in a WriteStateMapEntry. An entry means
* that we did some inserts in the subtransaction subXid, and the state of those
* inserts is stored at writeState. Those writes can be flushed or unflushed.
*/
typedef struct SubXidWriteState
{
SubTransactionId subXid;
TableWriteState *writeState;
struct SubXidWriteState *next;
} SubXidWriteState;
/*
* An entry in WriteStateMap.
*/
typedef struct WriteStateMapEntry
{
/* key of the entry */
Oid relfilenode;
/*
* If a table is dropped, we set dropped to true and set dropSubXid to the
* id of the subtransaction in which the drop happened.
*/
bool dropped;
SubTransactionId dropSubXid;
/*
* Stack of SubXidWriteState where first element is top of the stack. When
* inserts happen, we look at top of the stack. If top of stack belongs to
* current subtransaction, we forward writes to its writeState. Otherwise,
* we create a new stack entry for current subtransaction and push it to
* the stack, and forward writes to that.
*/
SubXidWriteState *writeStateStack;
} WriteStateMapEntry;
/*
* Memory context reset callback so we reset WriteStateMap to NULL at the end
* of the transaction. WriteStateMap is allocated in WriteStateContext, so a
* leaked reference to it could cause memory issues.
*/
static MemoryContextCallback cleanupCallback;
static void
CleanupWriteStateMap(void *arg)
{
WriteStateMap = NULL;
WriteStateContext = NULL;
}
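/*
 * cstore_init_write_state returns the write state for the given relfilenode,
 * creating a new one for the current subtransaction if none exists yet.
 */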
TableWriteState *
cstore_init_write_state(RelFileNode relfilenode, TupleDesc tupdesc,
SubTransactionId currentSubXid)
{
bool found;
/*
* If this is the first call in current transaction, allocate the hash
* table.
*/
if (WriteStateMap == NULL)
{
WriteStateContext =
AllocSetContextCreate(
TopTransactionContext,
"Column Store Write State Management Context",
ALLOCSET_DEFAULT_SIZES);
HASHCTL info;
uint32 hashFlags = (HASH_ELEM | HASH_CONTEXT);
memset(&info, 0, sizeof(info));
info.keysize = sizeof(Oid);
info.entrysize = sizeof(WriteStateMapEntry);
info.hcxt = WriteStateContext;
WriteStateMap = hash_create("column store write state map",
64, &info, hashFlags);
cleanupCallback.arg = NULL;
cleanupCallback.func = &CleanupWriteStateMap;
cleanupCallback.next = NULL;
MemoryContextRegisterResetCallback(WriteStateContext, &cleanupCallback);
}
WriteStateMapEntry *hashEntry = hash_search(WriteStateMap, &relfilenode.relNode,
HASH_ENTER, &found);
if (!found)
{
hashEntry->writeStateStack = NULL;
hashEntry->dropped = false;
}
Assert(!hashEntry->dropped);
/*
* If top of stack belongs to the current subtransaction, return its
* writeState, ...
*/
if (hashEntry->writeStateStack != NULL)
{
SubXidWriteState *stackHead = hashEntry->writeStateStack;
if (stackHead->subXid == currentSubXid)
{
return stackHead->writeState;
}
}
/*
* ... otherwise we need to create a new stack entry for the current
* subtransaction.
*/
MemoryContext oldContext = MemoryContextSwitchTo(WriteStateContext);
CStoreOptions *cstoreOptions = CStoreTableAMGetOptions(relfilenode.relNode);
SubXidWriteState *stackEntry = palloc0(sizeof(SubXidWriteState));
stackEntry->writeState = CStoreBeginWrite(relfilenode,
cstoreOptions->compressionType,
cstoreOptions->stripeRowCount,
cstoreOptions->blockRowCount,
tupdesc);
stackEntry->subXid = currentSubXid;
stackEntry->next = hashEntry->writeStateStack;
hashEntry->writeStateStack = stackEntry;
MemoryContextSwitchTo(oldContext);
return stackEntry->writeState;
}
/*
* Flushes pending writes for the given relfilenode in the given subtransaction.
*/
void
FlushWriteStateForRelfilenode(Oid relfilenode, SubTransactionId currentSubXid)
{
WriteStateMapEntry *entry;
bool found = false;
if (WriteStateMap)
{
entry = hash_search(WriteStateMap, &relfilenode, HASH_FIND, &found);
}
Assert(!found || !entry->dropped);
if (found && entry->writeStateStack != NULL)
{
SubXidWriteState *stackEntry = entry->writeStateStack;
if (stackEntry->subXid == currentSubXid)
{
CStoreFlushPendingWrites(stackEntry->writeState);
}
}
}
/*
* Helper function for FlushWriteStateForAllRels and DiscardWriteStateForAllRels.
* Pops all write states for the current subtransaction and, depending on "commit",
* either flushes or discards them. This also takes dropped tables into account,
* and either propagates the dropped flag to the parent subtransaction (on commit)
* or rolls the drop back (on abort).
*/
static void
PopWriteStateForAllRels(SubTransactionId currentSubXid, SubTransactionId parentSubXid,
bool commit)
{
HASH_SEQ_STATUS status;
WriteStateMapEntry *entry;
if (WriteStateMap == NULL)
{
return;
}
hash_seq_init(&status, WriteStateMap);
while ((entry = hash_seq_search(&status)) != 0)
{
if (entry->writeStateStack == NULL)
{
continue;
}
/*
* If the table has been dropped in current subtransaction, either
* commit the drop or roll it back.
*/
if (entry->dropped)
{
if (entry->dropSubXid == currentSubXid)
{
if (commit)
{
/* elevate drop to the upper subtransaction */
entry->dropSubXid = parentSubXid;
}
else
{
/* abort the drop */
entry->dropped = false;
}
}
}
/*
* Otherwise, commit or discard pending writes.
*/
else
{
SubXidWriteState *stackHead = entry->writeStateStack;
if (stackHead->subXid == currentSubXid)
{
if (commit)
{
CStoreEndWrite(stackHead->writeState);
}
entry->writeStateStack = stackHead->next;
}
}
}
}
/*
* Called when the current subtransaction is committed.
*/
void
FlushWriteStateForAllRels(SubTransactionId currentSubXid, SubTransactionId parentSubXid)
{
PopWriteStateForAllRels(currentSubXid, parentSubXid, true);
}
/*
* Called when the current subtransaction is aborted.
*/
void
DiscardWriteStateForAllRels(SubTransactionId currentSubXid, SubTransactionId parentSubXid)
{
PopWriteStateForAllRels(currentSubXid, parentSubXid, false);
}
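/*
 * Illustration (not part of this diff): the two entry points above are meant
 * to be driven from subtransaction callbacks. A wiring sketch, assuming a
 * callback registered with RegisterSubXactCallback(); the actual registration
 * happens elsewhere in Citus.
 */
static void
ExampleColumnarSubXactCallback(SubXactEvent event, SubTransactionId mySubid,
SubTransactionId parentSubid, void *arg)
{
switch (event)
{
case SUBXACT_EVENT_COMMIT_SUB:
{
FlushWriteStateForAllRels(mySubid, parentSubid);
break;
}
case SUBXACT_EVENT_ABORT_SUB:
{
DiscardWriteStateForAllRels(mySubid, parentSubid);
break;
}
default:
{
break;
}
}
}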
/*
* Called when the given relfilenode is dropped.
*/
void
MarkRelfilenodeDropped(Oid relfilenode, SubTransactionId currentSubXid)
{
bool found = false;
if (WriteStateMap == NULL)
{
return;
}
WriteStateMapEntry *entry = hash_search(WriteStateMap, &relfilenode, HASH_FIND,
&found);
if (!found || entry->dropped)
{
return;
}
entry->dropped = true;
entry->dropSubXid = currentSubXid;
}
/*
* Called when the given relfilenode is dropped in non-transactional TRUNCATE.
*/
void
NonTransactionDropWriteState(Oid relfilenode)
{
if (WriteStateMap)
{
hash_search(WriteStateMap, &relfilenode, HASH_REMOVE, false);
}
}
/*
* Returns true if there are any pending writes in upper transactions.
*/
bool
PendingWritesInUpperTransactions(Oid relfilenode, SubTransactionId currentSubXid)
{
WriteStateMapEntry *entry;
bool found = false;
if (WriteStateMap)
{
entry = hash_search(WriteStateMap, &relfilenode, HASH_FIND, &found);
}
if (found && entry->writeStateStack != NULL)
{
SubXidWriteState *stackEntry = entry->writeStateStack;
while (stackEntry != NULL)
{
if (stackEntry->subXid != currentSubXid &&
ContainsPendingWrites(stackEntry->writeState))
{
return true;
}
stackEntry = stackEntry->next;
}
}
return false;
}
#endif

@@ -19,6 +19,10 @@ DATA_built = $(generated_sql_files)
 # directories with source files
 SUBDIRS = . commands connection ddl deparser executor metadata operations planner progress relay safeclib test transaction utils worker
+# columnar modules
+SUBDIRS += ../columnar
+# enterprise modules
+SUBDIRS +=
 # Symlinks are not copied over to the build directory if a separate build
 # directory is used during configure (such as on CI)

@@ -1,6 +1,6 @@
 # Citus extension
 comment = 'Citus distributed database'
-default_version = '9.5-1'
+default_version = '10.0-1'
 module_pathname = '$libdir/citus'
 relocatable = false
 schema = pg_catalog

@@ -1560,31 +1560,46 @@ UndistributeTable(Oid relationId)
 Relation relation = try_relation_open(relationId, ExclusiveLock);
 if (relation == NULL)
 {
-ereport(ERROR, (errmsg("Cannot undistribute table"),
-errdetail("No such distributed table exists. "
-"Might have already been undistributed.")));
+ereport(ERROR, (errmsg("cannot undistribute table"),
+errdetail("because no such distributed table exists")));
 }
 relation_close(relation, NoLock);
 if (!IsCitusTable(relationId))
 {
-ereport(ERROR, (errmsg("Cannot undistribute table."),
-errdetail("The table is not distributed.")));
+ereport(ERROR, (errmsg("cannot undistribute table "),
+errdetail("because the table is not distributed")));
 }
 if (TableReferencing(relationId))
 {
-ereport(ERROR, (errmsg("Cannot undistribute table "
-"because it has a foreign key.")));
+ereport(ERROR, (errmsg("cannot undistribute table "
+"because it has a foreign key")));
 }
 if (TableReferenced(relationId))
 {
-ereport(ERROR, (errmsg("Cannot undistribute table "
-"because a foreign key references to it.")));
+ereport(ERROR, (errmsg("cannot undistribute table "
+"because a foreign key references to it")));
 }
+char relationKind = get_rel_relkind(relationId);
+if (relationKind == RELKIND_FOREIGN_TABLE)
+{
+ereport(ERROR, (errmsg("cannot undistribute table "
+"because it is a foreign table")));
+}
+if (PartitionTable(relationId))
+{
+Oid parentRelationId = PartitionParentOid(relationId);
+char *parentRelationName = get_rel_name(parentRelationId);
+ereport(ERROR, (errmsg("cannot undistribute table "
+"because it is a partition"),
+errhint("undistribute the partitioned table \"%s\" instead",
+parentRelationName)));
+}
 List *preLoadCommands = GetPreLoadTableCreationCommands(relationId, true);
 List *postLoadCommands = GetPostLoadTableCreationCommands(relationId);
@@ -1604,7 +1619,7 @@ UndistributeTable(Oid relationId)
 if (PartitionedTable(relationId))
 {
-ereport(NOTICE, (errmsg("Undistributing the partitions of %s",
+ereport(NOTICE, (errmsg("undistributing the partitions of %s",
 quote_qualified_identifier(schemaName, relationName))));
 List *partitionList = PartitionList(relationId);
 Oid partitionRelationId = InvalidOid;
@@ -1635,7 +1650,7 @@ UndistributeTable(Oid relationId)
 char *tableCreationCommand = NULL;
-ereport(NOTICE, (errmsg("Creating a new local table for %s",
+ereport(NOTICE, (errmsg("creating a new local table for %s",
 quote_qualified_identifier(schemaName, relationName))));
 foreach_ptr(tableCreationCommand, preLoadCommands)

@@ -7763,6 +7763,7 @@ pg_get_triggerdef_worker(Oid trigid, bool pretty)
 context.wrapColumn = WRAP_COLUMN_DEFAULT;
 context.indentLevel = PRETTYINDENT_STD;
 context.special_exprkind = EXPR_KIND_NONE;
+context.appendparents = NULL;
 get_rule_expr(qual, &context, false);

@@ -1097,7 +1097,7 @@ GetDependingViews(Oid relationId)
 List *dependingViews = NIL;
 List *nodeQueue = list_make1(tableNode);
 ViewDependencyNode *node = NULL;
-foreach_ptr(node, nodeQueue)
+foreach_ptr_append(node, nodeQueue)
 {
 ViewDependencyNode *dependingNode = NULL;
 foreach_ptr(dependingNode, node->dependingNodes)

@@ -1804,7 +1804,6 @@ multi_relation_restriction_hook(PlannerInfo *root, RelOptInfo *relOptInfo,
 MemoryContext oldMemoryContext = MemoryContextSwitchTo(restrictionsMemoryContext);
 bool distributedTable = IsCitusTable(rte->relid);
-bool localTable = !distributedTable;
 RelationRestriction *relationRestriction = palloc0(sizeof(RelationRestriction));
 relationRestriction->index = restrictionIndex;
@@ -1813,15 +1812,12 @@ multi_relation_restriction_hook(PlannerInfo *root, RelOptInfo *relOptInfo,
 relationRestriction->relOptInfo = relOptInfo;
 relationRestriction->distributedRelation = distributedTable;
 relationRestriction->plannerInfo = root;
-relationRestriction->prunedShardIntervalList = NIL;
 /* see comments on GetVarFromAssignedParam() */
 relationRestriction->outerPlanParamsList = OuterPlanParamsList(root);
 RelationRestrictionContext *relationRestrictionContext =
 plannerRestrictionContext->relationRestrictionContext;
-relationRestrictionContext->hasDistributedRelation |= distributedTable;
-relationRestrictionContext->hasLocalRelation |= localTable;
 /*
 * We're also keeping track of whether all participant
@@ -2296,11 +2292,40 @@ GetRTEListProperties(List *rangeTableList)
 RangeTblEntry *rangeTableEntry = NULL;
 foreach_ptr(rangeTableEntry, rangeTableList)
 {
-if (!(rangeTableEntry->rtekind == RTE_RELATION &&
-rangeTableEntry->relkind == RELKIND_RELATION))
+if (rangeTableEntry->rtekind != RTE_RELATION)
 {
 continue;
 }
+else if (rangeTableEntry->relkind == RELKIND_VIEW)
+{
+/*
+ * Skip over views; distributed tables within (regular) views are
+ * already in rangeTableList.
+ */
+continue;
+}
+if (rangeTableEntry->relkind == RELKIND_MATVIEW)
+{
+/*
+ * Record materialized views as they are similar to postgres local tables,
+ * but it is nice to record them separately.
+ *
+ * Regular tables, partitioned tables or foreign tables can be local or
+ * distributed tables and we can qualify them accurately.
+ *
+ * For regular views, we don't care because their definitions are already
+ * in the same query tree and we can detect what is inside the view definition.
+ *
+ * Materialized views are just local tables in the queries. But, when
+ * REFRESH MATERIALIZED VIEW is used, they behave similarly to regular views
+ * and add the view definition to the query. Hence, it is useful to record
+ * them separately and let the callers decide on what to do.
+ */
+rteListProperties->hasMaterializedView = true;
+continue;
+}
 Oid relationId = rangeTableEntry->relid;
 CitusTableCacheEntry *cacheEntry = LookupCitusTableCacheEntry(relationId);

View File

@ -312,8 +312,7 @@ NodeTryGetRteRelid(Node *node)
 	RangeTblEntry *rangeTableEntry = (RangeTblEntry *) node;
 
-	if (!(rangeTableEntry->rtekind == RTE_RELATION &&
-		  rangeTableEntry->relkind == RELKIND_RELATION))
+	if (rangeTableEntry->rtekind != RTE_RELATION)
 	{
 		return InvalidOid;
 	}

View File

@ -518,14 +518,13 @@ static DeferredErrorMessage *
 ModifyPartialQuerySupported(Query *queryTree, bool multiShardQuery,
 							Oid *distributedTableIdOutput)
 {
-	DeferredErrorMessage *deferredError =
-		DeferErrorIfUnsupportedModifyQueryWithLocalTable(queryTree);
+	DeferredErrorMessage *deferredError = DeferErrorIfModifyView(queryTree);
 	if (deferredError != NULL)
 	{
 		return deferredError;
 	}
 
-	deferredError = DeferErrorIfModifyView(queryTree);
+	deferredError = DeferErrorIfUnsupportedModifyQueryWithLocalTable(queryTree);
 	if (deferredError != NULL)
 	{
 		return deferredError;
@ -2154,8 +2153,6 @@ PlanRouterQuery(Query *originalQuery,
 				bool replacePrunedQueryWithDummy, bool *multiShardModifyQuery,
 				Const **partitionValueConst)
 {
-	RelationRestrictionContext *relationRestrictionContext =
-		plannerRestrictionContext->relationRestrictionContext;
 	bool isMultiShardQuery = false;
 	DeferredErrorMessage *planningError = NULL;
 	bool shardsPresent = false;
@ -2268,13 +2265,15 @@ PlanRouterQuery(Query *originalQuery,
 	/* we need anchor shard id for select queries with router planner */
 	uint64 shardId = GetAnchorShardId(*prunedShardIntervalListList);
 
-	bool hasLocalRelation = relationRestrictionContext->hasLocalRelation;
+	/* both Postgres tables and materialized views are locally available */
+	RTEListProperties *rteProperties = GetRTEListPropertiesForQuery(originalQuery);
+	bool hasPostgresLocalRelation =
+		rteProperties->hasPostgresLocalTable || rteProperties->hasMaterializedView;
 	List *taskPlacementList =
 		CreateTaskPlacementListForShardIntervals(*prunedShardIntervalListList,
 												 shardsPresent,
 												 replacePrunedQueryWithDummy,
-												 hasLocalRelation);
+												 hasPostgresLocalRelation);
 	if (taskPlacementList == NIL)
 	{
 		planningError = DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
@ -2652,8 +2651,6 @@ TargetShardIntervalsForRestrictInfo(RelationRestrictionContext *restrictionConte
 		List *joinInfoList = relationRestriction->relOptInfo->joininfo;
 		List *pseudoRestrictionList = extract_actual_clauses(joinInfoList, true);
 
-		relationRestriction->prunedShardIntervalList = NIL;
-
 		/*
 		 * Queries may have contradiction clauses like 'false', or '1=0' in
 		 * their filters. Such queries would have pseudo constant 'false'
@ -2683,7 +2680,6 @@ TargetShardIntervalsForRestrictInfo(RelationRestrictionContext *restrictionConte
 			}
 		}
 
-		relationRestriction->prunedShardIntervalList = prunedShardIntervalList;
 		prunedShardIntervalListList = lappend(prunedShardIntervalListList,
 											  prunedShardIntervalList);
 	}
@ -3555,8 +3551,6 @@ CopyRelationRestrictionContext(RelationRestrictionContext *oldContext)
 		(RelationRestrictionContext *) palloc(sizeof(RelationRestrictionContext));
 	ListCell *relationRestrictionCell = NULL;
 
-	newContext->hasDistributedRelation = oldContext->hasDistributedRelation;
-	newContext->hasLocalRelation = oldContext->hasLocalRelation;
 	newContext->allReferenceTables = oldContext->allReferenceTables;
 	newContext->relationRestrictionList = NIL;
@ -3584,7 +3578,6 @@ CopyRelationRestrictionContext(RelationRestrictionContext *oldContext)
 		/* not copyable, but readonly */
 		newRestriction->plannerInfo = oldRestriction->plannerInfo;
-		newRestriction->prunedShardIntervalList = oldRestriction->prunedShardIntervalList;
 
 		newContext->relationRestrictionList =
 			lappend(newContext->relationRestrictionList, newRestriction);

View File

@ -1820,10 +1820,6 @@ FilterPlannerRestrictionForQuery(PlannerRestrictionContext *plannerRestrictionCo
 	filteredRelationRestrictionContext->allReferenceTables =
 		(totalRelationCount == referenceRelationCount);
 
-	/* we currently don't support local relations and we cannot come up to this point */
-	filteredRelationRestrictionContext->hasLocalRelation = false;
-	filteredRelationRestrictionContext->hasDistributedRelation = true;
-
 	/* finally set the relation and join restriction contexts */
 	filteredPlannerRestrictionContext->relationRestrictionContext =
 		filteredRelationRestrictionContext;

View File

@ -486,6 +486,7 @@ PruneShards(Oid relationId, Index rangeTableId, List *whereClauseList,
 	if (IsLoggableLevel(DEBUG3))
 	{
+		char *relationName = get_rel_name(relationId);
 		if (foundRestriction && debugLoggedPruningInstances != NIL)
 		{
 			List *deparseCtx = deparse_context_for("unknown", relationId);
@ -497,10 +498,12 @@ PruneShards(Oid relationId, Index rangeTableId, List *whereClauseList,
 		}
 		else
 		{
-			ereport(DEBUG3, (errmsg("no valid constraints found")));
+			ereport(DEBUG3, (errmsg("no sharding pruning constraints on %s found",
+									relationName)));
 		}
 
-		ereport(DEBUG3, (errmsg("shard count: %d", list_length(prunedList))));
+		ereport(DEBUG3, (errmsg("shard count after pruning for %s: %d", relationName,
+								list_length(prunedList))));
 	}
/* if requested, copy the partition value constant */ /* if requested, copy the partition value constant */

View File

@ -84,6 +84,8 @@
#include "utils/guc_tables.h" #include "utils/guc_tables.h"
#include "utils/varlena.h" #include "utils/varlena.h"
#include "columnar/mod.h"
/* marks shared object as one loadable by the postgres version compiled against */ /* marks shared object as one loadable by the postgres version compiled against */
PG_MODULE_MAGIC; PG_MODULE_MAGIC;
@ -92,6 +94,7 @@ static char *CitusVersion = CITUS_VERSION;
void _PG_init(void); void _PG_init(void);
void _PG_fini(void);
static void DoInitialCleanup(void); static void DoInitialCleanup(void);
static void ResizeStackToMaximumDepth(void); static void ResizeStackToMaximumDepth(void);
@ -311,6 +314,15 @@ _PG_init(void)
{ {
DoInitialCleanup(); DoInitialCleanup();
} }
columnar_init();
}
/* shared library deconstruction function */
void
_PG_fini(void)
{
columnar_fini();
} }

View File

@ -0,0 +1,7 @@
-- citus--9.5-1--10.0-1
-- bump version to 10.0-1
#include "udfs/citus_finish_pg_upgrade/10.0-1.sql"
#include "../../columnar/sql/columnar--9.5-1--10.0-1.sql"

View File

@ -0,0 +1,6 @@
-- citus--10.0-1--9.5-1
-- this is an empty downgrade path since citus--9.5-1--10.0-1.sql is empty for now
#include "../udfs/citus_finish_pg_upgrade/9.5-1.sql"
#include "../../../columnar/sql/downgrades/columnar--10.0-1--9.5-1.sql"

View File

@ -0,0 +1,116 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_finish_pg_upgrade()
RETURNS void
LANGUAGE plpgsql
SET search_path = pg_catalog
AS $cppu$
DECLARE
table_name regclass;
command text;
trigger_name text;
BEGIN
--
-- restore citus catalog tables
--
INSERT INTO pg_catalog.pg_dist_partition SELECT * FROM public.pg_dist_partition;
INSERT INTO pg_catalog.pg_dist_shard SELECT * FROM public.pg_dist_shard;
INSERT INTO pg_catalog.pg_dist_placement SELECT * FROM public.pg_dist_placement;
INSERT INTO pg_catalog.pg_dist_node_metadata SELECT * FROM public.pg_dist_node_metadata;
INSERT INTO pg_catalog.pg_dist_node SELECT * FROM public.pg_dist_node;
INSERT INTO pg_catalog.pg_dist_local_group SELECT * FROM public.pg_dist_local_group;
INSERT INTO pg_catalog.pg_dist_transaction SELECT * FROM public.pg_dist_transaction;
INSERT INTO pg_catalog.pg_dist_colocation SELECT * FROM public.pg_dist_colocation;
-- enterprise catalog tables
INSERT INTO pg_catalog.pg_dist_authinfo SELECT * FROM public.pg_dist_authinfo;
INSERT INTO pg_catalog.pg_dist_poolinfo SELECT * FROM public.pg_dist_poolinfo;
ALTER TABLE pg_catalog.pg_dist_rebalance_strategy DISABLE TRIGGER pg_dist_rebalance_strategy_enterprise_check_trigger;
INSERT INTO pg_catalog.pg_dist_rebalance_strategy SELECT
name,
default_strategy,
shard_cost_function::regprocedure::regproc,
node_capacity_function::regprocedure::regproc,
shard_allowed_on_node_function::regprocedure::regproc,
default_threshold,
minimum_threshold
FROM public.pg_dist_rebalance_strategy;
ALTER TABLE pg_catalog.pg_dist_rebalance_strategy ENABLE TRIGGER pg_dist_rebalance_strategy_enterprise_check_trigger;
--
-- drop backup tables
--
DROP TABLE public.pg_dist_authinfo;
DROP TABLE public.pg_dist_colocation;
DROP TABLE public.pg_dist_local_group;
DROP TABLE public.pg_dist_node;
DROP TABLE public.pg_dist_node_metadata;
DROP TABLE public.pg_dist_partition;
DROP TABLE public.pg_dist_placement;
DROP TABLE public.pg_dist_poolinfo;
DROP TABLE public.pg_dist_shard;
DROP TABLE public.pg_dist_transaction;
DROP TABLE public.pg_dist_rebalance_strategy;
--
-- reset sequences
--
PERFORM setval('pg_catalog.pg_dist_shardid_seq', (SELECT MAX(shardid)+1 AS max_shard_id FROM pg_dist_shard), false);
PERFORM setval('pg_catalog.pg_dist_placement_placementid_seq', (SELECT MAX(placementid)+1 AS max_placement_id FROM pg_dist_placement), false);
PERFORM setval('pg_catalog.pg_dist_groupid_seq', (SELECT MAX(groupid)+1 AS max_group_id FROM pg_dist_node), false);
PERFORM setval('pg_catalog.pg_dist_node_nodeid_seq', (SELECT MAX(nodeid)+1 AS max_node_id FROM pg_dist_node), false);
PERFORM setval('pg_catalog.pg_dist_colocationid_seq', (SELECT MAX(colocationid)+1 AS max_colocation_id FROM pg_dist_colocation), false);
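    -- (the third setval argument, false, marks the value as not yet returned,
    --  so the next nextval() call on each sequence yields exactly MAX(...)+1)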
--
-- register triggers
--
FOR table_name IN SELECT logicalrelid FROM pg_catalog.pg_dist_partition
LOOP
trigger_name := 'truncate_trigger_' || table_name::oid;
command := 'create trigger ' || trigger_name || ' after truncate on ' || table_name || ' execute procedure pg_catalog.citus_truncate_trigger()';
EXECUTE command;
command := 'update pg_trigger set tgisinternal = true where tgname = ' || quote_literal(trigger_name);
EXECUTE command;
END LOOP;
--
-- set dependencies
--
INSERT INTO pg_depend
SELECT
'pg_class'::regclass::oid as classid,
p.logicalrelid::regclass::oid as objid,
0 as objsubid,
'pg_extension'::regclass::oid as refclassid,
(select oid from pg_extension where extname = 'citus') as refobjid,
0 as refobjsubid ,
'n' as deptype
FROM pg_catalog.pg_dist_partition p;
-- restore pg_dist_object from the stable identifiers
-- DELETE/INSERT to avoid primary key violations
WITH old_records AS (
DELETE FROM
citus.pg_dist_object
RETURNING
type,
object_names,
object_args,
distribution_argument_index,
colocationid
)
INSERT INTO citus.pg_dist_object (classid, objid, objsubid, distribution_argument_index, colocationid)
SELECT
address.classid,
address.objid,
address.objsubid,
naming.distribution_argument_index,
naming.colocationid
FROM
old_records naming,
pg_get_object_address(naming.type, naming.object_names, naming.object_args) address;
PERFORM citus_internal.cstore_ensure_objects_exist();
END;
$cppu$;
COMMENT ON FUNCTION pg_catalog.citus_finish_pg_upgrade()
IS 'perform tasks to restore citus settings from a location that has been prepared before pg_upgrade';

View File

@ -107,6 +107,8 @@ BEGIN
 	FROM
 		old_records naming,
 		pg_get_object_address(naming.type, naming.object_names, naming.object_args) address;
 
+	PERFORM citus_internal.cstore_ensure_objects_exist();
+
 END;
 $cppu$;

View File

@ -31,6 +31,9 @@
 /* A string containing the version number, platform, and C compiler */
 #undef CITUS_VERSION_STR
 
+/* Define to 1 to build with table access method support, pg12 and up */
+#undef HAS_TABLEAM
+
 /* Define to 1 if you have the <inttypes.h> header file. */
 #undef HAVE_INTTYPES_H

View File

@ -26,3 +26,6 @@
 /* Base URL for statistics collection and update checks */
 #undef REPORTS_BASE_URL
 
+/* columnar table access method capability */
+#undef HAS_TABLEAM

View File

@ -0,0 +1,372 @@
/*-------------------------------------------------------------------------
*
* cstore.h
*
* Type and function declarations for CStore
*
* Copyright (c) 2016, Citus Data, Inc.
*
* $Id$
*
*-------------------------------------------------------------------------
*/
#ifndef CSTORE_H
#define CSTORE_H
#include "postgres.h"
#include "fmgr.h"
#include "lib/stringinfo.h"
#include "nodes/parsenodes.h"
#include "storage/bufpage.h"
#include "storage/lockdefs.h"
#include "storage/relfilenode.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
/* Defines for valid option names */
#define OPTION_NAME_COMPRESSION_TYPE "compression"
#define OPTION_NAME_STRIPE_ROW_COUNT "stripe_row_count"
#define OPTION_NAME_BLOCK_ROW_COUNT "block_row_count"
/* Limits for option parameters */
#define STRIPE_ROW_COUNT_MINIMUM 1000
#define STRIPE_ROW_COUNT_MAXIMUM 10000000
#define BLOCK_ROW_COUNT_MINIMUM 1000
#define BLOCK_ROW_COUNT_MAXIMUM 100000
/* String representations of compression types */
#define COMPRESSION_STRING_NONE "none"
#define COMPRESSION_STRING_PG_LZ "pglz"
/* CStore file signature */
#define CSTORE_MAGIC_NUMBER "citus_cstore"
#define CSTORE_VERSION_MAJOR 1
#define CSTORE_VERSION_MINOR 7
/* miscellaneous defines */
#define CSTORE_FDW_NAME "cstore_fdw"
#define CSTORE_TUPLE_COST_MULTIPLIER 10
#define CSTORE_POSTSCRIPT_SIZE_LENGTH 1
#define CSTORE_POSTSCRIPT_SIZE_MAX 256
/* Enumeration for cstore file's compression method */
typedef enum
{
COMPRESSION_TYPE_INVALID = -1,
COMPRESSION_NONE = 0,
COMPRESSION_PG_LZ = 1,
COMPRESSION_COUNT
} CompressionType;
/*
 * CStoreOptions holds the option values to be used when reading or writing
 * a cstore file. To resolve these values, we first check the foreign table's
 * options, and if not present, we then fall back to the default values specified above.
*/
typedef struct CStoreOptions
{
CompressionType compressionType;
uint64 stripeRowCount;
uint32 blockRowCount;
} CStoreOptions;
/*
* StripeMetadata represents information about a stripe. This information is
* stored in the cstore file's footer.
*/
typedef struct StripeMetadata
{
uint64 fileOffset;
uint64 dataLength;
uint32 columnCount;
uint32 blockCount;
uint32 blockRowCount;
uint64 rowCount;
uint64 id;
} StripeMetadata;
/* DataFileMetadata represents the metadata of a cstore file. */
typedef struct DataFileMetadata
{
List *stripeMetadataList;
uint64 blockRowCount;
uint64 stripeRowCount;
CompressionType compression;
} DataFileMetadata;
/* ColumnBlockSkipNode contains statistics for a ColumnBlockData. */
typedef struct ColumnBlockSkipNode
{
/* statistics about values of a column block */
bool hasMinMax;
Datum minimumValue;
Datum maximumValue;
uint64 rowCount;
/*
* Offsets and sizes of value and exists streams in the column data.
* These enable us to skip reading suppressed row blocks, and start reading
* a block without reading previous blocks.
*/
uint64 valueBlockOffset;
uint64 valueLength;
uint64 existsBlockOffset;
uint64 existsLength;
CompressionType valueCompressionType;
} ColumnBlockSkipNode;
/*
* StripeSkipList can be used for skipping row blocks. It contains a column block
* skip node for each block of each column. blockSkipNodeArray[column][block]
* is the entry for the specified column block.
*/
typedef struct StripeSkipList
{
ColumnBlockSkipNode **blockSkipNodeArray;
uint32 columnCount;
uint32 blockCount;
} StripeSkipList;
/*
* BlockData represents a block of data for multiple columns. valueArray stores
* the values of data, and existsArray stores whether a value is present.
* valueBuffer is used to store (uncompressed) serialized values
 * referenced by Datums in valueArray. It is only used for by-reference Datums.
* There is a one-to-one correspondence between valueArray and existsArray.
*/
typedef struct BlockData
{
uint32 rowCount;
uint32 columnCount;
/*
* Following are indexed by [column][row]. If a column is not projected,
* then existsArray[column] and valueArray[column] are NULL.
*/
bool **existsArray;
Datum **valueArray;
/* valueBuffer keeps actual data for type-by-reference datums from valueArray. */
StringInfo *valueBufferArray;
} BlockData;
/*
* ColumnBlockBuffers represents a block of serialized data in a column.
* valueBuffer stores the serialized values of data, and existsBuffer stores
* serialized value of presence information. valueCompressionType contains
* compression type if valueBuffer is compressed. Finally rowCount has
* the number of rows in this block.
*/
typedef struct ColumnBlockBuffers
{
StringInfo existsBuffer;
StringInfo valueBuffer;
CompressionType valueCompressionType;
} ColumnBlockBuffers;
/*
* ColumnBuffers represents data buffers for a column in a row stripe. Each
* column is made of multiple column blocks.
*/
typedef struct ColumnBuffers
{
ColumnBlockBuffers **blockBuffersArray;
} ColumnBuffers;
/* StripeBuffers represents data for a row stripe in a cstore file. */
typedef struct StripeBuffers
{
uint32 columnCount;
uint32 rowCount;
ColumnBuffers **columnBuffersArray;
} StripeBuffers;
/* TableReadState represents state of a cstore file read operation. */
typedef struct TableReadState
{
DataFileMetadata *datafileMetadata;
StripeMetadata *currentStripeMetadata;
TupleDesc tupleDescriptor;
Relation relation;
/*
* List of Var pointers for columns in the query. We use this both for
* getting vector of projected columns, and also when we want to build
* base constraint to find selected row blocks.
*/
List *projectedColumnList;
List *whereClauseList;
MemoryContext stripeReadContext;
StripeBuffers *stripeBuffers;
uint32 readStripeCount;
uint64 stripeReadRowCount;
BlockData *blockData;
int32 deserializedBlockIndex;
} TableReadState;
/* TableWriteState represents state of a cstore file write operation. */
typedef struct TableWriteState
{
CompressionType compressionType;
TupleDesc tupleDescriptor;
FmgrInfo **comparisonFunctionArray;
RelFileNode relfilenode;
MemoryContext stripeWriteContext;
StripeBuffers *stripeBuffers;
StripeSkipList *stripeSkipList;
uint32 stripeMaxRowCount;
uint32 blockRowCount;
BlockData *blockData;
/*
* compressionBuffer buffer is used as temporary storage during
* data value compression operation. It is kept here to minimize
* memory allocations. It lives in stripeWriteContext and gets
* deallocated when memory context is reset.
*/
StringInfo compressionBuffer;
} TableWriteState;
extern int cstore_compression;
extern int cstore_stripe_row_count;
extern int cstore_block_row_count;
extern void cstore_init(void);
extern CompressionType ParseCompressionType(const char *compressionTypeString);
/* Function declarations for writing to a cstore file */
extern TableWriteState * CStoreBeginWrite(RelFileNode relfilenode,
CompressionType compressionType,
uint64 stripeMaxRowCount,
uint32 blockRowCount,
TupleDesc tupleDescriptor);
extern void CStoreWriteRow(TableWriteState *state, Datum *columnValues,
bool *columnNulls);
extern void CStoreFlushPendingWrites(TableWriteState *state);
extern void CStoreEndWrite(TableWriteState *state);
extern bool ContainsPendingWrites(TableWriteState *state);
/* Function declarations for reading from a cstore file */
extern TableReadState * CStoreBeginRead(Relation relation,
TupleDesc tupleDescriptor,
List *projectedColumnList, List *qualConditions);
extern bool CStoreReadFinished(TableReadState *state);
extern bool CStoreReadNextRow(TableReadState *state, Datum *columnValues,
bool *columnNulls);
extern void CStoreRescan(TableReadState *readState);
extern void CStoreEndRead(TableReadState *state);
/* Function declarations for common functions */
extern FmgrInfo * GetFunctionInfoOrNull(Oid typeId, Oid accessMethodId,
int16 procedureId);
extern BlockData * CreateEmptyBlockData(uint32 columnCount, bool *columnMask,
uint32 blockRowCount);
extern void FreeBlockData(BlockData *blockData);
extern uint64 CStoreTableRowCount(Relation relation);
extern bool CompressBuffer(StringInfo inputBuffer, StringInfo outputBuffer,
CompressionType compressionType);
extern StringInfo DecompressBuffer(StringInfo buffer, CompressionType compressionType);
extern char * CompressionTypeStr(CompressionType type);
extern CStoreOptions * CStoreTableAMGetOptions(Oid relfilenode);
/* cstore_metadata_tables.c */
extern void DeleteDataFileMetadataRowIfExists(Oid relfilenode);
extern void InitCStoreDataFileMetadata(Oid relfilenode, int blockRowCount, int
stripeRowCount, CompressionType compression);
extern void UpdateCStoreDataFileMetadata(Oid relfilenode, int blockRowCount, int
stripeRowCount, CompressionType compression);
extern DataFileMetadata * ReadDataFileMetadata(Oid relfilenode, bool missingOk);
extern uint64 GetHighestUsedAddress(Oid relfilenode);
extern StripeMetadata ReserveStripe(Relation rel, uint64 size,
uint64 rowCount, uint64 columnCount,
uint64 blockCount, uint64 blockRowCount);
extern void SaveStripeSkipList(Oid relfilenode, uint64 stripe,
StripeSkipList *stripeSkipList,
TupleDesc tupleDescriptor);
extern StripeSkipList * ReadStripeSkipList(Oid relfilenode, uint64 stripe,
TupleDesc tupleDescriptor,
uint32 blockCount);
/* write_state_management.c */
extern TableWriteState * cstore_init_write_state(RelFileNode relfilenode, TupleDesc
tupdesc,
SubTransactionId currentSubXid);
extern void FlushWriteStateForRelfilenode(Oid relfilenode, SubTransactionId
currentSubXid);
extern void FlushWriteStateForAllRels(SubTransactionId currentSubXid, SubTransactionId
parentSubXid);
extern void DiscardWriteStateForAllRels(SubTransactionId currentSubXid, SubTransactionId
parentSubXid);
extern void MarkRelfilenodeDropped(Oid relfilenode, SubTransactionId currentSubXid);
extern void NonTransactionDropWriteState(Oid relfilenode);
extern bool PendingWritesInUpperTransactions(Oid relfilenode,
SubTransactionId currentSubXid);
typedef struct SmgrAddr
{
BlockNumber blockno;
uint32 offset;
} SmgrAddr;
/*
* Map logical offsets (as tracked in the metadata) to a physical page and
* offset where the data is kept.
*/
static inline SmgrAddr
logical_to_smgr(uint64 logicalOffset)
{
uint64 bytes_per_page = BLCKSZ - SizeOfPageHeaderData;
SmgrAddr addr;
addr.blockno = logicalOffset / bytes_per_page;
addr.offset = SizeOfPageHeaderData + (logicalOffset % bytes_per_page);
return addr;
}
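/*
 * Worked example, assuming the common 8 kB BLCKSZ and a 24-byte page header:
 * bytes_per_page = 8192 - 24 = 8168, so logical offset 20000 maps to
 * blockno = 20000 / 8168 = 2 and offset = 24 + (20000 % 8168) = 3688.
 * smgr_to_logical() inverts this: 8168 * 2 + 3688 - 24 = 20000.
 */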
/*
 * Map a physical page and offset address to a logical address.
*/
static inline uint64
smgr_to_logical(SmgrAddr addr)
{
uint64 bytes_per_page = BLCKSZ - SizeOfPageHeaderData;
return bytes_per_page * addr.blockno + addr.offset - SizeOfPageHeaderData;
}
/*
* Get the first usable address of next block.
*/
static inline SmgrAddr
next_block_start(SmgrAddr addr)
{
SmgrAddr result = {
.blockno = addr.blockno + 1,
.offset = SizeOfPageHeaderData
};
return result;
}
#endif /* CSTORE_H */
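For orientation, a minimal write-path sketch built only from the declarations above; it is illustrative rather than the extension's actual call sequence, and it assumes the caller supplies an open Relation plus per-column values/nulls arrays:

static void
WriteSingleRow(Relation rel, Datum *columnValues, bool *columnNulls)
{
	TupleDesc tupleDescriptor = RelationGetDescr(rel);

	/* open a write stream on the relation's storage, compressing with pglz */
	TableWriteState *writeState =
		CStoreBeginWrite(rel->rd_node, COMPRESSION_PG_LZ,
						 STRIPE_ROW_COUNT_MINIMUM, BLOCK_ROW_COUNT_MINIMUM,
						 tupleDescriptor);

	CStoreWriteRow(writeState, columnValues, columnNulls);

	/* flushes the pending stripe and frees the write state */
	CStoreEndWrite(writeState);
}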

View File

@ -0,0 +1,19 @@
/*-------------------------------------------------------------------------
*
* cstore_customscan.h
*
* Forward declarations of functions to hookup the custom scan feature of
* cstore.
*
* $Id$
*
*-------------------------------------------------------------------------
*/
#ifndef CSTORE_FDW_CSTORE_CUSTOMSCAN_H
#define CSTORE_FDW_CSTORE_CUSTOMSCAN_H
void cstore_customscan_init(void);
#endif /* CSTORE_FDW_CSTORE_CUSTOMSCAN_H */

View File

@ -0,0 +1,35 @@
/*-------------------------------------------------------------------------
*
* cstore_fdw.h
*
* Type and function declarations for CStore foreign data wrapper.
*
* Copyright (c) 2016, Citus Data, Inc.
*
* $Id$
*
*-------------------------------------------------------------------------
*/
#ifndef CSTORE_FDW_H
#define CSTORE_FDW_H
#include "postgres.h"
#include "fmgr.h"
void cstore_fdw_init(void);
void cstore_fdw_finish(void);
/* event trigger function declarations */
extern Datum cstore_ddl_event_end_trigger(PG_FUNCTION_ARGS);
/* Function declarations for utility UDFs */
extern Datum cstore_table_size(PG_FUNCTION_ARGS);
extern Datum cstore_clean_table_resources(PG_FUNCTION_ARGS);
/* Function declarations for foreign data wrapper */
extern Datum cstore_fdw_handler(PG_FUNCTION_ARGS);
extern Datum cstore_fdw_validator(PG_FUNCTION_ARGS);
#endif /* CSTORE_FDW_H */

View File

@ -0,0 +1,19 @@
#include "citus_version.h"
#if HAS_TABLEAM
#include "postgres.h"
#include "fmgr.h"
#include "access/tableam.h"
#include "access/skey.h"
#include "nodes/bitmapset.h"
const TableAmRoutine * GetCstoreTableAmRoutine(void);
extern void cstore_tableam_init(void);
extern void cstore_tableam_finish(void);
extern TableScanDesc cstore_beginscan_extended(Relation relation, Snapshot snapshot,
int nkeys, ScanKey key,
ParallelTableScanDesc parallel_scan,
uint32 flags, Bitmapset *attr_needed,
List *scanQual);
#endif

View File

@ -0,0 +1,65 @@
/*-------------------------------------------------------------------------
*
* cstore_version_compat.h
*
* Compatibility macros for writing code agnostic to PostgreSQL versions
*
* Copyright (c) 2018, Citus Data, Inc.
*
* $Id$
*
*-------------------------------------------------------------------------
*/
#ifndef CSTORE_COMPAT_H
#define CSTORE_COMPAT_H
#if PG_VERSION_NUM < 100000
/* Accessor for the i'th attribute of tupdesc. */
#define TupleDescAttr(tupdesc, i) ((tupdesc)->attrs[(i)])
#endif
#if PG_VERSION_NUM < 110000
#define ALLOCSET_DEFAULT_SIZES ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, \
ALLOCSET_DEFAULT_MAXSIZE
#define ACLCHECK_OBJECT_TABLE ACL_KIND_CLASS
#else
#define ACLCHECK_OBJECT_TABLE OBJECT_TABLE
#define ExplainPropertyLong(qlabel, value, es) \
ExplainPropertyInteger(qlabel, NULL, value, es)
#endif
#if PG_VERSION_NUM >= 130000
#define CALL_PREVIOUS_UTILITY() \
PreviousProcessUtilityHook(plannedStatement, queryString, context, paramListInfo, \
queryEnvironment, destReceiver, queryCompletion)
#elif PG_VERSION_NUM >= 100000
#define CALL_PREVIOUS_UTILITY() \
PreviousProcessUtilityHook(plannedStatement, queryString, context, paramListInfo, \
queryEnvironment, destReceiver, completionTag)
#else
#define CALL_PREVIOUS_UTILITY() \
PreviousProcessUtilityHook(parseTree, queryString, context, paramListInfo, \
destReceiver, completionTag)
#endif
#if PG_VERSION_NUM < 120000
#define TTS_EMPTY(slot) ((slot)->tts_isempty)
#define ExecForceStoreHeapTuple(tuple, slot, shouldFree) \
	ExecStoreTuple(tuple, slot, InvalidBuffer, shouldFree);
#define TableScanDesc HeapScanDesc
#define table_beginscan heap_beginscan
#define table_endscan heap_endscan
#endif
#if PG_VERSION_NUM >= 130000
#define heap_open table_open
#define heap_openrv table_openrv
#define heap_close table_close
#endif
#endif /* CSTORE_COMPAT_H */
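To make the CALL_PREVIOUS_UTILITY() contract concrete, here is a hedged sketch of a pg13-style ProcessUtility hook body; PreviousProcessUtilityHook is a hypothetical saved hook pointer, and the local parameter names must match the ones the macro expands to:

#include "postgres.h"
#include "tcop/utility.h"

/* hypothetical: saved by _PG_init() when the hook was installed */
static ProcessUtility_hook_type PreviousProcessUtilityHook = NULL;

static void
CStoreProcessUtility(PlannedStmt *plannedStatement, const char *queryString,
					 ProcessUtilityContext context, ParamListInfo paramListInfo,
					 QueryEnvironment *queryEnvironment,
					 DestReceiver *destReceiver, QueryCompletion *queryCompletion)
{
	if (PreviousProcessUtilityHook != NULL)
	{
		/* expands to PreviousProcessUtilityHook(plannedStatement, ...) on pg13 */
		CALL_PREVIOUS_UTILITY();
	}
	else
	{
		standard_ProcessUtility(plannedStatement, queryString, context,
								paramListInfo, queryEnvironment,
								destReceiver, queryCompletion);
	}
}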

View File

@ -0,0 +1,21 @@
/*-------------------------------------------------------------------------
*
* mod.h
*
* Type and function declarations for CStore
*
* Copyright (c) 2016, Citus Data, Inc.
*
* $Id$
*
*-------------------------------------------------------------------------
*/
#ifndef MOD_H
#define MOD_H
/* Function declarations for extension loading and unloading */
extern void columnar_init(void);
extern void columnar_fini(void);
#endif /* MOD_H */
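Given the declarations above and in the earlier cstore headers, a plausible shape for columnar/mod.c (the file itself is not part of this excerpt, and the include paths are assumed) would be:

#include "citus_version.h"
#include "cstore.h"
#include "cstore_fdw.h"
#include "mod.h"

#if HAS_TABLEAM
#include "cstore_tableam.h"
#endif

void
columnar_init(void)
{
	cstore_init();         /* GUCs and option defaults */
	cstore_fdw_init();     /* foreign data wrapper path */
#if HAS_TABLEAM
	cstore_tableam_init(); /* table access method, pg12 and up */
#endif
}

void
columnar_fini(void)
{
	cstore_fdw_finish();
#if HAS_TABLEAM
	cstore_tableam_finish();
#endif
}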

View File

@ -40,8 +40,6 @@ extern int PlannerLevel;
 typedef struct RelationRestrictionContext
 {
-	bool hasDistributedRelation;
-	bool hasLocalRelation;
 	bool allReferenceTables;
 	List *relationRestrictionList;
 } RelationRestrictionContext;
@ -66,7 +64,6 @@ typedef struct RelationRestriction
 	RangeTblEntry *rte;
 	RelOptInfo *relOptInfo;
 	PlannerInfo *plannerInfo;
-	List *prunedShardIntervalList;
 
 	/* list of RootPlanParams for all outer nodes */
 	List *outerPlanParamsList;
@ -148,8 +145,10 @@ typedef struct RTEListProperties
 	/* includes hash, append and range partitioned tables */
 	bool hasDistributedTable;
 
-	/* union of above three */
+	/* union of hasReferenceTable, hasCitusLocalTable and hasDistributedTable */
 	bool hasCitusTable;
+
+	bool hasMaterializedView;
 } RTEListProperties;

View File

@ -80,6 +80,38 @@ typedef struct ListCellAndListWrapper
 			(((var) = lfirst_oid(var ## CellDoNotUse)) || true); \
 			var ## CellDoNotUse = lnext_compat(l, var ## CellDoNotUse))
 
+/*
+ * foreach_ptr_append -
+ *	  a convenience macro which loops through a pointer List and can append list
+ *	  elements without needing a ListCell or an index variable, just a declared
+ *	  pointer variable to store the iterated values.
+ *
+ * PostgreSQL 13 changed the representation of Lists to expansible arrays,
+ * not chains of cons-cells. This changes the costs for accessing and
+ * mutating List contents. Therefore different implementations are provided.
+ *
+ * For more information, see postgres commit with sha
+ * 1cff1b95ab6ddae32faa3efe0d95a820dbfdc164
+ */
+#if PG_VERSION_NUM >= PG_VERSION_13
+
+/*
+ * How it works:
+ * - An index is declared with the name {var}PositionDoNotUse and used
+ *   throughout the for loop using ## to concat.
+ * - To assign to var it needs to be done in the condition of the for loop,
+ *   because we cannot use the initializer since the index variable is
+ *   declared there.
+ * - || true is used to always enter the loop even if var is NULL.
+ */
+#define foreach_ptr_append(var, l) \
+	for (int var ## PositionDoNotUse = 0; \
+		 (var ## PositionDoNotUse) < list_length(l) && \
+		 (((var) = list_nth(l, var ## PositionDoNotUse)) || true); \
+		 var ## PositionDoNotUse ++)
+#else
+#define foreach_ptr_append(var, l) foreach_ptr(var, l)
+#endif
+
 /* utility functions declaration shared within this module */
 extern List * SortList(List *pointerList,
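As a usage illustration, a sketch modeled on the GetDependingViews change earlier in this diff (ViewDependencyNode and its dependingNodes field are borrowed from that context); the loop may safely grow the very list it iterates:

static List *
CollectDependingNodes(ViewDependencyNode *tableNode)
{
	List *nodeQueue = list_make1(tableNode);
	ViewDependencyNode *node = NULL;

	/* the position is re-read from nodeQueue on every iteration */
	foreach_ptr_append(node, nodeQueue)
	{
		ViewDependencyNode *dependingNode = NULL;
		foreach_ptr(dependingNode, node->dependingNodes)
		{
			/* appending while iterating is what foreach_ptr_append permits */
			nodeQueue = lappend(nodeQueue, dependingNode);
		}
	}

	return nodeQueue;
}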

View File

@ -161,6 +161,25 @@ check-follower-cluster: all
 	$(pg_regress_multi_check) --load-extension=citus --follower-cluster \
 	-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_follower_schedule $(EXTRA_TESTS)
 
+COLUMNAR_SCHEDULES =
+COLUMNAR_ISOLATION_SCHEDULES =
+# even though we always add the fdw schedules, keep them separate from the
+# declaration above for easy removal when fdw support is removed
+COLUMNAR_SCHEDULES += columnar_fdw_schedule
+COLUMNAR_ISOLATION_SCHEDULES += columnar_fdw_isolation_schedule
+ifeq ($(HAS_TABLEAM),yes)
+COLUMNAR_SCHEDULES += columnar_am_schedule
+COLUMNAR_ISOLATION_SCHEDULES += columnar_am_isolation_schedule
+endif
+
+check-columnar:
+	$(pg_regress_multi_check) --load-extension=citus \
+	-- $(MULTI_REGRESS_OPTS) $(addprefix --schedule=$(citus_abs_srcdir)/,$(COLUMNAR_SCHEDULES)) $(EXTRA_TESTS)
+
+check-columnar-isolation: all $(isolation_test_files)
+	$(pg_regress_multi_check) --load-extension=citus --isolationtester \
+	-- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build $(addprefix --schedule=$(citus_abs_srcdir)/,$(COLUMNAR_ISOLATION_SCHEDULES)) $(EXTRA_TESTS)
+
 check-failure: all
 	$(pg_regress_multi_check) --load-extension=citus --mitmproxy \
 	-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/failure_schedule $(EXTRA_TESTS)
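With these targets in place, make check-columnar always runs the fdw schedule and adds the table-access-method schedule only when configure detected HAS_TABLEAM; make check-columnar-isolation does the same through the isolation tester.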

View File

@ -4,9 +4,10 @@ url = "https://pypi.python.org/simple"
 verify_ssl = true
 
 [packages]
-mitmproxy = "==4.0.4"
+mitmproxy = "==5.3.0"
 construct = "==2.9.45"
 docopt = "==0.6.2"
+cryptography = "==3.2.1"
 
 [dev-packages]

View File

@ -1,7 +1,7 @@
{ {
"_meta": { "_meta": {
"hash": { "hash": {
"sha256": "b275a748e05be7bb653f589f10e279838e2401900af6c211a00edb1212252a1c" "sha256": "f8df9d3f82264315551af66b9cbc76a5e0c5ea656feaef30b3c8235e15d64e43"
}, },
"pipfile-spec": 6, "pipfile-spec": 6,
"requires": { "requires": {
@ -16,12 +16,12 @@
] ]
}, },
"default": { "default": {
"asn1crypto": { "asgiref": {
"hashes": [ "hashes": [
"sha256:2f1adbb7546ed199e3c90ef23ec95c5cf3585bac7d11fb7eb562a3fe89c64e87", "sha256:a5098bc870b80e7b872bff60bb363c7f2c2c89078759f6c47b53ff8c525a152e",
"sha256:9d5c20441baf0cb60a4ac34cc447c6c189024b6b4c6cd7877034f4965c464e49" "sha256:cd88907ecaec59d78e4ac00ea665b03e571cb37e3a0e37b3702af1a9e86c365a"
], ],
"version": "==0.24.0" "version": "==3.3.0"
}, },
"blinker": { "blinker": {
"hashes": [ "hashes": [
@ -29,88 +29,95 @@
], ],
"version": "==1.4" "version": "==1.4"
}, },
"brotlipy": { "brotli": {
"hashes": [ "hashes": [
"sha256:07194f4768eb62a4f4ea76b6d0df6ade185e24ebd85877c351daa0a069f1111a", "sha256:160c78292e98d21e73a4cc7f76a234390e516afcd982fa17e1422f7c6a9ce9c8",
"sha256:091b299bf36dd6ef7a06570dbc98c0f80a504a56c5b797f31934d2ad01ae7d17", "sha256:16d528a45c2e1909c2798f27f7bf0a3feec1dc9e50948e738b961618e38b6a7b",
"sha256:09ec3e125d16749b31c74f021aba809541b3564e5359f8c265cbae442810b41a", "sha256:1c48472a6ba3b113452355b9af0a60da5c2ae60477f8feda8346f8fd48e3e87c",
"sha256:0be698678a114addcf87a4b9496c552c68a2c99bf93cf8e08f5738b392e82057", "sha256:268fe94547ba25b58ebc724680609c8ee3e5a843202e9a381f6f9c5e8bdb5c70",
"sha256:0fa6088a9a87645d43d7e21e32b4a6bf8f7c3939015a50158c10972aa7f425b7", "sha256:269a5743a393c65db46a7bb982644c67ecba4b8d91b392403ad8a861ba6f495f",
"sha256:1379347337dc3d20b2d61456d44ccce13e0625db2611c368023b4194d5e2477f", "sha256:35a3edbe18e876e596553c4007a087f8bcfd538f19bc116917b3c7522fca0429",
"sha256:1ea4e578241504b58f2456a6c69952c88866c794648bdc74baee74839da61d44", "sha256:3b78a24b5fd13c03ee2b7b86290ed20efdc95da75a3557cc06811764d5ad1126",
"sha256:2699945a0a992c04fc7dc7fa2f1d0575a2c8b4b769f2874a08e8eae46bef36ae", "sha256:40d15c79f42e0a2c72892bf407979febd9cf91f36f495ffb333d1d04cebb34e4",
"sha256:2a80319ae13ea8dd60ecdc4f5ccf6da3ae64787765923256b62c598c5bba4121", "sha256:4d1b810aa0ed773f81dceda2cc7b403d01057458730e309856356d4ef4188438",
"sha256:2e5c64522364a9ebcdf47c5744a5ddeb3f934742d31e61ebfbbc095460b47162", "sha256:503fa6af7da9f4b5780bb7e4cbe0c639b010f12be85d02c99452825dd0feef3f",
"sha256:36def0b859beaf21910157b4c33eb3b06d8ce459c942102f16988cca6ea164df", "sha256:56d027eace784738457437df7331965473f2c0da2c70e1a1f6fdbae5402e0389",
"sha256:3a3e56ced8b15fbbd363380344f70f3b438e0fd1fcf27b7526b6172ea950e867", "sha256:5913a1177fc36e30fcf6dc868ce23b0453952c78c04c266d3149b3d39e1410d6",
"sha256:3c1d5e2cf945a46975bdb11a19257fa057b67591eb232f393d260e7246d9e571", "sha256:5b6ef7d9f9c38292df3690fe3e302b5b530999fa90014853dcd0d6902fb59f26",
"sha256:4e4638b49835d567d447a2cfacec109f9a777f219f071312268b351b6839436d", "sha256:5cb1e18167792d7d21e21365d7650b72d5081ed476123ff7b8cac7f45189c0c7",
"sha256:50ca336374131cfad20612f26cc43c637ac0bfd2be3361495e99270883b52962", "sha256:61a7ee1f13ab913897dac7da44a73c6d44d48a4adff42a5701e3239791c96e14",
"sha256:5de6f7d010b7558f72f4b061a07395c5c3fd57f0285c5af7f126a677b976a868", "sha256:68715970f16b6e92c574c30747c95cf8cf62804569647386ff032195dc89a430",
"sha256:637847560d671657f993313ecc6c6c6666a936b7a925779fd044065c7bc035b9", "sha256:6b2ae9f5f67f89aade1fab0f7fd8f2832501311c363a21579d02defa844d9296",
"sha256:653faef61241bf8bf99d73ca7ec4baa63401ba7b2a2aa88958394869379d67c7", "sha256:6c772d6c0a79ac0f414a9f8947cc407e119b8598de7621f39cacadae3cf57d12",
"sha256:786afc8c9bd67de8d31f46e408a3386331e126829114e4db034f91eacb05396d", "sha256:7cb81373984cc0e4682f31bc3d6be9026006d96eecd07ea49aafb06897746452",
"sha256:79aaf217072840f3e9a3b641cccc51f7fc23037496bd71e26211856b93f4b4cb", "sha256:88c63a1b55f352b02c6ffd24b15ead9fc0e8bf781dbe070213039324922a2eea",
"sha256:7e31f7adcc5851ca06134705fcf3478210da45d35ad75ec181e1ce9ce345bb38", "sha256:93130612b837103e15ac3f9cbacb4613f9e348b58b3aad53721d92e57f96d46a",
"sha256:8b39abc3256c978f575df5cd7893153277216474f303e26f0e43ba3d3969ef96", "sha256:97f715cf371b16ac88b8c19da00029804e20e25f30d80203417255d239f228b5",
"sha256:9448227b0df082e574c45c983fa5cd4bda7bfb11ea6b59def0940c1647be0c3c", "sha256:9d12cf2851759b8de8ca5fde36a59c08210a97ffca0eb94c532ce7b17c6a3d1d",
"sha256:96bc59ff9b5b5552843dc67999486a220e07a0522dddd3935da05dc194fa485c", "sha256:afde17ae04d90fbe53afb628f7f2d4ca022797aa093e809de5c3cf276f61bbfa",
"sha256:a07647886e24e2fb2d68ca8bf3ada398eb56fd8eac46c733d4d95c64d17f743b", "sha256:b663f1e02de5d0573610756398e44c130add0eb9a3fc912a09665332942a2efb",
"sha256:af65d2699cb9f13b26ec3ba09e75e80d31ff422c03675fcb36ee4dabe588fdc2", "sha256:c2415d9d082152460f2bd4e382a1e85aed233abc92db5a3880da2257dc7daf7b",
"sha256:b4c98b0d2c9c7020a524ca5bbff42027db1004c6571f8bc7b747f2b843128e7a", "sha256:c83aa123d56f2e060644427a882a36b3c12db93727ad7a7b9efd7d7f3e9cc2c4",
"sha256:c6cc0036b1304dd0073eec416cb2f6b9e37ac8296afd9e481cac3b1f07f9db25", "sha256:db844eb158a87ccab83e868a762ea8024ae27337fc7ddcbfcddd157f841fdfe7",
"sha256:d2c1c724c4ac375feb2110f1af98ecdc0e5a8ea79d068efb5891f621a5b235cb", "sha256:defed7ea5f218a9f2336301e6fd379f55c655bea65ba2476346340a0ce6f74a1",
"sha256:dc6c5ee0df9732a44d08edab32f8a616b769cc5a4155a12d2d010d248eb3fb07", "sha256:f909bbbc433048b499cb9db9e713b5d8d949e8c109a2a548502fb9aa8630f0b1"
"sha256:fd1d1c64214af5d90014d82cee5d8141b13d44c92ada7a0c0ec0679c6f15a471"
], ],
"version": "==0.7.0" "version": "==1.0.9"
}, },
"certifi": { "certifi": {
"hashes": [ "hashes": [
"sha256:e4f3620cfea4f83eedc95b24abd9cd56f3c4b146dd0177e83a21b4eb49e21e50", "sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3",
"sha256:fd7c7c74727ddcf00e9acd26bba8da604ffec95bf1c2144e67aff7a8b50e6cef" "sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41"
], ],
"version": "==2019.9.11" "version": "==2020.6.20"
}, },
"cffi": { "cffi": {
"hashes": [ "hashes": [
"sha256:041c81822e9f84b1d9c401182e174996f0bae9991f33725d059b771744290774", "sha256:005f2bfe11b6745d726dbb07ace4d53f057de66e336ff92d61b8c7e9c8f4777d",
"sha256:046ef9a22f5d3eed06334d01b1e836977eeef500d9b78e9ef693f9380ad0b83d", "sha256:09e96138280241bd355cd585148dec04dbbedb4f46128f340d696eaafc82dd7b",
"sha256:066bc4c7895c91812eff46f4b1c285220947d4aa46fa0a2651ff85f2afae9c90", "sha256:0b1ad452cc824665ddc682400b62c9e4f5b64736a2ba99110712fdee5f2505c4",
"sha256:066c7ff148ae33040c01058662d6752fd73fbc8e64787229ea8498c7d7f4041b", "sha256:0ef488305fdce2580c8b2708f22d7785ae222d9825d3094ab073e22e93dfe51f",
"sha256:2444d0c61f03dcd26dbf7600cf64354376ee579acad77aef459e34efcb438c63", "sha256:15f351bed09897fbda218e4db5a3d5c06328862f6198d4fb385f3e14e19decb3",
"sha256:300832850b8f7967e278870c5d51e3819b9aad8f0a2c8dbe39ab11f119237f45", "sha256:22399ff4870fb4c7ef19fff6eeb20a8bbf15571913c181c78cb361024d574579",
"sha256:34c77afe85b6b9e967bd8154e3855e847b70ca42043db6ad17f26899a3df1b25", "sha256:23e5d2040367322824605bc29ae8ee9175200b92cb5483ac7d466927a9b3d537",
"sha256:46de5fa00f7ac09f020729148ff632819649b3e05a007d286242c4882f7b1dc3", "sha256:2791f68edc5749024b4722500e86303a10d342527e1e3bcac47f35fbd25b764e",
"sha256:4aa8ee7ba27c472d429b980c51e714a24f47ca296d53f4d7868075b175866f4b", "sha256:2f9674623ca39c9ebe38afa3da402e9326c245f0f5ceff0623dccdac15023e05",
"sha256:4d0004eb4351e35ed950c14c11e734182591465a33e960a4ab5e8d4f04d72647", "sha256:3363e77a6176afb8823b6e06db78c46dbc4c7813b00a41300a4873b6ba63b171",
"sha256:4e3d3f31a1e202b0f5a35ba3bc4eb41e2fc2b11c1eff38b362de710bcffb5016", "sha256:33c6cdc071ba5cd6d96769c8969a0531be2d08c2628a0143a10a7dcffa9719ca",
"sha256:50bec6d35e6b1aaeb17f7c4e2b9374ebf95a8975d57863546fa83e8d31bdb8c4", "sha256:3b8eaf915ddc0709779889c472e553f0d3e8b7bdf62dab764c8921b09bf94522",
"sha256:55cad9a6df1e2a1d62063f79d0881a414a906a6962bc160ac968cc03ed3efcfb", "sha256:3cb3e1b9ec43256c4e0f8d2837267a70b0e1ca8c4f456685508ae6106b1f504c",
"sha256:5662ad4e4e84f1eaa8efce5da695c5d2e229c563f9d5ce5b0113f71321bcf753", "sha256:3eeeb0405fd145e714f7633a5173318bd88d8bbfc3dd0a5751f8c4f70ae629bc",
"sha256:59b4dc008f98fc6ee2bb4fd7fc786a8d70000d058c2bbe2698275bc53a8d3fa7", "sha256:44f60519595eaca110f248e5017363d751b12782a6f2bd6a7041cba275215f5d",
"sha256:73e1ffefe05e4ccd7bcea61af76f36077b914f92b76f95ccf00b0c1b9186f3f9", "sha256:4d7c26bfc1ea9f92084a1d75e11999e97b62d63128bcc90c3624d07813c52808",
"sha256:a1f0fd46eba2d71ce1589f7e50a9e2ffaeb739fb2c11e8192aa2b45d5f6cc41f", "sha256:529c4ed2e10437c205f38f3691a68be66c39197d01062618c55f74294a4a4828",
"sha256:a2e85dc204556657661051ff4bab75a84e968669765c8a2cd425918699c3d0e8", "sha256:6642f15ad963b5092d65aed022d033c77763515fdc07095208f15d3563003869",
"sha256:a5457d47dfff24882a21492e5815f891c0ca35fefae8aa742c6c263dac16ef1f", "sha256:85ba797e1de5b48aa5a8427b6ba62cf69607c18c5d4eb747604b7302f1ec382d",
"sha256:a8dccd61d52a8dae4a825cdbb7735da530179fea472903eb871a5513b5abbfdc", "sha256:8f0f1e499e4000c4c347a124fa6a27d37608ced4fe9f7d45070563b7c4c370c9",
"sha256:ae61af521ed676cf16ae94f30fe202781a38d7178b6b4ab622e4eec8cefaff42", "sha256:a624fae282e81ad2e4871bdb767e2c914d0539708c0f078b5b355258293c98b0",
"sha256:b012a5edb48288f77a63dba0840c92d0504aa215612da4541b7b42d849bc83a3", "sha256:b0358e6fefc74a16f745afa366acc89f979040e0cbc4eec55ab26ad1f6a9bfbc",
"sha256:d2c5cfa536227f57f97c92ac30c8109688ace8fa4ac086d19d0af47d134e2909", "sha256:bbd2f4dfee1079f76943767fce837ade3087b578aeb9f69aec7857d5bf25db15",
"sha256:d42b5796e20aacc9d15e66befb7a345454eef794fdb0737d1af593447c6c8f45", "sha256:bf39a9e19ce7298f1bd6a9758fa99707e9e5b1ebe5e90f2c3913a47bc548747c",
"sha256:dee54f5d30d775f525894d67b1495625dd9322945e7fee00731952e0368ff42d", "sha256:c11579638288e53fc94ad60022ff1b67865363e730ee41ad5e6f0a17188b327a",
"sha256:e070535507bd6aa07124258171be2ee8dfc19119c28ca94c9dfb7efd23564512", "sha256:c150eaa3dadbb2b5339675b88d4573c1be3cb6f2c33a6c83387e10cc0bf05bd3",
"sha256:e1ff2748c84d97b065cc95429814cdba39bcbd77c9c85c89344b317dc0d9cbff", "sha256:c53af463f4a40de78c58b8b2710ade243c81cbca641e34debf3396a9640d6ec1",
"sha256:ed851c75d1e0e043cbf5ca9a8e1b13c4c90f3fbd863dacb01c0808e2b5204201" "sha256:cb763ceceae04803adcc4e2d80d611ef201c73da32d8f2722e9d0ab0c7f10768",
"sha256:cc75f58cdaf043fe6a7a6c04b3b5a0e694c6a9e24050967747251fb80d7bce0d",
"sha256:d80998ed59176e8cba74028762fbd9b9153b9afc71ea118e63bbf5d4d0f9552b",
"sha256:de31b5164d44ef4943db155b3e8e17929707cac1e5bd2f363e67a56e3af4af6e",
"sha256:e66399cf0fc07de4dce4f588fc25bfe84a6d1285cc544e67987d22663393926d",
"sha256:f0620511387790860b249b9241c2f13c3a80e21a73e0b861a2df24e9d6f56730",
"sha256:f4eae045e6ab2bb54ca279733fe4eb85f1effda392666308250714e01907f394",
"sha256:f92cdecb618e5fa4658aeb97d5eb3d2f47aa94ac6477c6daf0f306c5a3b9e6b1",
"sha256:f92f789e4f9241cd262ad7a555ca2c648a98178a953af117ef7fad46aa1d5591"
], ],
"version": "==1.12.3" "version": "==1.14.3"
}, },
"click": { "click": {
"hashes": [ "hashes": [
"sha256:29f99fc6125fbc931b758dc053b3114e55c77a6e4c6c3a2674a2dc986016381d", "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a",
"sha256:f15516df478d5a56180fbf80e68f206010e6d160fc39fa508b65e035fd75130b" "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc"
], ],
"version": "==6.7" "version": "==7.1.2"
}, },
"construct": { "construct": {
"hashes": [ "hashes": [
@ -121,27 +128,39 @@
}, },
"cryptography": { "cryptography": {
"hashes": [ "hashes": [
"sha256:02602e1672b62e803e08617ec286041cc453e8d43f093a5f4162095506bc0beb", "sha256:07ca431b788249af92764e3be9a488aa1d39a0bc3be313d826bbec690417e538",
"sha256:10b48e848e1edb93c1d3b797c83c72b4c387ab0eb4330aaa26da8049a6cbede0", "sha256:13b88a0bd044b4eae1ef40e265d006e34dbcde0c2f1e15eb9896501b2d8f6c6f",
"sha256:17db09db9d7c5de130023657be42689d1a5f60502a14f6f745f6f65a6b8195c0", "sha256:32434673d8505b42c0de4de86da8c1620651abd24afe91ae0335597683ed1b77",
"sha256:227da3a896df1106b1a69b1e319dce218fa04395e8cc78be7e31ca94c21254bc", "sha256:3cd75a683b15576cfc822c7c5742b3276e50b21a06672dc3a800a2d5da4ecd1b",
"sha256:2cbaa03ac677db6c821dac3f4cdfd1461a32d0615847eedbb0df54bb7802e1f7", "sha256:4e7268a0ca14536fecfdf2b00297d4e407da904718658c1ff1961c713f90fd33",
"sha256:31db8febfc768e4b4bd826750a70c79c99ea423f4697d1dab764eb9f9f849519", "sha256:545a8550782dda68f8cdc75a6e3bf252017aa8f75f19f5a9ca940772fc0cb56e",
"sha256:4a510d268e55e2e067715d728e4ca6cd26a8e9f1f3d174faf88e6f2cb6b6c395", "sha256:55d0b896631412b6f0c7de56e12eb3e261ac347fbaa5d5e705291a9016e5f8cb",
"sha256:6a88d9004310a198c474d8a822ee96a6dd6c01efe66facdf17cb692512ae5bc0", "sha256:5849d59358547bf789ee7e0d7a9036b2d29e9a4ddf1ce5e06bb45634f995c53e",
"sha256:76936ec70a9b72eb8c58314c38c55a0336a2b36de0c7ee8fb874a4547cadbd39", "sha256:6dc59630ecce8c1f558277ceb212c751d6730bd12c80ea96b4ac65637c4f55e7",
"sha256:7e3b4aecc4040928efa8a7cdaf074e868af32c58ffc9bb77e7bf2c1a16783286", "sha256:7117319b44ed1842c617d0a452383a5a052ec6aa726dfbaffa8b94c910444297",
"sha256:8168bcb08403ef144ff1fb880d416f49e2728101d02aaadfe9645883222c0aa5", "sha256:75e8e6684cf0034f6bf2a97095cb95f81537b12b36a8fedf06e73050bb171c2d",
"sha256:8229ceb79a1792823d87779959184a1bf95768e9248c93ae9f97c7a2f60376a1", "sha256:7b8d9d8d3a9bd240f453342981f765346c87ade811519f98664519696f8e6ab7",
"sha256:8a19e9f2fe69f6a44a5c156968d9fc8df56d09798d0c6a34ccc373bb186cee86", "sha256:a035a10686532b0587d58a606004aa20ad895c60c4d029afa245802347fab57b",
"sha256:8d10113ca826a4c29d5b85b2c4e045ffa8bad74fb525ee0eceb1d38d4c70dfd6", "sha256:a4e27ed0b2504195f855b52052eadcc9795c59909c9d84314c5408687f933fc7",
"sha256:be495b8ec5a939a7605274b6e59fbc35e76f5ad814ae010eb679529671c9e119", "sha256:a733671100cd26d816eed39507e585c156e4498293a907029969234e5e634bc4",
"sha256:dc2d3f3b1548f4d11786616cf0f4415e25b0fbecb8a1d2cd8c07568f13fdde38", "sha256:a75f306a16d9f9afebfbedc41c8c2351d8e61e818ba6b4c40815e2b5740bb6b8",
"sha256:e4aecdd9d5a3d06c337894c9a6e2961898d3f64fe54ca920a72234a3de0f9cb3", "sha256:bd717aa029217b8ef94a7d21632a3bb5a4e7218a4513d2521c2a2fd63011e98b",
"sha256:e79ab4485b99eacb2166f3212218dd858258f374855e1568f728462b0e6ee0d9", "sha256:d25cecbac20713a7c3bc544372d42d8eafa89799f492a43b79e1dfd650484851",
"sha256:f995d3667301e1754c57b04e0bae6f0fa9d710697a9f8d6712e8cca02550910f" "sha256:d26a2557d8f9122f9bf445fc7034242f4375bd4e95ecda007667540270965b13",
"sha256:d3545829ab42a66b84a9aaabf216a4dce7f16dbc76eb69be5c302ed6b8f4a29b",
"sha256:d3d5e10be0cf2a12214ddee45c6bd203dab435e3d83b4560c03066eda600bfe3",
"sha256:efe15aca4f64f3a7ea0c09c87826490e50ed166ce67368a68f315ea0807a20df"
], ],
"version": "==2.3.1" "index": "pypi",
"version": "==3.2.1"
},
"dataclasses": {
"hashes": [
"sha256:3459118f7ede7c8bea0fe795bff7c6c2ce287d01dd226202f7c9ebc0610a7836",
"sha256:494a6dcae3b8bcf80848eea2ef64c0cc5cd307ffc263e17cdf42f3e5420808e6"
],
"markers": "python_version < '3.7'",
"version": "==0.7"
}, },
"docopt": { "docopt": {
"hashes": [ "hashes": [
@ -150,162 +169,378 @@
"index": "pypi", "index": "pypi",
"version": "==0.6.2" "version": "==0.6.2"
}, },
"flask": {
"hashes": [
"sha256:4efa1ae2d7c9865af48986de8aeb8504bf32c7f3d6fdc9353d34b21f4b127060",
"sha256:8a4fdd8936eba2512e9c85df320a37e694c93945b33ef33c89946a340a238557"
],
"version": "==1.1.2"
},
"h11": { "h11": {
"hashes": [ "hashes": [
"sha256:1c0fbb1cba6f809fe3e6b27f8f6d517ca171f848922708871403636143d530d9", "sha256:3c6c61d69c6f13d41f1b80ab0322f1872702a3ba26e12aa864c928f6a43fbaab",
"sha256:af77d5d82fa027c032650fb8afdef3cd0a3735ba01480bee908cddad9be1bdce" "sha256:ab6c335e1b6ef34b205d5ca3e228c9299cc7218b049819ec84a388c2525e5d87"
], ],
"version": "==0.7.0" "version": "==0.11.0"
}, },
"h2": { "h2": {
"hashes": [ "hashes": [
"sha256:ac377fcf586314ef3177bfd90c12c7826ab0840edeb03f0f24f511858326049e", "sha256:ac9e293a1990b339d5d71b19c5fe630e3dd4d768c620d1730d355485323f1b25",
"sha256:b8a32bd282594424c0ac55845377eea13fa54fe4a8db012f3a198ed923dc3ab4" "sha256:bb7ac7099dd67a857ed52c815a6192b6b1f5ba6b516237fc24a085341340593d"
], ],
"version": "==3.1.1" "markers": "python_version >= '3.6.0'",
"version": "==4.0.0"
}, },
"hpack": { "hpack": {
"hashes": [ "hashes": [
"sha256:0edd79eda27a53ba5be2dfabf3b15780928a0dff6eb0c60a3d6767720e970c89", "sha256:84a076fad3dc9a9f8063ccb8041ef100867b1878b25ef0ee63847a5d53818a6c",
"sha256:8eec9c1f4bfae3408a3f30500261f7e6a65912dc138526ea054f9ad98892e9d2" "sha256:fc41de0c63e687ebffde81187a948221294896f6bdc0ae2312708df339430095"
], ],
"version": "==3.0.0" "version": "==4.0.0"
}, },
"hyperframe": { "hyperframe": {
"hashes": [ "hashes": [
"sha256:5187962cb16dcc078f23cb5a4b110098d546c3f41ff2d4038a9896893bbd0b40", "sha256:742d2a4bc3152a340a49d59f32e33ec420aa8e7054c1444ef5c7efff255842f1",
"sha256:a9f5c17f2cc3c719b917c4f33ed1c61bd1f8dfac4b1bd23b7c80b3400971b41f" "sha256:a51026b1591cac726fc3d0b7994fbc7dc5efab861ef38503face2930fd7b2d34"
], ],
"version": "==5.2.0" "markers": "python_version >= '3.6.0'",
"version": "==6.0.0"
}, },
"idna": { "itsdangerous": {
"hashes": [ "hashes": [
"sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407", "sha256:321b033d07f2a4136d3ec762eac9f16a10ccd60f53c0c91af90217ace7ba1f19",
"sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c" "sha256:b12271b2047cb23eeb98c8b5622e2e5c5e9abd9784a153e9d8ef9cb4dd09d749"
], ],
"version": "==2.8" "version": "==1.1.0"
},
"jinja2": {
"hashes": [
"sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0",
"sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035"
],
"version": "==2.11.2"
}, },
"kaitaistruct": { "kaitaistruct": {
"hashes": [ "hashes": [
"sha256:d1d17c7f6839b3d28fc22b21295f787974786c2201e8788975e72e2a1d109ff5" "sha256:3d5845817ec8a4d5504379cc11bd570b038850ee49c4580bc0998c8fb1d327ad"
], ],
"version": "==0.8" "version": "==0.9"
}, },
"ldap3": { "ldap3": {
"hashes": [ "hashes": [
"sha256:3f67c83185b1f0df8fdf6b52fa42c55bc9e9b7120c8b7fec60f0d6003c536d18", "sha256:37d633e20fa360c302b1263c96fe932d40622d0119f1bddcb829b03462eeeeb7",
"sha256:dd9be8ea27773c4ffc18ede0b95c3ca1eb12513a184590b9f8ae423db3f71eb9" "sha256:7c3738570766f5e5e74a56fade15470f339d5c436d821cf476ef27da0a4de8b0"
], ],
"version": "==2.5.2" "version": "==2.8.1"
},
"markupsafe": {
"hashes": [
"sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473",
"sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161",
"sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235",
"sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5",
"sha256:13d3144e1e340870b25e7b10b98d779608c02016d5184cfb9927a9f10c689f42",
"sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff",
"sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b",
"sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1",
"sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e",
"sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183",
"sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66",
"sha256:596510de112c685489095da617b5bcbbac7dd6384aeebeda4df6025d0256a81b",
"sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1",
"sha256:6788b695d50a51edb699cb55e35487e430fa21f1ed838122d722e0ff0ac5ba15",
"sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1",
"sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e",
"sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b",
"sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905",
"sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735",
"sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d",
"sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e",
"sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d",
"sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c",
"sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21",
"sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2",
"sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5",
"sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b",
"sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6",
"sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f",
"sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f",
"sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2",
"sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7",
"sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be"
],
"version": "==1.1.1"
}, },
"mitmproxy": { "mitmproxy": {
"hashes": [ "hashes": [
"sha256:e74869c7bf4e5b988fbe3a3d0039f430d1e1eeb5927abf2097183a711bf5b312" "sha256:481940365fc08fc2318343e530ef01d35084e8b56d1c61b5e1a7b6ed9b664d24"
], ],
"index": "pypi", "index": "pypi",
"version": "==4.0.4" "version": "==5.3.0"
},
"msgpack": {
"hashes": [
"sha256:002a0d813e1f7b60da599bdf969e632074f9eec1b96cbed8fb0973a63160a408",
"sha256:25b3bc3190f3d9d965b818123b7752c5dfb953f0d774b454fd206c18fe384fb8",
"sha256:271b489499a43af001a2e42f42d876bb98ccaa7e20512ff37ca78c8e12e68f84",
"sha256:39c54fdebf5fa4dda733369012c59e7d085ebdfe35b6cf648f09d16708f1be5d",
"sha256:4233b7f86c1208190c78a525cd3828ca1623359ef48f78a6fea4b91bb995775a",
"sha256:5bea44181fc8e18eed1d0cd76e355073f00ce232ff9653a0ae88cb7d9e643322",
"sha256:5dba6d074fac9b24f29aaf1d2d032306c27f04187651511257e7831733293ec2",
"sha256:7a22c965588baeb07242cb561b63f309db27a07382825fc98aecaf0827c1538e",
"sha256:908944e3f038bca67fcfedb7845c4a257c7749bf9818632586b53bcf06ba4b97",
"sha256:9534d5cc480d4aff720233411a1f765be90885750b07df772380b34c10ecb5c0",
"sha256:aa5c057eab4f40ec47ea6f5a9825846be2ff6bf34102c560bad5cad5a677c5be",
"sha256:b3758dfd3423e358bbb18a7cccd1c74228dffa7a697e5be6cb9535de625c0dbf",
"sha256:c901e8058dd6653307906c5f157f26ed09eb94a850dddd989621098d347926ab",
"sha256:cec8bf10981ed70998d98431cd814db0ecf3384e6b113366e7f36af71a0fca08",
"sha256:db685187a415f51d6b937257474ca72199f393dad89534ebbdd7d7a3b000080e",
"sha256:e35b051077fc2f3ce12e7c6a34cf309680c63a842db3a0616ea6ed25ad20d272",
"sha256:e7bbdd8e2b277b77782f3ce34734b0dfde6cbe94ddb74de8d733d603c7f9e2b1",
"sha256:ea41c9219c597f1d2bf6b374d951d310d58684b5de9dc4bd2976db9e1e22c140"
],
"version": "==1.0.0"
}, },
"passlib": { "passlib": {
"hashes": [ "hashes": [
"sha256:3d948f64138c25633613f303bcc471126eae67c04d5e3f6b7b8ce6242f8653e0", "sha256:aa6bca462b8d8bda89c70b382f0c298a20b5560af6cbfa2dce410c0a2fb669f1",
"sha256:43526aea08fa32c6b6dbbbe9963c4c767285b78147b7437597f992812f69d280" "sha256:defd50f72b65c5402ab2c573830a6978e5f202ad0d984793c8dde2c4152ebe04"
], ],
"version": "==1.7.1" "version": "==1.7.4"
},
"protobuf": {
"hashes": [
"sha256:0bba42f439bf45c0f600c3c5993666fcb88e8441d011fad80a11df6f324eef33",
"sha256:1e834076dfef9e585815757a2c7e4560c7ccc5962b9d09f831214c693a91b463",
"sha256:339c3a003e3c797bc84499fa32e0aac83c768e67b3de4a5d7a5a9aa3b0da634c",
"sha256:361acd76f0ad38c6e38f14d08775514fbd241316cce08deb2ce914c7dfa1184a",
"sha256:3dee442884a18c16d023e52e32dd34a8930a889e511af493f6dc7d4d9bf12e4f",
"sha256:4d1174c9ed303070ad59553f435846a2f877598f59f9afc1b89757bdf846f2a7",
"sha256:5db9d3e12b6ede5e601b8d8684a7f9d90581882925c96acf8495957b4f1b204b",
"sha256:6a82e0c8bb2bf58f606040cc5814e07715b2094caeba281e2e7d0b0e2e397db5",
"sha256:8c35bcbed1c0d29b127c886790e9d37e845ffc2725cc1db4bd06d70f4e8359f4",
"sha256:91c2d897da84c62816e2f473ece60ebfeab024a16c1751aaf31100127ccd93ec",
"sha256:9c2e63c1743cba12737169c447374fab3dfeb18111a460a8c1a000e35836b18c",
"sha256:9edfdc679a3669988ec55a989ff62449f670dfa7018df6ad7f04e8dbacb10630",
"sha256:c0c5ab9c4b1eac0a9b838f1e46038c3175a95b0f2d944385884af72876bd6bc7",
"sha256:c8abd7605185836f6f11f97b21200f8a864f9cb078a193fe3c9e235711d3ff1e",
"sha256:d69697acac76d9f250ab745b46c725edf3e98ac24763990b24d58c16c642947a",
"sha256:df3932e1834a64b46ebc262e951cd82c3cf0fa936a154f0a42231140d8237060",
"sha256:e7662437ca1e0c51b93cadb988f9b353fa6b8013c0385d63a70c8a77d84da5f9",
"sha256:f68eb9d03c7d84bd01c790948320b768de8559761897763731294e3bc316decb"
],
"version": "==3.13.0"
},
"publicsuffix2": {
"hashes": [
"sha256:00f8cc31aa8d0d5592a5ced19cccba7de428ebca985db26ac852d920ddd6fe7b",
"sha256:786b5e36205b88758bd3518725ec8cfe7a8173f5269354641f581c6b80a99893"
],
"version": "==2.20191221"
},
"pyasn1": { "pyasn1": {
"hashes": [ "hashes": [
"sha256:62cdade8b5530f0b185e09855dd422bc05c0bbff6b72ff61381c09dac7befd8c", "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d",
"sha256:a9495356ca1d66ed197a0f72b41eb1823cf7ea8b5bd07191673e8147aecf8604" "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba"
], ],
"version": "==0.4.7" "version": "==0.4.8"
}, },
"pycparser": { "pycparser": {
"hashes": [ "hashes": [
"sha256:a988718abfad80b6b157acce7bf130a30876d27603738ac39f140993246b25b3" "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0",
"sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705"
], ],
"version": "==2.19" "version": "==2.20"
}, },
"pyopenssl": { "pyopenssl": {
"hashes": [ "hashes": [
"sha256:26ff56a6b5ecaf3a2a59f132681e2a80afcc76b4f902f612f518f92c2a1bf854", "sha256:621880965a720b8ece2f1b2f54ea2071966ab00e2970ad2ce11d596102063504",
"sha256:6488f1423b00f73b7ad5167885312bb0ce410d3312eb212393795b53c8caa580" "sha256:9a24494b2602aaf402be5c9e30a0b82d4a5c67528fe8fb475e3f3bc00dd69507"
], ],
"version": "==18.0.0" "version": "==19.1.0"
}, },
"pyparsing": { "pyparsing": {
"hashes": [ "hashes": [
"sha256:bc6c7146b91af3f567cf6daeaec360bc07d45ffec4cf5353f4d7a208ce7ca30a", "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1",
"sha256:d29593d8ebe7b57d6967b62494f8c72b03ac0262b1eed63826c6f788b3606401" "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"
], ],
"version": "==2.2.2" "version": "==2.4.7"
}, },
"pyperclip": { "pyperclip": {
"hashes": [ "hashes": [
"sha256:406bc020d4b8e60d8673876271b815befc4c02fd8d919e4aacc667d69fab99ea" "sha256:9abef1e79ce635eb62309ecae02dfb5a3eb952fa7d6dce09c1aef063f81424d3"
], ],
"version": "==1.6.5" "version": "==1.8.1"
}, },
"ruamel.yaml": { "ruamel.yaml": {
"hashes": [ "hashes": [
"sha256:08aaaa74ff66565024ecabf9ba2db212712382a21c0458f9a91c623a1fa83b34", "sha256:012b9470a0ea06e4e44e99e7920277edf6b46eee0232a04487ea73a7386340a5",
"sha256:23f2efb872d2ebe3d5428b4f1a8f30cbf59f56e780c4981c155411ee65572673", "sha256:076cc0bc34f1966d920a49f18b52b6ad559fbe656a0748e3535cf7b3f29ebf9e"
"sha256:38718e69270141c403b5fc539f774ed394568f8a5195b507991f5b690356facb",
"sha256:44da2be1153e173f90ad8775d4ac4237a3c06cfbb9660c1c1980271621833faa",
"sha256:4b1674a936cdae9735578d4fd64bcbc6cfbb77a1a8f7037a50c6e3874ba4c9d8",
"sha256:51d49c870aca850e652e2cd1c9bea9b52b77d13ad52b0556de496c1d264ea65f",
"sha256:63dc8c6147a4cf77efadf2ae0f34e89e03de79289298bb941b7ae333d5d4020b",
"sha256:6672798c6b52a976a7b24e20665055852388c83198d88029d3c76e2197ac221a",
"sha256:6b6025f9b6a557e15e9fdfda4d9af0b57cd8d59ff98e23a0097ab2d7c0540f07",
"sha256:7b750252e3d1ec5b53d03be508796c04a907060900c7d207280b7456650ebbfc",
"sha256:847177699994f9c31adf78d1ef1ff8f069ef0241e744a3ee8b30fbdaa914cc1e",
"sha256:8e42f3067a59e819935a2926e247170ed93c8f0b2ab64526f888e026854db2e4",
"sha256:922d9e483c05d9000256640026f277fcc0c2e1e9271d05acada8e6cfb4c8b721",
"sha256:92a8ca79f9173cca29ca9663b49d9c936aefc4c8a76f39318b0218c8f3626438",
"sha256:ab8eeca4de4decf0d0a42cb6949d354da9fc70a2d9201f0dd55186c599b2e3a5",
"sha256:bd4b60b649f4a81086f70cd56eff4722018ef36a28094c396f1a53bf450bd579",
"sha256:fc6471ef15b69e454cca82433ac5f84929d9f3e2d72b9e54b06850b6b7133cc0",
"sha256:ffc89770339191acbe5a15041950b5ad9daec7d659619b0ed9dad8c9c80c26f3"
], ],
"version": "==0.15.100" "version": "==0.16.12"
},
"ruamel.yaml.clib": {
"hashes": [
"sha256:058a1cc3df2a8aecc12f983a48bda99315cebf55a3b3a5463e37bb599b05727b",
"sha256:2602e91bd5c1b874d6f93d3086f9830f3e907c543c7672cf293a97c3fabdcd91",
"sha256:28116f204103cb3a108dfd37668f20abe6e3cafd0d3fd40dba126c732457b3cc",
"sha256:2d24bd98af676f4990c4d715bcdc2a60b19c56a3fb3a763164d2d8ca0e806ba7",
"sha256:30dca9bbcbb1cc858717438218d11eafb78666759e5094dd767468c0d577a7e7",
"sha256:44c7b0498c39f27795224438f1a6be6c5352f82cb887bc33d962c3a3acc00df6",
"sha256:464e66a04e740d754170be5e740657a3b3b6d2bcc567f0c3437879a6e6087ff6",
"sha256:4df5019e7783d14b79217ad9c56edf1ba7485d614ad5a385d1b3c768635c81c0",
"sha256:4e52c96ca66de04be42ea2278012a2342d89f5e82b4512fb6fb7134e377e2e62",
"sha256:5254af7d8bdf4d5484c089f929cb7f5bafa59b4f01d4f48adda4be41e6d29f99",
"sha256:52ae5739e4b5d6317b52f5b040b1b6639e8af68a5b8fd606a8b08658fbd0cab5",
"sha256:53b9dd1abd70e257a6e32f934ebc482dac5edb8c93e23deb663eac724c30b026",
"sha256:73b3d43e04cc4b228fa6fa5d796409ece6fcb53a6c270eb2048109cbcbc3b9c2",
"sha256:74161d827407f4db9072011adcfb825b5258a5ccb3d2cd518dd6c9edea9e30f1",
"sha256:839dd72545ef7ba78fd2aa1a5dd07b33696adf3e68fae7f31327161c1093001b",
"sha256:8e8fd0a22c9d92af3a34f91e8a2594eeb35cba90ab643c5e0e643567dc8be43e",
"sha256:a873e4d4954f865dcb60bdc4914af7eaae48fb56b60ed6daa1d6251c72f5337c",
"sha256:ab845f1f51f7eb750a78937be9f79baea4a42c7960f5a94dde34e69f3cce1988",
"sha256:b1e981fe1aff1fd11627f531524826a4dcc1f26c726235a52fcb62ded27d150f",
"sha256:b4b0d31f2052b3f9f9b5327024dc629a253a83d8649d4734ca7f35b60ec3e9e5",
"sha256:c6ac7e45367b1317e56f1461719c853fd6825226f45b835df7436bb04031fd8a",
"sha256:daf21aa33ee9b351f66deed30a3d450ab55c14242cfdfcd377798e2c0d25c9f1",
"sha256:e9f7d1d8c26a6a12c23421061f9022bb62704e38211fe375c645485f38df34a2",
"sha256:f6061a31880c1ed6b6ce341215336e2f3d0c1deccd84957b6fa8ca474b41e89f"
],
"markers": "platform_python_implementation == 'CPython' and python_version < '3.9'",
"version": "==0.2.2"
}, },
"six": { "six": {
"hashes": [ "hashes": [
"sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c", "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259",
"sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73" "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"
], ],
"version": "==1.12.0" "version": "==1.15.0"
}, },
"sortedcontainers": { "sortedcontainers": {
"hashes": [ "hashes": [
"sha256:220bb2e3e1886297fd7cdd6d164cb5cf237be1cfae1a3a3e526d149c52816682", "sha256:4e73a757831fc3ca4de2859c422564239a31d8213d09a2a666e375807034d2ba",
"sha256:b74f2756fb5e23512572cc76f0fe0832fd86310f77dfee54335a35fb33f6b950" "sha256:c633ebde8580f241f274c1f8994a665c0e54a17724fecd0cae2f079e09c36d3f"
], ],
"version": "==2.0.5" "version": "==2.2.2"
}, },
"tornado": { "tornado": {
"hashes": [ "hashes": [
"sha256:0662d28b1ca9f67108c7e3b77afabfb9c7e87bde174fbda78186ecedc2499a9d", "sha256:0a00ff4561e2929a2c37ce706cb8233b7907e0cdc22eab98888aca5dd3775feb",
"sha256:4e5158d97583502a7e2739951553cbd88a72076f152b4b11b64b9a10c4c49409", "sha256:0d321a39c36e5f2c4ff12b4ed58d41390460f798422c4504e09eb5678e09998c",
"sha256:732e836008c708de2e89a31cb2fa6c0e5a70cb60492bee6f1ea1047500feaf7f", "sha256:1e8225a1070cd8eec59a996c43229fe8f95689cb16e552d130b9793cb570a288",
"sha256:8154ec22c450df4e06b35f131adc4f2f3a12ec85981a203301d310abf580500f", "sha256:20241b3cb4f425e971cb0a8e4ffc9b0a861530ae3c52f2b0434e6c1b57e9fd95",
"sha256:8e9d728c4579682e837c92fdd98036bd5cdefa1da2aaf6acf26947e6dd0c01c5", "sha256:25ad220258349a12ae87ede08a7b04aca51237721f63b1808d39bdb4b2164558",
"sha256:d4b3e5329f572f055b587efc57d29bd051589fb5a43ec8898c77a47ec2fa2bbb", "sha256:33892118b165401f291070100d6d09359ca74addda679b60390b09f8ef325ffe",
"sha256:e5f2585afccbff22390cddac29849df463b252b711aa2ce7c5f3f342a5b3b444" "sha256:33c6e81d7bd55b468d2e793517c909b139960b6c790a60b7991b9b6b76fb9791",
"sha256:3447475585bae2e77ecb832fc0300c3695516a47d46cefa0528181a34c5b9d3d",
"sha256:34ca2dac9e4d7afb0bed4677512e36a52f09caa6fded70b4e3e1c89dbd92c326",
"sha256:3e63498f680547ed24d2c71e6497f24bca791aca2fe116dbc2bd0ac7f191691b",
"sha256:548430be2740e327b3fe0201abe471f314741efcb0067ec4f2d7dcfb4825f3e4",
"sha256:6196a5c39286cc37c024cd78834fb9345e464525d8991c21e908cc046d1cc02c",
"sha256:61b32d06ae8a036a6607805e6720ef00a3c98207038444ba7fd3d169cd998910",
"sha256:6286efab1ed6e74b7028327365cf7346b1d777d63ab30e21a0f4d5b275fc17d5",
"sha256:65d98939f1a2e74b58839f8c4dab3b6b3c1ce84972ae712be02845e65391ac7c",
"sha256:66324e4e1beede9ac79e60f88de548da58b1f8ab4b2f1354d8375774f997e6c0",
"sha256:6c77c9937962577a6a76917845d06af6ab9197702a42e1346d8ae2e76b5e3675",
"sha256:70dec29e8ac485dbf57481baee40781c63e381bebea080991893cd297742b8fd",
"sha256:7250a3fa399f08ec9cb3f7b1b987955d17e044f1ade821b32e5f435130250d7f",
"sha256:748290bf9112b581c525e6e6d3820621ff020ed95af6f17fedef416b27ed564c",
"sha256:7da13da6f985aab7f6f28debab00c67ff9cbacd588e8477034c0652ac141feea",
"sha256:8f959b26f2634a091bb42241c3ed8d3cedb506e7c27b8dd5c7b9f745318ddbb6",
"sha256:9de9e5188a782be6b1ce866e8a51bc76a0fbaa0e16613823fc38e4fc2556ad05",
"sha256:a48900ecea1cbb71b8c71c620dee15b62f85f7c14189bdeee54966fbd9a0c5bd",
"sha256:b87936fd2c317b6ee08a5741ea06b9d11a6074ef4cc42e031bc6403f82a32575",
"sha256:c77da1263aa361938476f04c4b6c8916001b90b2c2fdd92d8d535e1af48fba5a",
"sha256:cb5ec8eead331e3bb4ce8066cf06d2dfef1bfb1b2a73082dfe8a161301b76e37",
"sha256:cc0ee35043162abbf717b7df924597ade8e5395e7b66d18270116f8745ceb795",
"sha256:d14d30e7f46a0476efb0deb5b61343b1526f73ebb5ed84f23dc794bdb88f9d9f",
"sha256:d371e811d6b156d82aa5f9a4e08b58debf97c302a35714f6f45e35139c332e32",
"sha256:d3d20ea5782ba63ed13bc2b8c291a053c8d807a8fa927d941bd718468f7b950c",
"sha256:d3f7594930c423fd9f5d1a76bee85a2c36fd8b4b16921cae7e965f22575e9c01",
"sha256:dcef026f608f678c118779cd6591c8af6e9b4155c44e0d1bc0c87c036fb8c8c4",
"sha256:e0791ac58d91ac58f694d8d2957884df8e4e2f6687cdf367ef7eb7497f79eaa2",
"sha256:e385b637ac3acaae8022e7e47dfa7b83d3620e432e3ecb9a3f7f58f150e50921",
"sha256:e519d64089b0876c7b467274468709dadf11e41d65f63bba207e04217f47c085",
"sha256:e7229e60ac41a1202444497ddde70a48d33909e484f96eb0da9baf8dc68541df",
"sha256:ed3ad863b1b40cd1d4bd21e7498329ccaece75db5a5bf58cd3c9f130843e7102",
"sha256:f0ba29bafd8e7e22920567ce0d232c26d4d47c8b5cf4ed7b562b5db39fa199c5",
"sha256:fa2ba70284fa42c2a5ecb35e322e68823288a4251f9ba9cc77be04ae15eada68",
"sha256:fba85b6cd9c39be262fcd23865652920832b61583de2a2ca907dbd8e8a8c81e5"
], ],
"version": "==5.1.1" "version": "==6.1"
}, },
"urwid": { "urwid": {
"hashes": [ "hashes": [
"sha256:644d3e3900867161a2fc9287a9762753d66bd194754679adb26aede559bcccbc" "sha256:588bee9c1cb208d0906a9f73c613d2bd32c3ed3702012f51efe318a3f2127eae"
], ],
"version": "==2.0.1" "version": "==2.1.2"
},
"werkzeug": {
"hashes": [
"sha256:2de2a5db0baeae7b2d2664949077c2ac63fbd16d98da0ff71837f7d1dea3fd43",
"sha256:6c80b1e5ad3665290ea39320b91e1be1e0d5f60652b964a3070216de83d2e47c"
],
"version": "==1.0.1"
}, },
"wsproto": { "wsproto": {
"hashes": [ "hashes": [
"sha256:02f214f6bb43cda62a511e2e8f1d5fa4703ed83d376d18d042bd2bbf2e995824", "sha256:614798c30e5dc2b3f65acc03d2d50842b97621487350ce79a80a711229edfa9d",
"sha256:d2a7f718ab3144ec956a3267d57b5c172f0668827f5803e7d670837b0125b9fa" "sha256:e3d190a11d9307112ba23bbe60055604949b172143969c8f641318476a9b6f1d"
], ],
"version": "==0.11.0" "version": "==0.15.0"
},
"zstandard": {
"hashes": [
"sha256:0646bd506cd1c83b94a5057568cbc7868f656c79ac22d2e19e9d280f64451a0c",
"sha256:0c3ea262cee9c8a624ae22760466a8144c3c2b62da6f2b2671f47d9f74d8315f",
"sha256:22362a1b5bf8693692be1d1609a25159cd67d5ff93200a2978aea815a63739e8",
"sha256:25ec0734f8c2eee8fd140cae3cde0ffc531ab6730be1f48b2b868a409a1a233d",
"sha256:2e66459d260d2332c5044625dc9f50ef883fe4366c15915d4d0deedb3b1dcba6",
"sha256:2f491936999f43301c424aaa9e03461ea218d9bb8574c1672a09260d30a4096e",
"sha256:39339ed8e0351e3a1d9e0792c5a77ac7da2091279dd78f3458d456bdc3cbb25e",
"sha256:3b41598ffc3cb3497bd6019aeeb1a55e272d3106f15d7855339eab92ed7659e8",
"sha256:4286cd5d76c9a2bf7cb9f9065c8f68b12221ddbcfba754577692442dce563995",
"sha256:45a3b64812152bf188044a1170bcaaeaee2175ec5340ea6a6810bf94b088886e",
"sha256:45e96e1b3bcf8f1060fad174938bfc9825f5d864ddc717b3dda1d876ab59eaaf",
"sha256:50f7692f32ebd86b87133f25211850f5025e730f75b364dfaab30e817a7780a1",
"sha256:6525190e90d49e07c88f88ee7cf02e1af76f9bf32a693e8dd6b8a5fe01b65079",
"sha256:68840f8117d087ecb82c2dfb7f32de237261220a569ea93a8bc0afeffb03ab58",
"sha256:68d15b407ac1f18e03fb89c93ade275cca766cb7eff03b26b40fdf9dba100679",
"sha256:754bcb077e2f946868e77670fb59907ac291542a14c836f89716376cd099107c",
"sha256:83f81d7c2e45e65654ea881683e7e597e813a862ba8e0596945de46657fbc285",
"sha256:85f59177e6a3cab285471a0e7ce048d07f6d39080b9766f8eaaf274f979f0afc",
"sha256:86494400d3923917124bd5f50b8e096de1dd7cfd890b164253bcd2283ef19539",
"sha256:8cb4cd3bb2e7213dd09432f8182d9acc8997bcd34fa3be44dffbb3f82d8d6dfd",
"sha256:9052398da52e8702cf9929999c8986b0f68b18c793e309cd8dff5cb7863d7652",
"sha256:9052870eeebbf4787fc9fc20703d16b6c32b4fffa1446045d05c64a8cb34f614",
"sha256:9119a52758dce523e82318433d41bc8053051af6d7dadd2ff3ada24d1cbf28cf",
"sha256:9572d3047579220f950e7fd6af647cc95e361dc671d10ad63215e07f147eec31",
"sha256:9d7d49b2d46233280c0a0d27046ab9321ceae329c4cbe8cffddfebb53dff3da2",
"sha256:a012f237fa5b00708f00e362035c032d1af5536796f9b410e76e61722176f607",
"sha256:a1ea3108dde195f9fb18fe99ee1674f85a99056793d2ea72fb3965eb48a0bd8f",
"sha256:a79db6a7db4ff91e7c5238d020d85aee1f4849ea357236899f9ed1773c5b66b4",
"sha256:a927f60735fcb5c19586c846c5f28da5edf8549142e4dd62ddf4b9579800a23c",
"sha256:ae4cfd9e023702609c59f5535d95d7b19d54d42902514fe4ece8792b65b3a0af",
"sha256:b021d3321107cdeba427a514d4faa35429525192e902e5b6608f346ef5ba5c8a",
"sha256:b3ac3401ae1945f3dab138819f58830fd658410aa2a53583c0a9af3e8809117d",
"sha256:b637e58757a9153ad562b530b82140dad5e505ae14d806b264a0802f343bd5dd",
"sha256:b711ee17b8676f367282ee654b8de750e2dfa2262e2eb07b7178b1524a273d44",
"sha256:b7e51d0d48153ece2db2c4e6bb2a71e781879027201dc7b718b3f27130547410",
"sha256:b8a1986ba41f6cf61f1234779ed492d026f87ab327cc6bf9e82d2e7a3f0b5b9c",
"sha256:c9da20d5e16f246861158b15cc908797ee6ceb5a799c8a3b97fe6c665627f0e5",
"sha256:dd156961934f7869aecfdf68da6f3f0fa48ad01923d64e9662038dff83f314d4",
"sha256:e149711b256fa8facbbce09b503a744c10fc03325742a9399c69c8569f0e9fe8",
"sha256:ece7f7ec03997357d61c44c50e6543123c0b7c2bdedc972b165d6832bf8868ad",
"sha256:ef36cb399ebc0941f68a4d3a675b13ad75a6037270ec3915ee337227b8bfec90",
"sha256:f1bfdbb37ada30bf6a08671a530e46ab24426bfad61efd28e5dc2beeb4f5b78d",
"sha256:f1c25e52e963dbe23a3ebc79ab904705eddcc15e14093fcde5059251090f01a6",
"sha256:f532d4c65c6ed6202b2c8bfc166648ec2c2ec2dc1d0fb06de643e87ce0a222c8",
"sha256:f559281d181c30ba14f0446a9e1a1ea6c4980792d7249bacbc575fcbcebde4b3",
"sha256:f5eccca127169257d8356069d298701fc612b05f6b768aa9ffc6e652c5169bd6",
"sha256:fa660370fe5b5e4f3c3952732aea358540e56e91c9233d55a6b6e508e047b315",
"sha256:fff79a30845c2591718cb8798196d117402b2d5d7506b5f3bb691972731c30b3"
],
"version": "==0.14.0"
} }
},
"develop": {}


@ -1 +1 @@
- test: upgrade_basic_after upgrade_type_after upgrade_ref2ref_after upgrade_distributed_function_after upgrade_rebalance_strategy_after
+ test: upgrade_basic_after upgrade_type_after upgrade_ref2ref_after upgrade_distributed_function_after upgrade_rebalance_strategy_after upgrade_list_citus_objects


@ -161,8 +161,26 @@ s/Citus.*currently supports/Citus currently supports/g
s/prepared transaction with identifier .* does not exist/prepared transaction with identifier "citus_x_yyyyyy_zzz_w" does not exist/g
s/failed to roll back prepared transaction '.*'/failed to roll back prepared transaction 'citus_x_yyyyyy_zzz_w'/g
# Table aliases for partitioned tables in explain outputs might change.
# Regardless of whether postgres appended an _int suffix to the alias, we always append an _xxx suffix.
# Can be removed when we remove support for pg11 and pg12.
# "-> <scanMethod> Scan on <tableName>_<partitionId>_<shardId> <tableName>_<aliasId>" and
# "-> <scanMethod> Scan on <tableName>_<partitionId>_<shardId> <tableName>" becomes
# "-> <scanMethod> Scan on <tableName>_<partitionId>_<shardId> <tableName>_xxx"
s/(->.*Scan on\ +)(.*)(_[0-9]+)(_[0-9]+) \2(_[0-9]+|_xxx)?/\1\2\3\4 \2_xxx/g
# Table aliases for partitioned tables in "Hash Cond:" lines of explain outputs might change
# This is only for multi_partitioning.sql test file
# regardless of postgres appended an _int suffix to alias, we always append _xxx suffix
# Can be removed when we remove support for pg11 and pg12.
s/(partitioning_hash_join_test)(_[0-9]|_xxx)?(\.[a-zA-Z]+)/\1_xxx\3/g
s/(partitioning_hash_test)(_[0-9]|_xxx)?(\.[a-zA-Z]+)/\1_xxx\3/g
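# For example (hypothetical aliases), the two rules above rewrite
#   Hash Cond: (partitioning_hash_join_test_4.id = partitioning_hash_test_4.id)
# into
#   Hash Cond: (partitioning_hash_join_test_xxx.id = partitioning_hash_test_xxx.id)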
# Errors with binary decoding where OIDs should be normalized
s/wrong data type: [0-9]+, expected [0-9]+/wrong data type: XXXX, expected XXXX/g
# Errors where a relation OID does not exist
s/relation with OID [0-9]+ does not exist/relation with OID XXXX does not exist/g
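# For example (hypothetical OIDs), the two rules above rewrite
#   "wrong data type: 1259, expected 1265" into "wrong data type: XXXX, expected XXXX"
# and
#   "relation with OID 16384 does not exist" into "relation with OID XXXX does not exist"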
# ignore event triggers, mainly due to the event trigger for columnar
/^DEBUG: EventTriggerInvoke [0-9]+$/d


@ -0,0 +1,2 @@
test: am_write_concurrency
test: am_vacuum_vs_insert


@ -0,0 +1,21 @@
test: am_create
test: am_load
test: am_query
test: am_analyze
test: am_data_types
test: am_functions
test: am_drop
test: am_insert
test: am_copyto
test: am_alter
test: am_rollback
test: am_truncate
test: am_vacuum
test: am_clean
test: am_block_filtering
test: am_join
test: am_trigger
test: am_tableoptions
test: am_recursive
test: am_transactions
test: am_matview


@ -0,0 +1 @@
# just an empty file now, please remove when we have a test


@ -0,0 +1,14 @@
test: fdw_create
test: fdw_load
test: fdw_query
test: fdw_analyze
test: fdw_data_types
test: fdw_functions
test: fdw_block_filtering
test: fdw_drop
test: fdw_insert
test: fdw_copyto
test: fdw_alter
test: fdw_rollback
test: fdw_truncate
test: fdw_clean


@ -0,0 +1,3 @@
"{1,2,3}","{1,2,3}","{a,b,c}"
{},{},{}
"{-2147483648,2147483647}","{-9223372036854775808,9223372036854775807}","{""""}"

File diff suppressed because it is too large


@ -0,0 +1,5 @@
a,1990-01-10,2090,97.1,XA ,{a}
b,1990-11-01,2203,98.1,XA ,"{a,b}"
c,1988-11-01,2907,99.4,XB ,"{w,y}"
d,1985-05-05,2314,98.3,XB ,{}
e,1995-05-05,2236,98.2,XC ,{a}


@ -0,0 +1,3 @@
f,1983-04-02,3090,99.6,XD ,"{a,b,c,y}"
g,1991-12-13,1803,85.1,XD ,"{a,c}"
h,1987-10-26,2112,95.4,XD ,"{w,a}"


@ -0,0 +1,2 @@
2000-01-02 04:05:06,1999-01-08 14:05:06+02,2000-01-02,04:05:06,04:00:00
1970-01-01 00:00:00,infinity,-infinity,00:00:00,00:00:00


@ -0,0 +1,2 @@
a,"(2,b)"
b,"(3,c)"


@ -0,0 +1,2 @@
,{NULL},"(,)"
,,


@ -0,0 +1,2 @@
f,\xdeadbeef,$1.00,192.168.1.2,10101,a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11,"{""key"": ""value""}"
t,\xcdb0,$1.50,127.0.0.1,"",a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11,[]


@ -0,0 +1,2 @@
"[1,3)","[1,3)","[1,3)","[""2000-01-02 00:30:00"",""2010-02-03 12:30:00"")"
empty,"[1,)","(,)",empty


@ -21,3 +21,13 @@
/multi_behavioral_analytics_create_table.out
/multi_insert_select_behavioral_analytics_create_table.out
/hyperscale_tutorial.out
/am_block_filtering.out
/am_copyto.out
/am_create.out
/am_data_types.out
/am_load.out
/fdw_block_filtering.out
/fdw_copyto.out
/fdw_create.out
/fdw_data_types.out
/fdw_load.out


@ -0,0 +1,173 @@
--
-- Testing ALTER TABLE on cstore_tableam tables.
--
CREATE TABLE test_alter_table (a int, b int, c int) USING cstore_tableam;
WITH sample_data AS (VALUES
(1, 2, 3),
(4, 5, 6),
(7, 8, 9)
)
INSERT INTO test_alter_table SELECT * FROM sample_data;
-- drop a column
ALTER TABLE test_alter_table DROP COLUMN a;
-- test analyze
ANALYZE test_alter_table;
-- verify select queries run as expected
SELECT * FROM test_alter_table;
b | c
---------------------------------------------------------------------
2 | 3
5 | 6
8 | 9
(3 rows)
SELECT a FROM test_alter_table;
ERROR: column "a" does not exist
SELECT b FROM test_alter_table;
b
---------------------------------------------------------------------
2
5
8
(3 rows)
-- verify insert runs as expected
INSERT INTO test_alter_table (SELECT 3, 5, 8);
ERROR: INSERT has more expressions than target columns
INSERT INTO test_alter_table (SELECT 5, 8);
-- add a column with no defaults
ALTER TABLE test_alter_table ADD COLUMN d int;
SELECT * FROM test_alter_table;
b | c | d
---------------------------------------------------------------------
2 | 3 |
5 | 6 |
8 | 9 |
5 | 8 |
(4 rows)
INSERT INTO test_alter_table (SELECT 3, 5, 8);
SELECT * FROM test_alter_table;
b | c | d
---------------------------------------------------------------------
2 | 3 |
5 | 6 |
8 | 9 |
5 | 8 |
3 | 5 | 8
(5 rows)
-- add a fixed-length column with default value
ALTER TABLE test_alter_table ADD COLUMN e int default 3;
SELECT * from test_alter_table;
b | c | d | e
---------------------------------------------------------------------
2 | 3 | | 3
5 | 6 | | 3
8 | 9 | | 3
5 | 8 | | 3
3 | 5 | 8 | 3
(5 rows)
INSERT INTO test_alter_table (SELECT 1, 2, 4, 8);
SELECT * from test_alter_table;
b | c | d | e
---------------------------------------------------------------------
2 | 3 | | 3
5 | 6 | | 3
8 | 9 | | 3
5 | 8 | | 3
3 | 5 | 8 | 3
1 | 2 | 4 | 8
(6 rows)
-- add a variable-length column with default value
ALTER TABLE test_alter_table ADD COLUMN f text DEFAULT 'TEXT ME';
SELECT * from test_alter_table;
b | c | d | e | f
---------------------------------------------------------------------
2 | 3 | | 3 | TEXT ME
5 | 6 | | 3 | TEXT ME
8 | 9 | | 3 | TEXT ME
5 | 8 | | 3 | TEXT ME
3 | 5 | 8 | 3 | TEXT ME
1 | 2 | 4 | 8 | TEXT ME
(6 rows)
INSERT INTO test_alter_table (SELECT 1, 2, 4, 8, 'ABCDEF');
SELECT * from test_alter_table;
b | c | d | e | f
---------------------------------------------------------------------
2 | 3 | | 3 | TEXT ME
5 | 6 | | 3 | TEXT ME
8 | 9 | | 3 | TEXT ME
5 | 8 | | 3 | TEXT ME
3 | 5 | 8 | 3 | TEXT ME
1 | 2 | 4 | 8 | TEXT ME
1 | 2 | 4 | 8 | ABCDEF
(7 rows)
-- drop couple of columns
ALTER TABLE test_alter_table DROP COLUMN c;
ALTER TABLE test_alter_table DROP COLUMN e;
ANALYZE test_alter_table;
SELECT * from test_alter_table;
b | d | f
---------------------------------------------------------------------
2 | | TEXT ME
5 | | TEXT ME
8 | | TEXT ME
5 | | TEXT ME
3 | 8 | TEXT ME
1 | 4 | TEXT ME
1 | 4 | ABCDEF
(7 rows)
SELECT count(*) from test_alter_table;
count
---------------------------------------------------------------------
7
(1 row)
SELECT count(t.*) from test_alter_table t;
count
---------------------------------------------------------------------
7
(1 row)
-- unsupported default values
ALTER TABLE test_alter_table ADD COLUMN g boolean DEFAULT isfinite(current_date);
ALTER TABLE test_alter_table ADD COLUMN h DATE DEFAULT current_date;
SELECT * FROM test_alter_table;
ERROR: unsupported default value for column "g"
HINT: Expression is either mutable or does not evaluate to constant value
ALTER TABLE test_alter_table ALTER COLUMN g DROP DEFAULT;
SELECT * FROM test_alter_table;
ERROR: unsupported default value for column "h"
HINT: Expression is either mutable or does not evaluate to constant value
ALTER TABLE test_alter_table ALTER COLUMN h DROP DEFAULT;
ANALYZE test_alter_table;
SELECT * FROM test_alter_table;
b | d | f | g | h
---------------------------------------------------------------------
2 | | TEXT ME | |
5 | | TEXT ME | |
8 | | TEXT ME | |
5 | | TEXT ME | |
3 | 8 | TEXT ME | |
1 | 4 | TEXT ME | |
1 | 4 | ABCDEF | |
(7 rows)
-- unsupported type change
ALTER TABLE test_alter_table ADD COLUMN i int;
ALTER TABLE test_alter_table ADD COLUMN j float;
ALTER TABLE test_alter_table ADD COLUMN k text;
-- this is valid type change
ALTER TABLE test_alter_table ALTER COLUMN i TYPE float;
-- this is not valid
ALTER TABLE test_alter_table ALTER COLUMN j TYPE int;
-- text / varchar conversion is valid both ways
ALTER TABLE test_alter_table ALTER COLUMN k TYPE varchar(20);
ALTER TABLE test_alter_table ALTER COLUMN k TYPE text;
DROP TABLE test_alter_table;


@ -0,0 +1,19 @@
--
-- Test the ANALYZE command for cstore_tableam tables.
--
-- ANALYZE uncompressed table
ANALYZE contestant;
SELECT count(*) FROM pg_stats WHERE tablename='contestant';
count
---------------------------------------------------------------------
6
(1 row)
-- ANALYZE compressed table
ANALYZE contestant_compressed;
SELECT count(*) FROM pg_stats WHERE tablename='contestant_compressed';
count
---------------------------------------------------------------------
6
(1 row)


@ -0,0 +1,8 @@
DROP TABLE test_null_values;
DROP TABLE test_other_types;
DROP TABLE test_range_types;
DROP TABLE test_enum_and_composite_types;
DROP TYPE composite_type;
DROP TYPE enum_type;
DROP TABLE test_datetime_types;
DROP TABLE test_array_types;


@ -0,0 +1,54 @@
--
-- Tests the different DROP commands for cstore_tableam tables.
--
-- DROP TABLE
-- DROP SCHEMA
-- DROP EXTENSION
-- DROP DATABASE
--
-- Note that Travis does not create the cstore_fdw extension in the
-- default database (postgres). This has caused different behavior
-- between Travis tests and local tests, so the 'postgres' directory
-- is excluded from the comparison to keep the results identical.
-- store postgres database oid
SELECT oid postgres_oid FROM pg_database WHERE datname = 'postgres' \gset
SELECT count(*) AS cstore_data_files_before_drop FROM cstore.cstore_data_files \gset
-- DROP cstore_fdw tables
DROP TABLE contestant;
DROP TABLE contestant_compressed;
-- make sure DROP deletes metadata
SELECT :cstore_data_files_before_drop - count(*) FROM cstore.cstore_data_files;
?column?
---------------------------------------------------------------------
2
(1 row)
-- Create a cstore_fdw table under a schema and drop it.
CREATE SCHEMA test_schema;
CREATE TABLE test_schema.test_table(data int) USING cstore_tableam;
SELECT count(*) AS cstore_data_files_before_drop FROM cstore.cstore_data_files \gset
DROP SCHEMA test_schema CASCADE;
NOTICE: drop cascades to table test_schema.test_table
SELECT :cstore_data_files_before_drop - count(*) FROM cstore.cstore_data_files;
?column?
---------------------------------------------------------------------
1
(1 row)
SELECT current_database() datname \gset
CREATE DATABASE db_to_drop;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
\c db_to_drop
CREATE EXTENSION citus;
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database() \gset
CREATE TABLE test_table(data int) USING cstore_tableam;
DROP EXTENSION citus CASCADE;
NOTICE: drop cascades to table test_table
-- test database drop
CREATE EXTENSION citus;
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database() \gset
CREATE TABLE test_table(data int) USING cstore_tableam;
\c :datname
DROP DATABASE db_to_drop;


@ -0,0 +1,18 @@
--
-- Test utility functions for cstore_tableam tables.
--
CREATE TABLE empty_table (a int) USING cstore_tableam;
CREATE TABLE table_with_data (a int) USING cstore_tableam;
CREATE TABLE non_cstore_table (a int);
COPY table_with_data FROM STDIN;
SELECT pg_relation_size('empty_table') < pg_relation_size('table_with_data');
?column?
---------------------------------------------------------------------
t
(1 row)
SELECT cstore_table_size('non_cstore_table');
ERROR: relation is not a cstore table
DROP TABLE empty_table;
DROP TABLE table_with_data;
DROP TABLE non_cstore_table;


@ -0,0 +1,86 @@
--
-- Testing insert on cstore_tableam tables.
--
CREATE TABLE test_insert_command (a int) USING cstore_tableam;
-- test single row inserts work
select count(*) from test_insert_command;
count
---------------------------------------------------------------------
0
(1 row)
insert into test_insert_command values(1);
select count(*) from test_insert_command;
count
---------------------------------------------------------------------
1
(1 row)
insert into test_insert_command default values;
select count(*) from test_insert_command;
count
---------------------------------------------------------------------
2
(1 row)
-- test inserting from another table succeeds
CREATE TABLE test_insert_command_data (a int);
select count(*) from test_insert_command_data;
count
---------------------------------------------------------------------
0
(1 row)
insert into test_insert_command_data values(1);
select count(*) from test_insert_command_data;
count
---------------------------------------------------------------------
1
(1 row)
insert into test_insert_command select * from test_insert_command_data;
select count(*) from test_insert_command;
count
---------------------------------------------------------------------
3
(1 row)
drop table test_insert_command_data;
drop table test_insert_command;
-- test long attribute value insertion
-- create sufficiently long text so that the data is stored in TOAST
CREATE TABLE test_long_text AS
SELECT a as int_val, string_agg(random()::text, '') as text_val
FROM generate_series(1, 10) a, generate_series(1, 1000) b
GROUP BY a ORDER BY a;
-- store hash values of text for later comparison
CREATE TABLE test_long_text_hash AS
SELECT int_val, md5(text_val) AS hash
FROM test_long_text;
CREATE TABLE test_cstore_long_text(int_val int, text_val text)
USING cstore_tableam;
-- store long text in cstore table
INSERT INTO test_cstore_long_text SELECT * FROM test_long_text;
-- drop source table to remove original text from toast
DROP TABLE test_long_text;
-- check if text data is still available in cstore table
-- by comparing previously stored hash.
SELECT a.int_val
FROM test_long_text_hash a, test_cstore_long_text c
WHERE a.int_val = c.int_val AND a.hash = md5(c.text_val);
int_val
---------------------------------------------------------------------
1
2
3
4
5
6
7
8
9
10
(10 rows)
DROP TABLE test_long_text_hash;
DROP TABLE test_cstore_long_text;


@ -0,0 +1,37 @@
CREATE SCHEMA am_cstore_join;
SET search_path TO am_cstore_join;
CREATE TABLE users (id int, name text) USING cstore_tableam;
INSERT INTO users SELECT a, 'name' || a FROM generate_series(0,30-1) AS a;
CREATE TABLE things (id int, user_id int, name text) USING cstore_tableam;
INSERT INTO things SELECT a, a % 30, 'thing' || a FROM generate_series(1,300) AS a;
-- force the nested loop to rescan the table
SET enable_material TO off;
SET enable_hashjoin TO off;
SET enable_mergejoin TO off;
SELECT count(*)
FROM users
JOIN things ON (users.id = things.user_id)
WHERE things.id > 290;
count
---------------------------------------------------------------------
10
(1 row)
-- verify the join uses a nested loop to trigger the rescan behaviour
EXPLAIN (COSTS OFF)
SELECT count(*)
FROM users
JOIN things ON (users.id = things.user_id)
WHERE things.id > 299990;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Nested Loop
Join Filter: (users.id = things.user_id)
-> Custom Scan (CStoreScan) on things
Filter: (id > 299990)
-> Custom Scan (CStoreScan) on users
(6 rows)
SET client_min_messages TO warning;
DROP SCHEMA am_cstore_join CASCADE;


@ -0,0 +1,77 @@
--
-- Testing we handle materialized views properly
--
CREATE TABLE t(a int, b int) USING cstore_tableam;
INSERT INTO t SELECT floor(i / 4), 2 * i FROM generate_series(1, 10) i;
CREATE MATERIALIZED VIEW t_view(a, bsum, cnt) USING cstore_tableam AS
SELECT a, sum(b), count(*) FROM t GROUP BY a;
SELECT * FROM t_view a ORDER BY a;
a | bsum | cnt
---------------------------------------------------------------------
0 | 12 | 3
1 | 44 | 4
2 | 54 | 3
(3 rows)
INSERT INTO t SELECT floor(i / 4), 2 * i FROM generate_series(11, 20) i;
SELECT * FROM t_view a ORDER BY a;
a | bsum | cnt
---------------------------------------------------------------------
0 | 12 | 3
1 | 44 | 4
2 | 54 | 3
(3 rows)
REFRESH MATERIALIZED VIEW t_view;
SELECT * FROM t_view a ORDER BY a;
a | bsum | cnt
---------------------------------------------------------------------
0 | 12 | 3
1 | 44 | 4
2 | 76 | 4
3 | 108 | 4
4 | 140 | 4
5 | 40 | 1
(6 rows)
-- verify that we have created metadata entries for the materialized view
SELECT relfilenode FROM pg_class WHERE relname='t_view' \gset
SELECT count(*) FROM cstore.cstore_data_files WHERE relfilenode=:relfilenode;
count
---------------------------------------------------------------------
1
(1 row)
SELECT count(*) FROM cstore.cstore_stripes WHERE relfilenode=:relfilenode;
count
---------------------------------------------------------------------
1
(1 row)
SELECT count(*) FROM cstore.cstore_skipnodes WHERE relfilenode=:relfilenode;
count
---------------------------------------------------------------------
3
(1 row)
DROP TABLE t CASCADE;
NOTICE: drop cascades to materialized view t_view
-- dropping must remove metadata
SELECT count(*) FROM cstore.cstore_data_files WHERE relfilenode=:relfilenode;
count
---------------------------------------------------------------------
0
(1 row)
SELECT count(*) FROM cstore.cstore_stripes WHERE relfilenode=:relfilenode;
count
---------------------------------------------------------------------
0
(1 row)
SELECT count(*) FROM cstore.cstore_skipnodes WHERE relfilenode=:relfilenode;
count
---------------------------------------------------------------------
0
(1 row)


@ -0,0 +1,105 @@
--
-- Test querying cstore_tableam tables.
--
-- Settings to make the result deterministic
SET datestyle = "ISO, YMD";
-- Query uncompressed data
SELECT count(*) FROM contestant;
count
---------------------------------------------------------------------
8
(1 row)
SELECT avg(rating), stddev_samp(rating) FROM contestant;
avg | stddev_samp
---------------------------------------------------------------------
2344.3750000000000000 | 433.746119785032
(1 row)
SELECT country, avg(rating) FROM contestant WHERE rating > 2200
GROUP BY country ORDER BY country;
country | avg
---------------------------------------------------------------------
XA | 2203.0000000000000000
XB | 2610.5000000000000000
XC | 2236.0000000000000000
XD | 3090.0000000000000000
(4 rows)
SELECT * FROM contestant ORDER BY handle;
handle | birthdate | rating | percentile | country | achievements
---------------------------------------------------------------------
a | 1990-01-10 | 2090 | 97.1 | XA | {a}
b | 1990-11-01 | 2203 | 98.1 | XA | {a,b}
c | 1988-11-01 | 2907 | 99.4 | XB | {w,y}
d | 1985-05-05 | 2314 | 98.3 | XB | {}
e | 1995-05-05 | 2236 | 98.2 | XC | {a}
f | 1983-04-02 | 3090 | 99.6 | XD | {a,b,c,y}
g | 1991-12-13 | 1803 | 85.1 | XD | {a,c}
h | 1987-10-26 | 2112 | 95.4 | XD | {w,a}
(8 rows)
-- Query compressed data
SELECT count(*) FROM contestant_compressed;
count
---------------------------------------------------------------------
8
(1 row)
SELECT avg(rating), stddev_samp(rating) FROM contestant_compressed;
avg | stddev_samp
---------------------------------------------------------------------
2344.3750000000000000 | 433.746119785032
(1 row)
SELECT country, avg(rating) FROM contestant_compressed WHERE rating > 2200
GROUP BY country ORDER BY country;
country | avg
---------------------------------------------------------------------
XA | 2203.0000000000000000
XB | 2610.5000000000000000
XC | 2236.0000000000000000
XD | 3090.0000000000000000
(4 rows)
SELECT * FROM contestant_compressed ORDER BY handle;
handle | birthdate | rating | percentile | country | achievements
---------------------------------------------------------------------
a | 1990-01-10 | 2090 | 97.1 | XA | {a}
b | 1990-11-01 | 2203 | 98.1 | XA | {a,b}
c | 1988-11-01 | 2907 | 99.4 | XB | {w,y}
d | 1985-05-05 | 2314 | 98.3 | XB | {}
e | 1995-05-05 | 2236 | 98.2 | XC | {a}
f | 1983-04-02 | 3090 | 99.6 | XD | {a,b,c,y}
g | 1991-12-13 | 1803 | 85.1 | XD | {a,c}
h | 1987-10-26 | 2112 | 95.4 | XD | {w,a}
(8 rows)
-- Verify that we handle whole-row references correctly
SELECT to_json(v) FROM contestant v ORDER BY rating LIMIT 1;
to_json
---------------------------------------------------------------------
{"handle":"g","birthdate":"1991-12-13","rating":1803,"percentile":85.1,"country":"XD ","achievements":["a","c"]}
(1 row)
-- Test variables used in expressions
CREATE TABLE union_first (a int, b int) USING cstore_tableam;
CREATE TABLE union_second (a int, b int) USING cstore_tableam;
INSERT INTO union_first SELECT a, a FROM generate_series(1, 5) a;
INSERT INTO union_second SELECT a, a FROM generate_series(11, 15) a;
(SELECT a*1, b FROM union_first) union all (SELECT a*1, b FROM union_second);
?column? | b
---------------------------------------------------------------------
1 | 1
2 | 2
3 | 3
4 | 4
5 | 5
11 | 11
12 | 12
13 | 13
14 | 14
15 | 15
(10 rows)
DROP TABLE union_first, union_second;


@ -0,0 +1,252 @@
CREATE TABLE t1(a int, b int) USING cstore_tableam;
CREATE TABLE t2(a int, b int) USING cstore_tableam;
CREATE FUNCTION f(x INT) RETURNS INT AS $$
INSERT INTO t1 VALUES(x, x * 2) RETURNING b - 1;
$$ LANGUAGE SQL;
--
-- The following query starts a write to t1 before an earlier write
-- to t1 has finished, so it tests that we handle recursive writes
-- correctly.
--
INSERT INTO t2 SELECT i, f(i) FROM generate_series(1, 5) i;
-- there are no subtransactions, so the above statement should batch
-- INSERTs inside the UDF and create one stripe per table.
SELECT relname, count(*) FROM cstore.cstore_stripes a, pg_class b
WHERE a.relfilenode=b.relfilenode AND relname IN ('t1', 't2')
GROUP BY relname
ORDER BY relname;
relname | count
---------------------------------------------------------------------
t1 | 1
t2 | 1
(2 rows)
SELECT * FROM t1 ORDER BY a;
a | b
---------------------------------------------------------------------
1 | 2
2 | 4
3 | 6
4 | 8
5 | 10
(5 rows)
SELECT * FROM t2 ORDER BY a;
a | b
---------------------------------------------------------------------
1 | 1
2 | 3
3 | 5
4 | 7
5 | 9
(5 rows)
TRUNCATE t1;
TRUNCATE t2;
DROP FUNCTION f(INT);
--
-- Test the case when 2 writes are going on concurrently in the
-- same executor, and those 2 writes are dependent.
--
WITH t AS (
INSERT INTO t1 SELECT i, 2*i FROM generate_series(1, 5) i RETURNING *
)
INSERT INTO t2 SELECT t.a, t.a+1 FROM t;
SELECT * FROM t1;
a | b
---------------------------------------------------------------------
1 | 2
2 | 4
3 | 6
4 | 8
5 | 10
(5 rows)
SELECT * FROM t2;
a | b
---------------------------------------------------------------------
1 | 2
2 | 3
3 | 4
4 | 5
5 | 6
(5 rows)
TRUNCATE t1;
TRUNCATE t2;
--
-- Test the case when there are 2 independent inserts in a CTE.
-- Also tests the case where some of the tuple_inserts happen in
-- ExecutorFinish() instead of ExecutorRun().
--
WITH t AS (
INSERT INTO t1 SELECT i, 2*i FROM generate_series(1, 5) i RETURNING *
)
INSERT INTO t2 SELECT i, (select count(*) from t1) FROM generate_series(1, 3) i;
SELECT * FROM t1;
a | b
---------------------------------------------------------------------
1 | 2
2 | 4
3 | 6
4 | 8
5 | 10
(5 rows)
SELECT * FROM t2;
a | b
---------------------------------------------------------------------
1 | 0
2 | 0
3 | 0
(3 rows)
TRUNCATE t1;
TRUNCATE t2;
--
-- double insert on the same relation
--
WITH t AS (
INSERT INTO t1 SELECT i, 2*i FROM generate_series(1, 5) i RETURNING *
)
INSERT INTO t1 SELECT t.a, t.a+1 FROM t;
SELECT * FROM t1 ORDER BY a, b;
a | b
---------------------------------------------------------------------
1 | 2
1 | 2
2 | 3
2 | 4
3 | 4
3 | 6
4 | 5
4 | 8
5 | 6
5 | 10
(10 rows)
TRUNCATE t1;
TRUNCATE t2;
--
-- A test where result of a UDF call will depend on execution
-- of previous UDF calls.
--
CREATE FUNCTION g(x INT) RETURNS INT AS $$
INSERT INTO t1 VALUES(x, x * 2);
SELECT count(*)::int FROM t1;
$$ LANGUAGE SQL;
-- t3 and t4 are heap tables to help with cross-checking results
CREATE TABLE t3(a int, b int);
CREATE TABLE t4(a int, b int);
CREATE FUNCTION g2(x INT) RETURNS INT AS $$
INSERT INTO t3 VALUES(x, x * 2);
SELECT count(*)::int FROM t3;
$$ LANGUAGE SQL;
INSERT INTO t2 SELECT i, g(i) FROM generate_series(1, 5) i;
INSERT INTO t4 SELECT i, g2(i) FROM generate_series(1, 5) i;
-- check that t1==t3 and t2==t4.
((table t1) except (table t3)) union ((table t3) except (table t1));
a | b
---------------------------------------------------------------------
(0 rows)
((table t2) except (table t4)) union ((table t4) except (table t2));
a | b
---------------------------------------------------------------------
(0 rows)
SELECT * FROM t2 ORDER BY a, b;
a | b
---------------------------------------------------------------------
1 | 1
2 | 2
3 | 3
4 | 4
5 | 5
(5 rows)
TRUNCATE t1, t2, t3, t4;
--
-- INSERT into the same relation that was INSERTed into in the UDF
--
INSERT INTO t1 SELECT i, g(i) FROM generate_series(1, 3) i;
INSERT INTO t3 SELECT i, g2(i) FROM generate_series(1, 3) i;
SELECT * FROM t1 ORDER BY a, b;
a | b
---------------------------------------------------------------------
1 | 1
1 | 2
2 | 3
2 | 4
3 | 5
3 | 6
(6 rows)
SELECT * FROM t3 ORDER BY a, b;
a | b
---------------------------------------------------------------------
1 | 1
1 | 2
2 | 3
2 | 4
3 | 5
3 | 6
(6 rows)
-- check that t1==t3 and t2==t4.
((table t1) except (table t3)) union ((table t3) except (table t1));
a | b
---------------------------------------------------------------------
(0 rows)
((table t2) except (table t4)) union ((table t4) except (table t2));
a | b
---------------------------------------------------------------------
(0 rows)
DROP FUNCTION g(int), g2(int);
TRUNCATE t1, t2, t3, t4;
--
-- EXCEPTION in plpgsql, which is implemented internally using
-- subtransactions. plpgsql uses SPI to execute INSERT statements.
--
CREATE FUNCTION f(a int) RETURNS VOID AS $$
DECLARE
x int;
BEGIN
INSERT INTO t1 SELECT i, i + 1 FROM generate_series(a, a + 1) i;
x := 10 / a;
INSERT INTO t1 SELECT i, i * 2 FROM generate_series(a + 2, a + 3) i;
EXCEPTION WHEN division_by_zero THEN
INSERT INTO t1 SELECT i, i + 1 FROM generate_series(a + 2, a + 3) i;
END;
$$ LANGUAGE plpgsql;
SELECT f(10);
f
---------------------------------------------------------------------
(1 row)
SELECT f(0), f(20);
f | f
---------------------------------------------------------------------
|
(1 row)
SELECT * FROM t1 ORDER BY a, b;
a | b
---------------------------------------------------------------------
2 | 3
3 | 4
10 | 11
11 | 12
12 | 24
13 | 26
20 | 21
21 | 22
22 | 44
23 | 46
(10 rows)
DROP FUNCTION f(int);
DROP TABLE t1, t2, t3, t4;


@ -0,0 +1,83 @@
--
-- Testing we handle rollbacks properly
--
CREATE TABLE t(a int, b int) USING cstore_tableam;
BEGIN;
INSERT INTO t SELECT i, i+1 FROM generate_series(1, 10) i;
ROLLBACK;
SELECT count(*) FROM t;
count
---------------------------------------------------------------------
0
(1 row)
-- check that stripe metadata has also been rolled back
SELECT count(*) FROM cstore.cstore_stripes a, pg_class b
WHERE a.relfilenode = b.relfilenode AND b.relname = 't';
count
---------------------------------------------------------------------
0
(1 row)
INSERT INTO t SELECT i, i+1 FROM generate_series(1, 10) i;
SELECT count(*) FROM t;
count
---------------------------------------------------------------------
10
(1 row)
SELECT count(*) FROM cstore.cstore_stripes a, pg_class b
WHERE a.relfilenode = b.relfilenode AND b.relname = 't';
count
---------------------------------------------------------------------
1
(1 row)
-- savepoint rollback
BEGIN;
SAVEPOINT s0;
INSERT INTO t SELECT i, i+1 FROM generate_series(1, 10) i;
SELECT count(*) FROM t; -- force flush
count
---------------------------------------------------------------------
20
(1 row)
SAVEPOINT s1;
INSERT INTO t SELECT i, i+1 FROM generate_series(1, 10) i;
SELECT count(*) FROM t;
count
---------------------------------------------------------------------
30
(1 row)
ROLLBACK TO SAVEPOINT s1;
SELECT count(*) FROM t;
count
---------------------------------------------------------------------
20
(1 row)
ROLLBACK TO SAVEPOINT s0;
SELECT count(*) FROM t;
count
---------------------------------------------------------------------
10
(1 row)
INSERT INTO t SELECT i, i+1 FROM generate_series(1, 10) i;
COMMIT;
SELECT count(*) FROM t;
count
---------------------------------------------------------------------
20
(1 row)
SELECT count(*) FROM cstore.cstore_stripes a, pg_class b
WHERE a.relfilenode = b.relfilenode AND b.relname = 't';
count
---------------------------------------------------------------------
2
(1 row)
DROP TABLE t;


@ -0,0 +1,179 @@
CREATE SCHEMA am_tableoptions;
SET search_path TO am_tableoptions;
CREATE TABLE table_options (a int) USING cstore_tableam;
INSERT INTO table_options SELECT generate_series(1,100);
-- show table_options settings
SELECT * FROM cstore.cstore_options
WHERE regclass = 'table_options'::regclass;
regclass | block_row_count | stripe_row_count | compression
---------------------------------------------------------------------
table_options | 10000 | 150000 | none
(1 row)
-- test changing the compression
SELECT alter_cstore_table_set('table_options', compression => 'pglz');
alter_cstore_table_set
---------------------------------------------------------------------
(1 row)
-- show table_options settings
SELECT * FROM cstore.cstore_options
WHERE regclass = 'table_options'::regclass;
regclass | block_row_count | stripe_row_count | compression
---------------------------------------------------------------------
table_options | 10000 | 150000 | pglz
(1 row)
-- test changing the block_row_count
SELECT alter_cstore_table_set('table_options', block_row_count => 10);
alter_cstore_table_set
---------------------------------------------------------------------
(1 row)
-- show table_options settings
SELECT * FROM cstore.cstore_options
WHERE regclass = 'table_options'::regclass;
regclass | block_row_count | stripe_row_count | compression
---------------------------------------------------------------------
table_options | 10 | 150000 | pglz
(1 row)
-- test changing the block_row_count
SELECT alter_cstore_table_set('table_options', stripe_row_count => 100);
alter_cstore_table_set
---------------------------------------------------------------------
(1 row)
-- show table_options settings
SELECT * FROM cstore.cstore_options
WHERE regclass = 'table_options'::regclass;
regclass | block_row_count | stripe_row_count | compression
---------------------------------------------------------------------
table_options | 10 | 100 | pglz
(1 row)
-- VACUUM FULL creates a new table, make sure it copies settings from the table you are vacuuming
VACUUM FULL table_options;
-- show table_options settings
SELECT * FROM cstore.cstore_options
WHERE regclass = 'table_options'::regclass;
regclass | block_row_count | stripe_row_count | compression
---------------------------------------------------------------------
table_options | 10 | 100 | pglz
(1 row)
-- set all settings at the same time
SELECT alter_cstore_table_set('table_options', stripe_row_count => 1000, block_row_count => 100, compression => 'none');
alter_cstore_table_set
---------------------------------------------------------------------
(1 row)
-- show table_options settings
SELECT * FROM cstore.cstore_options
WHERE regclass = 'table_options'::regclass;
regclass | block_row_count | stripe_row_count | compression
---------------------------------------------------------------------
table_options | 100 | 1000 | none
(1 row)
-- reset settings one by one to the values of the GUCs
SET cstore.block_row_count TO 1000;
SET cstore.stripe_row_count TO 10000;
SET cstore.compression TO 'pglz';
-- verify setting the GUC's didn't change the settings
-- show table_options settings
SELECT * FROM cstore.cstore_options
WHERE regclass = 'table_options'::regclass;
regclass | block_row_count | stripe_row_count | compression
---------------------------------------------------------------------
table_options | 100 | 1000 | none
(1 row)
SELECT alter_cstore_table_reset('table_options', block_row_count => true);
alter_cstore_table_reset
---------------------------------------------------------------------
(1 row)
-- show table_options settings
SELECT * FROM cstore.cstore_options
WHERE regclass = 'table_options'::regclass;
regclass | block_row_count | stripe_row_count | compression
---------------------------------------------------------------------
table_options | 1000 | 1000 | none
(1 row)
SELECT alter_cstore_table_reset('table_options', stripe_row_count => true);
alter_cstore_table_reset
---------------------------------------------------------------------
(1 row)
-- show table_options settings
SELECT * FROM cstore.cstore_options
WHERE regclass = 'table_options'::regclass;
regclass | block_row_count | stripe_row_count | compression
---------------------------------------------------------------------
table_options | 1000 | 10000 | none
(1 row)
SELECT alter_cstore_table_reset('table_options', compression => true);
alter_cstore_table_reset
---------------------------------------------------------------------
(1 row)
-- show table_options settings
SELECT * FROM cstore.cstore_options
WHERE regclass = 'table_options'::regclass;
regclass | block_row_count | stripe_row_count | compression
---------------------------------------------------------------------
table_options | 1000 | 10000 | pglz
(1 row)
-- verify resetting all settings at once works
SET cstore.block_row_count TO 10000;
SET cstore.stripe_row_count TO 100000;
SET cstore.compression TO 'none';
-- show table_options settings
SELECT * FROM cstore.cstore_options
WHERE regclass = 'table_options'::regclass;
regclass | block_row_count | stripe_row_count | compression
---------------------------------------------------------------------
table_options | 1000 | 10000 | pglz
(1 row)
SELECT alter_cstore_table_reset(
'table_options',
block_row_count => true,
stripe_row_count => true,
compression => true);
alter_cstore_table_reset
---------------------------------------------------------------------
(1 row)
-- show table_options settings
SELECT * FROM cstore.cstore_options
WHERE regclass = 'table_options'::regclass;
regclass | block_row_count | stripe_row_count | compression
---------------------------------------------------------------------
table_options | 10000 | 100000 | none
(1 row)
-- verify edge cases
-- first start with a table that is not a cstore table
CREATE TABLE not_a_cstore_table (a int);
SELECT alter_cstore_table_set('not_a_cstore_table', compression => 'pglz');
ERROR: table not_a_cstore_table is not a cstore table
SELECT alter_cstore_table_reset('not_a_cstore_table', compression => true);
ERROR: table not_a_cstore_table is not a cstore table
-- verify you can't use a compression that is not known
SELECT alter_cstore_table_set('table_options', compression => 'foobar');
ERROR: unknown compression type for cstore table: foobar
SET client_min_messages TO warning;
DROP SCHEMA am_tableoptions CASCADE;


@ -0,0 +1,244 @@
--
-- Testing we handle transactions properly
--
CREATE TABLE t(a int, b int) USING cstore_tableam;
INSERT INTO t SELECT i, 2 * i FROM generate_series(1, 3) i;
SELECT * FROM t ORDER BY a;
a | b
---------------------------------------------------------------------
1 | 2
2 | 4
3 | 6
(3 rows)
-- verify that table rewrites work properly
BEGIN;
ALTER TABLE t ALTER COLUMN b TYPE float4 USING (b + 0.5)::float4;
INSERT INTO t VALUES (4, 8.5);
SELECT * FROM t ORDER BY a;
a | b
---------------------------------------------------------------------
1 | 2.5
2 | 4.5
3 | 6.5
4 | 8.5
(4 rows)
ROLLBACK;
SELECT * FROM t ORDER BY a;
a | b
---------------------------------------------------------------------
1 | 2
2 | 4
3 | 6
(3 rows)
-- verify truncate rollback
BEGIN;
TRUNCATE t;
INSERT INTO t VALUES (4, 8);
SELECT * FROM t ORDER BY a;
a | b
---------------------------------------------------------------------
4 | 8
(1 row)
SAVEPOINT s1;
TRUNCATE t;
SELECT * FROM t ORDER BY a;
a | b
---------------------------------------------------------------------
(0 rows)
ROLLBACK TO SAVEPOINT s1;
SELECT * FROM t ORDER BY a;
a | b
---------------------------------------------------------------------
4 | 8
(1 row)
ROLLBACK;
-- verify truncate with unflushed data in upper xacts
BEGIN;
INSERT INTO t VALUES (4, 8);
SAVEPOINT s1;
TRUNCATE t;
ROLLBACK TO SAVEPOINT s1;
COMMIT;
SELECT * FROM t ORDER BY a;
a | b
---------------------------------------------------------------------
1 | 2
2 | 4
3 | 6
4 | 8
(4 rows)
-- verify DROP TABLE rollback
BEGIN;
INSERT INTO t VALUES (5, 10);
SELECT * FROM t ORDER BY a;
a | b
---------------------------------------------------------------------
1 | 2
2 | 4
3 | 6
4 | 8
5 | 10
(5 rows)
SAVEPOINT s1;
DROP TABLE t;
SELECT * FROM t ORDER BY a;
ERROR: relation "t" does not exist
ROLLBACK TO SAVEPOINT s1;
SELECT * FROM t ORDER BY a;
a | b
---------------------------------------------------------------------
1 | 2
2 | 4
3 | 6
4 | 8
5 | 10
(5 rows)
ROLLBACK;
-- verify DROP TABLE with unflushed data in upper xacts
BEGIN;
INSERT INTO t VALUES (5, 10);
SAVEPOINT s1;
DROP TABLE t;
SELECT * FROM t ORDER BY a;
ERROR: relation "t" does not exist
ROLLBACK TO SAVEPOINT s1;
COMMIT;
SELECT * FROM t ORDER BY a;
a | b
---------------------------------------------------------------------
1 | 2
2 | 4
3 | 6
4 | 8
5 | 10
(5 rows)
-- verify that SELECT errors when there is unflushed data in upper transactions.
BEGIN;
INSERT INTO t VALUES (6, 12);
SAVEPOINT s1;
SELECT * FROM t;
ERROR: cannot read from table when there is unflushed data in upper transactions
ROLLBACK;
SELECT * FROM t ORDER BY a;
a | b
---------------------------------------------------------------------
1 | 2
2 | 4
3 | 6
4 | 8
5 | 10
(5 rows)
--
-- Prepared transactions
--
BEGIN;
INSERT INTO t VALUES (6, 12);
INSERT INTO t VALUES (7, 14);
SELECT * FROM t ORDER BY a;
a | b
---------------------------------------------------------------------
1 | 2
2 | 4
3 | 6
4 | 8
5 | 10
6 | 12
7 | 14
(7 rows)
PREPARE TRANSACTION 'tx01';
SELECT * FROM t ORDER BY a;
a | b
---------------------------------------------------------------------
1 | 2
2 | 4
3 | 6
4 | 8
5 | 10
(5 rows)
ROLLBACK PREPARED 'tx01';
SELECT * FROM t ORDER BY a;
a | b
---------------------------------------------------------------------
1 | 2
2 | 4
3 | 6
4 | 8
5 | 10
(5 rows)
BEGIN;
INSERT INTO t VALUES (6, 13);
INSERT INTO t VALUES (7, 15);
PREPARE TRANSACTION 'tx02';
SELECT * FROM t ORDER BY a;
a | b
---------------------------------------------------------------------
1 | 2
2 | 4
3 | 6
4 | 8
5 | 10
(5 rows)
COMMIT PREPARED 'tx02';
SELECT * FROM t ORDER BY a;
a | b
---------------------------------------------------------------------
1 | 2
2 | 4
3 | 6
4 | 8
5 | 10
6 | 13
7 | 15
(7 rows)
--
-- Prepared statements
--
PREPARE p1(int) AS INSERT INTO t VALUES (8, $1), (9, $1+2);
EXPLAIN (COSTS OFF) EXECUTE p1(16);
QUERY PLAN
---------------------------------------------------------------------
Insert on t
-> Values Scan on "*VALUES*"
(2 rows)
EXECUTE p1(16);
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p1(20);
QUERY PLAN
---------------------------------------------------------------------
Insert on t (actual rows=0 loops=1)
-> Values Scan on "*VALUES*" (actual rows=2 loops=1)
(2 rows)
SELECT * FROM t ORDER BY a;
a | b
---------------------------------------------------------------------
1 | 2
2 | 4
3 | 6
4 | 8
5 | 10
6 | 13
7 | 15
8 | 16
8 | 20
9 | 18
9 | 22
(11 rows)
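-- each EXECUTE of p1 inserts one row with a = 8 and one with a = 9, so the
-- two executions above leave duplicate keys with different b values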
DROP TABLE t;
View File
@ -0,0 +1,171 @@
create or replace function trs_before() returns trigger language plpgsql as $$
BEGIN
RAISE NOTICE 'BEFORE STATEMENT %', TG_OP;
RETURN NULL;
END;
$$;
create or replace function trs_after() returns trigger language plpgsql as $$
DECLARE
r RECORD;
BEGIN
RAISE NOTICE 'AFTER STATEMENT %', TG_OP;
IF (TG_OP = 'DELETE') THEN
FOR R IN select * from old_table
LOOP
RAISE NOTICE ' (%)', r.i;
END LOOP;
ELSE
FOR R IN select * from new_table
LOOP
RAISE NOTICE ' (%)', r.i;
END LOOP;
END IF;
RETURN NULL;
END;
$$;
create or replace function trr_before() returns trigger language plpgsql as $$
BEGIN
RAISE NOTICE 'BEFORE ROW %: (%)', TG_OP, NEW.i;
RETURN NEW;
END;
$$;
create or replace function trr_after() returns trigger language plpgsql as $$
BEGIN
RAISE NOTICE 'AFTER ROW %: (%)', TG_OP, NEW.i;
RETURN NEW;
END;
$$;
create table test_tr(i int) using cstore_tableam;
create trigger tr_before_stmt before insert on test_tr
for each statement execute procedure trs_before();
create trigger tr_after_stmt after insert on test_tr
referencing new table as new_table
for each statement execute procedure trs_after();
create trigger tr_before_row before insert on test_tr
for each row execute procedure trr_before();
-- after triggers require TIDs, which are not supported yet
create trigger tr_after_row after insert on test_tr
for each row execute procedure trr_after();
ERROR: AFTER ROW triggers are not supported for columnstore access method
HINT: Consider an AFTER STATEMENT trigger instead.
insert into test_tr values(1);
NOTICE: BEFORE STATEMENT INSERT
CONTEXT: PL/pgSQL function trs_before() line 3 at RAISE
NOTICE: BEFORE ROW INSERT: (1)
CONTEXT: PL/pgSQL function trr_before() line 3 at RAISE
NOTICE: AFTER STATEMENT INSERT
CONTEXT: PL/pgSQL function trs_after() line 5 at RAISE
NOTICE: (1)
CONTEXT: PL/pgSQL function trs_after() line 14 at RAISE
insert into test_tr values(2),(3),(4);
NOTICE: BEFORE STATEMENT INSERT
CONTEXT: PL/pgSQL function trs_before() line 3 at RAISE
NOTICE: BEFORE ROW INSERT: (2)
CONTEXT: PL/pgSQL function trr_before() line 3 at RAISE
NOTICE: BEFORE ROW INSERT: (3)
CONTEXT: PL/pgSQL function trr_before() line 3 at RAISE
NOTICE: BEFORE ROW INSERT: (4)
CONTEXT: PL/pgSQL function trr_before() line 3 at RAISE
NOTICE: AFTER STATEMENT INSERT
CONTEXT: PL/pgSQL function trs_after() line 5 at RAISE
NOTICE: (2)
CONTEXT: PL/pgSQL function trs_after() line 14 at RAISE
NOTICE: (3)
CONTEXT: PL/pgSQL function trs_after() line 14 at RAISE
NOTICE: (4)
CONTEXT: PL/pgSQL function trs_after() line 14 at RAISE
SELECT * FROM test_tr ORDER BY i;
i
---------------------------------------------------------------------
1
2
3
4
(4 rows)
drop table test_tr;
create table test_tr(i int) using cstore_tableam;
-- we should be able to clean up and continue gracefully if we
-- error out in AFTER STATEMENT triggers.
CREATE SEQUENCE counter START 100;
create or replace function trs_after_erroring() returns trigger language plpgsql as $$
BEGIN
IF nextval('counter') % 2 = 0 THEN
RAISE EXCEPTION '%', 'error';
END IF;
RETURN NULL;
END;
$$;
create trigger tr_after_stmt_erroring after insert on test_tr
referencing new table as new_table
for each statement execute procedure trs_after_erroring();
--
-- We used to fail to clean up properly after erroring out. Here the first
-- statement errors but the second succeeds; with the old clean-up bug both
-- rows would remain visible, whereas only the 2nd one should be.
--
insert into test_tr values(5);
ERROR: error
CONTEXT: PL/pgSQL function trs_after_erroring() line 4 at RAISE
insert into test_tr values(6);
SELECT * FROM test_tr ORDER BY i;
i
---------------------------------------------------------------------
6
(1 row)
drop table test_tr;
--
-- https://github.com/citusdata/cstore2/issues/32
--
create table events(
user_id bigint,
event_id bigint,
event_time timestamp default now(),
value float default random())
PARTITION BY RANGE (event_time);
create table events_p2020_11_04_102965
PARTITION OF events FOR VALUES FROM ('2020-11-04 00:00:00+01') TO ('2020-11-05 00:00:00+01')
USING cstore_tableam;
create table events_trigger_target(
user_id bigint,
avg float,
__count__ bigint
) USING cstore_tableam;
CREATE OR REPLACE FUNCTION user_value_by_day()
RETURNS trigger
LANGUAGE plpgsql
AS $function$
BEGIN
IF (TG_OP = 'INSERT' OR TG_OP = 'UPDATE') THEN
EXECUTE format($exec_format$INSERT INTO %s AS __mat__ SELECT user_id, 0.1 AS avg, pg_catalog.count(*) AS __count__ FROM __ins__ events GROUP BY user_id;
$exec_format$, TG_ARGV[0]);
END IF;
IF (TG_OP = 'DELETE' OR TG_OP = 'UPDATE') THEN
RAISE EXCEPTION $ex$MATERIALIZED VIEW 'user_value_by_day' on table 'events' does not support UPDATE/DELETE$ex$;
END IF;
IF (TG_OP = 'TRUNCATE') THEN
EXECUTE format($exec_format$TRUNCATE TABLE %s; $exec_format$, TG_ARGV[0]);
END IF;
RETURN NULL;
END;
$function$;
create trigger "user_value_by_day_INSERT" AFTER INSERT ON events
REFERENCING NEW TABLE AS __ins__
FOR EACH STATEMENT EXECUTE FUNCTION user_value_by_day('events_trigger_target');
COPY events FROM STDIN WITH (FORMAT 'csv');
SELECT * FROM events ORDER BY user_id;
user_id | event_id | event_time | value
---------------------------------------------------------------------
1 | 1 | Wed Nov 04 15:54:02.226999 2020 | 1.1
2 | 3 | Wed Nov 04 16:54:02.226999 2020 | 2.2
(2 rows)
SELECT * FROM events_trigger_target ORDER BY user_id;
user_id | avg | __count__
---------------------------------------------------------------------
1 | 0.1 | 1
2 | 0.1 | 1
(2 rows)
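-- the statement-level trigger aggregated the inserted rows via the __ins__
-- transition table, producing one row per user_id in the target table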
DROP TABLE events;
View File
@ -0,0 +1,273 @@
--
-- Test the TRUNCATE TABLE command for cstore_fdw tables.
--
-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
---------------------------------------------------------------------
t
(1 row)
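-- a minimal sketch of the \gset idiom used above (hypothetical psql session):
--   SHOW server_version \gset
--   SELECT substring(:'server_version', '\d+')::int AS major_version \gset
--   \echo :major_version
-- \gset stores each column of the result into a psql variable of the same name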
-- CREATE a cstore_fdw table, fill with some data --
CREATE TABLE cstore_truncate_test (a int, b int) USING cstore_tableam;
CREATE TABLE cstore_truncate_test_second (a int, b int) USING cstore_tableam;
-- COMPRESSED
CREATE TABLE cstore_truncate_test_compressed (a int, b int) USING cstore_tableam;
CREATE TABLE cstore_truncate_test_regular (a int, b int);
SELECT count(*) AS cstore_data_files_before_truncate FROM cstore.cstore_data_files \gset
INSERT INTO cstore_truncate_test select a, a from generate_series(1, 10) a;
set cstore.compression = 'pglz';
INSERT INTO cstore_truncate_test_compressed select a, a from generate_series(1, 10) a;
INSERT INTO cstore_truncate_test_compressed select a, a from generate_series(1, 10) a;
set cstore.compression to default;
-- query rows
SELECT * FROM cstore_truncate_test;
a | b
---------------------------------------------------------------------
1 | 1
2 | 2
3 | 3
4 | 4
5 | 5
6 | 6
7 | 7
8 | 8
9 | 9
10 | 10
(10 rows)
TRUNCATE TABLE cstore_truncate_test;
SELECT * FROM cstore_truncate_test;
a | b
---------------------------------------------------------------------
(0 rows)
SELECT COUNT(*) from cstore_truncate_test;
count
---------------------------------------------------------------------
0
(1 row)
SELECT count(*) FROM cstore_truncate_test_compressed;
count
---------------------------------------------------------------------
20
(1 row)
TRUNCATE TABLE cstore_truncate_test_compressed;
SELECT count(*) FROM cstore_truncate_test_compressed;
count
---------------------------------------------------------------------
0
(1 row)
SELECT pg_relation_size('cstore_truncate_test_compressed');
pg_relation_size
---------------------------------------------------------------------
0
(1 row)
INSERT INTO cstore_truncate_test select a, a from generate_series(1, 10) a;
INSERT INTO cstore_truncate_test_regular select a, a from generate_series(10, 20) a;
INSERT INTO cstore_truncate_test_second select a, a from generate_series(20, 30) a;
SELECT * from cstore_truncate_test;
a | b
---------------------------------------------------------------------
1 | 1
2 | 2
3 | 3
4 | 4
5 | 5
6 | 6
7 | 7
8 | 8
9 | 9
10 | 10
(10 rows)
SELECT * from cstore_truncate_test_second;
a | b
---------------------------------------------------------------------
20 | 20
21 | 21
22 | 22
23 | 23
24 | 24
25 | 25
26 | 26
27 | 27
28 | 28
29 | 29
30 | 30
(11 rows)
SELECT * from cstore_truncate_test_regular;
a | b
---------------------------------------------------------------------
10 | 10
11 | 11
12 | 12
13 | 13
14 | 14
15 | 15
16 | 16
17 | 17
18 | 18
19 | 19
20 | 20
(11 rows)
-- make sure multi truncate works
-- notice that the same table might be repeated
TRUNCATE TABLE cstore_truncate_test,
cstore_truncate_test_regular,
cstore_truncate_test_second,
cstore_truncate_test;
SELECT * from cstore_truncate_test;
a | b
---------------------------------------------------------------------
(0 rows)
SELECT * from cstore_truncate_test_second;
a | b
---------------------------------------------------------------------
(0 rows)
SELECT * from cstore_truncate_test_regular;
a | b
---------------------------------------------------------------------
(0 rows)
-- test if truncate on empty table works
TRUNCATE TABLE cstore_truncate_test;
SELECT * from cstore_truncate_test;
a | b
---------------------------------------------------------------------
(0 rows)
-- make sure TRUNCATE deletes metadata for the old relfilenode
SELECT :cstore_data_files_before_truncate - count(*) FROM cstore.cstore_data_files;
?column?
---------------------------------------------------------------------
0
(1 row)
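-- for reference, one hypothetical way to eyeball the surviving metadata
-- (assuming cstore.cstore_data_files keys rows by relfilenode, as the joins
-- elsewhere in these tests suggest):
--   SELECT relfilenode FROM cstore.cstore_data_files;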
-- test if truncation in the same transaction that created the table works properly
BEGIN;
CREATE TABLE cstore_same_transaction_truncate(a int) USING cstore_tableam;
INSERT INTO cstore_same_transaction_truncate SELECT * FROM generate_series(1, 100);
TRUNCATE cstore_same_transaction_truncate;
INSERT INTO cstore_same_transaction_truncate SELECT * FROM generate_series(20, 23);
COMMIT;
-- should output "1" for the newly created relation
SELECT count(*) - :cstore_data_files_before_truncate FROM cstore.cstore_data_files;
?column?
---------------------------------------------------------------------
1
(1 row)
SELECT * FROM cstore_same_transaction_truncate;
a
---------------------------------------------------------------------
20
21
22
23
(4 rows)
DROP TABLE cstore_same_transaction_truncate;
-- test if a cached truncate from a pl/pgsql function works
CREATE FUNCTION cstore_truncate_test_regular_func() RETURNS void AS $$
BEGIN
INSERT INTO cstore_truncate_test_regular select a, a from generate_series(1, 10) a;
TRUNCATE TABLE cstore_truncate_test_regular;
END;$$
LANGUAGE plpgsql;
SELECT cstore_truncate_test_regular_func();
cstore_truncate_test_regular_func
---------------------------------------------------------------------
(1 row)
-- the cached plans are used starting from the second call
SELECT cstore_truncate_test_regular_func();
cstore_truncate_test_regular_func
---------------------------------------------------------------------
(1 row)
DROP FUNCTION cstore_truncate_test_regular_func();
DROP TABLE cstore_truncate_test, cstore_truncate_test_second;
DROP TABLE cstore_truncate_test_regular;
DROP TABLE cstore_truncate_test_compressed;
-- test truncate with schema
CREATE SCHEMA truncate_schema;
-- COMPRESSED
CREATE TABLE truncate_schema.truncate_tbl (id int) USING cstore_tableam;
set cstore.compression = 'pglz';
INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100);
set cstore.compression to default;
SELECT COUNT(*) FROM truncate_schema.truncate_tbl;
count
---------------------------------------------------------------------
100
(1 row)
TRUNCATE TABLE truncate_schema.truncate_tbl;
SELECT COUNT(*) FROM truncate_schema.truncate_tbl;
count
---------------------------------------------------------------------
0
(1 row)
set cstore.compression = 'pglz';
INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100);
set cstore.compression to default;
-- create a user that cannot truncate
CREATE USER truncate_user;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
GRANT USAGE ON SCHEMA truncate_schema TO truncate_user;
GRANT SELECT ON TABLE truncate_schema.truncate_tbl TO truncate_user;
REVOKE TRUNCATE ON TABLE truncate_schema.truncate_tbl FROM truncate_user;
SELECT current_user \gset
\c - truncate_user
-- verify truncate command fails and check number of rows
SELECT count(*) FROM truncate_schema.truncate_tbl;
count
---------------------------------------------------------------------
100
(1 row)
TRUNCATE TABLE truncate_schema.truncate_tbl;
ERROR: permission denied for table truncate_tbl
SELECT count(*) FROM truncate_schema.truncate_tbl;
count
---------------------------------------------------------------------
100
(1 row)
-- switch to super user, grant truncate to truncate_user
\c - :current_user
GRANT TRUNCATE ON TABLE truncate_schema.truncate_tbl TO truncate_user;
-- verify truncate_user can truncate now
\c - truncate_user
SELECT count(*) FROM truncate_schema.truncate_tbl;
count
---------------------------------------------------------------------
100
(1 row)
TRUNCATE TABLE truncate_schema.truncate_tbl;
SELECT count(*) FROM truncate_schema.truncate_tbl;
count
---------------------------------------------------------------------
0
(1 row)
\c - :current_user
-- cleanup
DROP SCHEMA truncate_schema CASCADE;
NOTICE: drop cascades to table truncate_schema.truncate_tbl
DROP USER truncate_user;
View File
@ -0,0 +1,262 @@
--
-- Test the TRUNCATE TABLE command for cstore_fdw tables.
--
-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
---------------------------------------------------------------------
f
(1 row)
-- Check that files for the automatically managed table exist in the
-- cstore_fdw/{databaseoid} directory.
SELECT count(*) FROM (
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
) AS q1) AS q2;
count
---------------------------------------------------------------------
0
(1 row)
-- CREATE a cstore_fdw table, fill with some data --
CREATE FOREIGN TABLE cstore_truncate_test (a int, b int) SERVER cstore_server;
CREATE FOREIGN TABLE cstore_truncate_test_second (a int, b int) SERVER cstore_server;
CREATE FOREIGN TABLE cstore_truncate_test_compressed (a int, b int) SERVER cstore_server OPTIONS (compression 'pglz');
CREATE TABLE cstore_truncate_test_regular (a int, b int);
INSERT INTO cstore_truncate_test select a, a from generate_series(1, 10) a;
INSERT INTO cstore_truncate_test_compressed select a, a from generate_series(1, 10) a;
INSERT INTO cstore_truncate_test_compressed select a, a from generate_series(1, 10) a;
-- query rows
SELECT * FROM cstore_truncate_test;
a | b
---------------------------------------------------------------------
1 | 1
2 | 2
3 | 3
4 | 4
5 | 5
6 | 6
7 | 7
8 | 8
9 | 9
10 | 10
(10 rows)
TRUNCATE TABLE cstore_truncate_test;
SELECT * FROM cstore_truncate_test;
a | b
---------------------------------------------------------------------
(0 rows)
SELECT COUNT(*) from cstore_truncate_test;
count
---------------------------------------------------------------------
0
(1 row)
SELECT count(*) FROM cstore_truncate_test_compressed;
count
---------------------------------------------------------------------
20
(1 row)
TRUNCATE TABLE cstore_truncate_test_compressed;
SELECT count(*) FROM cstore_truncate_test_compressed;
count
---------------------------------------------------------------------
0
(1 row)
SELECT cstore_table_size('cstore_truncate_test_compressed');
cstore_table_size
---------------------------------------------------------------------
26
(1 row)
-- make sure data files still present
SELECT count(*) FROM (
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
) AS q1) AS q2;
count
---------------------------------------------------------------------
6
(1 row)
INSERT INTO cstore_truncate_test select a, a from generate_series(1, 10) a;
INSERT INTO cstore_truncate_test_regular select a, a from generate_series(10, 20) a;
INSERT INTO cstore_truncate_test_second select a, a from generate_series(20, 30) a;
SELECT * from cstore_truncate_test;
a | b
---------------------------------------------------------------------
1 | 1
2 | 2
3 | 3
4 | 4
5 | 5
6 | 6
7 | 7
8 | 8
9 | 9
10 | 10
(10 rows)
SELECT * from cstore_truncate_test_second;
a | b
---------------------------------------------------------------------
20 | 20
21 | 21
22 | 22
23 | 23
24 | 24
25 | 25
26 | 26
27 | 27
28 | 28
29 | 29
30 | 30
(11 rows)
SELECT * from cstore_truncate_test_regular;
a | b
---------------------------------------------------------------------
10 | 10
11 | 11
12 | 12
13 | 13
14 | 14
15 | 15
16 | 16
17 | 17
18 | 18
19 | 19
20 | 20
(11 rows)
-- make sure multi truncate works
-- notice that the same table might be repeated
TRUNCATE TABLE cstore_truncate_test,
cstore_truncate_test_regular,
cstore_truncate_test_second,
cstore_truncate_test;
SELECT * from cstore_truncate_test;
a | b
---------------------------------------------------------------------
(0 rows)
SELECT * from cstore_truncate_test_second;
a | b
---------------------------------------------------------------------
(0 rows)
SELECT * from cstore_truncate_test_regular;
a | b
---------------------------------------------------------------------
(0 rows)
-- test if truncate on empty table works
TRUNCATE TABLE cstore_truncate_test;
SELECT * from cstore_truncate_test;
a | b
---------------------------------------------------------------------
(0 rows)
-- test if a cached truncate from a pl/pgsql function works
CREATE FUNCTION cstore_truncate_test_regular_func() RETURNS void AS $$
BEGIN
INSERT INTO cstore_truncate_test_regular select a, a from generate_series(1, 10) a;
TRUNCATE TABLE cstore_truncate_test_regular;
END;$$
LANGUAGE plpgsql;
SELECT cstore_truncate_test_regular_func();
cstore_truncate_test_regular_func
---------------------------------------------------------------------
(1 row)
-- the cached plans are used starting from the second call
SELECT cstore_truncate_test_regular_func();
cstore_truncate_test_regular_func
---------------------------------------------------------------------
(1 row)
DROP FUNCTION cstore_truncate_test_regular_func();
DROP FOREIGN TABLE cstore_truncate_test, cstore_truncate_test_second;
DROP TABLE cstore_truncate_test_regular;
DROP FOREIGN TABLE cstore_truncate_test_compressed;
-- test truncate with schema
CREATE SCHEMA truncate_schema;
CREATE FOREIGN TABLE truncate_schema.truncate_tbl (id int) SERVER cstore_server OPTIONS(compression 'pglz');
INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100);
SELECT COUNT(*) FROM truncate_schema.truncate_tbl;
count
---------------------------------------------------------------------
100
(1 row)
TRUNCATE TABLE truncate_schema.truncate_tbl;
SELECT COUNT(*) FROM truncate_schema.truncate_tbl;
count
---------------------------------------------------------------------
0
(1 row)
INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100);
-- create a user that can not truncate
CREATE USER truncate_user;
GRANT USAGE ON SCHEMA truncate_schema TO truncate_user;
GRANT SELECT ON TABLE truncate_schema.truncate_tbl TO truncate_user;
REVOKE TRUNCATE ON TABLE truncate_schema.truncate_tbl FROM truncate_user;
SELECT current_user \gset
\c - truncate_user
-- verify truncate command fails and check number of rows
SELECT count(*) FROM truncate_schema.truncate_tbl;
count
---------------------------------------------------------------------
100
(1 row)
TRUNCATE TABLE truncate_schema.truncate_tbl;
ERROR: permission denied for relation truncate_tbl
SELECT count(*) FROM truncate_schema.truncate_tbl;
count
---------------------------------------------------------------------
100
(1 row)
-- switch to super user, grant truncate to truncate_user
\c - :current_user
GRANT TRUNCATE ON TABLE truncate_schema.truncate_tbl TO truncate_user;
-- verify truncate_user can truncate now
\c - truncate_user
SELECT count(*) FROM truncate_schema.truncate_tbl;
count
---------------------------------------------------------------------
100
(1 row)
TRUNCATE TABLE truncate_schema.truncate_tbl;
SELECT count(*) FROM truncate_schema.truncate_tbl;
count
---------------------------------------------------------------------
0
(1 row)
\c - :current_user
-- cleanup
DROP SCHEMA truncate_schema CASCADE;
NOTICE: drop cascades to foreign table truncate_schema.truncate_tbl
DROP USER truncate_user;
-- verify files are removed
SELECT count(*) FROM (
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
) AS q1) AS q2;
count
---------------------------------------------------------------------
0
(1 row)
View File
@ -0,0 +1,234 @@
SELECT count(*) AS columnar_table_count FROM cstore.cstore_data_files \gset
CREATE TABLE t(a int, b int) USING cstore_tableam;
SELECT count(*) FROM cstore.cstore_stripes a, pg_class b WHERE a.relfilenode=b.relfilenode AND b.relname='t';
count
---------------------------------------------------------------------
0
(1 row)
INSERT INTO t SELECT i, i * i FROM generate_series(1, 10) i;
INSERT INTO t SELECT i, i * i FROM generate_series(11, 20) i;
INSERT INTO t SELECT i, i * i FROM generate_series(21, 30) i;
SELECT sum(a), sum(b) FROM t;
sum | sum
---------------------------------------------------------------------
465 | 9455
(1 row)
SELECT count(*) FROM cstore.cstore_stripes a, pg_class b WHERE a.relfilenode=b.relfilenode AND b.relname='t';
count
---------------------------------------------------------------------
3
(1 row)
-- vacuum full should merge stripes together
VACUUM FULL t;
SELECT sum(a), sum(b) FROM t;
sum | sum
---------------------------------------------------------------------
465 | 9455
(1 row)
SELECT count(*) FROM cstore.cstore_stripes a, pg_class b WHERE a.relfilenode=b.relfilenode AND b.relname='t';
count
---------------------------------------------------------------------
1
(1 row)
-- test the case when all data cannot fit into a single stripe
SELECT alter_cstore_table_set('t', stripe_row_count => 1000);
alter_cstore_table_set
---------------------------------------------------------------------
(1 row)
INSERT INTO t SELECT i, 2 * i FROM generate_series(1,2500) i;
SELECT sum(a), sum(b) FROM t;
sum | sum
---------------------------------------------------------------------
3126715 | 6261955
(1 row)
SELECT count(*) FROM cstore.cstore_stripes a, pg_class b WHERE a.relfilenode=b.relfilenode AND b.relname='t';
count
---------------------------------------------------------------------
4
(1 row)
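-- 2500 rows at stripe_row_count 1000 flush as stripes of 1000, 1000 and 500
-- rows; together with the single vacuumed stripe that gives 4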
VACUUM FULL t;
SELECT sum(a), sum(b) FROM t;
sum | sum
---------------------------------------------------------------------
3126715 | 6261955
(1 row)
SELECT count(*) FROM cstore.cstore_stripes a, pg_class b WHERE a.relfilenode=b.relfilenode AND b.relname='t';
count
---------------------------------------------------------------------
3
(1 row)
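-- VACUUM FULL rewrote the 2530 rows into full 1000-row stripes plus a
-- remainder, hence 3 stripes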
-- VACUUM FULL doesn't reclaim dropped columns, but converts them to NULLs
ALTER TABLE t DROP COLUMN a;
SELECT stripe, attr, block, minimum_value IS NULL, maximum_value IS NULL FROM cstore.cstore_skipnodes a, pg_class b WHERE a.relfilenode=b.relfilenode AND b.relname='t' ORDER BY 1, 2, 3;
stripe | attr | block | ?column? | ?column?
---------------------------------------------------------------------
1 | 1 | 0 | f | f
1 | 2 | 0 | f | f
2 | 1 | 0 | f | f
2 | 2 | 0 | f | f
3 | 1 | 0 | f | f
3 | 2 | 0 | f | f
(6 rows)
VACUUM FULL t;
SELECT stripe, attr, block, minimum_value IS NULL, maximum_value IS NULL FROM cstore.cstore_skipnodes a, pg_class b WHERE a.relfilenode=b.relfilenode AND b.relname='t' ORDER BY 1, 2, 3;
stripe | attr | block | ?column? | ?column?
---------------------------------------------------------------------
1 | 1 | 0 | t | t
1 | 2 | 0 | f | f
2 | 1 | 0 | t | t
2 | 2 | 0 | f | f
3 | 1 | 0 | t | t
3 | 2 | 0 | f | f
(6 rows)
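-- attr 1 is the dropped column: its min/max are now NULL because VACUUM FULL
-- rewrote its values as NULLs instead of reclaiming the column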
-- Make sure we cleaned up the transient table metadata after VACUUM FULL commands
SELECT count(*) - :columnar_table_count FROM cstore.cstore_data_files;
?column?
---------------------------------------------------------------------
1
(1 row)
-- do this in a transaction so concurrent autovacuum doesn't interfere with results
BEGIN;
SAVEPOINT s1;
SELECT count(*) FROM t;
count
---------------------------------------------------------------------
2530
(1 row)
SELECT pg_size_pretty(pg_relation_size('t'));
pg_size_pretty
---------------------------------------------------------------------
32 kB
(1 row)
INSERT INTO t SELECT i FROM generate_series(1, 10000) i;
SELECT pg_size_pretty(pg_relation_size('t'));
pg_size_pretty
---------------------------------------------------------------------
112 kB
(1 row)
SELECT count(*) FROM t;
count
---------------------------------------------------------------------
12530
(1 row)
ROLLBACK TO SAVEPOINT s1;
-- not truncated by VACUUM or autovacuum yet (being in a transaction ensures this),
-- so the relation size should be the same as before.
SELECT pg_size_pretty(pg_relation_size('t'));
pg_size_pretty
---------------------------------------------------------------------
112 kB
(1 row)
COMMIT;
-- vacuum should truncate the relation to the usable space
VACUUM VERBOSE t;
INFO: statistics for "t":
total file size: 114688, total data size: 10754
total row count: 2530, stripe count: 3, average rows per stripe: 843
block count: 3, containing data for dropped columns: 0, none compressed: 3, pglz compressed: 0
INFO: "t": truncated 14 to 4 pages
DETAIL: CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s
SELECT pg_size_pretty(pg_relation_size('t'));
pg_size_pretty
---------------------------------------------------------------------
32 kB
(1 row)
SELECT count(*) FROM t;
count
---------------------------------------------------------------------
2530
(1 row)
-- add some stripes with different compression types and create some gaps,
-- then vacuum to print stats
BEGIN;
SELECT alter_cstore_table_set('t',
block_row_count => 1000,
stripe_row_count => 2000,
compression => 'pglz');
alter_cstore_table_set
---------------------------------------------------------------------
(1 row)
SAVEPOINT s1;
INSERT INTO t SELECT i FROM generate_series(1, 1500) i;
ROLLBACK TO SAVEPOINT s1;
INSERT INTO t SELECT i / 5 FROM generate_series(1, 1500) i;
SELECT alter_cstore_table_set('t', compression => 'none');
alter_cstore_table_set
---------------------------------------------------------------------
(1 row)
SAVEPOINT s2;
INSERT INTO t SELECT i FROM generate_series(1, 1500) i;
ROLLBACK TO SAVEPOINT s2;
INSERT INTO t SELECT i / 5 FROM generate_series(1, 1500) i;
COMMIT;
VACUUM VERBOSE t;
INFO: statistics for "t":
total file size: 49152, total data size: 18808
total row count: 5530, stripe count: 5, average rows per stripe: 1106
block count: 7, containing data for dropped columns: 0, none compressed: 5, pglz compressed: 2
SELECT count(*) FROM t;
count
---------------------------------------------------------------------
5530
(1 row)
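-- of the 7 blocks, the 2 pglz ones come from the insert performed while
-- compression was 'pglz'; the rolled-back inserts left gaps but no live blocks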
-- check that we report blocks with data for dropped columns
ALTER TABLE t ADD COLUMN c int;
INSERT INTO t SELECT 1, i / 5 FROM generate_series(1, 1500) i;
ALTER TABLE t DROP COLUMN c;
VACUUM VERBOSE t;
INFO: statistics for "t":
total file size: 65536, total data size: 31372
total row count: 7030, stripe count: 6, average rows per stripe: 1171
block count: 11, containing data for dropped columns: 2, none compressed: 9, pglz compressed: 2
-- vacuum full should remove blocks for dropped columns
-- note that a block will be stored in non-compressed form if compression
-- doesn't reduce its size.
SELECT alter_cstore_table_set('t', compression => 'pglz');
alter_cstore_table_set
---------------------------------------------------------------------
(1 row)
VACUUM FULL t;
VACUUM VERBOSE t;
INFO: statistics for "t":
total file size: 49152, total data size: 15728
total row count: 7030, stripe count: 4, average rows per stripe: 1757
block count: 8, containing data for dropped columns: 0, none compressed: 2, pglz compressed: 6
DROP TABLE t;
-- Make sure we cleaned the metadata for t too
SELECT count(*) - :columnar_table_count FROM cstore.cstore_data_files;
?column?
---------------------------------------------------------------------
0
(1 row)
View File
@ -0,0 +1,68 @@
Parsed test spec with 2 sessions
starting permutation: s1-insert s1-begin s1-insert s2-vacuum s1-commit s2-select
step s1-insert:
INSERT INTO test_vacuum_vs_insert SELECT i, 2 * i FROM generate_series(1, 3) i;
step s1-begin:
BEGIN;
step s1-insert:
INSERT INTO test_vacuum_vs_insert SELECT i, 2 * i FROM generate_series(1, 3) i;
s2: INFO: statistics for "test_vacuum_vs_insert":
total file size: 16384, total data size: 26
total row count: 3, stripe count: 1, average rows per stripe: 3
block count: 2, containing data for dropped columns: 0, none compressed: 2, pglz compressed: 0
s2: INFO: "test_vacuum_vs_insert": stopping truncate due to conflicting lock request
step s2-vacuum:
VACUUM VERBOSE test_vacuum_vs_insert;
step s1-commit:
COMMIT;
step s2-select:
SELECT * FROM test_vacuum_vs_insert;
a b
1 2
2 4
3 6
1 2
2 4
3 6
starting permutation: s1-insert s1-begin s1-insert s2-vacuum-full s1-commit s2-select
step s1-insert:
INSERT INTO test_vacuum_vs_insert SELECT i, 2 * i FROM generate_series(1, 3) i;
step s1-begin:
BEGIN;
step s1-insert:
INSERT INTO test_vacuum_vs_insert SELECT i, 2 * i FROM generate_series(1, 3) i;
step s2-vacuum-full:
VACUUM FULL VERBOSE test_vacuum_vs_insert;
<waiting ...>
step s1-commit:
COMMIT;
s2: INFO: vacuuming "public.test_vacuum_vs_insert"
s2: INFO: "test_vacuum_vs_insert": found 0 removable, 6 nonremovable row versions in 3 pages
DETAIL: 0 dead row versions cannot be removed yet.
CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s.
step s2-vacuum-full: <... completed>
step s2-select:
SELECT * FROM test_vacuum_vs_insert;
a b
1 2
2 4
3 6
1 2
2 4
3 6
View File
@ -0,0 +1,142 @@
Parsed test spec with 2 sessions
starting permutation: s1-begin s2-begin s1-insert s2-insert s1-select s2-select s1-commit s2-commit s1-select
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s1-insert:
INSERT INTO test_insert_concurrency SELECT i, 2 * i FROM generate_series(1, 3) i;
step s2-insert:
INSERT INTO test_insert_concurrency SELECT i, 2 * i FROM generate_series(4, 6) i;
step s1-select:
SELECT * FROM test_insert_concurrency ORDER BY a;
a b
1 2
2 4
3 6
step s2-select:
SELECT * FROM test_insert_concurrency ORDER BY a;
a b
4 8
5 10
6 12
step s1-commit:
COMMIT;
step s2-commit:
COMMIT;
step s1-select:
SELECT * FROM test_insert_concurrency ORDER BY a;
a b
1 2
2 4
3 6
4 8
5 10
6 12
starting permutation: s1-begin s2-begin s1-copy s2-insert s1-select s2-select s1-commit s2-commit s1-select
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s1-copy:
COPY test_insert_concurrency(a) FROM PROGRAM 'seq 11 13';
step s2-insert:
INSERT INTO test_insert_concurrency SELECT i, 2 * i FROM generate_series(4, 6) i;
step s1-select:
SELECT * FROM test_insert_concurrency ORDER BY a;
a b
11
12
13
step s2-select:
SELECT * FROM test_insert_concurrency ORDER BY a;
a b
4 8
5 10
6 12
step s1-commit:
COMMIT;
step s2-commit:
COMMIT;
step s1-select:
SELECT * FROM test_insert_concurrency ORDER BY a;
a b
4 8
5 10
6 12
11
12
13
starting permutation: s1-begin s2-begin s2-insert s1-copy s1-select s2-select s1-commit s2-commit s1-select
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s2-insert:
INSERT INTO test_insert_concurrency SELECT i, 2 * i FROM generate_series(4, 6) i;
step s1-copy:
COPY test_insert_concurrency(a) FROM PROGRAM 'seq 11 13';
step s1-select:
SELECT * FROM test_insert_concurrency ORDER BY a;
a b
11
12
13
step s2-select:
SELECT * FROM test_insert_concurrency ORDER BY a;
a b
4 8
5 10
6 12
step s1-commit:
COMMIT;
step s2-commit:
COMMIT;
step s1-select:
SELECT * FROM test_insert_concurrency ORDER BY a;
a b
4 8
5 10
6 12
11
12
13
View File
@ -333,7 +333,7 @@ ERROR: Table 'citus_local_table_1' is a citus local table. Replicating shard of
-- undistribute_table is supported
BEGIN;
SELECT undistribute_table('citus_local_table_1');
-NOTICE: Creating a new local table for citus_local_tables_test_schema.citus_local_table_1
+NOTICE: creating a new local table for citus_local_tables_test_schema.citus_local_table_1
NOTICE: Moving the data of citus_local_tables_test_schema.citus_local_table_1
NOTICE: executing the command locally: SELECT a FROM citus_local_tables_test_schema.citus_local_table_1_1504027 citus_local_table_1
NOTICE: Dropping the old citus_local_tables_test_schema.citus_local_table_1
View File
@ -0,0 +1,6 @@
Parsed test spec with 1 sessions
starting permutation: s1a
step s1a:
CREATE EXTENSION cstore_fdw;
View File
@ -0,0 +1,174 @@
--
-- Testing ALTER TABLE on cstore_fdw tables.
--
CREATE FOREIGN TABLE test_alter_table (a int, b int, c int) SERVER cstore_server;
WITH sample_data AS (VALUES
(1, 2, 3),
(4, 5, 6),
(7, 8, 9)
)
INSERT INTO test_alter_table SELECT * FROM sample_data;
-- drop a column
ALTER FOREIGN TABLE test_alter_table DROP COLUMN a;
-- test analyze
ANALYZE test_alter_table;
-- verify select queries run as expected
SELECT * FROM test_alter_table;
b | c
---------------------------------------------------------------------
2 | 3
5 | 6
8 | 9
(3 rows)
SELECT a FROM test_alter_table;
ERROR: column "a" does not exist
SELECT b FROM test_alter_table;
b
---------------------------------------------------------------------
2
5
8
(3 rows)
-- verify insert runs as expected
INSERT INTO test_alter_table (SELECT 3, 5, 8);
ERROR: INSERT has more expressions than target columns
INSERT INTO test_alter_table (SELECT 5, 8);
-- add a column with no defaults
ALTER FOREIGN TABLE test_alter_table ADD COLUMN d int;
SELECT * FROM test_alter_table;
b | c | d
---------------------------------------------------------------------
2 | 3 |
5 | 6 |
8 | 9 |
5 | 8 |
(4 rows)
INSERT INTO test_alter_table (SELECT 3, 5, 8);
SELECT * FROM test_alter_table;
b | c | d
---------------------------------------------------------------------
2 | 3 |
5 | 6 |
8 | 9 |
5 | 8 |
3 | 5 | 8
(5 rows)
-- add a fixed-length column with default value
ALTER FOREIGN TABLE test_alter_table ADD COLUMN e int default 3;
SELECT * from test_alter_table;
b | c | d | e
---------------------------------------------------------------------
2 | 3 | | 3
5 | 6 | | 3
8 | 9 | | 3
5 | 8 | | 3
3 | 5 | 8 | 3
(5 rows)
INSERT INTO test_alter_table (SELECT 1, 2, 4, 8);
SELECT * from test_alter_table;
b | c | d | e
---------------------------------------------------------------------
2 | 3 | | 3
5 | 6 | | 3
8 | 9 | | 3
5 | 8 | | 3
3 | 5 | 8 | 3
1 | 2 | 4 | 8
(6 rows)
-- add a variable-length column with default value
ALTER FOREIGN TABLE test_alter_table ADD COLUMN f text DEFAULT 'TEXT ME';
SELECT * from test_alter_table;
b | c | d | e | f
---------------------------------------------------------------------
2 | 3 | | 3 | TEXT ME
5 | 6 | | 3 | TEXT ME
8 | 9 | | 3 | TEXT ME
5 | 8 | | 3 | TEXT ME
3 | 5 | 8 | 3 | TEXT ME
1 | 2 | 4 | 8 | TEXT ME
(6 rows)
INSERT INTO test_alter_table (SELECT 1, 2, 4, 8, 'ABCDEF');
SELECT * from test_alter_table;
b | c | d | e | f
---------------------------------------------------------------------
2 | 3 | | 3 | TEXT ME
5 | 6 | | 3 | TEXT ME
8 | 9 | | 3 | TEXT ME
5 | 8 | | 3 | TEXT ME
3 | 5 | 8 | 3 | TEXT ME
1 | 2 | 4 | 8 | TEXT ME
1 | 2 | 4 | 8 | ABCDEF
(7 rows)
-- drop couple of columns
ALTER FOREIGN TABLE test_alter_table DROP COLUMN c;
ALTER FOREIGN TABLE test_alter_table DROP COLUMN e;
ANALYZE test_alter_table;
SELECT * from test_alter_table;
b | d | f
---------------------------------------------------------------------
2 | | TEXT ME
5 | | TEXT ME
8 | | TEXT ME
5 | | TEXT ME
3 | 8 | TEXT ME
1 | 4 | TEXT ME
1 | 4 | ABCDEF
(7 rows)
SELECT count(*) from test_alter_table;
count
---------------------------------------------------------------------
7
(1 row)
SELECT count(t.*) from test_alter_table t;
count
---------------------------------------------------------------------
7
(1 row)
-- unsupported default values
ALTER FOREIGN TABLE test_alter_table ADD COLUMN g boolean DEFAULT isfinite(current_date);
ALTER FOREIGN TABLE test_alter_table ADD COLUMN h DATE DEFAULT current_date;
SELECT * FROM test_alter_table;
ERROR: unsupported default value for column "g"
HINT: Expression is either mutable or does not evaluate to constant value
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN g DROP DEFAULT;
SELECT * FROM test_alter_table;
ERROR: unsupported default value for column "h"
HINT: Expression is either mutable or does not evaluate to constant value
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN h DROP DEFAULT;
ANALYZE test_alter_table;
SELECT * FROM test_alter_table;
b | d | f | g | h
---------------------------------------------------------------------
2 | | TEXT ME | |
5 | | TEXT ME | |
8 | | TEXT ME | |
5 | | TEXT ME | |
3 | 8 | TEXT ME | |
1 | 4 | TEXT ME | |
1 | 4 | ABCDEF | |
(7 rows)
-- unsupported type change
ALTER FOREIGN TABLE test_alter_table ADD COLUMN i int;
ALTER FOREIGN TABLE test_alter_table ADD COLUMN j float;
ALTER FOREIGN TABLE test_alter_table ADD COLUMN k text;
-- this is a valid type change
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN i TYPE float;
-- this is not valid
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN j TYPE int;
ERROR: Column j cannot be cast automatically to type pg_catalog.int4
-- text / varchar conversion is valid both ways
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN k TYPE varchar(20);
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN k TYPE text;
DROP FOREIGN TABLE test_alter_table;
View File
@ -0,0 +1,19 @@
--
-- Test the ANALYZE command for cstore_fdw tables.
--
-- ANALYZE uncompressed table
ANALYZE contestant;
SELECT count(*) FROM pg_stats WHERE tablename='contestant';
count
---------------------------------------------------------------------
6
(1 row)
-- ANALYZE compressed table
ANALYZE contestant_compressed;
SELECT count(*) FROM pg_stats WHERE tablename='contestant_compressed';
count
---------------------------------------------------------------------
6
(1 row)
View File
@ -0,0 +1,10 @@
DROP FOREIGN TABLE collation_block_filtering_test;
DROP FOREIGN TABLE test_block_filtering;
DROP FOREIGN TABLE test_null_values;
DROP FOREIGN TABLE test_other_types;
DROP FOREIGN TABLE test_range_types;
DROP FOREIGN TABLE test_enum_and_composite_types;
DROP TYPE composite_type;
DROP TYPE enum_type;
DROP FOREIGN TABLE test_datetime_types;
DROP FOREIGN TABLE test_array_types;
View File
@ -0,0 +1,58 @@
--
-- Tests the different DROP commands for cstore_fdw tables.
--
-- DROP FOREIGN TABLE
-- DROP SCHEMA
-- DROP EXTENSION
-- DROP DATABASE
--
-- Note that Travis does not create the cstore_fdw extension in the default
-- database (postgres). This has caused different behavior between Travis
-- tests and local tests, so the 'postgres' directory is excluded from the
-- comparison to keep results identical.
-- store postgres database oid
SELECT oid postgres_oid FROM pg_database WHERE datname = 'postgres' \gset
SELECT count(*) AS cstore_data_files_before_drop FROM cstore.cstore_data_files \gset
-- DROP cstore_fdw tables
DROP FOREIGN TABLE contestant;
DROP FOREIGN TABLE contestant_compressed;
-- make sure DROP deletes metadata
SELECT :cstore_data_files_before_drop - count(*) FROM cstore.cstore_data_files;
?column?
---------------------------------------------------------------------
2
(1 row)
-- Create a cstore_fdw table under a schema and drop it.
CREATE SCHEMA test_schema;
CREATE FOREIGN TABLE test_schema.test_table(data int) SERVER cstore_server;
SELECT count(*) AS cstore_data_files_before_drop FROM cstore.cstore_data_files \gset
DROP SCHEMA test_schema CASCADE;
NOTICE: drop cascades to foreign table test_schema.test_table
SELECT :cstore_data_files_before_drop - count(*) FROM cstore.cstore_data_files;
?column?
---------------------------------------------------------------------
1
(1 row)
SELECT current_database() datname \gset
CREATE DATABASE db_to_drop;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
\c db_to_drop
CREATE EXTENSION citus;
CREATE SERVER cstore_server FOREIGN DATA WRAPPER cstore_fdw;
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database() \gset
CREATE FOREIGN TABLE test_table(data int) SERVER cstore_server;
DROP EXTENSION citus CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to server cstore_server
drop cascades to foreign table test_table
-- test database drop
CREATE EXTENSION citus;
CREATE SERVER cstore_server FOREIGN DATA WRAPPER cstore_fdw;
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database() \gset
CREATE FOREIGN TABLE test_table(data int) SERVER cstore_server;
\c :datname
DROP DATABASE db_to_drop;
View File
@ -0,0 +1,18 @@
--
-- Test utility functions for cstore_fdw tables.
--
CREATE FOREIGN TABLE empty_table (a int) SERVER cstore_server;
CREATE FOREIGN TABLE table_with_data (a int) SERVER cstore_server;
CREATE TABLE non_cstore_table (a int);
COPY table_with_data FROM STDIN;
SELECT cstore_table_size('empty_table') < cstore_table_size('table_with_data');
?column?
---------------------------------------------------------------------
t
(1 row)
SELECT cstore_table_size('non_cstore_table');
ERROR: relation is not a cstore table
DROP FOREIGN TABLE empty_table;
DROP FOREIGN TABLE table_with_data;
DROP TABLE non_cstore_table;
View File
@ -0,0 +1,88 @@
--
-- Testing insert on cstore_fdw tables.
--
CREATE FOREIGN TABLE test_insert_command (a int) SERVER cstore_server;
-- test single row inserts fail
select count(*) from test_insert_command;
count
---------------------------------------------------------------------
0
(1 row)
insert into test_insert_command values(1);
ERROR: operation is not supported
select count(*) from test_insert_command;
count
---------------------------------------------------------------------
0
(1 row)
insert into test_insert_command default values;
ERROR: operation is not supported
select count(*) from test_insert_command;
count
---------------------------------------------------------------------
0
(1 row)
-- test inserting from another table succeeds
CREATE TABLE test_insert_command_data (a int);
select count(*) from test_insert_command_data;
count
---------------------------------------------------------------------
0
(1 row)
insert into test_insert_command_data values(1);
select count(*) from test_insert_command_data;
count
---------------------------------------------------------------------
1
(1 row)
insert into test_insert_command select * from test_insert_command_data;
select count(*) from test_insert_command;
count
---------------------------------------------------------------------
1
(1 row)
drop table test_insert_command_data;
drop foreign table test_insert_command;
-- test long attribute value insertion
-- create sufficiently long text so that the data is stored in TOAST
CREATE TABLE test_long_text AS
SELECT a as int_val, string_agg(random()::text, '') as text_val
FROM generate_series(1, 10) a, generate_series(1, 1000) b
GROUP BY a ORDER BY a;
-- store hash values of text for later comparison
CREATE TABLE test_long_text_hash AS
SELECT int_val, md5(text_val) AS hash
FROM test_long_text;
CREATE FOREIGN TABLE test_cstore_long_text(int_val int, text_val text)
SERVER cstore_server;
-- store long text in cstore table
INSERT INTO test_cstore_long_text SELECT * FROM test_long_text;
-- drop source table to remove original text from toast
DROP TABLE test_long_text;
-- check if text data is still available in cstore table
-- by comparing previously stored hash.
SELECT a.int_val
FROM test_long_text_hash a, test_cstore_long_text c
WHERE a.int_val = c.int_val AND a.hash = md5(c.text_val);
int_val
---------------------------------------------------------------------
1
2
3
4
5
6
7
8
9
10
(10 rows)
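-- all 10 hashes match, so the long (TOASTed) values survived the round trip
-- into the cstore table even after the source table was dropped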
DROP TABLE test_long_text_hash;
DROP FOREIGN TABLE test_cstore_long_text;
View File
@ -0,0 +1,105 @@
--
-- Test querying cstore_fdw tables.
--
-- Settings to make the result deterministic
SET datestyle = "ISO, YMD";
-- Query uncompressed data
SELECT count(*) FROM contestant;
count
---------------------------------------------------------------------
8
(1 row)
SELECT avg(rating), stddev_samp(rating) FROM contestant;
avg | stddev_samp
---------------------------------------------------------------------
2344.3750000000000000 | 433.746119785032
(1 row)
SELECT country, avg(rating) FROM contestant WHERE rating > 2200
GROUP BY country ORDER BY country;
country | avg
---------------------------------------------------------------------
XA | 2203.0000000000000000
XB | 2610.5000000000000000
XC | 2236.0000000000000000
XD | 3090.0000000000000000
(4 rows)
SELECT * FROM contestant ORDER BY handle;
handle | birthdate | rating | percentile | country | achievements
---------------------------------------------------------------------
a | 1990-01-10 | 2090 | 97.1 | XA | {a}
b | 1990-11-01 | 2203 | 98.1 | XA | {a,b}
c | 1988-11-01 | 2907 | 99.4 | XB | {w,y}
d | 1985-05-05 | 2314 | 98.3 | XB | {}
e | 1995-05-05 | 2236 | 98.2 | XC | {a}
f | 1983-04-02 | 3090 | 99.6 | XD | {a,b,c,y}
g | 1991-12-13 | 1803 | 85.1 | XD | {a,c}
h | 1987-10-26 | 2112 | 95.4 | XD | {w,a}
(8 rows)
-- Query compressed data
SELECT count(*) FROM contestant_compressed;
count
---------------------------------------------------------------------
8
(1 row)
SELECT avg(rating), stddev_samp(rating) FROM contestant_compressed;
avg | stddev_samp
---------------------------------------------------------------------
2344.3750000000000000 | 433.746119785032
(1 row)
SELECT country, avg(rating) FROM contestant_compressed WHERE rating > 2200
GROUP BY country ORDER BY country;
country | avg
---------------------------------------------------------------------
XA | 2203.0000000000000000
XB | 2610.5000000000000000
XC | 2236.0000000000000000
XD | 3090.0000000000000000
(4 rows)
SELECT * FROM contestant_compressed ORDER BY handle;
handle | birthdate | rating | percentile | country | achievements
---------------------------------------------------------------------
a | 1990-01-10 | 2090 | 97.1 | XA | {a}
b | 1990-11-01 | 2203 | 98.1 | XA | {a,b}
c | 1988-11-01 | 2907 | 99.4 | XB | {w,y}
d | 1985-05-05 | 2314 | 98.3 | XB | {}
e | 1995-05-05 | 2236 | 98.2 | XC | {a}
f | 1983-04-02 | 3090 | 99.6 | XD | {a,b,c,y}
g | 1991-12-13 | 1803 | 85.1 | XD | {a,c}
h | 1987-10-26 | 2112 | 95.4 | XD | {w,a}
(8 rows)
-- Verify that we handle whole-row references correctly
SELECT to_json(v) FROM contestant v ORDER BY rating LIMIT 1;
to_json
---------------------------------------------------------------------
{"handle":"g","birthdate":"1991-12-13","rating":1803,"percentile":85.1,"country":"XD ","achievements":["a","c"]}
(1 row)
-- Test variables used in expressions
CREATE FOREIGN TABLE union_first (a int, b int) SERVER cstore_server;
CREATE FOREIGN TABLE union_second (a int, b int) SERVER cstore_server;
INSERT INTO union_first SELECT a, a FROM generate_series(1, 5) a;
INSERT INTO union_second SELECT a, a FROM generate_series(11, 15) a;
(SELECT a*1, b FROM union_first) union all (SELECT a*1, b FROM union_second);
?column? | b
---------------------------------------------------------------------
1 | 1
2 | 2
3 | 3
4 | 4
5 | 5
11 | 11
12 | 12
13 | 13
14 | 14
15 | 15
(10 rows)
DROP FOREIGN TABLE union_first, union_second;
Some files were not shown because too many files have changed in this diff.