mirror of https://github.com/citusdata/citus.git
Add 'src/backend/columnar/' from commit '4339e911933ca2109db46014befdaccf77c5c13f'
git-subtree-dir: src/backend/columnar git-subtree-mainline:pull/4311/head34de1f645c
git-subtree-split:4339e91193
commit
5fe4c12d49
|
@ -0,0 +1,16 @@
|
|||
#!/bin/bash

# CI build step: install the extension into a staging tree and archive it.
#
# Installs via DESTDIR into install-${PG_MAJOR}/, records the list of
# installed files, and tars the tree so downstream CI jobs can attach it
# from the workspace and unpack it onto / in the test container.
#
# Required env: PG_MAJOR - PostgreSQL major version being built.

set -euxo pipefail
IFS=$'\n\t'

# Fail fast with a clear message if the CI job forgot to set PG_MAJOR.
: "${PG_MAJOR:?PG_MAJOR must be set}"

basedir="$(pwd)"
installdir="${basedir}/install-${PG_MAJOR}"

make install DESTDIR="${installdir}"
pushd "${installdir}"

# Record every installed file; the list doubles as archive input and a
# build log (cat'd so it shows up in the CI step output).
find . -type f -print > "${basedir}/files.lst"
cat "${basedir}/files.lst"

# Read the file list with -T instead of word-splitting $(cat ...), which
# would break on paths containing whitespace or glob characters.
tar cvf "${basedir}/install-${PG_MAJOR}.tar" -T "${basedir}/files.lst"

popd
|
|
@ -0,0 +1,138 @@
|
|||
# CircleCI pipeline: style check, then build + installcheck the extension
# against PostgreSQL 11, 12, and 13. Build jobs persist install-<ver>.tar
# to the workspace; test jobs attach it and unpack onto /.
version: 2.1
orbs:
  codecov: codecov/codecov@1.1.1

jobs:
  check-style:
    docker:
      - image: 'citus/stylechecker:latest'
    steps:
      - checkout
      - run:
          name: 'Check Style'
          command: |
            citus_indent --check
      - run:
          name: 'Check if whitespace fixing changed anything, install editorconfig if it did'
          command: |
            git diff --exit-code

  build-11:
    docker:
      - image: 'citus/extbuilder:11.9'
    steps:
      - checkout
      - run:
          name: 'Configure, Build, and Install'
          command: |
            PG_MAJOR=11 .circleci/build.sh
      - persist_to_workspace:
          root: .
          paths:
            - install-11.tar

  build-12:
    docker:
      - image: 'citus/extbuilder:12.4'
    steps:
      - checkout
      - run:
          name: 'Configure, Build, and Install'
          command: |
            PG_MAJOR=12 .circleci/build.sh
      - persist_to_workspace:
          root: .
          paths:
            - install-12.tar

  build-13:
    docker:
      - image: 'citus/extbuilder:13.0'
    steps:
      - checkout
      - run:
          name: 'Configure, Build, and Install'
          command: |
            PG_MAJOR=13 .circleci/build.sh
      - persist_to_workspace:
          root: .
          paths:
            - install-13.tar

  test-11_checkinstall:
    docker:
      - image: 'citus/exttester:11.9'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Prepare Container & Install Extension'
          command: |
            chown -R circleci:circleci /home/circleci
            tar xfv "${CIRCLE_WORKING_DIRECTORY}/install-${PG_MAJOR}.tar" --directory /
      - run:
          name: 'Run Test'
          command: |
            gosu circleci .circleci/run_test.sh installcheck
      - codecov/upload:
          flags: 'test_11,installcheck'

  test-12_checkinstall:
    docker:
      - image: 'citus/exttester:12.4'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Prepare Container & Install Extension'
          command: |
            chown -R circleci:circleci /home/circleci
            tar xfv "${CIRCLE_WORKING_DIRECTORY}/install-${PG_MAJOR}.tar" --directory /
      - run:
          name: 'Run Test'
          command: |
            gosu circleci .circleci/run_test.sh installcheck
      - codecov/upload:
          flags: 'test_12,installcheck'

  test-13_checkinstall:
    docker:
      - image: 'citus/exttester:13.0'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Prepare Container & Install Extension'
          command: |
            chown -R circleci:circleci /home/circleci
            tar xfv "${CIRCLE_WORKING_DIRECTORY}/install-${PG_MAJOR}.tar" --directory /
      - run:
          name: 'Run Test'
          command: |
            gosu circleci .circleci/run_test.sh installcheck
      - codecov/upload:
          flags: 'test_13,installcheck'

workflows:
  version: 2
  build_and_test:
    jobs:
      - check-style

      - build-11
      - build-12
      - build-13

      - test-11_checkinstall:
          requires: [build-11]
      - test-12_checkinstall:
          requires: [build-12]
      - test-13_checkinstall:
          requires: [build-13]
|
|
@ -0,0 +1,27 @@
|
|||
#!/bin/bash

# CI test step: spin up a throwaway PostgreSQL cluster with cstore_fdw
# preloaded, run the requested make target(s) against it, and dump any
# regression diffs before propagating the test exit status.
#
# Arguments: forwarded verbatim to make (e.g. "installcheck").
# Env: PGPORT - port for the temporary server (default 55432).

set -euxo pipefail
IFS=$'\n\t'

status=0

export PGPORT=${PGPORT:-55432}

# Tear the temporary cluster down on every exit path.
function cleanup {
  pg_ctl -D /tmp/postgres stop
  rm -rf /tmp/postgres
}
trap cleanup EXIT

rm -rf /tmp/postgres
initdb -E unicode /tmp/postgres
echo "shared_preload_libraries = 'cstore_fdw'" >> /tmp/postgres/postgresql.conf
pg_ctl -D /tmp/postgres -o "-p ${PGPORT}" -l /tmp/postgres_logfile start || status=$?
# Show the server log when startup failed. The original test was
# `[ -z $status ]`, which is always false because status is always set;
# the logfile was therefore never printed on failure.
if [ "$status" -ne 0 ]; then cat /tmp/postgres_logfile; fi

make "${@}" || status=$?
diffs="regression.diffs"

# Surface regression diffs in the CI output so failures are diagnosable.
if test -f "${diffs}"; then cat "${diffs}"; fi

exit "$status"
|
|
@ -0,0 +1,26 @@
|
|||
* whitespace=space-before-tab,trailing-space
|
||||
*.[chly] whitespace=space-before-tab,trailing-space,indent-with-non-tab,tabwidth=4
|
||||
*.dsl whitespace=space-before-tab,trailing-space,tab-in-indent
|
||||
*.patch -whitespace
|
||||
*.pl whitespace=space-before-tab,trailing-space,tabwidth=4
|
||||
*.po whitespace=space-before-tab,trailing-space,tab-in-indent,-blank-at-eof
|
||||
*.sgml whitespace=space-before-tab,trailing-space,tab-in-indent,-blank-at-eol
|
||||
*.x[ms]l whitespace=space-before-tab,trailing-space,tab-in-indent
|
||||
|
||||
# Avoid confusing ASCII underlines with leftover merge conflict markers
|
||||
README conflict-marker-size=32
|
||||
README.* conflict-marker-size=32
|
||||
|
||||
# Certain data files that contain special whitespace, and other special cases
|
||||
*.data -whitespace
|
||||
|
||||
# Test output files that contain extra whitespace
|
||||
*.out -whitespace
|
||||
src/test/regress/output/*.source -whitespace
|
||||
|
||||
# These files are maintained or generated elsewhere. We take them as is.
|
||||
configure -whitespace
|
||||
|
||||
# all C files (implementation and header) use our style...
|
||||
*.[ch] citus-style
|
||||
|
|
@ -0,0 +1,68 @@
|
|||
# =====
|
||||
# = C =
|
||||
# =====
|
||||
|
||||
# Object files
|
||||
*.o
|
||||
*.ko
|
||||
*.obj
|
||||
*.elf
|
||||
*.bc
|
||||
|
||||
# Libraries
|
||||
*.lib
|
||||
*.a
|
||||
|
||||
# Shared objects (inc. Windows DLLs)
|
||||
*.dll
|
||||
*.so
|
||||
*.so.*
|
||||
*.dylib
|
||||
|
||||
# Executables
|
||||
*.exe
|
||||
*.app
|
||||
*.i*86
|
||||
*.x86_64
|
||||
*.hex
|
||||
|
||||
# ========
|
||||
# = Gcov =
|
||||
# ========
|
||||
|
||||
# gcc coverage testing tool files
|
||||
|
||||
*.gcno
|
||||
*.gcda
|
||||
*.gcov
|
||||
|
||||
# ====================
|
||||
# = Project-Specific =
|
||||
# ====================
|
||||
|
||||
/data/*.cstore
|
||||
/data/*.footer
|
||||
|
||||
/sql/*block_filtering.sql
|
||||
/sql/*copyto.sql
|
||||
/sql/*create.sql
|
||||
/sql/*data_types.sql
|
||||
/sql/*load.sql
|
||||
|
||||
/expected/*block_filtering.out
|
||||
/expected/*copyto.out
|
||||
/expected/*create.out
|
||||
/expected/*data_types.out
|
||||
/expected/*load.out
|
||||
/results/*
|
||||
/.deps/*
|
||||
/regression.diffs
|
||||
/regression.out
|
||||
.vscode
|
||||
|
||||
*.pb-c.*
|
||||
|
||||
# ignore files that could be created by circleci automation
|
||||
files.lst
|
||||
install-*.tar
|
||||
install-*/
|
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,45 @@
|
|||
{
|
||||
"name": "cstore_fdw",
|
||||
"abstract": "Columnar Store for PostgreSQL",
|
||||
"description": "PostgreSQL extension which implements a Columnar Store.",
|
||||
"version": "1.7.0",
|
||||
"maintainer": "Murat Tuncer <murat.tuncer@microsoft.com>",
|
||||
"license": "apache_2_0",
|
||||
"provides": {
|
||||
"cstore_fdw": {
|
||||
"abstract": "Foreign Data Wrapper for Columnar Store Tables",
|
||||
"file": "cstore_fdw--1.7.sql",
|
||||
"docfile": "README.md",
|
||||
"version": "1.7.0"
|
||||
}
|
||||
},
|
||||
"prereqs": {
|
||||
"runtime": {
|
||||
"requires": {
|
||||
"PostgreSQL": "9.3.0"
|
||||
}
|
||||
}
|
||||
},
|
||||
"resources": {
|
||||
"bugtracker": {
|
||||
"web": "http://github.com/citusdata/cstore_fdw/issues/"
|
||||
},
|
||||
"repository": {
|
||||
"url": "git://github.com/citusdata/cstore_fdw.git",
|
||||
"web": "https://github.com/citusdata/cstore_fdw/",
|
||||
"type": "git"
|
||||
}
|
||||
},
|
||||
"generated_by": "Murat Tuncer",
|
||||
"meta-spec": {
|
||||
"version": "1.0.0",
|
||||
"url": "http://pgxn.org/meta/spec.txt"
|
||||
},
|
||||
"tags": [
|
||||
"orc",
|
||||
"fdw",
|
||||
"foreign data wrapper",
|
||||
"cstore_fdw",
|
||||
"columnar store"
|
||||
]
|
||||
}
|
|
@ -0,0 +1,102 @@
|
|||
# cstore_fdw/Makefile
|
||||
#
|
||||
# Copyright (c) 2016 Citus Data, Inc.
|
||||
#
|
||||
|
||||
MODULE_big = cstore_fdw
|
||||
|
||||
VER := $(lastword $(shell pg_config --version))
|
||||
VER_WORDS = $(subst ., ,$(VER))
|
||||
MVER = $(firstword $(VER_WORDS))
|
||||
|
||||
# error for versions earlier than 10 so that lex comparison will work
|
||||
ifneq ($(shell printf '%02d' $(MVER)),$(MVER))
|
||||
$(error version $(VER) not supported)
|
||||
endif
|
||||
|
||||
# lexicographic comparison of version number
|
||||
ifeq ($(lastword $(sort 12 $(MVER))),$(MVER))
|
||||
USE_TABLEAM = yes
|
||||
USE_FDW = yes
|
||||
else ifeq ($(lastword $(sort 11 $(MVER))),$(MVER))
|
||||
USE_TABLEAM = no
|
||||
USE_FDW = yes
|
||||
else
|
||||
$(error version $(VER) is not supported)
|
||||
endif
|
||||
|
||||
PG_CFLAGS = -std=c11 -Wshadow -Werror
|
||||
OBJS = cstore.o cstore_writer.o cstore_reader.o \
|
||||
cstore_compression.o mod.o cstore_metadata_tables.o
|
||||
|
||||
EXTENSION = cstore_fdw
|
||||
DATA = cstore_fdw--1.7.sql cstore_fdw--1.6--1.7.sql cstore_fdw--1.5--1.6.sql cstore_fdw--1.4--1.5.sql \
|
||||
cstore_fdw--1.3--1.4.sql cstore_fdw--1.2--1.3.sql cstore_fdw--1.1--1.2.sql \
|
||||
cstore_fdw--1.0--1.1.sql cstore_fdw--1.7--1.8.sql
|
||||
|
||||
REGRESS = extension_create
|
||||
ISOLATION = create
|
||||
EXTRA_CLEAN = sql/fdw_block_filtering.sql sql/fdw_create.sql sql/fdw_data_types.sql sql/fdw_load.sql \
|
||||
sql/fdw_copyto.sql expected/fdw_block_filtering.out expected/fdw_create.out \
|
||||
expected/fdw_data_types.out expected/fdw_load.out expected/fdw_copyto.out \
|
||||
sql/am_block_filtering.sql sql/am_create.sql sql/am_data_types.sql sql/am_load.sql \
|
||||
sql/am_copyto.sql expected/am_block_filtering.out \
|
||||
expected/am_data_types.out expected/am_load.out expected/am_copyto.out
|
||||
|
||||
ifeq ($(USE_FDW),yes)
|
||||
PG_CFLAGS += -DUSE_FDW
|
||||
OBJS += cstore_fdw.o
|
||||
REGRESS += fdw_create fdw_load fdw_query fdw_analyze fdw_data_types \
|
||||
fdw_functions fdw_block_filtering fdw_drop fdw_insert \
|
||||
fdw_copyto fdw_alter fdw_rollback fdw_truncate fdw_clean
|
||||
endif
|
||||
|
||||
ifeq ($(USE_TABLEAM),yes)
|
||||
PG_CFLAGS += -DUSE_TABLEAM
|
||||
OBJS += cstore_tableam.o cstore_customscan.o
|
||||
REGRESS += am_create am_load am_query am_analyze am_data_types am_functions \
|
||||
am_drop am_insert am_copyto am_alter am_rollback am_truncate am_vacuum am_clean \
|
||||
am_block_filtering am_join am_trigger am_tableoptions
|
||||
ISOLATION += am_write_concurrency am_vacuum_vs_insert
|
||||
endif
|
||||
|
||||
ifeq ($(enable_coverage),yes)
|
||||
PG_CPPFLAGS += --coverage
|
||||
SHLIB_LINK += --coverage
|
||||
EXTRA_CLEAN += *.gcno
|
||||
endif
|
||||
|
||||
UNAME_S := $(shell uname -s)
|
||||
ifeq ($(UNAME_S),Darwin)
|
||||
PG_CPPFLAGS += -I/usr/local/include
|
||||
endif
|
||||
|
||||
#
|
||||
# Users need to specify their Postgres installation path through pg_config. For
|
||||
# example: /usr/local/pgsql/bin/pg_config or /usr/lib/postgresql/9.3/bin/pg_config
|
||||
#
|
||||
|
||||
PG_CONFIG = pg_config
|
||||
PGXS := $(shell $(PG_CONFIG) --pgxs)
|
||||
include $(PGXS)
|
||||
|
||||
# command for getting postgres source directory is taken from citus/configure.in
|
||||
POSTGRES_SRCDIR=$(shell grep ^abs_top_srcdir $(shell dirname $(shell $(PG_CONFIG) --pgxs))/../Makefile.global|cut -d ' ' -f3-)
|
||||
PGXS_ISOLATION_TESTER=$(top_builddir)/src/test/isolation/pg_isolation_regress
|
||||
|
||||
# If postgres installation doesn't include pg_isolation_regress, try using the
|
||||
# one in postgres source directory.
|
||||
ifeq (,$(wildcard $(PGXS_ISOLATION_TESTER)))
|
||||
pg_isolation_regress_installcheck = \
|
||||
$(POSTGRES_SRCDIR)/src/test/isolation/pg_isolation_regress \
|
||||
--inputdir=$(srcdir) $(EXTRA_REGRESS_OPTS)
|
||||
else
|
||||
pg_isolation_regress_installcheck = \
|
||||
$(PGXS_ISOLATION_TESTER) \
|
||||
--inputdir=$(srcdir) $(EXTRA_REGRESS_OPTS)
|
||||
endif
|
||||
|
||||
installcheck:
|
||||
|
||||
reindent:
|
||||
citus_indent .
|
|
@ -0,0 +1,373 @@
|
|||
cstore_fdw
|
||||
==========
|
||||
|
||||
[][status]
|
||||
[][coverage]
|
||||
|
||||
Cstore_fdw is an open source columnar store extension for PostgreSQL. Columnar stores provide notable benefits for analytics use cases where data is loaded in batches. Cstore_fdw’s columnar nature delivers performance by only reading relevant data from disk, and it may compress data 6x-10x to reduce space requirements for data archival.
|
||||
|
||||
Cstore_fdw is developed by [Citus Data](https://www.citusdata.com) and can be used in combination with [Citus](https://github.com/citusdata/citus), a postgres extension that intelligently distributes your data and queries across many nodes so your database can scale and your queries are fast. If you have any questions about how Citus can help you scale or how to use Citus in combination with cstore_fdw, [please let us know](https://www.citusdata.com/about/contact_us/).
|
||||
|
||||
Join the [Mailing List][mailing-list] to stay on top of the latest developments for Cstore_fdw.
|
||||
|
||||
|
||||
Introduction
|
||||
------------
|
||||
|
||||
This extension uses a format for its data layout that is inspired by ORC,
|
||||
the Optimized Row Columnar format. Like ORC, the cstore format improves
|
||||
upon RCFile developed at Facebook, and brings the following benefits:
|
||||
|
||||
* Compression: Reduces in-memory and on-disk data size by 2-4x. Can be extended
|
||||
to support different codecs.
|
||||
* Column projections: Only reads column data relevant to the query. Improves
|
||||
performance for I/O bound queries.
|
||||
* Skip indexes: Stores min/max statistics for row groups, and uses them to skip
|
||||
over unrelated rows.
|
||||
|
||||
Further, we used the Postgres foreign data wrapper APIs and type representations
|
||||
with this extension. This brings:
|
||||
|
||||
* Support for 40+ Postgres data types. The user can also create new types and
|
||||
use them.
|
||||
* Statistics collection. PostgreSQL's query optimizer uses these stats to
|
||||
evaluate different query plans and pick the best one.
|
||||
* Simple setup. Create foreign table and copy data. Run SQL.
|
||||
|
||||
|
||||
Building
|
||||
--------
|
||||
|
||||
cstore\_fdw depends on protobuf-c for serializing and deserializing table metadata.
|
||||
So we need to install these packages first:
|
||||
|
||||
# Fedora 17+, CentOS, and Amazon Linux
|
||||
sudo yum install protobuf-c-devel
|
||||
|
||||
# Ubuntu 10.4+
|
||||
sudo apt-get install protobuf-c-compiler
|
||||
sudo apt-get install libprotobuf-c0-dev
|
||||
|
||||
# Ubuntu 18.4+
|
||||
sudo apt-get install protobuf-c-compiler
|
||||
sudo apt-get install libprotobuf-c-dev
|
||||
|
||||
# Mac OS X
|
||||
brew install protobuf-c
|
||||
|
||||
**Note.** In CentOS 5, 6, and 7, you may need to install or update EPEL 5, 6, or 7 repositories.
|
||||
See [this page](https://support.rackspace.com/how-to/install-epel-and-additional-repositories-on-centos-and-red-hat/)
|
||||
for instructions.
|
||||
|
||||
**Note.** In Amazon Linux, the EPEL repository is installed by default, but not
|
||||
enabled. See [these instructions](http://aws.amazon.com/amazon-linux-ami/faqs/#epel)
|
||||
for how to enable it.
|
||||
|
||||
Once you have protobuf-c installed on your machine, you are ready to build
|
||||
cstore\_fdw. For this, you need to include the pg\_config directory path in
|
||||
your make command. This path is typically the same as your PostgreSQL
|
||||
installation's bin/ directory path. For example:
|
||||
|
||||
PATH=/usr/local/pgsql/bin/:$PATH make
|
||||
sudo PATH=/usr/local/pgsql/bin/:$PATH make install
|
||||
|
||||
**Note.** cstore_fdw requires PostgreSQL version from 9.3 to 12. It doesn't
|
||||
support earlier versions of PostgreSQL.
|
||||
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
Before using cstore\_fdw, you need to add it to ```shared_preload_libraries```
|
||||
in your ```postgresql.conf``` and restart Postgres:
|
||||
|
||||
shared_preload_libraries = 'cstore_fdw' # (change requires restart)
|
||||
|
||||
The following parameters can be set on a cstore foreign table object.
|
||||
|
||||
* filename (optional): The absolute path to the location for storing table data.
|
||||
If you don't specify the filename option, cstore\_fdw will automatically
|
||||
choose the $PGDATA/cstore\_fdw directory to store the files. If specified the
|
||||
value of this parameter will be used as a prefix for all files created to
|
||||
store table data. For example, the value ```/cstore_fdw/my_table``` could result in
|
||||
the files ```/cstore_fdw/my_table``` and ```/cstore_fdw/my_table.footer``` being used
|
||||
to manage table data.
|
||||
* compression (optional): The compression used for compressing value streams.
|
||||
Valid options are ```none``` and ```pglz```. The default is ```none```.
|
||||
* stripe\_row\_count (optional): Number of rows per stripe. The default is
|
||||
```150000```. Reducing this decreases the amount of memory used for loading data
|
||||
and querying, but also decreases the performance.
|
||||
* block\_row\_count (optional): Number of rows per column block. The default is
|
||||
```10000```. cstore\_fdw compresses, creates skip indexes, and reads from disk
|
||||
at the block granularity. Increasing this value helps with compression and results
|
||||
in fewer reads from disk. However, higher values also reduce the probability of
|
||||
skipping over unrelated row blocks.
|
||||
|
||||
|
||||
To load or append data into a cstore table, you have two options:
|
||||
|
||||
* You can use the [```COPY``` command][copy-command] to load or append data from
|
||||
a file, a program, or STDIN.
|
||||
* You can use the ```INSERT INTO cstore_table SELECT ...``` syntax to load or
|
||||
append data from another table.
|
||||
|
||||
You can use the [```ANALYZE``` command][analyze-command] to collect statistics
|
||||
about the table. These statistics help the query planner determine the
|
||||
most efficient execution plan for each query.
|
||||
|
||||
**Note.** We currently don't support updating table using DELETE, and UPDATE
|
||||
commands. We also don't support single row inserts.
|
||||
|
||||
|
||||
Updating from earlier versions to 1.7
|
||||
---------------------------------------
|
||||
|
||||
To update an existing cstore_fdw installation from versions earlier than 1.6
|
||||
you can take the following steps:
|
||||
|
||||
* Download and install cstore_fdw version 1.6 using instructions from the "Building"
|
||||
section,
|
||||
* Restart the PostgreSQL server,
|
||||
* Run ```ALTER EXTENSION cstore_fdw UPDATE;```
|
||||
|
||||
|
||||
Example
|
||||
-------
|
||||
|
||||
As an example, we demonstrate loading and querying data to/from a column store
|
||||
table from scratch here. Let's start with downloading and decompressing the data
|
||||
files.
|
||||
|
||||
wget http://examples.citusdata.com/customer_reviews_1998.csv.gz
|
||||
wget http://examples.citusdata.com/customer_reviews_1999.csv.gz
|
||||
|
||||
gzip -d customer_reviews_1998.csv.gz
|
||||
gzip -d customer_reviews_1999.csv.gz
|
||||
|
||||
Then, let's log into Postgres, and run the following commands to create a column
|
||||
store foreign table:
|
||||
|
||||
```SQL
|
||||
-- load extension first time after install
|
||||
CREATE EXTENSION cstore_fdw;
|
||||
|
||||
-- create server object
|
||||
CREATE SERVER cstore_server FOREIGN DATA WRAPPER cstore_fdw;
|
||||
|
||||
-- create foreign table
|
||||
CREATE FOREIGN TABLE customer_reviews
|
||||
(
|
||||
customer_id TEXT,
|
||||
review_date DATE,
|
||||
review_rating INTEGER,
|
||||
review_votes INTEGER,
|
||||
review_helpful_votes INTEGER,
|
||||
product_id CHAR(10),
|
||||
product_title TEXT,
|
||||
product_sales_rank BIGINT,
|
||||
product_group TEXT,
|
||||
product_category TEXT,
|
||||
product_subcategory TEXT,
|
||||
similar_product_ids CHAR(10)[]
|
||||
)
|
||||
SERVER cstore_server
|
||||
OPTIONS(compression 'pglz');
|
||||
```
|
||||
|
||||
Next, we load data into the table:
|
||||
|
||||
```SQL
|
||||
\COPY customer_reviews FROM 'customer_reviews_1998.csv' WITH CSV;
|
||||
\COPY customer_reviews FROM 'customer_reviews_1999.csv' WITH CSV;
|
||||
```
|
||||
|
||||
**Note.** If you are getting ```ERROR: cannot copy to foreign table
|
||||
"customer_reviews"``` when trying to run the COPY commands, double check that you
|
||||
have added cstore\_fdw to ```shared_preload_libraries``` in ```postgresql.conf```
|
||||
and restarted Postgres.
|
||||
|
||||
Next, we collect data distribution statistics about the table. This is optional,
|
||||
but usually very helpful:
|
||||
|
||||
```SQL
|
||||
ANALYZE customer_reviews;
|
||||
```
|
||||
|
||||
Finally, let's run some example SQL queries on the column store table.
|
||||
|
||||
```SQL
|
||||
-- Find all reviews a particular customer made on the Dune series in 1998.
|
||||
SELECT
|
||||
customer_id, review_date, review_rating, product_id, product_title
|
||||
FROM
|
||||
customer_reviews
|
||||
WHERE
|
||||
customer_id ='A27T7HVDXA3K2A' AND
|
||||
product_title LIKE '%Dune%' AND
|
||||
review_date >= '1998-01-01' AND
|
||||
review_date <= '1998-12-31';
|
||||
|
||||
-- Do we have a correlation between a book's title's length and its review ratings?
|
||||
SELECT
|
||||
width_bucket(length(product_title), 1, 50, 5) title_length_bucket,
|
||||
round(avg(review_rating), 2) AS review_average,
|
||||
count(*)
|
||||
FROM
|
||||
customer_reviews
|
||||
WHERE
|
||||
product_group = 'Book'
|
||||
GROUP BY
|
||||
title_length_bucket
|
||||
ORDER BY
|
||||
title_length_bucket;
|
||||
```
|
||||
|
||||
|
||||
Usage with Citus
|
||||
----------------
|
||||
|
||||
The example above illustrated how to load data into a PostgreSQL database running
|
||||
on a single host. However, sometimes your data is too large to analyze effectively
|
||||
on a single host. Citus is a product built by Citus Data that allows you to run
|
||||
a distributed PostgreSQL database to analyze your data using the power of multiple
|
||||
hosts. You can easily install and run other PostgreSQL extensions and foreign data
|
||||
wrappers—including cstore_fdw—alongside Citus.
|
||||
|
||||
You can create a cstore_fdw table and distribute it using the
|
||||
```create_distributed_table()``` UDF just like any other table. You can load data
|
||||
using the ```copy``` command as you would do in single node PostgreSQL.
|
||||
|
||||
Using Skip Indexes
|
||||
------------------
|
||||
|
||||
cstore_fdw partitions each column into multiple blocks. Skip indexes store minimum
|
||||
and maximum values for each of these blocks. While scanning the table, if min/max
|
||||
values of the block contradict the WHERE clause, then the block is completely
|
||||
skipped. This way, the query processes less data and hence finishes faster.
|
||||
|
||||
To use skip indexes more efficiently, you should load the data after sorting it
|
||||
on a column that is commonly used in the WHERE clause. This ensures that there is
|
||||
a minimum overlap between blocks and the chance of them being skipped is higher.
|
||||
|
||||
In practice, the data generally has an inherent dimension (for example a time field)
|
||||
on which it is naturally sorted. Usually, the queries also have a filter clause on
|
||||
that column (for example you want to query only the last week's data), and hence you
|
||||
don't need to sort the data in such cases.
|
||||
|
||||
|
||||
Uninstalling cstore_fdw
|
||||
-----------------------
|
||||
|
||||
Before uninstalling the extension, first you need to drop all the cstore tables:
|
||||
|
||||
postgres=# DROP FOREIGN TABLE cstore_table_1;
|
||||
...
|
||||
postgres=# DROP FOREIGN TABLE cstore_table_n;
|
||||
|
||||
Then, you should drop the cstore server and extension:
|
||||
|
||||
postgres=# DROP SERVER cstore_server;
|
||||
postgres=# DROP EXTENSION cstore_fdw;
|
||||
|
||||
cstore\_fdw automatically creates some directories inside the PostgreSQL's data
|
||||
directory to store its files. To remove them, you can run:
|
||||
|
||||
$ rm -rf $PGDATA/cstore_fdw
|
||||
|
||||
Then, you should remove cstore\_fdw from ```shared_preload_libraries``` in
|
||||
your ```postgresql.conf```:
|
||||
|
||||
shared_preload_libraries = '' # (change requires restart)
|
||||
|
||||
Finally, to uninstall the extension you can run the following command in the
|
||||
extension's source code directory. This will clean up all the files copied during
|
||||
the installation:
|
||||
|
||||
$ sudo PATH=/usr/local/pgsql/bin/:$PATH make uninstall
|
||||
|
||||
|
||||
Changeset
|
||||
---------
|
||||
### Version 1.7.0
|
||||
* (Fix) Add support for PostgreSQL 12
|
||||
* (Fix) Support count(t.*) from t type queries
|
||||
* (Fix) Build failures for MacOS 10.14+
|
||||
* (Fix) Make foreign scan parallel safe
|
||||
* (Fix) Add support for PostgreSQL 11 COPY
|
||||
### Version 1.6.2
|
||||
* (Fix) Add support for PostgreSQL 11
|
||||
### Version 1.6.1
|
||||
* (Fix) Fix crash during truncate (Cstore crashing server when enabled, not used)
|
||||
* (Fix) No such file or directory warning when attempting to drop database
|
||||
### Version 1.6
|
||||
* (Feature) Added support for PostgreSQL 10.
|
||||
* (Fix) Removed table files when a schema, extension or database is dropped.
|
||||
* (Fix) Removed unused code fragments.
|
||||
* (Fix) Fixed incorrect initialization of stripe buffers.
|
||||
* (Fix) Checked user access rights when executing truncate.
|
||||
* (Fix) Made copy command cancellable.
|
||||
* (Fix) Fixed namespace issue regarding drop table.
|
||||
|
||||
### Version 1.5.1
|
||||
* (Fix) Verify cstore_fdw server on CREATE FOREIGN TABLE command
|
||||
|
||||
### Version 1.5
|
||||
* (Feature) Added support for PostgreSQL 9.6.
|
||||
* (Fix) Removed table data when cstore_fdw table is indirectly dropped.
|
||||
* (Fix) Removed unused code fragments.
|
||||
* (Fix) Fixed column selection logic to return columns used in expressions.
|
||||
* (Fix) Prevented alter table command from changing column type to incompatible types.
|
||||
|
||||
### Version 1.4.1
|
||||
|
||||
* (Fix) Compatibility fix for Citus [copy command][copy-command].
|
||||
|
||||
### Version 1.4
|
||||
|
||||
* (Feature) Added support for ```TRUNCATE TABLE```
|
||||
* (Fix) Added support for PostgreSQL 9.5
|
||||
|
||||
### Version 1.3
|
||||
|
||||
* (Feature) Added support for ```ALTER TABLE ADD COLUMN``` and ```ALTER TABLE DROP COLUMN```.
|
||||
* (Feature) Added column list support in ```COPY FROM```.
|
||||
* (Optimization) Improve row count estimation, which results in better plans.
|
||||
* (Fix) Fix the deadlock issue during concurrent inserts.
|
||||
* (Fix) Return correct result when using whole row references.
|
||||
|
||||
### Version 1.2
|
||||
|
||||
* (Feature) Added support for ```COPY TO```.
|
||||
* (Feature) Added support for ```INSERT INTO cstore_table SELECT ...```.
|
||||
* (Optimization) Improved memory usage.
|
||||
* (Fix) Dropping multiple cstore tables in a single command cleans-up files
|
||||
of all them.
|
||||
|
||||
### Version 1.1
|
||||
|
||||
* (Feature) Make filename option optional, and use a default directory inside
|
||||
$PGDATA to manage cstore tables.
|
||||
* (Feature) Automatically delete files on DROP FOREIGN TABLE.
|
||||
* (Fix) Return empty table if no data has been loaded. Previously, cstore_fdw
|
||||
errored out.
|
||||
* (Fix) Fix overestimating relation column counts when planning.
|
||||
* (Feature) Added cstore\_table\_size(tablename) for getting the size of a cstore
|
||||
table in bytes.
|
||||
|
||||
|
||||
Copyright
|
||||
---------
|
||||
|
||||
Copyright (c) 2017 Citus Data, Inc.
|
||||
|
||||
This module is free software; you can redistribute it and/or modify it under the
|
||||
Apache v2.0 License.
|
||||
|
||||
For all types of questions and comments about the wrapper, please contact us at
|
||||
engage @ citusdata.com.
|
||||
|
||||
[status]: https://travis-ci.org/citusdata/cstore_fdw
|
||||
[mailing-list]: https://groups.google.com/forum/#!forum/cstore-users
|
||||
[coverage]: https://coveralls.io/r/citusdata/cstore_fdw
|
||||
[copy-command]: http://www.postgresql.org/docs/current/static/sql-copy.html
|
||||
[analyze-command]: http://www.postgresql.org/docs/current/static/sql-analyze.html
|
|
@ -0,0 +1,41 @@
|
|||
To see the list of features and bug-fixes planned for next releases, see our
|
||||
[development roadmap][roadmap].
|
||||
|
||||
Requested Features
|
||||
------------------
|
||||
|
||||
* Improve write performance
|
||||
* Improve read performance
|
||||
* Add checksum logic
|
||||
* Add new compression methods
|
||||
* Enable INSERT/DELETE/UPDATE
|
||||
* Enable users other than superuser to safely create columnar tables (permissions)
|
||||
* Transactional semantics
|
||||
* Add config setting to make pg\_fsync() optional
|
||||
|
||||
|
||||
Known Issues
|
||||
------------
|
||||
|
||||
* Copy command ignores NOT NULL constraints.
|
||||
* Planning functions don't take into account average column width.
|
||||
* Planning functions don't correctly take into account block skipping benefits.
|
||||
* On 32-bit platforms, when file size is outside the 32-bit signed range, EXPLAIN
|
||||
command prints incorrect file size.
|
||||
* If two different columnar tables are configured to point to the same file,
|
||||
writes to the underlying file aren't protected from each other.
|
||||
* When a data load is in progress, concurrent reads on the table overestimate the
|
||||
page count.
|
||||
* We have a minor memory leak in CStoreEndWrite. We need to also free the
|
||||
comparisonFunctionArray.
|
||||
* block\_filtering test fails on Ubuntu because the "da\_DK" locale is not enabled
|
||||
by default.
|
||||
* We don't yet incorporate the compression method's impact on disk I/O into cost
|
||||
estimates.
|
||||
* CitusDB integration errors:
|
||||
* Concurrent staging cstore\_fdw tables doesn't work.
|
||||
* Setting a default value for column with ALTER TABLE has limited support for
|
||||
existing rows.
|
||||
|
||||
[roadmap]: https://github.com/citusdata/cstore_fdw/wiki/Roadmap
|
||||
|
|
@ -0,0 +1,101 @@
|
|||
/*-------------------------------------------------------------------------
|
||||
*
|
||||
* cstore.c
|
||||
*
|
||||
* This file contains...
|
||||
*
|
||||
* Copyright (c) 2016, Citus Data, Inc.
|
||||
*
|
||||
* $Id$
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
#include "postgres.h"
|
||||
|
||||
#include <sys/stat.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "miscadmin.h"
|
||||
#include "utils/guc.h"
|
||||
#include "utils/rel.h"
|
||||
|
||||
#include "cstore.h"
|
||||
|
||||
/* Default values for option parameters */
|
||||
#define DEFAULT_COMPRESSION_TYPE COMPRESSION_NONE
|
||||
#define DEFAULT_STRIPE_ROW_COUNT 150000
|
||||
#define DEFAULT_BLOCK_ROW_COUNT 10000
|
||||
|
||||
int cstore_compression = DEFAULT_COMPRESSION_TYPE;
|
||||
int cstore_stripe_row_count = DEFAULT_STRIPE_ROW_COUNT;
|
||||
int cstore_block_row_count = DEFAULT_BLOCK_ROW_COUNT;
|
||||
|
||||
static const struct config_enum_entry cstore_compression_options[] =
|
||||
{
|
||||
{ "none", COMPRESSION_NONE, false },
|
||||
{ "pglz", COMPRESSION_PG_LZ, false },
|
||||
{ NULL, 0, false }
|
||||
};
|
||||
|
||||
void
|
||||
cstore_init()
|
||||
{
|
||||
DefineCustomEnumVariable("cstore.compression",
|
||||
"Compression type for cstore.",
|
||||
NULL,
|
||||
&cstore_compression,
|
||||
DEFAULT_COMPRESSION_TYPE,
|
||||
cstore_compression_options,
|
||||
PGC_USERSET,
|
||||
0,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL);
|
||||
|
||||
DefineCustomIntVariable("cstore.stripe_row_count",
|
||||
"Maximum number of tuples per stripe.",
|
||||
NULL,
|
||||
&cstore_stripe_row_count,
|
||||
DEFAULT_STRIPE_ROW_COUNT,
|
||||
STRIPE_ROW_COUNT_MINIMUM,
|
||||
STRIPE_ROW_COUNT_MAXIMUM,
|
||||
PGC_USERSET,
|
||||
0,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL);
|
||||
|
||||
DefineCustomIntVariable("cstore.block_row_count",
|
||||
"Maximum number of rows per block.",
|
||||
NULL,
|
||||
&cstore_block_row_count,
|
||||
DEFAULT_BLOCK_ROW_COUNT,
|
||||
BLOCK_ROW_COUNT_MINIMUM,
|
||||
BLOCK_ROW_COUNT_MAXIMUM,
|
||||
PGC_USERSET,
|
||||
0,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL);
|
||||
}
|
||||
|
||||
|
||||
/* ParseCompressionType converts a string to a compression type. */
|
||||
CompressionType
|
||||
ParseCompressionType(const char *compressionTypeString)
|
||||
{
|
||||
CompressionType compressionType = COMPRESSION_TYPE_INVALID;
|
||||
Assert(compressionTypeString != NULL);
|
||||
|
||||
if (strncmp(compressionTypeString, COMPRESSION_STRING_NONE, NAMEDATALEN) == 0)
|
||||
{
|
||||
compressionType = COMPRESSION_NONE;
|
||||
}
|
||||
else if (strncmp(compressionTypeString, COMPRESSION_STRING_PG_LZ, NAMEDATALEN) == 0)
|
||||
{
|
||||
compressionType = COMPRESSION_PG_LZ;
|
||||
}
|
||||
|
||||
return compressionType;
|
||||
}
|
|
@ -0,0 +1,351 @@
|
|||
/*-------------------------------------------------------------------------
|
||||
*
|
||||
* cstore.h
|
||||
*
|
||||
* Type and function declarations for CStore
|
||||
*
|
||||
* Copyright (c) 2016, Citus Data, Inc.
|
||||
*
|
||||
* $Id$
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
#ifndef CSTORE_H
|
||||
#define CSTORE_H
|
||||
|
||||
#include "fmgr.h"
|
||||
#include "lib/stringinfo.h"
|
||||
#include "nodes/parsenodes.h"
|
||||
#include "storage/bufpage.h"
|
||||
#include "storage/lockdefs.h"
|
||||
#include "utils/relcache.h"
|
||||
#include "utils/snapmgr.h"
|
||||
|
||||
/* Defines for valid option names */
|
||||
#define OPTION_NAME_COMPRESSION_TYPE "compression"
|
||||
#define OPTION_NAME_STRIPE_ROW_COUNT "stripe_row_count"
|
||||
#define OPTION_NAME_BLOCK_ROW_COUNT "block_row_count"
|
||||
|
||||
/* Limits for option parameters */
|
||||
#define STRIPE_ROW_COUNT_MINIMUM 1000
|
||||
#define STRIPE_ROW_COUNT_MAXIMUM 10000000
|
||||
#define BLOCK_ROW_COUNT_MINIMUM 1000
|
||||
#define BLOCK_ROW_COUNT_MAXIMUM 100000
|
||||
|
||||
/* String representations of compression types */
|
||||
#define COMPRESSION_STRING_NONE "none"
|
||||
#define COMPRESSION_STRING_PG_LZ "pglz"
|
||||
|
||||
/* CStore file signature */
|
||||
#define CSTORE_MAGIC_NUMBER "citus_cstore"
|
||||
#define CSTORE_VERSION_MAJOR 1
|
||||
#define CSTORE_VERSION_MINOR 7
|
||||
|
||||
/* miscellaneous defines */
|
||||
#define CSTORE_FDW_NAME "cstore_fdw"
|
||||
#define CSTORE_TUPLE_COST_MULTIPLIER 10
|
||||
#define CSTORE_POSTSCRIPT_SIZE_LENGTH 1
|
||||
#define CSTORE_POSTSCRIPT_SIZE_MAX 256
|
||||
|
||||
/* Enumeration for cstore file's compression method */
|
||||
typedef enum
|
||||
{
|
||||
COMPRESSION_TYPE_INVALID = -1,
|
||||
COMPRESSION_NONE = 0,
|
||||
COMPRESSION_PG_LZ = 1,
|
||||
|
||||
COMPRESSION_COUNT
|
||||
} CompressionType;
|
||||
|
||||
|
||||
/*
|
||||
* CStoreFdwOptions holds the option values to be used when reading or writing
|
||||
* a cstore file. To resolve these values, we first check foreign table's options,
|
||||
* and if not present, we then fall back to the default values specified above.
|
||||
*/
|
||||
typedef struct CStoreOptions
|
||||
{
|
||||
CompressionType compressionType;
|
||||
uint64 stripeRowCount;
|
||||
uint32 blockRowCount;
|
||||
} CStoreOptions;
|
||||
|
||||
|
||||
/*
|
||||
* StripeMetadata represents information about a stripe. This information is
|
||||
* stored in the cstore file's footer.
|
||||
*/
|
||||
typedef struct StripeMetadata
|
||||
{
|
||||
uint64 fileOffset;
|
||||
uint64 dataLength;
|
||||
uint32 columnCount;
|
||||
uint32 blockCount;
|
||||
uint32 blockRowCount;
|
||||
uint64 rowCount;
|
||||
uint64 id;
|
||||
} StripeMetadata;
|
||||
|
||||
|
||||
/* DataFileMetadata represents the metadata of a cstore file. */
|
||||
typedef struct DataFileMetadata
|
||||
{
|
||||
List *stripeMetadataList;
|
||||
uint64 blockRowCount;
|
||||
uint64 stripeRowCount;
|
||||
CompressionType compression;
|
||||
} DataFileMetadata;
|
||||
|
||||
|
||||
/* ColumnBlockSkipNode contains statistics for a ColumnBlockData. */
|
||||
typedef struct ColumnBlockSkipNode
|
||||
{
|
||||
/* statistics about values of a column block */
|
||||
bool hasMinMax;
|
||||
Datum minimumValue;
|
||||
Datum maximumValue;
|
||||
uint64 rowCount;
|
||||
|
||||
/*
|
||||
* Offsets and sizes of value and exists streams in the column data.
|
||||
* These enable us to skip reading suppressed row blocks, and start reading
|
||||
* a block without reading previous blocks.
|
||||
*/
|
||||
uint64 valueBlockOffset;
|
||||
uint64 valueLength;
|
||||
uint64 existsBlockOffset;
|
||||
uint64 existsLength;
|
||||
|
||||
CompressionType valueCompressionType;
|
||||
} ColumnBlockSkipNode;
|
||||
|
||||
|
||||
/*
|
||||
* StripeSkipList can be used for skipping row blocks. It contains a column block
|
||||
* skip node for each block of each column. blockSkipNodeArray[column][block]
|
||||
* is the entry for the specified column block.
|
||||
*/
|
||||
typedef struct StripeSkipList
|
||||
{
|
||||
ColumnBlockSkipNode **blockSkipNodeArray;
|
||||
uint32 columnCount;
|
||||
uint32 blockCount;
|
||||
} StripeSkipList;
|
||||
|
||||
|
||||
/*
|
||||
* BlockData represents a block of data for multiple columns. valueArray stores
|
||||
* the values of data, and existsArray stores whether a value is present.
|
||||
* valueBuffer is used to store (uncompressed) serialized values
|
||||
* referenced by Datum's in valueArray. It is only used for by-reference Datum's.
|
||||
* There is a one-to-one correspondence between valueArray and existsArray.
|
||||
*/
|
||||
typedef struct BlockData
|
||||
{
|
||||
uint32 rowCount;
|
||||
uint32 columnCount;
|
||||
|
||||
/*
|
||||
* Following are indexed by [column][row]. If a column is not projected,
|
||||
* then existsArray[column] and valueArray[column] are NULL.
|
||||
*/
|
||||
bool **existsArray;
|
||||
Datum **valueArray;
|
||||
|
||||
/* valueBuffer keeps actual data for type-by-reference datums from valueArray. */
|
||||
StringInfo *valueBufferArray;
|
||||
} BlockData;
|
||||
|
||||
|
||||
/*
|
||||
* ColumnBlockBuffers represents a block of serialized data in a column.
|
||||
* valueBuffer stores the serialized values of data, and existsBuffer stores
|
||||
* serialized value of presence information. valueCompressionType contains
|
||||
* compression type if valueBuffer is compressed. Finally rowCount has
|
||||
* the number of rows in this block.
|
||||
*/
|
||||
typedef struct ColumnBlockBuffers
|
||||
{
|
||||
StringInfo existsBuffer;
|
||||
StringInfo valueBuffer;
|
||||
CompressionType valueCompressionType;
|
||||
} ColumnBlockBuffers;
|
||||
|
||||
|
||||
/*
|
||||
* ColumnBuffers represents data buffers for a column in a row stripe. Each
|
||||
* column is made of multiple column blocks.
|
||||
*/
|
||||
typedef struct ColumnBuffers
|
||||
{
|
||||
ColumnBlockBuffers **blockBuffersArray;
|
||||
} ColumnBuffers;
|
||||
|
||||
|
||||
/* StripeBuffers represents data for a row stripe in a cstore file. */
|
||||
typedef struct StripeBuffers
|
||||
{
|
||||
uint32 columnCount;
|
||||
uint32 rowCount;
|
||||
ColumnBuffers **columnBuffersArray;
|
||||
} StripeBuffers;
|
||||
|
||||
|
||||
/* TableReadState represents state of a cstore file read operation. */
|
||||
typedef struct TableReadState
|
||||
{
|
||||
DataFileMetadata *datafileMetadata;
|
||||
StripeMetadata *currentStripeMetadata;
|
||||
TupleDesc tupleDescriptor;
|
||||
Relation relation;
|
||||
|
||||
/*
|
||||
* List of Var pointers for columns in the query. We use this both for
|
||||
* getting vector of projected columns, and also when we want to build
|
||||
* base constraint to find selected row blocks.
|
||||
*/
|
||||
List *projectedColumnList;
|
||||
|
||||
List *whereClauseList;
|
||||
MemoryContext stripeReadContext;
|
||||
StripeBuffers *stripeBuffers;
|
||||
uint32 readStripeCount;
|
||||
uint64 stripeReadRowCount;
|
||||
BlockData *blockData;
|
||||
int32 deserializedBlockIndex;
|
||||
} TableReadState;
|
||||
|
||||
|
||||
/* TableWriteState represents state of a cstore file write operation. */
|
||||
typedef struct TableWriteState
|
||||
{
|
||||
CompressionType compressionType;
|
||||
TupleDesc tupleDescriptor;
|
||||
FmgrInfo **comparisonFunctionArray;
|
||||
Relation relation;
|
||||
|
||||
MemoryContext stripeWriteContext;
|
||||
StripeBuffers *stripeBuffers;
|
||||
StripeSkipList *stripeSkipList;
|
||||
uint32 stripeMaxRowCount;
|
||||
uint32 blockRowCount;
|
||||
BlockData *blockData;
|
||||
|
||||
/*
|
||||
* compressionBuffer buffer is used as temporary storage during
|
||||
* data value compression operation. It is kept here to minimize
|
||||
* memory allocations. It lives in stripeWriteContext and gets
|
||||
* deallocated when memory context is reset.
|
||||
*/
|
||||
StringInfo compressionBuffer;
|
||||
} TableWriteState;
|
||||
|
||||
extern int cstore_compression;
|
||||
extern int cstore_stripe_row_count;
|
||||
extern int cstore_block_row_count;
|
||||
|
||||
extern void cstore_init(void);
|
||||
|
||||
extern CompressionType ParseCompressionType(const char *compressionTypeString);
|
||||
|
||||
/* Function declarations for writing to a cstore file */
|
||||
extern TableWriteState * CStoreBeginWrite(Relation relation,
|
||||
CompressionType compressionType,
|
||||
uint64 stripeMaxRowCount,
|
||||
uint32 blockRowCount,
|
||||
TupleDesc tupleDescriptor);
|
||||
extern void CStoreWriteRow(TableWriteState *state, Datum *columnValues,
|
||||
bool *columnNulls);
|
||||
extern void CStoreEndWrite(TableWriteState *state);
|
||||
|
||||
/* Function declarations for reading from a cstore file */
|
||||
extern TableReadState * CStoreBeginRead(Relation relation,
|
||||
TupleDesc tupleDescriptor,
|
||||
List *projectedColumnList, List *qualConditions);
|
||||
extern bool CStoreReadFinished(TableReadState *state);
|
||||
extern bool CStoreReadNextRow(TableReadState *state, Datum *columnValues,
|
||||
bool *columnNulls);
|
||||
extern void CStoreRescan(TableReadState *readState);
|
||||
extern void CStoreEndRead(TableReadState *state);
|
||||
|
||||
/* Function declarations for common functions */
|
||||
extern FmgrInfo * GetFunctionInfoOrNull(Oid typeId, Oid accessMethodId,
|
||||
int16 procedureId);
|
||||
extern BlockData * CreateEmptyBlockData(uint32 columnCount, bool *columnMask,
|
||||
uint32 blockRowCount);
|
||||
extern void FreeBlockData(BlockData *blockData);
|
||||
extern uint64 CStoreTableRowCount(Relation relation);
|
||||
extern bool CompressBuffer(StringInfo inputBuffer, StringInfo outputBuffer,
|
||||
CompressionType compressionType);
|
||||
extern StringInfo DecompressBuffer(StringInfo buffer, CompressionType compressionType);
|
||||
extern char * CompressionTypeStr(CompressionType type);
|
||||
|
||||
/* cstore_metadata_tables.c */
|
||||
extern void DeleteDataFileMetadataRowIfExists(Oid relfilenode);
|
||||
extern void InitCStoreDataFileMetadata(Oid relfilenode, int blockRowCount, int
|
||||
stripeRowCount, CompressionType compression);
|
||||
extern void UpdateCStoreDataFileMetadata(Oid relfilenode, int blockRowCount, int
|
||||
stripeRowCount, CompressionType compression);
|
||||
extern DataFileMetadata * ReadDataFileMetadata(Oid relfilenode, bool missingOk);
|
||||
extern uint64 GetHighestUsedAddress(Oid relfilenode);
|
||||
extern StripeMetadata ReserveStripe(Relation rel, uint64 size,
|
||||
uint64 rowCount, uint64 columnCount,
|
||||
uint64 blockCount, uint64 blockRowCount);
|
||||
extern void SaveStripeSkipList(Oid relfilenode, uint64 stripe,
|
||||
StripeSkipList *stripeSkipList,
|
||||
TupleDesc tupleDescriptor);
|
||||
extern StripeSkipList * ReadStripeSkipList(Oid relfilenode, uint64 stripe,
|
||||
TupleDesc tupleDescriptor,
|
||||
uint32 blockCount);
|
||||
|
||||
typedef struct SmgrAddr
|
||||
{
|
||||
BlockNumber blockno;
|
||||
uint32 offset;
|
||||
} SmgrAddr;
|
||||
|
||||
/*
|
||||
* Map logical offsets (as tracked in the metadata) to a physical page and
|
||||
* offset where the data is kept.
|
||||
*/
|
||||
static inline SmgrAddr
|
||||
logical_to_smgr(uint64 logicalOffset)
|
||||
{
|
||||
uint64 bytes_per_page = BLCKSZ - SizeOfPageHeaderData;
|
||||
SmgrAddr addr;
|
||||
|
||||
addr.blockno = logicalOffset / bytes_per_page;
|
||||
addr.offset = SizeOfPageHeaderData + (logicalOffset % bytes_per_page);
|
||||
|
||||
return addr;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Map a physical page adnd offset address to a logical address.
|
||||
*/
|
||||
static inline uint64
|
||||
smgr_to_logical(SmgrAddr addr)
|
||||
{
|
||||
uint64 bytes_per_page = BLCKSZ - SizeOfPageHeaderData;
|
||||
return bytes_per_page * addr.blockno + addr.offset - SizeOfPageHeaderData;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Get the first usable address of next block.
|
||||
*/
|
||||
static inline SmgrAddr
|
||||
next_block_start(SmgrAddr addr)
|
||||
{
|
||||
SmgrAddr result = {
|
||||
.blockno = addr.blockno + 1,
|
||||
.offset = SizeOfPageHeaderData
|
||||
};
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
#endif /* CSTORE_H */
|
|
@ -0,0 +1,196 @@
|
|||
/*-------------------------------------------------------------------------
|
||||
*
|
||||
* cstore_compression.c
|
||||
*
|
||||
* This file contains compression/decompression functions definitions
|
||||
* used in cstore_fdw.
|
||||
*
|
||||
* Copyright (c) 2016, Citus Data, Inc.
|
||||
*
|
||||
* $Id$
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
#include "postgres.h"
|
||||
|
||||
#if PG_VERSION_NUM >= 90500
|
||||
#include "common/pg_lzcompress.h"
|
||||
#else
|
||||
#include "utils/pg_lzcompress.h"
|
||||
#endif
|
||||
|
||||
#include "cstore.h"
|
||||
|
||||
|
||||
#if PG_VERSION_NUM >= 90500
|
||||
|
||||
/*
|
||||
 * The information at the start of the compressed data. This description is taken
|
||||
* from pg_lzcompress in pre-9.5 version of PostgreSQL.
|
||||
*/
|
||||
typedef struct CStoreCompressHeader
|
||||
{
|
||||
int32 vl_len_; /* varlena header (do not touch directly!) */
|
||||
int32 rawsize;
|
||||
} CStoreCompressHeader;
|
||||
|
||||
/*
|
||||
* Utilities for manipulation of header information for compressed data
|
||||
*/
|
||||
|
||||
#define CSTORE_COMPRESS_HDRSZ ((int32) sizeof(CStoreCompressHeader))
|
||||
#define CSTORE_COMPRESS_RAWSIZE(ptr) (((CStoreCompressHeader *) (ptr))->rawsize)
|
||||
#define CSTORE_COMPRESS_RAWDATA(ptr) (((char *) (ptr)) + CSTORE_COMPRESS_HDRSZ)
|
||||
#define CSTORE_COMPRESS_SET_RAWSIZE(ptr, len) (((CStoreCompressHeader *) (ptr))->rawsize = \
|
||||
(len))
|
||||
|
||||
#else
|
||||
|
||||
#define CSTORE_COMPRESS_HDRSZ (0)
|
||||
#define CSTORE_COMPRESS_RAWSIZE(ptr) (PGLZ_RAW_SIZE((PGLZ_Header *) buffer->data))
|
||||
#define CSTORE_COMPRESS_RAWDATA(ptr) (((PGLZ_Header *) (ptr)))
|
||||
#define CSTORE_COMPRESS_SET_RAWSIZE(ptr, len) (((CStoreCompressHeader *) (ptr))->rawsize = \
|
||||
(len))
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* CompressBuffer compresses the given buffer with the given compression type
|
||||
* outputBuffer enlarged to contain compressed data. The function returns true
|
||||
* if compression is done, returns false if compression is not done.
|
||||
* outputBuffer is valid only if the function returns true.
|
||||
*/
|
||||
bool
|
||||
CompressBuffer(StringInfo inputBuffer, StringInfo outputBuffer,
|
||||
CompressionType compressionType)
|
||||
{
|
||||
uint64 maximumLength = PGLZ_MAX_OUTPUT(inputBuffer->len) + CSTORE_COMPRESS_HDRSZ;
|
||||
bool compressionResult = false;
|
||||
#if PG_VERSION_NUM >= 90500
|
||||
int32 compressedByteCount = 0;
|
||||
#endif
|
||||
|
||||
if (compressionType != COMPRESSION_PG_LZ)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
resetStringInfo(outputBuffer);
|
||||
enlargeStringInfo(outputBuffer, maximumLength);
|
||||
|
||||
#if PG_VERSION_NUM >= 90500
|
||||
compressedByteCount = pglz_compress((const char *) inputBuffer->data,
|
||||
inputBuffer->len,
|
||||
CSTORE_COMPRESS_RAWDATA(outputBuffer->data),
|
||||
PGLZ_strategy_always);
|
||||
if (compressedByteCount >= 0)
|
||||
{
|
||||
CSTORE_COMPRESS_SET_RAWSIZE(outputBuffer->data, inputBuffer->len);
|
||||
SET_VARSIZE_COMPRESSED(outputBuffer->data,
|
||||
compressedByteCount + CSTORE_COMPRESS_HDRSZ);
|
||||
compressionResult = true;
|
||||
}
|
||||
#else
|
||||
|
||||
compressionResult = pglz_compress(inputBuffer->data, inputBuffer->len,
|
||||
CSTORE_COMPRESS_RAWDATA(outputBuffer->data),
|
||||
PGLZ_strategy_always);
|
||||
#endif
|
||||
|
||||
if (compressionResult)
|
||||
{
|
||||
outputBuffer->len = VARSIZE(outputBuffer->data);
|
||||
}
|
||||
|
||||
return compressionResult;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* DecompressBuffer decompresses the given buffer with the given compression
|
||||
* type. This function returns the buffer as-is when no compression is applied.
|
||||
*/
|
||||
StringInfo
|
||||
DecompressBuffer(StringInfo buffer, CompressionType compressionType)
|
||||
{
|
||||
StringInfo decompressedBuffer = NULL;
|
||||
|
||||
Assert(compressionType == COMPRESSION_NONE || compressionType == COMPRESSION_PG_LZ);
|
||||
|
||||
if (compressionType == COMPRESSION_NONE)
|
||||
{
|
||||
/* in case of no compression, return buffer */
|
||||
decompressedBuffer = buffer;
|
||||
}
|
||||
else if (compressionType == COMPRESSION_PG_LZ)
|
||||
{
|
||||
uint32 compressedDataSize = VARSIZE(buffer->data) - CSTORE_COMPRESS_HDRSZ;
|
||||
uint32 decompressedDataSize = CSTORE_COMPRESS_RAWSIZE(buffer->data);
|
||||
char *decompressedData = NULL;
|
||||
#if PG_VERSION_NUM >= 90500
|
||||
int32 decompressedByteCount = 0;
|
||||
#endif
|
||||
|
||||
if (compressedDataSize + CSTORE_COMPRESS_HDRSZ != buffer->len)
|
||||
{
|
||||
ereport(ERROR, (errmsg("cannot decompress the buffer"),
|
||||
errdetail("Expected %u bytes, but received %u bytes",
|
||||
compressedDataSize, buffer->len)));
|
||||
}
|
||||
|
||||
decompressedData = palloc0(decompressedDataSize);
|
||||
|
||||
#if PG_VERSION_NUM >= 90500
|
||||
|
||||
#if PG_VERSION_NUM >= 120000
|
||||
decompressedByteCount = pglz_decompress(CSTORE_COMPRESS_RAWDATA(buffer->data),
|
||||
compressedDataSize, decompressedData,
|
||||
decompressedDataSize, true);
|
||||
#else
|
||||
decompressedByteCount = pglz_decompress(CSTORE_COMPRESS_RAWDATA(buffer->data),
|
||||
compressedDataSize, decompressedData,
|
||||
decompressedDataSize);
|
||||
#endif
|
||||
|
||||
if (decompressedByteCount < 0)
|
||||
{
|
||||
ereport(ERROR, (errmsg("cannot decompress the buffer"),
|
||||
errdetail("compressed data is corrupted")));
|
||||
}
|
||||
#else
|
||||
pglz_decompress((PGLZ_Header *) buffer->data, decompressedData);
|
||||
#endif
|
||||
|
||||
decompressedBuffer = palloc0(sizeof(StringInfoData));
|
||||
decompressedBuffer->data = decompressedData;
|
||||
decompressedBuffer->len = decompressedDataSize;
|
||||
decompressedBuffer->maxlen = decompressedDataSize;
|
||||
}
|
||||
|
||||
return decompressedBuffer;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* CompressionTypeStr returns string representation of a compression type.
|
||||
*/
|
||||
char *
|
||||
CompressionTypeStr(CompressionType type)
|
||||
{
|
||||
switch (type)
|
||||
{
|
||||
case COMPRESSION_NONE:
|
||||
{
|
||||
return "none";
|
||||
}
|
||||
|
||||
case COMPRESSION_PG_LZ:
|
||||
{
|
||||
return "pglz";
|
||||
}
|
||||
|
||||
default:
|
||||
return "unknown";
|
||||
}
|
||||
}
|
|
@ -0,0 +1,433 @@
|
|||
/*-------------------------------------------------------------------------
|
||||
*
|
||||
* cstore_customscan.c
|
||||
*
|
||||
* This file contains the implementation of a postgres custom scan that
|
||||
* we use to push down the projections into the table access methods.
|
||||
*
|
||||
* $Id$
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
#include "postgres.h"
|
||||
|
||||
#include "access/skey.h"
|
||||
#include "nodes/extensible.h"
|
||||
#include "nodes/pg_list.h"
|
||||
#include "nodes/plannodes.h"
|
||||
#include "optimizer/optimizer.h"
|
||||
#include "optimizer/pathnode.h"
|
||||
#include "optimizer/paths.h"
|
||||
#include "optimizer/restrictinfo.h"
|
||||
#include "utils/relcache.h"
|
||||
|
||||
#include "cstore.h"
|
||||
#include "cstore_customscan.h"
|
||||
#include "cstore_tableam.h"
|
||||
|
||||
typedef struct CStoreScanPath
|
||||
{
|
||||
CustomPath custom_path;
|
||||
|
||||
/* place for local state during planning */
|
||||
} CStoreScanPath;
|
||||
|
||||
typedef struct CStoreScanScan
|
||||
{
|
||||
CustomScan custom_scan;
|
||||
|
||||
/* place for local state during execution */
|
||||
} CStoreScanScan;
|
||||
|
||||
typedef struct CStoreScanState
|
||||
{
|
||||
CustomScanState custom_scanstate;
|
||||
|
||||
List *qual;
|
||||
} CStoreScanState;
|
||||
|
||||
|
||||
static void CStoreSetRelPathlistHook(PlannerInfo *root, RelOptInfo *rel, Index rti,
|
||||
RangeTblEntry *rte);
|
||||
static Path * CreateCStoreScanPath(RelOptInfo *rel, RangeTblEntry *rte);
|
||||
static Cost CStoreScanCost(RangeTblEntry *rte);
|
||||
static Plan * CStoreScanPath_PlanCustomPath(PlannerInfo *root,
|
||||
RelOptInfo *rel,
|
||||
struct CustomPath *best_path,
|
||||
List *tlist,
|
||||
List *clauses,
|
||||
List *custom_plans);
|
||||
|
||||
static Node * CStoreScan_CreateCustomScanState(CustomScan *cscan);
|
||||
|
||||
static void CStoreScan_BeginCustomScan(CustomScanState *node, EState *estate, int eflags);
|
||||
static TupleTableSlot * CStoreScan_ExecCustomScan(CustomScanState *node);
|
||||
static void CStoreScan_EndCustomScan(CustomScanState *node);
|
||||
static void CStoreScan_ReScanCustomScan(CustomScanState *node);
|
||||
|
||||
/* saved hook value in case of unload */
|
||||
static set_rel_pathlist_hook_type PreviousSetRelPathlistHook = NULL;
|
||||
|
||||
static bool EnableCStoreCustomScan = true;
|
||||
|
||||
|
||||
const struct CustomPathMethods CStoreScanPathMethods = {
|
||||
.CustomName = "CStoreScan",
|
||||
.PlanCustomPath = CStoreScanPath_PlanCustomPath,
|
||||
};
|
||||
|
||||
const struct CustomScanMethods CStoreScanScanMethods = {
|
||||
.CustomName = "CStoreScan",
|
||||
.CreateCustomScanState = CStoreScan_CreateCustomScanState,
|
||||
};
|
||||
|
||||
/* executor callbacks for the cstore custom scan */
const struct CustomExecMethods CStoreExecuteMethods = {
	.CustomName = "CStoreScan",

	.BeginCustomScan = CStoreScan_BeginCustomScan,
	.ExecCustomScan = CStoreScan_ExecCustomScan,
	.EndCustomScan = CStoreScan_EndCustomScan,
	.ReScanCustomScan = CStoreScan_ReScanCustomScan,

	/* no custom EXPLAIN output is produced */
	.ExplainCustomScan = NULL,
};
|
||||
|
||||
|
||||
/*
|
||||
* cstore_customscan_init installs the hook required to intercept the postgres planner and
|
||||
* provide extra paths for cstore tables
|
||||
*/
|
||||
void
|
||||
cstore_customscan_init()
|
||||
{
|
||||
PreviousSetRelPathlistHook = set_rel_pathlist_hook;
|
||||
set_rel_pathlist_hook = CStoreSetRelPathlistHook;
|
||||
|
||||
/* register customscan specific GUC's */
|
||||
DefineCustomBoolVariable(
|
||||
"cstore.enable_custom_scan",
|
||||
gettext_noop("Enables the use of a custom scan to push projections and quals "
|
||||
"into the storage layer"),
|
||||
NULL,
|
||||
&EnableCStoreCustomScan,
|
||||
true,
|
||||
PGC_USERSET,
|
||||
GUC_NO_SHOW_ALL,
|
||||
NULL, NULL, NULL);
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
clear_paths(RelOptInfo *rel)
|
||||
{
|
||||
rel->pathlist = NULL;
|
||||
rel->partial_pathlist = NULL;
|
||||
rel->cheapest_startup_path = NULL;
|
||||
rel->cheapest_total_path = NULL;
|
||||
rel->cheapest_unique_path = NULL;
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
CStoreSetRelPathlistHook(PlannerInfo *root, RelOptInfo *rel, Index rti,
|
||||
RangeTblEntry *rte)
|
||||
{
|
||||
Relation relation;
|
||||
|
||||
/* call into previous hook if assigned */
|
||||
if (PreviousSetRelPathlistHook)
|
||||
{
|
||||
PreviousSetRelPathlistHook(root, rel, rti, rte);
|
||||
}
|
||||
|
||||
if (!EnableCStoreCustomScan)
|
||||
{
|
||||
/* custon scans are disabled, use normal table access method api instead */
|
||||
return;
|
||||
}
|
||||
|
||||
if (!OidIsValid(rte->relid) || rte->rtekind != RTE_RELATION)
|
||||
{
|
||||
/* some calls to the pathlist hook don't have a valid relation set. Do nothing */
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Here we want to inspect if this relation pathlist hook is accessing a cstore table.
|
||||
* If that is the case we want to insert an extra path that pushes down the projection
|
||||
* into the scan of the table to minimize the data read.
|
||||
*/
|
||||
relation = RelationIdGetRelation(rte->relid);
|
||||
if (relation->rd_tableam == GetCstoreTableAmRoutine())
|
||||
{
|
||||
Path *customPath = CreateCStoreScanPath(rel, rte);
|
||||
|
||||
ereport(DEBUG1, (errmsg("pathlist hook for cstore table am")));
|
||||
|
||||
/* we propose a new path that will be the only path for scanning this relation */
|
||||
clear_paths(rel);
|
||||
add_path(rel, customPath);
|
||||
}
|
||||
RelationClose(relation);
|
||||
}
|
||||
|
||||
|
||||
static Path *
|
||||
CreateCStoreScanPath(RelOptInfo *rel, RangeTblEntry *rte)
|
||||
{
|
||||
CStoreScanPath *cspath = (CStoreScanPath *) newNode(sizeof(CStoreScanPath),
|
||||
T_CustomPath);
|
||||
CustomPath *cpath;
|
||||
Path *path;
|
||||
|
||||
/*
|
||||
* popuate custom path information
|
||||
*/
|
||||
cpath = &cspath->custom_path;
|
||||
cpath->methods = &CStoreScanPathMethods;
|
||||
|
||||
/*
|
||||
* populate generic path information
|
||||
*/
|
||||
path = &cpath->path;
|
||||
path->pathtype = T_CustomScan;
|
||||
path->parent = rel;
|
||||
path->pathtarget = rel->reltarget;
|
||||
|
||||
/*
|
||||
* Add cost estimates for a cstore table scan, row count is the rows estimated by
|
||||
* postgres' planner.
|
||||
*/
|
||||
path->rows = rel->rows;
|
||||
path->startup_cost = 0;
|
||||
path->total_cost = path->startup_cost + CStoreScanCost(rte);
|
||||
|
||||
return (Path *) cspath;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* CStoreScanCost calculates the cost of scanning the cstore table. The cost is estimated
|
||||
* by using all stripe metadata to estimate based on the columns to read how many pages
|
||||
* need to be read.
|
||||
*/
|
||||
static Cost
|
||||
CStoreScanCost(RangeTblEntry *rte)
|
||||
{
|
||||
Relation rel = RelationIdGetRelation(rte->relid);
|
||||
DataFileMetadata *metadata = ReadDataFileMetadata(rel->rd_node.relNode, false);
|
||||
uint32 maxColumnCount = 0;
|
||||
uint64 totalStripeSize = 0;
|
||||
ListCell *stripeMetadataCell = NULL;
|
||||
|
||||
RelationClose(rel);
|
||||
rel = NULL;
|
||||
|
||||
foreach(stripeMetadataCell, metadata->stripeMetadataList)
|
||||
{
|
||||
StripeMetadata *stripeMetadata = (StripeMetadata *) lfirst(stripeMetadataCell);
|
||||
totalStripeSize += stripeMetadata->dataLength;
|
||||
maxColumnCount = Max(maxColumnCount, stripeMetadata->columnCount);
|
||||
}
|
||||
|
||||
{
|
||||
Bitmapset *attr_needed = rte->selectedCols;
|
||||
double numberOfColumnsRead = bms_num_members(attr_needed);
|
||||
double selectionRatio = numberOfColumnsRead / (double) maxColumnCount;
|
||||
Cost scanCost = (double) totalStripeSize / BLCKSZ * selectionRatio;
|
||||
return scanCost;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static Plan *
|
||||
CStoreScanPath_PlanCustomPath(PlannerInfo *root,
|
||||
RelOptInfo *rel,
|
||||
struct CustomPath *best_path,
|
||||
List *tlist,
|
||||
List *clauses,
|
||||
List *custom_plans)
|
||||
{
|
||||
CStoreScanScan *plan = (CStoreScanScan *) newNode(sizeof(CStoreScanScan),
|
||||
T_CustomScan);
|
||||
|
||||
CustomScan *cscan = &plan->custom_scan;
|
||||
cscan->methods = &CStoreScanScanMethods;
|
||||
|
||||
/* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
|
||||
clauses = extract_actual_clauses(clauses, false);
|
||||
|
||||
cscan->scan.plan.targetlist = list_copy(tlist);
|
||||
cscan->scan.plan.qual = clauses;
|
||||
cscan->scan.scanrelid = best_path->path.parent->relid;
|
||||
|
||||
return (Plan *) plan;
|
||||
}
|
||||
|
||||
|
||||
static Node *
|
||||
CStoreScan_CreateCustomScanState(CustomScan *cscan)
|
||||
{
|
||||
CStoreScanState *cstorescanstate = (CStoreScanState *) newNode(
|
||||
sizeof(CStoreScanState), T_CustomScanState);
|
||||
|
||||
CustomScanState *cscanstate = &cstorescanstate->custom_scanstate;
|
||||
cscanstate->methods = &CStoreExecuteMethods;
|
||||
|
||||
cstorescanstate->qual = cscan->scan.plan.qual;
|
||||
|
||||
return (Node *) cscanstate;
|
||||
}
|
||||
|
||||
|
||||
/*
 * CStoreScan_BeginCustomScan is intentionally a no-op: the table scan
 * descriptor is created lazily on the first call to CStoreScanNext.
 */
static void
CStoreScan_BeginCustomScan(CustomScanState *cscanstate, EState *estate, int eflags)
{
	/* scan slot is already initialized */
}
|
||||
|
||||
|
||||
static Bitmapset *
|
||||
CStoreAttrNeeded(ScanState *ss)
|
||||
{
|
||||
TupleTableSlot *slot = ss->ss_ScanTupleSlot;
|
||||
int natts = slot->tts_tupleDescriptor->natts;
|
||||
Bitmapset *attr_needed = NULL;
|
||||
Plan *plan = ss->ps.plan;
|
||||
int flags = PVC_RECURSE_AGGREGATES |
|
||||
PVC_RECURSE_WINDOWFUNCS | PVC_RECURSE_PLACEHOLDERS;
|
||||
List *vars = list_concat(pull_var_clause((Node *) plan->targetlist, flags),
|
||||
pull_var_clause((Node *) plan->qual, flags));
|
||||
ListCell *lc;
|
||||
|
||||
foreach(lc, vars)
|
||||
{
|
||||
Var *var = lfirst(lc);
|
||||
|
||||
if (var->varattno == 0)
|
||||
{
|
||||
elog(DEBUG1, "Need attribute: all");
|
||||
|
||||
/* all attributes are required, we don't need to add more so break*/
|
||||
attr_needed = bms_add_range(attr_needed, 0, natts - 1);
|
||||
break;
|
||||
}
|
||||
|
||||
elog(DEBUG1, "Need attribute: %d", var->varattno);
|
||||
attr_needed = bms_add_member(attr_needed, var->varattno - 1);
|
||||
}
|
||||
|
||||
return attr_needed;
|
||||
}
|
||||
|
||||
|
||||
static TupleTableSlot *
|
||||
CStoreScanNext(CStoreScanState *cstorescanstate)
|
||||
{
|
||||
CustomScanState *node = (CustomScanState *) cstorescanstate;
|
||||
TableScanDesc scandesc;
|
||||
EState *estate;
|
||||
ScanDirection direction;
|
||||
TupleTableSlot *slot;
|
||||
|
||||
/*
|
||||
* get information from the estate and scan state
|
||||
*/
|
||||
scandesc = node->ss.ss_currentScanDesc;
|
||||
estate = node->ss.ps.state;
|
||||
direction = estate->es_direction;
|
||||
slot = node->ss.ss_ScanTupleSlot;
|
||||
|
||||
if (scandesc == NULL)
|
||||
{
|
||||
/* the cstore access method does not use the flags, they are specific to heap */
|
||||
uint32 flags = 0;
|
||||
Bitmapset *attr_needed = CStoreAttrNeeded(&node->ss);
|
||||
|
||||
/*
|
||||
* We reach here if the scan is not parallel, or if we're serially
|
||||
* executing a scan that was planned to be parallel.
|
||||
*/
|
||||
scandesc = cstore_beginscan_extended(node->ss.ss_currentRelation,
|
||||
estate->es_snapshot,
|
||||
0, NULL, NULL, flags, attr_needed,
|
||||
cstorescanstate->qual);
|
||||
bms_free(attr_needed);
|
||||
|
||||
node->ss.ss_currentScanDesc = scandesc;
|
||||
}
|
||||
|
||||
/*
|
||||
* get the next tuple from the table
|
||||
*/
|
||||
if (table_scan_getnextslot(scandesc, direction, slot))
|
||||
{
|
||||
return slot;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
/*
 * CStoreScanRecheck -- access method routine to recheck a tuple in
 * EvalPlanQual. Always reports the tuple as valid.
 */
static bool
CStoreScanRecheck(CStoreScanState *node, TupleTableSlot *slot)
{
	return true;
}
|
||||
|
||||
|
||||
/*
 * CStoreScan_ExecCustomScan fetches the next tuple by driving the generic
 * ExecScan machinery with the cstore-specific next/recheck callbacks.
 */
static TupleTableSlot *
CStoreScan_ExecCustomScan(CustomScanState *node)
{
	return ExecScan(&node->ss,
					(ExecScanAccessMtd) CStoreScanNext,
					(ExecScanRecheckMtd) CStoreScanRecheck);
}
|
||||
|
||||
|
||||
static void
|
||||
CStoreScan_EndCustomScan(CustomScanState *node)
|
||||
{
|
||||
TableScanDesc scanDesc;
|
||||
|
||||
/*
|
||||
* get information from node
|
||||
*/
|
||||
scanDesc = node->ss.ss_currentScanDesc;
|
||||
|
||||
/*
|
||||
* Free the exprcontext
|
||||
*/
|
||||
ExecFreeExprContext(&node->ss.ps);
|
||||
|
||||
/*
|
||||
* clean out the tuple table
|
||||
*/
|
||||
if (node->ss.ps.ps_ResultTupleSlot)
|
||||
{
|
||||
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
|
||||
}
|
||||
ExecClearTuple(node->ss.ss_ScanTupleSlot);
|
||||
|
||||
/*
|
||||
* close heap scan
|
||||
*/
|
||||
if (scanDesc != NULL)
|
||||
{
|
||||
table_endscan(scanDesc);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
CStoreScan_ReScanCustomScan(CustomScanState *node)
|
||||
{
|
||||
TableScanDesc scanDesc = node->ss.ss_currentScanDesc;
|
||||
if (scanDesc != NULL)
|
||||
{
|
||||
table_rescan(node->ss.ss_currentScanDesc, NULL);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
/*-------------------------------------------------------------------------
 *
 * cstore_customscan.h
 *
 * Forward declarations of functions to hookup the custom scan feature of
 * cstore.
 *
 * $Id$
 *
 *-------------------------------------------------------------------------
 */

#ifndef CSTORE_FDW_CSTORE_CUSTOMSCAN_H
#define CSTORE_FDW_CSTORE_CUSTOMSCAN_H

/* installs the set_rel_pathlist hook and registers customscan GUCs */
void cstore_customscan_init(void);


#endif /*CSTORE_FDW_CSTORE_CUSTOMSCAN_H */
|
|
@ -0,0 +1,26 @@
|
|||
/* cstore_fdw/cstore_fdw--1.0--1.1.sql */

-- complain if script is sourced in psql, rather than via ALTER EXTENSION UPDATE
\echo Use "ALTER EXTENSION cstore_fdw UPDATE TO '1.1'" to load this file. \quit

-- C-implemented event trigger fired at the end of DDL commands
CREATE FUNCTION cstore_ddl_event_end_trigger()
RETURNS event_trigger
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;

CREATE EVENT TRIGGER cstore_ddl_event_end
ON ddl_command_end
EXECUTE PROCEDURE cstore_ddl_event_end_trigger();

-- C-implemented size function for cstore tables
CREATE FUNCTION cstore_table_size(relation regclass)
RETURNS bigint
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;

-- cstore_fdw creates directories to store files for tables with automatically
-- determined filename during the CREATE SERVER statement. Since this feature
-- was newly added in v1.1, servers created with v1.0 did not create them. So,
-- we create a server with v1.1 to ensure that the required directories are
-- created to allow users to create automatically managed tables with old servers.
CREATE SERVER cstore_server_for_updating_1_0_to_1_1 FOREIGN DATA WRAPPER cstore_fdw;
DROP SERVER cstore_server_for_updating_1_0_to_1_1;
|
|
@ -0,0 +1,3 @@
|
|||
/* cstore_fdw/cstore_fdw--1.1--1.2.sql */

-- No new functions or definitions were added in 1.2
|
|
@ -0,0 +1,3 @@
|
|||
/* cstore_fdw/cstore_fdw--1.2--1.3.sql */

-- No new functions or definitions were added in 1.3
|
|
@ -0,0 +1,3 @@
|
|||
/* cstore_fdw/cstore_fdw--1.3--1.4.sql */

-- No new functions or definitions were added in 1.4
|
|
@ -0,0 +1,28 @@
|
|||
/* cstore_fdw/cstore_fdw--1.4--1.5.sql */

-- C function that removes on-disk resources belonging to a dropped table
CREATE FUNCTION cstore_clean_table_resources(oid)
RETURNS void
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;

-- For every dropped (foreign) table, clean up its cstore resources.
CREATE OR REPLACE FUNCTION cstore_drop_trigger()
    RETURNS event_trigger
    LANGUAGE plpgsql
    AS $csdt$
DECLARE v_obj record;
BEGIN
    FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects() LOOP

        -- only tables and foreign tables can own cstore resources
        IF v_obj.object_type NOT IN ('table', 'foreign table') THEN
            CONTINUE;
        END IF;

        PERFORM cstore_clean_table_resources(v_obj.objid);

    END LOOP;
END;
$csdt$;

CREATE EVENT TRIGGER cstore_drop_event
    ON SQL_DROP
    EXECUTE PROCEDURE cstore_drop_trigger();
|
|
@ -0,0 +1,19 @@
|
|||
/* cstore_fdw/cstore_fdw--1.5--1.6.sql */

-- Redefine the drop trigger; the only change from 1.5 is that
-- cstore_clean_table_resources is now schema-qualified with "public".
CREATE OR REPLACE FUNCTION cstore_drop_trigger()
    RETURNS event_trigger
    LANGUAGE plpgsql
    AS $csdt$
DECLARE v_obj record;
BEGIN
    FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects() LOOP

        -- only tables and foreign tables can own cstore resources
        IF v_obj.object_type NOT IN ('table', 'foreign table') THEN
            CONTINUE;
        END IF;

        PERFORM public.cstore_clean_table_resources(v_obj.objid);

    END LOOP;
END;
$csdt$;
|
|
@ -0,0 +1,3 @@
|
|||
/* cstore_fdw/cstore_fdw--1.6--1.7.sql */

-- No new functions or definitions were added in 1.7
|
|
@ -0,0 +1,35 @@
|
|||
/* cstore_fdw/cstore_fdw--1.7--1.8.sql */

DO $proc$
BEGIN

-- The table access method API used below is only available on PostgreSQL 12
-- and 13. Compare the numeric server version; the previous regex over
-- version() ('~ ''12''') could also match unrelated substrings of the
-- version banner (build information, minor versions such as "11.12", ...).
IF current_setting('server_version_num')::int / 10000 IN (12, 13) THEN
  EXECUTE $$
    CREATE FUNCTION cstore_tableam_handler(internal)
    RETURNS table_am_handler
    LANGUAGE C
    AS 'MODULE_PATHNAME', 'cstore_tableam_handler';

    CREATE ACCESS METHOD cstore_tableam
    TYPE TABLE HANDLER cstore_tableam_handler;

    CREATE FUNCTION pg_catalog.alter_cstore_table_set(
        table_name regclass,
        block_row_count int DEFAULT NULL,
        stripe_row_count int DEFAULT NULL,
        compression name DEFAULT null)
    RETURNS void
    LANGUAGE C
    AS 'MODULE_PATHNAME', 'alter_cstore_table_set';

    CREATE FUNCTION pg_catalog.alter_cstore_table_reset(
        table_name regclass,
        block_row_count bool DEFAULT false,
        stripe_row_count bool DEFAULT false,
        compression bool DEFAULT false)
    RETURNS void
    LANGUAGE C
    AS 'MODULE_PATHNAME', 'alter_cstore_table_reset';
  $$;
END IF;
END$proc$;
|
|
@ -0,0 +1,88 @@
|
|||
/* cstore_fdw/cstore_fdw--1.7.sql */

-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION cstore_fdw" to load this file. \quit

-- foreign data wrapper entry points (implemented in C)
CREATE FUNCTION cstore_fdw_handler()
RETURNS fdw_handler
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;

CREATE FUNCTION cstore_fdw_validator(text[], oid)
RETURNS void
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;

CREATE FOREIGN DATA WRAPPER cstore_fdw
HANDLER cstore_fdw_handler
VALIDATOR cstore_fdw_validator;

-- event trigger fired at the end of DDL commands
CREATE FUNCTION cstore_ddl_event_end_trigger()
RETURNS event_trigger
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;

CREATE EVENT TRIGGER cstore_ddl_event_end
ON ddl_command_end
EXECUTE PROCEDURE cstore_ddl_event_end_trigger();

CREATE FUNCTION public.cstore_table_size(relation regclass)
RETURNS bigint
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;

-- per-data-file metadata, keyed by relfilenode
CREATE TABLE cstore_data_files (
    relfilenode oid NOT NULL,
    block_row_count int NOT NULL,
    stripe_row_count int NOT NULL,
    compression name NOT NULL,
    version_major bigint NOT NULL,
    version_minor bigint NOT NULL,
    PRIMARY KEY (relfilenode)
) WITH (user_catalog_table = true);

COMMENT ON TABLE cstore_data_files IS 'CStore data file wide metadata';

-- per-stripe metadata; cascades when the owning data file row is deleted
CREATE TABLE cstore_stripes (
    relfilenode oid NOT NULL,
    stripe bigint NOT NULL,
    file_offset bigint NOT NULL,
    data_length bigint NOT NULL,
    column_count int NOT NULL,
    block_count int NOT NULL,
    block_row_count int NOT NULL,
    row_count bigint NOT NULL,
    PRIMARY KEY (relfilenode, stripe),
    FOREIGN KEY (relfilenode) REFERENCES cstore_data_files(relfilenode) ON DELETE CASCADE INITIALLY DEFERRED
) WITH (user_catalog_table = true);

COMMENT ON TABLE cstore_stripes IS 'CStore per stripe metadata';

-- per-block (skip node) metadata including min/max statistics
CREATE TABLE cstore_skipnodes (
    relfilenode oid NOT NULL,
    stripe bigint NOT NULL,
    attr int NOT NULL,
    block int NOT NULL,
    row_count bigint NOT NULL,
    minimum_value bytea,
    maximum_value bytea,
    value_stream_offset bigint NOT NULL,
    value_stream_length bigint NOT NULL,
    exists_stream_offset bigint NOT NULL,
    exists_stream_length bigint NOT NULL,
    value_compression_type int NOT NULL,
    PRIMARY KEY (relfilenode, stripe, attr, block),
    FOREIGN KEY (relfilenode, stripe) REFERENCES cstore_stripes(relfilenode, stripe) ON DELETE CASCADE INITIALLY DEFERRED
) WITH (user_catalog_table = true);

COMMENT ON TABLE cstore_skipnodes IS 'CStore per block metadata';

-- convenience view exposing per-table storage options
CREATE VIEW cstore_options AS
SELECT c.oid::regclass regclass,
       d.block_row_count,
       d.stripe_row_count,
       d.compression
FROM pg_class c
JOIN cstore.cstore_data_files d USING(relfilenode);

COMMENT ON VIEW cstore_options IS 'CStore per table settings';
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,6 @@
|
|||
# cstore_fdw extension control file
comment = 'foreign-data wrapper for flat cstore access'
default_version = '1.8'
module_pathname = '$libdir/cstore_fdw'
# objects live in the dedicated "cstore" schema and cannot be relocated
relocatable = false
schema = cstore
|
|
@ -0,0 +1,35 @@
|
|||
/*-------------------------------------------------------------------------
 *
 * cstore_fdw.h
 *
 * Type and function declarations for CStore foreign data wrapper.
 *
 * Copyright (c) 2016, Citus Data, Inc.
 *
 * $Id$
 *
 *-------------------------------------------------------------------------
 */

#ifndef CSTORE_FDW_H
#define CSTORE_FDW_H

#include "postgres.h"

#include "fmgr.h"

/* module load/unload entry points */
void cstore_fdw_init(void);
void cstore_fdw_finish(void);

/* event trigger function declarations */
extern Datum cstore_ddl_event_end_trigger(PG_FUNCTION_ARGS);

/* Function declarations for utility UDFs */
extern Datum cstore_table_size(PG_FUNCTION_ARGS);
extern Datum cstore_clean_table_resources(PG_FUNCTION_ARGS);

/* Function declarations for foreign data wrapper */
extern Datum cstore_fdw_handler(PG_FUNCTION_ARGS);
extern Datum cstore_fdw_validator(PG_FUNCTION_ARGS);

#endif /* CSTORE_FDW_H */
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,15 @@
|
|||
/* NOTE(review): this header has no include guard — confirm it is never
 * included more than once per translation unit. */
#include "postgres.h"
#include "fmgr.h"
#include "access/tableam.h"
#include "access/skey.h"
#include "nodes/bitmapset.h"

/* returns the TableAmRoutine implementing the cstore table access method */
const TableAmRoutine * GetCstoreTableAmRoutine(void);
extern void cstore_tableam_init(void);
extern void cstore_tableam_finish(void);

/*
 * Extended beginscan that also accepts the needed-attribute set and the
 * quals to push down into the columnar reader.
 */
extern TableScanDesc cstore_beginscan_extended(Relation relation, Snapshot snapshot,
											   int nkeys, ScanKey key,
											   ParallelTableScanDesc parallel_scan,
											   uint32 flags, Bitmapset *attr_needed,
											   List *scanQual);
|
|
@ -0,0 +1,65 @@
|
|||
/*-------------------------------------------------------------------------
 *
 * cstore_version_compat.h
 *
 * Compatibility macros for writing code agnostic to PostgreSQL versions
 *
 * Copyright (c) 2018, Citus Data, Inc.
 *
 * $Id$
 *
 *-------------------------------------------------------------------------
 */

#ifndef CSTORE_COMPAT_H
#define CSTORE_COMPAT_H

#if PG_VERSION_NUM < 100000

/* Accessor for the i'th attribute of tupdesc. */
#define TupleDescAttr(tupdesc, i) ((tupdesc)->attrs[(i)])

#endif

#if PG_VERSION_NUM < 110000
#define ALLOCSET_DEFAULT_SIZES ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, \
	ALLOCSET_DEFAULT_MAXSIZE
#define ACLCHECK_OBJECT_TABLE ACL_KIND_CLASS
#else
#define ACLCHECK_OBJECT_TABLE OBJECT_TABLE

#define ExplainPropertyLong(qlabel, value, es) \
	ExplainPropertyInteger(qlabel, NULL, value, es)
#endif

/* the ProcessUtility hook signature changed in PG 10 and again in PG 13 */
#if PG_VERSION_NUM >= 130000
#define CALL_PREVIOUS_UTILITY() \
	PreviousProcessUtilityHook(plannedStatement, queryString, context, paramListInfo, \
							   queryEnvironment, destReceiver, queryCompletion)
#elif PG_VERSION_NUM >= 100000
#define CALL_PREVIOUS_UTILITY() \
	PreviousProcessUtilityHook(plannedStatement, queryString, context, paramListInfo, \
							   queryEnvironment, destReceiver, completionTag)
#else
#define CALL_PREVIOUS_UTILITY() \
	PreviousProcessUtilityHook(parseTree, queryString, context, paramListInfo, \
							   destReceiver, completionTag)
#endif

#if PG_VERSION_NUM < 120000
#define TTS_EMPTY(slot) ((slot)->tts_isempty)

/*
 * Pre-12 fallback for ExecForceStoreHeapTuple. The previous definition
 * ignored its arguments and referenced the caller's local variables
 * (newTuple/tupleSlot) directly, and also embedded a stray trailing
 * semicolon; use the macro parameters so any call site works.
 */
#define ExecForceStoreHeapTuple(tuple, slot, shouldFree) \
	ExecStoreTuple(tuple, slot, InvalidBuffer, shouldFree)
#define TableScanDesc HeapScanDesc
#define table_beginscan heap_beginscan
#define table_endscan heap_endscan

#endif

#if PG_VERSION_NUM >= 130000
#define heap_open table_open
#define heap_openrv table_openrv
#define heap_close table_close
#endif

#endif /* CSTORE_COMPAT_H */
|
|
@ -0,0 +1,764 @@
|
|||
/*-------------------------------------------------------------------------
|
||||
*
|
||||
* cstore_writer.c
|
||||
*
|
||||
* This file contains function definitions for writing cstore files. This
|
||||
* includes the logic for writing file level metadata, writing row stripes,
|
||||
* and calculating block skip nodes.
|
||||
*
|
||||
* Copyright (c) 2016, Citus Data, Inc.
|
||||
*
|
||||
* $Id$
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
|
||||
#include "postgres.h"
|
||||
|
||||
#include "access/nbtree.h"
|
||||
#include "catalog/pg_am.h"
|
||||
#include "miscadmin.h"
|
||||
#include "storage/fd.h"
|
||||
#include "storage/smgr.h"
|
||||
#include "utils/memutils.h"
|
||||
#include "utils/rel.h"
|
||||
|
||||
#include "cstore.h"
|
||||
#include "cstore_version_compat.h"
|
||||
|
||||
/* stripe/skip-list allocation and flushing */
static StripeBuffers * CreateEmptyStripeBuffers(uint32 stripeMaxRowCount,
												uint32 blockRowCount,
												uint32 columnCount);
static StripeSkipList * CreateEmptyStripeSkipList(uint32 stripeMaxRowCount,
												  uint32 blockRowCount,
												  uint32 columnCount);
static void FlushStripe(TableWriteState *writeState);

/* serialization helpers */
static StringInfo SerializeBoolArray(bool *boolArray, uint32 boolArrayLength);
static void SerializeSingleDatum(StringInfo datumBuffer, Datum datum,
								 bool datumTypeByValue, int datumTypeLength,
								 char datumTypeAlign);
static void SerializeBlockData(TableWriteState *writeState, uint32 blockIndex,
							   uint32 rowCount);

/* skip-node statistics and copy helpers */
static void UpdateBlockSkipNodeMinMax(ColumnBlockSkipNode *blockSkipNode,
									  Datum columnValue, bool columnTypeByValue,
									  int columnTypeLength, Oid columnCollation,
									  FmgrInfo *comparisonFunction);
static Datum DatumCopy(Datum datum, bool datumTypeByValue, int datumTypeLength);
static StringInfo CopyStringInfo(StringInfo sourceString);
|
||||
|
||||
|
||||
/*
|
||||
* CStoreBeginWrite initializes a cstore data load operation and returns a table
|
||||
* handle. This handle should be used for adding the row values and finishing the
|
||||
* data load operation. If the cstore footer file already exists, we read the
|
||||
* footer and then seek to right after the last stripe where the new stripes
|
||||
* will be added.
|
||||
*/
|
||||
TableWriteState *
|
||||
CStoreBeginWrite(Relation relation,
|
||||
CompressionType compressionType,
|
||||
uint64 stripeMaxRowCount, uint32 blockRowCount,
|
||||
TupleDesc tupleDescriptor)
|
||||
{
|
||||
TableWriteState *writeState = NULL;
|
||||
FmgrInfo **comparisonFunctionArray = NULL;
|
||||
MemoryContext stripeWriteContext = NULL;
|
||||
uint32 columnCount = 0;
|
||||
uint32 columnIndex = 0;
|
||||
bool *columnMaskArray = NULL;
|
||||
BlockData *blockData = NULL;
|
||||
|
||||
/* get comparison function pointers for each of the columns */
|
||||
columnCount = tupleDescriptor->natts;
|
||||
comparisonFunctionArray = palloc0(columnCount * sizeof(FmgrInfo *));
|
||||
for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
|
||||
{
|
||||
FmgrInfo *comparisonFunction = NULL;
|
||||
FormData_pg_attribute *attributeForm = TupleDescAttr(tupleDescriptor,
|
||||
columnIndex);
|
||||
|
||||
if (!attributeForm->attisdropped)
|
||||
{
|
||||
Oid typeId = attributeForm->atttypid;
|
||||
|
||||
comparisonFunction = GetFunctionInfoOrNull(typeId, BTREE_AM_OID,
|
||||
BTORDER_PROC);
|
||||
}
|
||||
|
||||
comparisonFunctionArray[columnIndex] = comparisonFunction;
|
||||
}
|
||||
|
||||
/*
|
||||
* We allocate all stripe specific data in the stripeWriteContext, and
|
||||
* reset this memory context once we have flushed the stripe to the file.
|
||||
* This is to avoid memory leaks.
|
||||
*/
|
||||
stripeWriteContext = AllocSetContextCreate(CurrentMemoryContext,
|
||||
"Stripe Write Memory Context",
|
||||
ALLOCSET_DEFAULT_SIZES);
|
||||
|
||||
columnMaskArray = palloc(columnCount * sizeof(bool));
|
||||
memset(columnMaskArray, true, columnCount);
|
||||
|
||||
blockData = CreateEmptyBlockData(columnCount, columnMaskArray, blockRowCount);
|
||||
|
||||
writeState = palloc0(sizeof(TableWriteState));
|
||||
writeState->relation = relation;
|
||||
writeState->compressionType = compressionType;
|
||||
writeState->stripeMaxRowCount = stripeMaxRowCount;
|
||||
writeState->blockRowCount = blockRowCount;
|
||||
writeState->tupleDescriptor = tupleDescriptor;
|
||||
writeState->comparisonFunctionArray = comparisonFunctionArray;
|
||||
writeState->stripeBuffers = NULL;
|
||||
writeState->stripeSkipList = NULL;
|
||||
writeState->stripeWriteContext = stripeWriteContext;
|
||||
writeState->blockData = blockData;
|
||||
writeState->compressionBuffer = NULL;
|
||||
|
||||
return writeState;
|
||||
}
|
||||
|
||||
|
||||
/*
 * CStoreWriteRow adds a row to the cstore file. If the stripe is not initialized,
 * we create structures to hold stripe data and skip list. Then, we serialize and
 * append data to serialized value buffer for each of the columns and update
 * corresponding skip nodes. Then, whole block data is compressed at every
 * rowBlockCount insertion. Then, if row count exceeds stripeMaxRowCount, we flush
 * the stripe, and add its metadata to the table footer.
 */
void
CStoreWriteRow(TableWriteState *writeState, Datum *columnValues, bool *columnNulls)
{
	uint32 columnIndex = 0;
	uint32 blockIndex = 0;
	uint32 blockRowIndex = 0;
	StripeBuffers *stripeBuffers = writeState->stripeBuffers;
	StripeSkipList *stripeSkipList = writeState->stripeSkipList;
	uint32 columnCount = writeState->tupleDescriptor->natts;
	const uint32 blockRowCount = writeState->blockRowCount;
	BlockData *blockData = writeState->blockData;
	/* all per-stripe allocations below must live in the stripe write context */
	MemoryContext oldContext = MemoryContextSwitchTo(writeState->stripeWriteContext);

	if (stripeBuffers == NULL)
	{
		/* first row of a new stripe: allocate stripe-lifetime structures */
		stripeBuffers = CreateEmptyStripeBuffers(writeState->stripeMaxRowCount,
												 blockRowCount, columnCount);
		stripeSkipList = CreateEmptyStripeSkipList(writeState->stripeMaxRowCount,
												   blockRowCount, columnCount);
		writeState->stripeBuffers = stripeBuffers;
		writeState->stripeSkipList = stripeSkipList;
		writeState->compressionBuffer = makeStringInfo();

		/*
		 * serializedValueBuffer lives in stripe write memory context so it needs to be
		 * initialized when the stripe is created.
		 */
		for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
		{
			blockData->valueBufferArray[columnIndex] = makeStringInfo();
		}
	}

	/* locate the block, and the row position within it, this row belongs to */
	blockIndex = stripeBuffers->rowCount / blockRowCount;
	blockRowIndex = stripeBuffers->rowCount % blockRowCount;

	for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
	{
		ColumnBlockSkipNode **blockSkipNodeArray = stripeSkipList->blockSkipNodeArray;
		ColumnBlockSkipNode *blockSkipNode =
			&blockSkipNodeArray[columnIndex][blockIndex];

		if (columnNulls[columnIndex])
		{
			/* NULL: only the exists bit is recorded, no value is serialized */
			blockData->existsArray[columnIndex][blockRowIndex] = false;
		}
		else
		{
			FmgrInfo *comparisonFunction =
				writeState->comparisonFunctionArray[columnIndex];
			Form_pg_attribute attributeForm =
				TupleDescAttr(writeState->tupleDescriptor, columnIndex);
			bool columnTypeByValue = attributeForm->attbyval;
			int columnTypeLength = attributeForm->attlen;
			Oid columnCollation = attributeForm->attcollation;
			char columnTypeAlign = attributeForm->attalign;

			blockData->existsArray[columnIndex][blockRowIndex] = true;

			SerializeSingleDatum(blockData->valueBufferArray[columnIndex],
								 columnValues[columnIndex], columnTypeByValue,
								 columnTypeLength, columnTypeAlign);

			/* keep the per-block min/max statistics up to date */
			UpdateBlockSkipNodeMinMax(blockSkipNode, columnValues[columnIndex],
									  columnTypeByValue, columnTypeLength,
									  columnCollation, comparisonFunction);
		}

		blockSkipNode->rowCount++;
	}

	stripeSkipList->blockCount = blockIndex + 1;

	/* last row of the block is inserted serialize the block */
	if (blockRowIndex == blockRowCount - 1)
	{
		SerializeBlockData(writeState, blockIndex, blockRowCount);
	}

	stripeBuffers->rowCount++;
	if (stripeBuffers->rowCount >= writeState->stripeMaxRowCount)
	{
		FlushStripe(writeState);

		/* set stripe data and skip list to NULL so they are recreated next time */
		writeState->stripeBuffers = NULL;
		writeState->stripeSkipList = NULL;
	}

	MemoryContextSwitchTo(oldContext);
}
|
||||
|
||||
|
||||
/*
|
||||
* CStoreEndWrite finishes a cstore data load operation. If we have an unflushed
|
||||
* stripe, we flush it. Then, we sync and close the cstore data file. Last, we
|
||||
* flush the footer to a temporary file, and atomically rename this temporary
|
||||
* file to the original footer file.
|
||||
*/
|
||||
void
|
||||
CStoreEndWrite(TableWriteState *writeState)
|
||||
{
|
||||
StripeBuffers *stripeBuffers = writeState->stripeBuffers;
|
||||
|
||||
if (stripeBuffers != NULL)
|
||||
{
|
||||
MemoryContext oldContext = MemoryContextSwitchTo(writeState->stripeWriteContext);
|
||||
|
||||
FlushStripe(writeState);
|
||||
MemoryContextReset(writeState->stripeWriteContext);
|
||||
|
||||
MemoryContextSwitchTo(oldContext);
|
||||
}
|
||||
|
||||
MemoryContextDelete(writeState->stripeWriteContext);
|
||||
pfree(writeState->comparisonFunctionArray);
|
||||
FreeBlockData(writeState->blockData);
|
||||
pfree(writeState);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* CreateEmptyStripeBuffers allocates an empty StripeBuffers structure with the given
|
||||
* column count.
|
||||
*/
|
||||
static StripeBuffers *
|
||||
CreateEmptyStripeBuffers(uint32 stripeMaxRowCount, uint32 blockRowCount,
|
||||
uint32 columnCount)
|
||||
{
|
||||
StripeBuffers *stripeBuffers = NULL;
|
||||
uint32 columnIndex = 0;
|
||||
uint32 maxBlockCount = (stripeMaxRowCount / blockRowCount) + 1;
|
||||
ColumnBuffers **columnBuffersArray = palloc0(columnCount * sizeof(ColumnBuffers *));
|
||||
|
||||
for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
|
||||
{
|
||||
uint32 blockIndex = 0;
|
||||
ColumnBlockBuffers **blockBuffersArray =
|
||||
palloc0(maxBlockCount * sizeof(ColumnBlockBuffers *));
|
||||
|
||||
for (blockIndex = 0; blockIndex < maxBlockCount; blockIndex++)
|
||||
{
|
||||
blockBuffersArray[blockIndex] = palloc0(sizeof(ColumnBlockBuffers));
|
||||
blockBuffersArray[blockIndex]->existsBuffer = NULL;
|
||||
blockBuffersArray[blockIndex]->valueBuffer = NULL;
|
||||
blockBuffersArray[blockIndex]->valueCompressionType = COMPRESSION_NONE;
|
||||
}
|
||||
|
||||
columnBuffersArray[columnIndex] = palloc0(sizeof(ColumnBuffers));
|
||||
columnBuffersArray[columnIndex]->blockBuffersArray = blockBuffersArray;
|
||||
}
|
||||
|
||||
stripeBuffers = palloc0(sizeof(StripeBuffers));
|
||||
stripeBuffers->columnBuffersArray = columnBuffersArray;
|
||||
stripeBuffers->columnCount = columnCount;
|
||||
stripeBuffers->rowCount = 0;
|
||||
|
||||
return stripeBuffers;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* CreateEmptyStripeSkipList allocates an empty StripeSkipList structure with
|
||||
* the given column count. This structure has enough blocks to hold statistics
|
||||
* for stripeMaxRowCount rows.
|
||||
*/
|
||||
static StripeSkipList *
|
||||
CreateEmptyStripeSkipList(uint32 stripeMaxRowCount, uint32 blockRowCount,
|
||||
uint32 columnCount)
|
||||
{
|
||||
StripeSkipList *stripeSkipList = NULL;
|
||||
uint32 columnIndex = 0;
|
||||
uint32 maxBlockCount = (stripeMaxRowCount / blockRowCount) + 1;
|
||||
|
||||
ColumnBlockSkipNode **blockSkipNodeArray =
|
||||
palloc0(columnCount * sizeof(ColumnBlockSkipNode *));
|
||||
for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
|
||||
{
|
||||
blockSkipNodeArray[columnIndex] =
|
||||
palloc0(maxBlockCount * sizeof(ColumnBlockSkipNode));
|
||||
}
|
||||
|
||||
stripeSkipList = palloc0(sizeof(StripeSkipList));
|
||||
stripeSkipList->columnCount = columnCount;
|
||||
stripeSkipList->blockCount = 0;
|
||||
stripeSkipList->blockSkipNodeArray = blockSkipNodeArray;
|
||||
|
||||
return stripeSkipList;
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
WriteToSmgr(Relation rel, uint64 logicalOffset, char *data, uint32 dataLength)
|
||||
{
|
||||
uint64 remaining = dataLength;
|
||||
Buffer buffer;
|
||||
|
||||
while (remaining > 0)
|
||||
{
|
||||
SmgrAddr addr = logical_to_smgr(logicalOffset);
|
||||
BlockNumber nblocks;
|
||||
Page page;
|
||||
PageHeader phdr;
|
||||
uint64 to_write;
|
||||
|
||||
RelationOpenSmgr(rel);
|
||||
nblocks = smgrnblocks(rel->rd_smgr, MAIN_FORKNUM);
|
||||
Assert(addr.blockno < nblocks);
|
||||
(void) nblocks; /* keep compiler quiet */
|
||||
RelationCloseSmgr(rel);
|
||||
|
||||
buffer = ReadBuffer(rel, addr.blockno);
|
||||
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
|
||||
|
||||
page = BufferGetPage(buffer);
|
||||
phdr = (PageHeader) page;
|
||||
if (PageIsNew(page))
|
||||
{
|
||||
PageInit(page, BLCKSZ, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* After a transaction has been rolled-back, we might be
|
||||
* over-writing the rolledback write, so phdr->pd_lower can be
|
||||
* different from addr.offset.
|
||||
*
|
||||
* We reset pd_lower to reset the rolledback write.
|
||||
*/
|
||||
if (phdr->pd_lower > addr.offset)
|
||||
{
|
||||
ereport(DEBUG1, (errmsg("over-writing page %u", addr.blockno),
|
||||
errdetail("This can happen after a roll-back.")));
|
||||
phdr->pd_lower = addr.offset;
|
||||
}
|
||||
Assert(phdr->pd_lower == addr.offset);
|
||||
|
||||
START_CRIT_SECTION();
|
||||
|
||||
to_write = Min(phdr->pd_upper - phdr->pd_lower, remaining);
|
||||
memcpy(page + phdr->pd_lower, data, to_write);
|
||||
phdr->pd_lower += to_write;
|
||||
|
||||
MarkBufferDirty(buffer);
|
||||
|
||||
if (RelationNeedsWAL(rel))
|
||||
{
|
||||
XLogRecPtr recptr = 0;
|
||||
|
||||
XLogBeginInsert();
|
||||
|
||||
/*
|
||||
* Since cstore will mostly write whole pages we force the transmission of the
|
||||
* whole image in the buffer
|
||||
*/
|
||||
XLogRegisterBuffer(0, buffer, REGBUF_FORCE_IMAGE);
|
||||
|
||||
recptr = XLogInsert(RM_GENERIC_ID, 0);
|
||||
PageSetLSN(page, recptr);
|
||||
}
|
||||
|
||||
END_CRIT_SECTION();
|
||||
|
||||
UnlockReleaseBuffer(buffer);
|
||||
|
||||
data += to_write;
|
||||
remaining -= to_write;
|
||||
logicalOffset += to_write;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* FlushStripe flushes current stripe data into the file. The function first ensures
|
||||
* the last data block for each column is properly serialized and compressed. Then,
|
||||
* the function creates the skip list and footer buffers. Finally, the function
|
||||
* flushes the skip list, data, and footer buffers to the file.
|
||||
*/
|
||||
static void
|
||||
FlushStripe(TableWriteState *writeState)
|
||||
{
|
||||
StripeMetadata stripeMetadata = { 0 };
|
||||
uint32 columnIndex = 0;
|
||||
uint32 blockIndex = 0;
|
||||
StripeBuffers *stripeBuffers = writeState->stripeBuffers;
|
||||
StripeSkipList *stripeSkipList = writeState->stripeSkipList;
|
||||
ColumnBlockSkipNode **columnSkipNodeArray = stripeSkipList->blockSkipNodeArray;
|
||||
TupleDesc tupleDescriptor = writeState->tupleDescriptor;
|
||||
uint32 columnCount = tupleDescriptor->natts;
|
||||
uint32 blockCount = stripeSkipList->blockCount;
|
||||
uint32 blockRowCount = writeState->blockRowCount;
|
||||
uint32 lastBlockIndex = stripeBuffers->rowCount / blockRowCount;
|
||||
uint32 lastBlockRowCount = stripeBuffers->rowCount % blockRowCount;
|
||||
uint64 currentFileOffset = 0;
|
||||
uint64 stripeSize = 0;
|
||||
uint64 stripeRowCount = 0;
|
||||
|
||||
/*
|
||||
* check if the last block needs serialization , the last block was not serialized
|
||||
* if it was not full yet, e.g. (rowCount > 0)
|
||||
*/
|
||||
if (lastBlockRowCount > 0)
|
||||
{
|
||||
SerializeBlockData(writeState, lastBlockIndex, lastBlockRowCount);
|
||||
}
|
||||
|
||||
/* update buffer sizes in stripe skip list */
|
||||
for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
|
||||
{
|
||||
ColumnBlockSkipNode *blockSkipNodeArray = columnSkipNodeArray[columnIndex];
|
||||
ColumnBuffers *columnBuffers = stripeBuffers->columnBuffersArray[columnIndex];
|
||||
|
||||
for (blockIndex = 0; blockIndex < blockCount; blockIndex++)
|
||||
{
|
||||
ColumnBlockBuffers *blockBuffers =
|
||||
columnBuffers->blockBuffersArray[blockIndex];
|
||||
uint64 existsBufferSize = blockBuffers->existsBuffer->len;
|
||||
ColumnBlockSkipNode *blockSkipNode = &blockSkipNodeArray[blockIndex];
|
||||
|
||||
blockSkipNode->existsBlockOffset = stripeSize;
|
||||
blockSkipNode->existsLength = existsBufferSize;
|
||||
stripeSize += existsBufferSize;
|
||||
}
|
||||
|
||||
for (blockIndex = 0; blockIndex < blockCount; blockIndex++)
|
||||
{
|
||||
ColumnBlockBuffers *blockBuffers =
|
||||
columnBuffers->blockBuffersArray[blockIndex];
|
||||
uint64 valueBufferSize = blockBuffers->valueBuffer->len;
|
||||
CompressionType valueCompressionType = blockBuffers->valueCompressionType;
|
||||
ColumnBlockSkipNode *blockSkipNode = &blockSkipNodeArray[blockIndex];
|
||||
|
||||
blockSkipNode->valueBlockOffset = stripeSize;
|
||||
blockSkipNode->valueLength = valueBufferSize;
|
||||
blockSkipNode->valueCompressionType = valueCompressionType;
|
||||
|
||||
stripeSize += valueBufferSize;
|
||||
}
|
||||
}
|
||||
|
||||
for (blockIndex = 0; blockIndex < blockCount; blockIndex++)
|
||||
{
|
||||
stripeRowCount +=
|
||||
stripeSkipList->blockSkipNodeArray[0][blockIndex].rowCount;
|
||||
}
|
||||
|
||||
stripeMetadata = ReserveStripe(writeState->relation, stripeSize,
|
||||
stripeRowCount, columnCount, blockCount,
|
||||
blockRowCount);
|
||||
|
||||
currentFileOffset = stripeMetadata.fileOffset;
|
||||
|
||||
/*
|
||||
* Each stripe has only one section:
|
||||
* Data section, in which we store data for each column continuously.
|
||||
* We store data for each for each column in blocks. For each block, we
|
||||
* store two buffers: "exists" buffer, and "value" buffer. "exists" buffer
|
||||
* tells which values are not NULL. "value" buffer contains values for
|
||||
* present values. For each column, we first store all "exists" buffers,
|
||||
* and then all "value" buffers.
|
||||
*/
|
||||
|
||||
/* flush the data buffers */
|
||||
for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
|
||||
{
|
||||
ColumnBuffers *columnBuffers = stripeBuffers->columnBuffersArray[columnIndex];
|
||||
|
||||
for (blockIndex = 0; blockIndex < stripeSkipList->blockCount; blockIndex++)
|
||||
{
|
||||
ColumnBlockBuffers *blockBuffers =
|
||||
columnBuffers->blockBuffersArray[blockIndex];
|
||||
StringInfo existsBuffer = blockBuffers->existsBuffer;
|
||||
|
||||
WriteToSmgr(writeState->relation, currentFileOffset,
|
||||
existsBuffer->data, existsBuffer->len);
|
||||
currentFileOffset += existsBuffer->len;
|
||||
}
|
||||
|
||||
for (blockIndex = 0; blockIndex < stripeSkipList->blockCount; blockIndex++)
|
||||
{
|
||||
ColumnBlockBuffers *blockBuffers =
|
||||
columnBuffers->blockBuffersArray[blockIndex];
|
||||
StringInfo valueBuffer = blockBuffers->valueBuffer;
|
||||
|
||||
WriteToSmgr(writeState->relation, currentFileOffset,
|
||||
valueBuffer->data, valueBuffer->len);
|
||||
currentFileOffset += valueBuffer->len;
|
||||
}
|
||||
}
|
||||
|
||||
/* create skip list and footer buffers */
|
||||
SaveStripeSkipList(writeState->relation->rd_node.relNode,
|
||||
stripeMetadata.id,
|
||||
stripeSkipList, tupleDescriptor);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* SerializeBoolArray serializes the given boolean array and returns the result
|
||||
* as a StringInfo. This function packs every 8 boolean values into one byte.
|
||||
*/
|
||||
static StringInfo
|
||||
SerializeBoolArray(bool *boolArray, uint32 boolArrayLength)
|
||||
{
|
||||
StringInfo boolArrayBuffer = NULL;
|
||||
uint32 boolArrayIndex = 0;
|
||||
uint32 byteCount = (boolArrayLength + 7) / 8;
|
||||
|
||||
boolArrayBuffer = makeStringInfo();
|
||||
enlargeStringInfo(boolArrayBuffer, byteCount);
|
||||
boolArrayBuffer->len = byteCount;
|
||||
memset(boolArrayBuffer->data, 0, byteCount);
|
||||
|
||||
for (boolArrayIndex = 0; boolArrayIndex < boolArrayLength; boolArrayIndex++)
|
||||
{
|
||||
if (boolArray[boolArrayIndex])
|
||||
{
|
||||
uint32 byteIndex = boolArrayIndex / 8;
|
||||
uint32 bitIndex = boolArrayIndex % 8;
|
||||
boolArrayBuffer->data[byteIndex] |= (1 << bitIndex);
|
||||
}
|
||||
}
|
||||
|
||||
return boolArrayBuffer;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* SerializeSingleDatum serializes the given datum value and appends it to the
|
||||
* provided string info buffer.
|
||||
*/
|
||||
static void
|
||||
SerializeSingleDatum(StringInfo datumBuffer, Datum datum, bool datumTypeByValue,
|
||||
int datumTypeLength, char datumTypeAlign)
|
||||
{
|
||||
uint32 datumLength = att_addlength_datum(0, datumTypeLength, datum);
|
||||
uint32 datumLengthAligned = att_align_nominal(datumLength, datumTypeAlign);
|
||||
char *currentDatumDataPointer = NULL;
|
||||
|
||||
enlargeStringInfo(datumBuffer, datumLengthAligned);
|
||||
|
||||
currentDatumDataPointer = datumBuffer->data + datumBuffer->len;
|
||||
memset(currentDatumDataPointer, 0, datumLengthAligned);
|
||||
|
||||
if (datumTypeLength > 0)
|
||||
{
|
||||
if (datumTypeByValue)
|
||||
{
|
||||
store_att_byval(currentDatumDataPointer, datum, datumTypeLength);
|
||||
}
|
||||
else
|
||||
{
|
||||
memcpy(currentDatumDataPointer, DatumGetPointer(datum), datumTypeLength);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
Assert(!datumTypeByValue);
|
||||
memcpy(currentDatumDataPointer, DatumGetPointer(datum), datumLength);
|
||||
}
|
||||
|
||||
datumBuffer->len += datumLengthAligned;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* SerializeBlockData serializes and compresses block data at given block index with given
|
||||
* compression type for every column.
|
||||
*/
|
||||
static void
|
||||
SerializeBlockData(TableWriteState *writeState, uint32 blockIndex, uint32 rowCount)
|
||||
{
|
||||
uint32 columnIndex = 0;
|
||||
StripeBuffers *stripeBuffers = writeState->stripeBuffers;
|
||||
BlockData *blockData = writeState->blockData;
|
||||
CompressionType requestedCompressionType = writeState->compressionType;
|
||||
const uint32 columnCount = stripeBuffers->columnCount;
|
||||
StringInfo compressionBuffer = writeState->compressionBuffer;
|
||||
|
||||
/* serialize exist values, data values are already serialized */
|
||||
for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
|
||||
{
|
||||
ColumnBuffers *columnBuffers = stripeBuffers->columnBuffersArray[columnIndex];
|
||||
ColumnBlockBuffers *blockBuffers = columnBuffers->blockBuffersArray[blockIndex];
|
||||
|
||||
blockBuffers->existsBuffer =
|
||||
SerializeBoolArray(blockData->existsArray[columnIndex], rowCount);
|
||||
}
|
||||
|
||||
/*
|
||||
* check and compress value buffers, if a value buffer is not compressable
|
||||
* then keep it as uncompressed, store compression information.
|
||||
*/
|
||||
for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
|
||||
{
|
||||
ColumnBuffers *columnBuffers = stripeBuffers->columnBuffersArray[columnIndex];
|
||||
ColumnBlockBuffers *blockBuffers = columnBuffers->blockBuffersArray[blockIndex];
|
||||
StringInfo serializedValueBuffer = NULL;
|
||||
CompressionType actualCompressionType = COMPRESSION_NONE;
|
||||
bool compressed = false;
|
||||
|
||||
serializedValueBuffer = blockData->valueBufferArray[columnIndex];
|
||||
|
||||
/* the only other supported compression type is pg_lz for now */
|
||||
Assert(requestedCompressionType == COMPRESSION_NONE ||
|
||||
requestedCompressionType == COMPRESSION_PG_LZ);
|
||||
|
||||
/*
|
||||
* if serializedValueBuffer is be compressed, update serializedValueBuffer
|
||||
* with compressed data and store compression type.
|
||||
*/
|
||||
compressed = CompressBuffer(serializedValueBuffer, compressionBuffer,
|
||||
requestedCompressionType);
|
||||
if (compressed)
|
||||
{
|
||||
serializedValueBuffer = compressionBuffer;
|
||||
actualCompressionType = COMPRESSION_PG_LZ;
|
||||
}
|
||||
|
||||
/* store (compressed) value buffer */
|
||||
blockBuffers->valueCompressionType = actualCompressionType;
|
||||
blockBuffers->valueBuffer = CopyStringInfo(serializedValueBuffer);
|
||||
|
||||
/* valueBuffer needs to be reset for next block's data */
|
||||
resetStringInfo(blockData->valueBufferArray[columnIndex]);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* UpdateBlockSkipNodeMinMax takes the given column value, and checks if this
|
||||
* value falls outside the range of minimum/maximum values of the given column
|
||||
* block skip node. If it does, the function updates the column block skip node
|
||||
* accordingly.
|
||||
*/
|
||||
static void
|
||||
UpdateBlockSkipNodeMinMax(ColumnBlockSkipNode *blockSkipNode, Datum columnValue,
|
||||
bool columnTypeByValue, int columnTypeLength,
|
||||
Oid columnCollation, FmgrInfo *comparisonFunction)
|
||||
{
|
||||
bool hasMinMax = blockSkipNode->hasMinMax;
|
||||
Datum previousMinimum = blockSkipNode->minimumValue;
|
||||
Datum previousMaximum = blockSkipNode->maximumValue;
|
||||
Datum currentMinimum = 0;
|
||||
Datum currentMaximum = 0;
|
||||
|
||||
/* if type doesn't have a comparison function, skip min/max values */
|
||||
if (comparisonFunction == NULL)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
if (!hasMinMax)
|
||||
{
|
||||
currentMinimum = DatumCopy(columnValue, columnTypeByValue, columnTypeLength);
|
||||
currentMaximum = DatumCopy(columnValue, columnTypeByValue, columnTypeLength);
|
||||
}
|
||||
else
|
||||
{
|
||||
Datum minimumComparisonDatum = FunctionCall2Coll(comparisonFunction,
|
||||
columnCollation, columnValue,
|
||||
previousMinimum);
|
||||
Datum maximumComparisonDatum = FunctionCall2Coll(comparisonFunction,
|
||||
columnCollation, columnValue,
|
||||
previousMaximum);
|
||||
int minimumComparison = DatumGetInt32(minimumComparisonDatum);
|
||||
int maximumComparison = DatumGetInt32(maximumComparisonDatum);
|
||||
|
||||
if (minimumComparison < 0)
|
||||
{
|
||||
currentMinimum = DatumCopy(columnValue, columnTypeByValue, columnTypeLength);
|
||||
}
|
||||
else
|
||||
{
|
||||
currentMinimum = previousMinimum;
|
||||
}
|
||||
|
||||
if (maximumComparison > 0)
|
||||
{
|
||||
currentMaximum = DatumCopy(columnValue, columnTypeByValue, columnTypeLength);
|
||||
}
|
||||
else
|
||||
{
|
||||
currentMaximum = previousMaximum;
|
||||
}
|
||||
}
|
||||
|
||||
blockSkipNode->hasMinMax = true;
|
||||
blockSkipNode->minimumValue = currentMinimum;
|
||||
blockSkipNode->maximumValue = currentMaximum;
|
||||
}
|
||||
|
||||
|
||||
/* Creates a copy of the given datum. */
|
||||
static Datum
|
||||
DatumCopy(Datum datum, bool datumTypeByValue, int datumTypeLength)
|
||||
{
|
||||
Datum datumCopy = 0;
|
||||
|
||||
if (datumTypeByValue)
|
||||
{
|
||||
datumCopy = datum;
|
||||
}
|
||||
else
|
||||
{
|
||||
uint32 datumLength = att_addlength_datum(0, datumTypeLength, datum);
|
||||
char *datumData = palloc0(datumLength);
|
||||
memcpy(datumData, DatumGetPointer(datum), datumLength);
|
||||
|
||||
datumCopy = PointerGetDatum(datumData);
|
||||
}
|
||||
|
||||
return datumCopy;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* CopyStringInfo creates a deep copy of given source string allocating only needed
|
||||
* amount of memory.
|
||||
*/
|
||||
static StringInfo
|
||||
CopyStringInfo(StringInfo sourceString)
|
||||
{
|
||||
StringInfo targetString = palloc0(sizeof(StringInfoData));
|
||||
|
||||
if (sourceString->len > 0)
|
||||
{
|
||||
targetString->data = palloc0(sourceString->len);
|
||||
targetString->len = sourceString->len;
|
||||
targetString->maxlen = sourceString->len;
|
||||
memcpy(targetString->data, sourceString->data, sourceString->len);
|
||||
}
|
||||
|
||||
return targetString;
|
||||
}
|
|
@ -0,0 +1,3 @@
|
|||
"{1,2,3}","{1,2,3}","{a,b,c}"
|
||||
{},{},{}
|
||||
"{-2147483648,2147483647}","{-9223372036854775808,9223372036854775807}","{""""}"
|
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,5 @@
|
|||
a,1990-01-10,2090,97.1,XA ,{a}
|
||||
b,1990-11-01,2203,98.1,XA ,"{a,b}"
|
||||
c,1988-11-01,2907,99.4,XB ,"{w,y}"
|
||||
d,1985-05-05,2314,98.3,XB ,{}
|
||||
e,1995-05-05,2236,98.2,XC ,{a}
|
|
|
@ -0,0 +1,3 @@
|
|||
f,1983-04-02,3090,99.6,XD ,"{a,b,c,y}"
|
||||
g,1991-12-13,1803,85.1,XD ,"{a,c}"
|
||||
h,1987-10-26,2112,95.4,XD ,"{w,a}"
|
|
|
@ -0,0 +1,2 @@
|
|||
2000-01-02 04:05:06,1999-01-08 14:05:06+02,2000-01-02,04:05:06,04:00:00
|
||||
1970-01-01 00:00:00,infinity,-infinity,00:00:00,00:00:00
|
|
|
@ -0,0 +1,2 @@
|
|||
a,"(2,b)"
|
||||
b,"(3,c)"
|
|
|
@ -0,0 +1,2 @@
|
|||
,{NULL},"(,)"
|
||||
,,
|
|
|
@ -0,0 +1,2 @@
|
|||
f,\xdeadbeef,$1.00,192.168.1.2,10101,a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11,"{""key"": ""value""}"
|
||||
t,\xcdb0,$1.50,127.0.0.1,"",a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11,[]
|
|
|
@ -0,0 +1,2 @@
|
|||
"[1,3)","[1,3)","[1,3)","[""2000-01-02 00:30:00"",""2010-02-03 12:30:00"")"
|
||||
empty,"[1,)","(,)",empty
|
|
|
@ -0,0 +1,177 @@
|
|||
--
|
||||
-- Testing ALTER TABLE on cstore_fdw tables.
|
||||
--
|
||||
CREATE TABLE test_alter_table (a int, b int, c int) USING cstore_tableam;
|
||||
WITH sample_data AS (VALUES
|
||||
(1, 2, 3),
|
||||
(4, 5, 6),
|
||||
(7, 8, 9)
|
||||
)
|
||||
INSERT INTO test_alter_table SELECT * FROM sample_data;
|
||||
-- drop a column
|
||||
ALTER TABLE test_alter_table DROP COLUMN a;
|
||||
-- test analyze
|
||||
ANALYZE test_alter_table;
|
||||
-- verify select queries run as expected
|
||||
SELECT * FROM test_alter_table;
|
||||
b | c
|
||||
---+---
|
||||
2 | 3
|
||||
5 | 6
|
||||
8 | 9
|
||||
(3 rows)
|
||||
|
||||
SELECT a FROM test_alter_table;
|
||||
ERROR: column "a" does not exist
|
||||
LINE 1: SELECT a FROM test_alter_table;
|
||||
^
|
||||
SELECT b FROM test_alter_table;
|
||||
b
|
||||
---
|
||||
2
|
||||
5
|
||||
8
|
||||
(3 rows)
|
||||
|
||||
-- verify insert runs as expected
|
||||
INSERT INTO test_alter_table (SELECT 3, 5, 8);
|
||||
ERROR: INSERT has more expressions than target columns
|
||||
LINE 1: INSERT INTO test_alter_table (SELECT 3, 5, 8);
|
||||
^
|
||||
INSERT INTO test_alter_table (SELECT 5, 8);
|
||||
-- add a column with no defaults
|
||||
ALTER TABLE test_alter_table ADD COLUMN d int;
|
||||
SELECT * FROM test_alter_table;
|
||||
b | c | d
|
||||
---+---+---
|
||||
2 | 3 |
|
||||
5 | 6 |
|
||||
8 | 9 |
|
||||
5 | 8 |
|
||||
(4 rows)
|
||||
|
||||
INSERT INTO test_alter_table (SELECT 3, 5, 8);
|
||||
SELECT * FROM test_alter_table;
|
||||
b | c | d
|
||||
---+---+---
|
||||
2 | 3 |
|
||||
5 | 6 |
|
||||
8 | 9 |
|
||||
5 | 8 |
|
||||
3 | 5 | 8
|
||||
(5 rows)
|
||||
|
||||
-- add a fixed-length column with default value
|
||||
ALTER TABLE test_alter_table ADD COLUMN e int default 3;
|
||||
SELECT * from test_alter_table;
|
||||
b | c | d | e
|
||||
---+---+---+---
|
||||
2 | 3 | | 3
|
||||
5 | 6 | | 3
|
||||
8 | 9 | | 3
|
||||
5 | 8 | | 3
|
||||
3 | 5 | 8 | 3
|
||||
(5 rows)
|
||||
|
||||
INSERT INTO test_alter_table (SELECT 1, 2, 4, 8);
|
||||
SELECT * from test_alter_table;
|
||||
b | c | d | e
|
||||
---+---+---+---
|
||||
2 | 3 | | 3
|
||||
5 | 6 | | 3
|
||||
8 | 9 | | 3
|
||||
5 | 8 | | 3
|
||||
3 | 5 | 8 | 3
|
||||
1 | 2 | 4 | 8
|
||||
(6 rows)
|
||||
|
||||
-- add a variable-length column with default value
|
||||
ALTER TABLE test_alter_table ADD COLUMN f text DEFAULT 'TEXT ME';
|
||||
SELECT * from test_alter_table;
|
||||
b | c | d | e | f
|
||||
---+---+---+---+---------
|
||||
2 | 3 | | 3 | TEXT ME
|
||||
5 | 6 | | 3 | TEXT ME
|
||||
8 | 9 | | 3 | TEXT ME
|
||||
5 | 8 | | 3 | TEXT ME
|
||||
3 | 5 | 8 | 3 | TEXT ME
|
||||
1 | 2 | 4 | 8 | TEXT ME
|
||||
(6 rows)
|
||||
|
||||
INSERT INTO test_alter_table (SELECT 1, 2, 4, 8, 'ABCDEF');
|
||||
SELECT * from test_alter_table;
|
||||
b | c | d | e | f
|
||||
---+---+---+---+---------
|
||||
2 | 3 | | 3 | TEXT ME
|
||||
5 | 6 | | 3 | TEXT ME
|
||||
8 | 9 | | 3 | TEXT ME
|
||||
5 | 8 | | 3 | TEXT ME
|
||||
3 | 5 | 8 | 3 | TEXT ME
|
||||
1 | 2 | 4 | 8 | TEXT ME
|
||||
1 | 2 | 4 | 8 | ABCDEF
|
||||
(7 rows)
|
||||
|
||||
-- drop couple of columns
|
||||
ALTER TABLE test_alter_table DROP COLUMN c;
|
||||
ALTER TABLE test_alter_table DROP COLUMN e;
|
||||
ANALYZE test_alter_table;
|
||||
SELECT * from test_alter_table;
|
||||
b | d | f
|
||||
---+---+---------
|
||||
2 | | TEXT ME
|
||||
5 | | TEXT ME
|
||||
8 | | TEXT ME
|
||||
5 | | TEXT ME
|
||||
3 | 8 | TEXT ME
|
||||
1 | 4 | TEXT ME
|
||||
1 | 4 | ABCDEF
|
||||
(7 rows)
|
||||
|
||||
SELECT count(*) from test_alter_table;
|
||||
count
|
||||
-------
|
||||
7
|
||||
(1 row)
|
||||
|
||||
SELECT count(t.*) from test_alter_table t;
|
||||
count
|
||||
-------
|
||||
7
|
||||
(1 row)
|
||||
|
||||
-- unsupported default values
|
||||
ALTER TABLE test_alter_table ADD COLUMN g boolean DEFAULT isfinite(current_date);
|
||||
ALTER TABLE test_alter_table ADD COLUMN h DATE DEFAULT current_date;
|
||||
SELECT * FROM test_alter_table;
|
||||
ERROR: unsupported default value for column "g"
|
||||
HINT: Expression is either mutable or does not evaluate to constant value
|
||||
ALTER TABLE test_alter_table ALTER COLUMN g DROP DEFAULT;
|
||||
SELECT * FROM test_alter_table;
|
||||
ERROR: unsupported default value for column "h"
|
||||
HINT: Expression is either mutable or does not evaluate to constant value
|
||||
ALTER TABLE test_alter_table ALTER COLUMN h DROP DEFAULT;
|
||||
ANALYZE test_alter_table;
|
||||
SELECT * FROM test_alter_table;
|
||||
b | d | f | g | h
|
||||
---+---+---------+---+---
|
||||
2 | | TEXT ME | |
|
||||
5 | | TEXT ME | |
|
||||
8 | | TEXT ME | |
|
||||
5 | | TEXT ME | |
|
||||
3 | 8 | TEXT ME | |
|
||||
1 | 4 | TEXT ME | |
|
||||
1 | 4 | ABCDEF | |
|
||||
(7 rows)
|
||||
|
||||
-- unsupported type change
|
||||
ALTER TABLE test_alter_table ADD COLUMN i int;
|
||||
ALTER TABLE test_alter_table ADD COLUMN j float;
|
||||
ALTER TABLE test_alter_table ADD COLUMN k text;
|
||||
-- this is valid type change
|
||||
ALTER TABLE test_alter_table ALTER COLUMN i TYPE float;
|
||||
-- this is not valid
|
||||
ALTER TABLE test_alter_table ALTER COLUMN j TYPE int;
|
||||
-- text / varchar conversion is valid both ways
|
||||
ALTER TABLE test_alter_table ALTER COLUMN k TYPE varchar(20);
|
||||
ALTER TABLE test_alter_table ALTER COLUMN k TYPE text;
|
||||
DROP TABLE test_alter_table;
|
|
@ -0,0 +1,19 @@
|
|||
--
|
||||
-- Test the ANALYZE command for cstore_fdw tables.
|
||||
--
|
||||
-- ANALYZE uncompressed table
|
||||
ANALYZE contestant;
|
||||
SELECT count(*) FROM pg_stats WHERE tablename='contestant';
|
||||
count
|
||||
-------
|
||||
6
|
||||
(1 row)
|
||||
|
||||
-- ANALYZE compressed table
|
||||
ANALYZE contestant_compressed;
|
||||
SELECT count(*) FROM pg_stats WHERE tablename='contestant_compressed';
|
||||
count
|
||||
-------
|
||||
6
|
||||
(1 row)
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
DROP TABLE test_null_values;
|
||||
DROP TABLE test_other_types;
|
||||
DROP TABLE test_range_types;
|
||||
DROP TABLE test_enum_and_composite_types;
|
||||
DROP TYPE composite_type;
|
||||
DROP TYPE enum_type;
|
||||
DROP TABLE test_datetime_types;
|
||||
DROP TABLE test_array_types;
|
|
@ -0,0 +1,20 @@
|
|||
--
|
||||
-- Test the CREATE statements related to cstore.
|
||||
--
|
||||
-- Create uncompressed table
|
||||
CREATE TABLE contestant (handle TEXT, birthdate DATE, rating INT,
|
||||
percentile FLOAT, country CHAR(3), achievements TEXT[])
|
||||
USING cstore_tableam;
|
||||
-- Create compressed table with automatically determined file path
|
||||
-- COMPRESSED
|
||||
CREATE TABLE contestant_compressed (handle TEXT, birthdate DATE, rating INT,
|
||||
percentile FLOAT, country CHAR(3), achievements TEXT[])
|
||||
USING cstore_tableam;
|
||||
-- Test that querying an empty table works
|
||||
ANALYZE contestant;
|
||||
SELECT count(*) FROM contestant;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
--
|
||||
-- Tests the different DROP commands for cstore_fdw tables.
|
||||
--
|
||||
-- DROP TABL
|
||||
-- DROP SCHEMA
|
||||
-- DROP EXTENSION
|
||||
-- DROP DATABASE
|
||||
--
|
||||
-- Note that travis does not create
|
||||
-- cstore_fdw extension in default database (postgres). This has caused
|
||||
-- different behavior between travis tests and local tests. Thus
|
||||
-- 'postgres' directory is excluded from comparison to have the same result.
|
||||
-- store postgres database oid
|
||||
SELECT oid postgres_oid FROM pg_database WHERE datname = 'postgres' \gset
|
||||
SELECT count(*) AS cstore_data_files_before_drop FROM cstore.cstore_data_files \gset
|
||||
-- DROP cstore_fdw tables
|
||||
DROP TABLE contestant;
|
||||
DROP TABLE contestant_compressed;
|
||||
-- make sure DROP deletes metadata
|
||||
SELECT :cstore_data_files_before_drop - count(*) FROM cstore.cstore_data_files;
|
||||
?column?
|
||||
----------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
-- Create a cstore_fdw table under a schema and drop it.
|
||||
CREATE SCHEMA test_schema;
|
||||
CREATE TABLE test_schema.test_table(data int) USING cstore_tableam;
|
||||
SELECT count(*) AS cstore_data_files_before_drop FROM cstore.cstore_data_files \gset
|
||||
DROP SCHEMA test_schema CASCADE;
|
||||
NOTICE: drop cascades to table test_schema.test_table
|
||||
SELECT :cstore_data_files_before_drop - count(*) FROM cstore.cstore_data_files;
|
||||
?column?
|
||||
----------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT current_database() datname \gset
|
||||
CREATE DATABASE db_to_drop;
|
||||
\c db_to_drop
|
||||
CREATE EXTENSION cstore_fdw;
|
||||
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database() \gset
|
||||
CREATE TABLE test_table(data int) USING cstore_tableam;
|
||||
DROP EXTENSION cstore_fdw CASCADE;
|
||||
NOTICE: drop cascades to table test_table
|
||||
-- test database drop
|
||||
CREATE EXTENSION cstore_fdw;
|
||||
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database() \gset
|
||||
CREATE TABLE test_table(data int) USING cstore_tableam;
|
||||
\c :datname
|
||||
DROP DATABASE db_to_drop;
|
|
@ -0,0 +1,18 @@
|
|||
--
|
||||
-- Test utility functions for cstore_fdw tables.
|
||||
--
|
||||
CREATE TABLE empty_table (a int) USING cstore_tableam;
|
||||
CREATE TABLE table_with_data (a int) USING cstore_tableam;
|
||||
CREATE TABLE non_cstore_table (a int);
|
||||
COPY table_with_data FROM STDIN;
|
||||
SELECT pg_relation_size('empty_table') < pg_relation_size('table_with_data');
|
||||
?column?
|
||||
----------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT cstore_table_size('non_cstore_table');
|
||||
ERROR: relation is not a cstore table
|
||||
DROP TABLE empty_table;
|
||||
DROP TABLE table_with_data;
|
||||
DROP TABLE non_cstore_table;
|
|
@ -0,0 +1,86 @@
|
|||
--
|
||||
-- Testing insert on cstore_fdw tables.
|
||||
--
|
||||
CREATE TABLE test_insert_command (a int) USING cstore_tableam;
|
||||
-- test single row inserts fail
|
||||
select count(*) from test_insert_command;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
insert into test_insert_command values(1);
|
||||
select count(*) from test_insert_command;
|
||||
count
|
||||
-------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
insert into test_insert_command default values;
|
||||
select count(*) from test_insert_command;
|
||||
count
|
||||
-------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
-- test inserting from another table succeed
|
||||
CREATE TABLE test_insert_command_data (a int);
|
||||
select count(*) from test_insert_command_data;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
insert into test_insert_command_data values(1);
|
||||
select count(*) from test_insert_command_data;
|
||||
count
|
||||
-------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
insert into test_insert_command select * from test_insert_command_data;
|
||||
select count(*) from test_insert_command;
|
||||
count
|
||||
-------
|
||||
3
|
||||
(1 row)
|
||||
|
||||
drop table test_insert_command_data;
|
||||
drop table test_insert_command;
|
||||
-- test long attribute value insertion
|
||||
-- create sufficiently long text so that data is stored in toast
|
||||
CREATE TABLE test_long_text AS
|
||||
SELECT a as int_val, string_agg(random()::text, '') as text_val
|
||||
FROM generate_series(1, 10) a, generate_series(1, 1000) b
|
||||
GROUP BY a ORDER BY a;
|
||||
-- store hash values of text for later comparison
|
||||
CREATE TABLE test_long_text_hash AS
|
||||
SELECT int_val, md5(text_val) AS hash
|
||||
FROM test_long_text;
|
||||
CREATE TABLE test_cstore_long_text(int_val int, text_val text)
|
||||
USING cstore_tableam;
|
||||
-- store long text in cstore table
|
||||
INSERT INTO test_cstore_long_text SELECT * FROM test_long_text;
|
||||
-- drop source table to remove original text from toast
|
||||
DROP TABLE test_long_text;
|
||||
-- check if text data is still available in cstore table
|
||||
-- by comparing previously stored hash.
|
||||
SELECT a.int_val
|
||||
FROM test_long_text_hash a, test_cstore_long_text c
|
||||
WHERE a.int_val = c.int_val AND a.hash = md5(c.text_val);
|
||||
int_val
|
||||
---------
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
5
|
||||
6
|
||||
7
|
||||
8
|
||||
9
|
||||
10
|
||||
(10 rows)
|
||||
|
||||
DROP TABLE test_long_text_hash;
|
||||
DROP TABLE test_cstore_long_text;
|
|
@ -0,0 +1,37 @@
|
|||
CREATE SCHEMA am_cstore_join;
|
||||
SET search_path TO am_cstore_join;
|
||||
CREATE TABLE users (id int, name text) USING cstore_tableam;
|
||||
INSERT INTO users SELECT a, 'name' || a FROM generate_series(0,30-1) AS a;
|
||||
CREATE TABLE things (id int, user_id int, name text) USING cstore_tableam;
|
||||
INSERT INTO things SELECT a, a % 30, 'thing' || a FROM generate_series(1,300) AS a;
|
||||
-- force the nested loop to rescan the table
|
||||
SET enable_material TO off;
|
||||
SET enable_hashjoin TO off;
|
||||
SET enable_mergejoin TO off;
|
||||
SELECT count(*)
|
||||
FROM users
|
||||
JOIN things ON (users.id = things.user_id)
|
||||
WHERE things.id > 290;
|
||||
count
|
||||
-------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
-- verify the join uses a nested loop to trigger the rescan behaviour
|
||||
EXPLAIN (COSTS OFF)
|
||||
SELECT count(*)
|
||||
FROM users
|
||||
JOIN things ON (users.id = things.user_id)
|
||||
WHERE things.id > 299990;
|
||||
QUERY PLAN
|
||||
--------------------------------------------------
|
||||
Aggregate
|
||||
-> Nested Loop
|
||||
Join Filter: (users.id = things.user_id)
|
||||
-> Custom Scan (CStoreScan) on things
|
||||
Filter: (id > 299990)
|
||||
-> Custom Scan (CStoreScan) on users
|
||||
(6 rows)
|
||||
|
||||
SET client_min_messages TO warning;
|
||||
DROP SCHEMA am_cstore_join CASCADE;
|
|
@ -0,0 +1,105 @@
|
|||
--
|
||||
-- Test querying cstore_fdw tables.
|
||||
--
|
||||
-- Settings to make the result deterministic
|
||||
SET datestyle = "ISO, YMD";
|
||||
-- Query uncompressed data
|
||||
SELECT count(*) FROM contestant;
|
||||
count
|
||||
-------
|
||||
8
|
||||
(1 row)
|
||||
|
||||
SELECT avg(rating), stddev_samp(rating) FROM contestant;
|
||||
avg | stddev_samp
|
||||
-----------------------+------------------
|
||||
2344.3750000000000000 | 433.746119785032
|
||||
(1 row)
|
||||
|
||||
SELECT country, avg(rating) FROM contestant WHERE rating > 2200
|
||||
GROUP BY country ORDER BY country;
|
||||
country | avg
|
||||
---------+-----------------------
|
||||
XA | 2203.0000000000000000
|
||||
XB | 2610.5000000000000000
|
||||
XC | 2236.0000000000000000
|
||||
XD | 3090.0000000000000000
|
||||
(4 rows)
|
||||
|
||||
SELECT * FROM contestant ORDER BY handle;
|
||||
handle | birthdate | rating | percentile | country | achievements
|
||||
--------+------------+--------+------------+---------+--------------
|
||||
a | 1990-01-10 | 2090 | 97.1 | XA | {a}
|
||||
b | 1990-11-01 | 2203 | 98.1 | XA | {a,b}
|
||||
c | 1988-11-01 | 2907 | 99.4 | XB | {w,y}
|
||||
d | 1985-05-05 | 2314 | 98.3 | XB | {}
|
||||
e | 1995-05-05 | 2236 | 98.2 | XC | {a}
|
||||
f | 1983-04-02 | 3090 | 99.6 | XD | {a,b,c,y}
|
||||
g | 1991-12-13 | 1803 | 85.1 | XD | {a,c}
|
||||
h | 1987-10-26 | 2112 | 95.4 | XD | {w,a}
|
||||
(8 rows)
|
||||
|
||||
-- Query compressed data
|
||||
SELECT count(*) FROM contestant_compressed;
|
||||
count
|
||||
-------
|
||||
8
|
||||
(1 row)
|
||||
|
||||
SELECT avg(rating), stddev_samp(rating) FROM contestant_compressed;
|
||||
avg | stddev_samp
|
||||
-----------------------+------------------
|
||||
2344.3750000000000000 | 433.746119785032
|
||||
(1 row)
|
||||
|
||||
SELECT country, avg(rating) FROM contestant_compressed WHERE rating > 2200
|
||||
GROUP BY country ORDER BY country;
|
||||
country | avg
|
||||
---------+-----------------------
|
||||
XA | 2203.0000000000000000
|
||||
XB | 2610.5000000000000000
|
||||
XC | 2236.0000000000000000
|
||||
XD | 3090.0000000000000000
|
||||
(4 rows)
|
||||
|
||||
SELECT * FROM contestant_compressed ORDER BY handle;
|
||||
handle | birthdate | rating | percentile | country | achievements
|
||||
--------+------------+--------+------------+---------+--------------
|
||||
a | 1990-01-10 | 2090 | 97.1 | XA | {a}
|
||||
b | 1990-11-01 | 2203 | 98.1 | XA | {a,b}
|
||||
c | 1988-11-01 | 2907 | 99.4 | XB | {w,y}
|
||||
d | 1985-05-05 | 2314 | 98.3 | XB | {}
|
||||
e | 1995-05-05 | 2236 | 98.2 | XC | {a}
|
||||
f | 1983-04-02 | 3090 | 99.6 | XD | {a,b,c,y}
|
||||
g | 1991-12-13 | 1803 | 85.1 | XD | {a,c}
|
||||
h | 1987-10-26 | 2112 | 95.4 | XD | {w,a}
|
||||
(8 rows)
|
||||
|
||||
-- Verify that we handle whole-row references correctly
|
||||
SELECT to_json(v) FROM contestant v ORDER BY rating LIMIT 1;
|
||||
to_json
|
||||
------------------------------------------------------------------------------------------------------------------
|
||||
{"handle":"g","birthdate":"1991-12-13","rating":1803,"percentile":85.1,"country":"XD ","achievements":["a","c"]}
|
||||
(1 row)
|
||||
|
||||
-- Test variables used in expressions
|
||||
CREATE TABLE union_first (a int, b int) USING cstore_tableam;
|
||||
CREATE TABLE union_second (a int, b int) USING cstore_tableam;
|
||||
INSERT INTO union_first SELECT a, a FROM generate_series(1, 5) a;
|
||||
INSERT INTO union_second SELECT a, a FROM generate_series(11, 15) a;
|
||||
(SELECT a*1, b FROM union_first) union all (SELECT a*1, b FROM union_second);
|
||||
?column? | b
|
||||
----------+----
|
||||
1 | 1
|
||||
2 | 2
|
||||
3 | 3
|
||||
4 | 4
|
||||
5 | 5
|
||||
11 | 11
|
||||
12 | 12
|
||||
13 | 13
|
||||
14 | 14
|
||||
15 | 15
|
||||
(10 rows)
|
||||
|
||||
DROP TABLE union_first, union_second;
|
|
@ -0,0 +1,77 @@
|
|||
--
|
||||
-- Testing we handle rollbacks properly
|
||||
--
|
||||
CREATE TABLE t(a int, b int) USING cstore_tableam;
|
||||
BEGIN;
|
||||
INSERT INTO t SELECT i, i+1 FROM generate_series(1, 10) i;
|
||||
ROLLBACK;
|
||||
SELECT count(*) FROM t;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- check stripe metadata also have been rolled-back
|
||||
SELECT count(*) FROM cstore.cstore_stripes a, pg_class b
|
||||
WHERE a.relfilenode = b.relfilenode AND b.relname = 't';
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
INSERT INTO t SELECT i, i+1 FROM generate_series(1, 10) i;
|
||||
SELECT count(*) FROM t;
|
||||
count
|
||||
-------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM cstore.cstore_stripes a, pg_class b
|
||||
WHERE a.relfilenode = b.relfilenode AND b.relname = 't';
|
||||
count
|
||||
-------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- savepoint rollback
|
||||
BEGIN;
|
||||
SAVEPOINT s0;
|
||||
INSERT INTO t SELECT i, i+1 FROM generate_series(1, 10) i;
|
||||
SAVEPOINT s1;
|
||||
INSERT INTO t SELECT i, i+1 FROM generate_series(1, 10) i;
|
||||
SELECT count(*) FROM t;
|
||||
count
|
||||
-------
|
||||
30
|
||||
(1 row)
|
||||
|
||||
ROLLBACK TO SAVEPOINT s1;
|
||||
SELECT count(*) FROM t;
|
||||
count
|
||||
-------
|
||||
20
|
||||
(1 row)
|
||||
|
||||
ROLLBACK TO SAVEPOINT s0;
|
||||
SELECT count(*) FROM t;
|
||||
count
|
||||
-------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
INSERT INTO t SELECT i, i+1 FROM generate_series(1, 10) i;
|
||||
COMMIT;
|
||||
SELECT count(*) FROM t;
|
||||
count
|
||||
-------
|
||||
20
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM cstore.cstore_stripes a, pg_class b
|
||||
WHERE a.relfilenode = b.relfilenode AND b.relname = 't';
|
||||
count
|
||||
-------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
DROP TABLE t;
|
|
@ -0,0 +1,179 @@
|
|||
CREATE SCHEMA am_tableoptions;
|
||||
SET search_path TO am_tableoptions;
|
||||
CREATE TABLE table_options (a int) USING cstore_tableam;
|
||||
INSERT INTO table_options SELECT generate_series(1,100);
|
||||
-- show table_options settings
|
||||
SELECT * FROM cstore.cstore_options
|
||||
WHERE regclass = 'table_options'::regclass;
|
||||
regclass | block_row_count | stripe_row_count | compression
|
||||
---------------+-----------------+------------------+-------------
|
||||
table_options | 10000 | 150000 | none
|
||||
(1 row)
|
||||
|
||||
-- test changing the compression
|
||||
SELECT alter_cstore_table_set('table_options', compression => 'pglz');
|
||||
alter_cstore_table_set
|
||||
------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- show table_options settings
|
||||
SELECT * FROM cstore.cstore_options
|
||||
WHERE regclass = 'table_options'::regclass;
|
||||
regclass | block_row_count | stripe_row_count | compression
|
||||
---------------+-----------------+------------------+-------------
|
||||
table_options | 10000 | 150000 | pglz
|
||||
(1 row)
|
||||
|
||||
-- test changing the block_row_count
|
||||
SELECT alter_cstore_table_set('table_options', block_row_count => 10);
|
||||
alter_cstore_table_set
|
||||
------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- show table_options settings
|
||||
SELECT * FROM cstore.cstore_options
|
||||
WHERE regclass = 'table_options'::regclass;
|
||||
regclass | block_row_count | stripe_row_count | compression
|
||||
---------------+-----------------+------------------+-------------
|
||||
table_options | 10 | 150000 | pglz
|
||||
(1 row)
|
||||
|
||||
-- test changing the block_row_count
|
||||
SELECT alter_cstore_table_set('table_options', stripe_row_count => 100);
|
||||
alter_cstore_table_set
|
||||
------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- show table_options settings
|
||||
SELECT * FROM cstore.cstore_options
|
||||
WHERE regclass = 'table_options'::regclass;
|
||||
regclass | block_row_count | stripe_row_count | compression
|
||||
---------------+-----------------+------------------+-------------
|
||||
table_options | 10 | 100 | pglz
|
||||
(1 row)
|
||||
|
||||
-- VACUUM FULL creates a new table, make sure it copies settings from the table you are vacuuming
|
||||
VACUUM FULL table_options;
|
||||
-- show table_options settings
|
||||
SELECT * FROM cstore.cstore_options
|
||||
WHERE regclass = 'table_options'::regclass;
|
||||
regclass | block_row_count | stripe_row_count | compression
|
||||
---------------+-----------------+------------------+-------------
|
||||
table_options | 10 | 100 | pglz
|
||||
(1 row)
|
||||
|
||||
-- set all settings at the same time
|
||||
SELECT alter_cstore_table_set('table_options', stripe_row_count => 1000, block_row_count => 100, compression => 'none');
|
||||
alter_cstore_table_set
|
||||
------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- show table_options settings
|
||||
SELECT * FROM cstore.cstore_options
|
||||
WHERE regclass = 'table_options'::regclass;
|
||||
regclass | block_row_count | stripe_row_count | compression
|
||||
---------------+-----------------+------------------+-------------
|
||||
table_options | 100 | 1000 | none
|
||||
(1 row)
|
||||
|
||||
-- reset settings one by one to the version of the GUC's
|
||||
SET cstore.block_row_count TO 1000;
|
||||
SET cstore.stripe_row_count TO 10000;
|
||||
SET cstore.compression TO 'pglz';
|
||||
-- verify setting the GUC's didn't change the settings
|
||||
-- show table_options settings
|
||||
SELECT * FROM cstore.cstore_options
|
||||
WHERE regclass = 'table_options'::regclass;
|
||||
regclass | block_row_count | stripe_row_count | compression
|
||||
---------------+-----------------+------------------+-------------
|
||||
table_options | 100 | 1000 | none
|
||||
(1 row)
|
||||
|
||||
SELECT alter_cstore_table_reset('table_options', block_row_count => true);
|
||||
alter_cstore_table_reset
|
||||
--------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- show table_options settings
|
||||
SELECT * FROM cstore.cstore_options
|
||||
WHERE regclass = 'table_options'::regclass;
|
||||
regclass | block_row_count | stripe_row_count | compression
|
||||
---------------+-----------------+------------------+-------------
|
||||
table_options | 1000 | 1000 | none
|
||||
(1 row)
|
||||
|
||||
SELECT alter_cstore_table_reset('table_options', stripe_row_count => true);
|
||||
alter_cstore_table_reset
|
||||
--------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- show table_options settings
|
||||
SELECT * FROM cstore.cstore_options
|
||||
WHERE regclass = 'table_options'::regclass;
|
||||
regclass | block_row_count | stripe_row_count | compression
|
||||
---------------+-----------------+------------------+-------------
|
||||
table_options | 1000 | 10000 | none
|
||||
(1 row)
|
||||
|
||||
SELECT alter_cstore_table_reset('table_options', compression => true);
|
||||
alter_cstore_table_reset
|
||||
--------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- show table_options settings
|
||||
SELECT * FROM cstore.cstore_options
|
||||
WHERE regclass = 'table_options'::regclass;
|
||||
regclass | block_row_count | stripe_row_count | compression
|
||||
---------------+-----------------+------------------+-------------
|
||||
table_options | 1000 | 10000 | pglz
|
||||
(1 row)
|
||||
|
||||
-- verify resetting all settings at once work
|
||||
SET cstore.block_row_count TO 10000;
|
||||
SET cstore.stripe_row_count TO 100000;
|
||||
SET cstore.compression TO 'none';
|
||||
-- show table_options settings
|
||||
SELECT * FROM cstore.cstore_options
|
||||
WHERE regclass = 'table_options'::regclass;
|
||||
regclass | block_row_count | stripe_row_count | compression
|
||||
---------------+-----------------+------------------+-------------
|
||||
table_options | 1000 | 10000 | pglz
|
||||
(1 row)
|
||||
|
||||
SELECT alter_cstore_table_reset(
|
||||
'table_options',
|
||||
block_row_count => true,
|
||||
stripe_row_count => true,
|
||||
compression => true);
|
||||
alter_cstore_table_reset
|
||||
--------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- show table_options settings
|
||||
SELECT * FROM cstore.cstore_options
|
||||
WHERE regclass = 'table_options'::regclass;
|
||||
regclass | block_row_count | stripe_row_count | compression
|
||||
---------------+-----------------+------------------+-------------
|
||||
table_options | 10000 | 100000 | none
|
||||
(1 row)
|
||||
|
||||
-- verify edge cases
|
||||
-- first start with a table that is not a cstore table
|
||||
CREATE TABLE not_a_cstore_table (a int);
|
||||
SELECT alter_cstore_table_set('not_a_cstore_table', compression => 'pglz');
|
||||
ERROR: table not_a_cstore_table is not a cstore table
|
||||
SELECT alter_cstore_table_reset('not_a_cstore_table', compression => true);
|
||||
ERROR: table not_a_cstore_table is not a cstore table
|
||||
-- verify you can't use a compression that is not known
|
||||
SELECT alter_cstore_table_set('table_options', compression => 'foobar');
|
||||
ERROR: unknown compression type for cstore table: foobar
|
||||
SET client_min_messages TO warning;
|
||||
DROP SCHEMA am_tableoptions CASCADE;
|
|
@ -0,0 +1,65 @@
|
|||
create or replace function trs_before() returns trigger language plpgsql as $$
|
||||
BEGIN
|
||||
RAISE NOTICE 'BEFORE STATEMENT %', TG_OP;
|
||||
RETURN NULL;
|
||||
END;
|
||||
$$;
|
||||
create or replace function trs_after() returns trigger language plpgsql as $$
|
||||
DECLARE
|
||||
r RECORD;
|
||||
BEGIN
|
||||
RAISE NOTICE 'AFTER STATEMENT %', TG_OP;
|
||||
IF (TG_OP = 'DELETE') THEN
|
||||
FOR R IN select * from old_table
|
||||
LOOP
|
||||
RAISE NOTICE ' (%)', r.i;
|
||||
END LOOP;
|
||||
ELSE
|
||||
FOR R IN select * from new_table
|
||||
LOOP
|
||||
RAISE NOTICE ' (%)', r.i;
|
||||
END LOOP;
|
||||
END IF;
|
||||
RETURN NULL;
|
||||
END;
|
||||
$$;
|
||||
create or replace function trr_before() returns trigger language plpgsql as $$
|
||||
BEGIN
|
||||
RAISE NOTICE 'BEFORE ROW %: (%)', TG_OP, NEW.i;
|
||||
RETURN NEW;
|
||||
END;
|
||||
$$;
|
||||
create or replace function trr_after() returns trigger language plpgsql as $$
|
||||
BEGIN
|
||||
RAISE NOTICE 'AFTER ROW %: (%)', TG_OP, NEW.i;
|
||||
RETURN NEW;
|
||||
END;
|
||||
$$;
|
||||
create table test_tr(i int) using cstore_tableam;
|
||||
create trigger tr_before_stmt before insert on test_tr
|
||||
for each statement execute procedure trs_before();
|
||||
create trigger tr_after_stmt after insert on test_tr
|
||||
referencing new table as new_table
|
||||
for each statement execute procedure trs_after();
|
||||
create trigger tr_before_row before insert on test_tr
|
||||
for each row execute procedure trr_before();
|
||||
-- after triggers require TIDs, which are not supported yet
|
||||
create trigger tr_after_row after insert on test_tr
|
||||
for each row execute procedure trr_after();
|
||||
ERROR: AFTER ROW triggers are not supported for columnstore access method
|
||||
HINT: Consider an AFTER STATEMENT trigger instead.
|
||||
insert into test_tr values(1);
|
||||
NOTICE: BEFORE STATEMENT INSERT
|
||||
NOTICE: BEFORE ROW INSERT: (1)
|
||||
NOTICE: AFTER STATEMENT INSERT
|
||||
NOTICE: (1)
|
||||
insert into test_tr values(2),(3),(4);
|
||||
NOTICE: BEFORE STATEMENT INSERT
|
||||
NOTICE: BEFORE ROW INSERT: (2)
|
||||
NOTICE: BEFORE ROW INSERT: (3)
|
||||
NOTICE: BEFORE ROW INSERT: (4)
|
||||
NOTICE: AFTER STATEMENT INSERT
|
||||
NOTICE: (2)
|
||||
NOTICE: (3)
|
||||
NOTICE: (4)
|
||||
drop table test_tr;
|
|
@ -0,0 +1,271 @@
|
|||
--
|
||||
-- Test the TRUNCATE TABLE command for cstore_fdw tables.
|
||||
--
|
||||
-- print whether we're using version > 10 to make version-specific tests clear
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
|
||||
version_above_ten
|
||||
-------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
-- CREATE a cstore_fdw table, fill with some data --
|
||||
CREATE TABLE cstore_truncate_test (a int, b int) USING cstore_tableam;
|
||||
CREATE TABLE cstore_truncate_test_second (a int, b int) USING cstore_tableam;
|
||||
-- COMPRESSED
|
||||
CREATE TABLE cstore_truncate_test_compressed (a int, b int) USING cstore_tableam;
|
||||
CREATE TABLE cstore_truncate_test_regular (a int, b int);
|
||||
SELECT count(*) AS cstore_data_files_before_truncate FROM cstore.cstore_data_files \gset
|
||||
INSERT INTO cstore_truncate_test select a, a from generate_series(1, 10) a;
|
||||
set cstore.compression = 'pglz';
|
||||
INSERT INTO cstore_truncate_test_compressed select a, a from generate_series(1, 10) a;
|
||||
INSERT INTO cstore_truncate_test_compressed select a, a from generate_series(1, 10) a;
|
||||
set cstore.compression to default;
|
||||
-- query rows
|
||||
SELECT * FROM cstore_truncate_test;
|
||||
a | b
|
||||
----+----
|
||||
1 | 1
|
||||
2 | 2
|
||||
3 | 3
|
||||
4 | 4
|
||||
5 | 5
|
||||
6 | 6
|
||||
7 | 7
|
||||
8 | 8
|
||||
9 | 9
|
||||
10 | 10
|
||||
(10 rows)
|
||||
|
||||
TRUNCATE TABLE cstore_truncate_test;
|
||||
SELECT * FROM cstore_truncate_test;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
SELECT COUNT(*) from cstore_truncate_test;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM cstore_truncate_test_compressed;
|
||||
count
|
||||
-------
|
||||
20
|
||||
(1 row)
|
||||
|
||||
TRUNCATE TABLE cstore_truncate_test_compressed;
|
||||
SELECT count(*) FROM cstore_truncate_test_compressed;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT pg_relation_size('cstore_truncate_test_compressed');
|
||||
pg_relation_size
|
||||
------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
INSERT INTO cstore_truncate_test select a, a from generate_series(1, 10) a;
|
||||
INSERT INTO cstore_truncate_test_regular select a, a from generate_series(10, 20) a;
|
||||
INSERT INTO cstore_truncate_test_second select a, a from generate_series(20, 30) a;
|
||||
SELECT * from cstore_truncate_test;
|
||||
a | b
|
||||
----+----
|
||||
1 | 1
|
||||
2 | 2
|
||||
3 | 3
|
||||
4 | 4
|
||||
5 | 5
|
||||
6 | 6
|
||||
7 | 7
|
||||
8 | 8
|
||||
9 | 9
|
||||
10 | 10
|
||||
(10 rows)
|
||||
|
||||
SELECT * from cstore_truncate_test_second;
|
||||
a | b
|
||||
----+----
|
||||
20 | 20
|
||||
21 | 21
|
||||
22 | 22
|
||||
23 | 23
|
||||
24 | 24
|
||||
25 | 25
|
||||
26 | 26
|
||||
27 | 27
|
||||
28 | 28
|
||||
29 | 29
|
||||
30 | 30
|
||||
(11 rows)
|
||||
|
||||
SELECT * from cstore_truncate_test_regular;
|
||||
a | b
|
||||
----+----
|
||||
10 | 10
|
||||
11 | 11
|
||||
12 | 12
|
||||
13 | 13
|
||||
14 | 14
|
||||
15 | 15
|
||||
16 | 16
|
||||
17 | 17
|
||||
18 | 18
|
||||
19 | 19
|
||||
20 | 20
|
||||
(11 rows)
|
||||
|
||||
-- make sure multi truncate works
|
||||
-- notice that the same table might be repeated
|
||||
TRUNCATE TABLE cstore_truncate_test,
|
||||
cstore_truncate_test_regular,
|
||||
cstore_truncate_test_second,
|
||||
cstore_truncate_test;
|
||||
SELECT * from cstore_truncate_test;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
SELECT * from cstore_truncate_test_second;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
SELECT * from cstore_truncate_test_regular;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
-- test if truncate on empty table works
|
||||
TRUNCATE TABLE cstore_truncate_test;
|
||||
SELECT * from cstore_truncate_test;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
-- make sure TRUNATE deletes metadata for old relfilenode
|
||||
SELECT :cstore_data_files_before_truncate - count(*) FROM cstore.cstore_data_files;
|
||||
?column?
|
||||
----------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- test if truncation in the same transaction that created the table works properly
|
||||
BEGIN;
|
||||
CREATE TABLE cstore_same_transaction_truncate(a int) USING cstore_tableam;
|
||||
INSERT INTO cstore_same_transaction_truncate SELECT * FROM generate_series(1, 100);
|
||||
TRUNCATE cstore_same_transaction_truncate;
|
||||
INSERT INTO cstore_same_transaction_truncate SELECT * FROM generate_series(20, 23);
|
||||
COMMIT;
|
||||
-- should output "1" for the newly created relation
|
||||
SELECT count(*) - :cstore_data_files_before_truncate FROM cstore.cstore_data_files;
|
||||
?column?
|
||||
----------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM cstore_same_transaction_truncate;
|
||||
a
|
||||
----
|
||||
20
|
||||
21
|
||||
22
|
||||
23
|
||||
(4 rows)
|
||||
|
||||
DROP TABLE cstore_same_transaction_truncate;
|
||||
-- test if a cached truncate from a pl/pgsql function works
|
||||
CREATE FUNCTION cstore_truncate_test_regular_func() RETURNS void AS $$
|
||||
BEGIN
|
||||
INSERT INTO cstore_truncate_test_regular select a, a from generate_series(1, 10) a;
|
||||
TRUNCATE TABLE cstore_truncate_test_regular;
|
||||
END;$$
|
||||
LANGUAGE plpgsql;
|
||||
SELECT cstore_truncate_test_regular_func();
|
||||
cstore_truncate_test_regular_func
|
||||
-----------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- the cached plans are used stating from the second call
|
||||
SELECT cstore_truncate_test_regular_func();
|
||||
cstore_truncate_test_regular_func
|
||||
-----------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP FUNCTION cstore_truncate_test_regular_func();
|
||||
DROP TABLE cstore_truncate_test, cstore_truncate_test_second;
|
||||
DROP TABLE cstore_truncate_test_regular;
|
||||
DROP TABLE cstore_truncate_test_compressed;
|
||||
-- test truncate with schema
|
||||
CREATE SCHEMA truncate_schema;
|
||||
-- COMPRESSED
|
||||
CREATE TABLE truncate_schema.truncate_tbl (id int) USING cstore_tableam;
|
||||
set cstore.compression = 'pglz';
|
||||
INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100);
|
||||
set cstore.compression to default;
|
||||
SELECT COUNT(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
100
|
||||
(1 row)
|
||||
|
||||
TRUNCATE TABLE truncate_schema.truncate_tbl;
|
||||
SELECT COUNT(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
set cstore.compression = 'pglz';
|
||||
INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100);
|
||||
set cstore.compression to default;
|
||||
-- create a user that can not truncate
|
||||
CREATE USER truncate_user;
|
||||
GRANT USAGE ON SCHEMA truncate_schema TO truncate_user;
|
||||
GRANT SELECT ON TABLE truncate_schema.truncate_tbl TO truncate_user;
|
||||
REVOKE TRUNCATE ON TABLE truncate_schema.truncate_tbl FROM truncate_user;
|
||||
SELECT current_user \gset
|
||||
\c - truncate_user
|
||||
-- verify truncate command fails and check number of rows
|
||||
SELECT count(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
100
|
||||
(1 row)
|
||||
|
||||
TRUNCATE TABLE truncate_schema.truncate_tbl;
|
||||
ERROR: permission denied for table truncate_tbl
|
||||
SELECT count(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
100
|
||||
(1 row)
|
||||
|
||||
-- switch to super user, grant truncate to truncate_user
|
||||
\c - :current_user
|
||||
GRANT TRUNCATE ON TABLE truncate_schema.truncate_tbl TO truncate_user;
|
||||
-- verify truncate_user can truncate now
|
||||
\c - truncate_user
|
||||
SELECT count(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
100
|
||||
(1 row)
|
||||
|
||||
TRUNCATE TABLE truncate_schema.truncate_tbl;
|
||||
SELECT count(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
\c - :current_user
|
||||
-- cleanup
|
||||
DROP SCHEMA truncate_schema CASCADE;
|
||||
NOTICE: drop cascades to table truncate_schema.truncate_tbl
|
||||
DROP USER truncate_user;
|
|
@ -0,0 +1,262 @@
|
|||
--
|
||||
-- Test the TRUNCATE TABLE command for cstore_fdw tables.
|
||||
--
|
||||
-- print whether we're using version > 10 to make version-specific tests clear
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
|
||||
version_above_ten
|
||||
-------------------
|
||||
f
|
||||
(1 row)
|
||||
|
||||
-- Check that files for the automatically managed table exist in the
|
||||
-- cstore_fdw/{databaseoid} directory.
|
||||
SELECT count(*) FROM (
|
||||
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
|
||||
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
|
||||
) AS q1) AS q2;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- CREATE a cstore_fdw table, fill with some data --
|
||||
CREATE FOREIGN TABLE cstore_truncate_test (a int, b int) SERVER cstore_server;
|
||||
CREATE FOREIGN TABLE cstore_truncate_test_second (a int, b int) SERVER cstore_server;
|
||||
CREATE FOREIGN TABLE cstore_truncate_test_compressed (a int, b int) SERVER cstore_server OPTIONS (compression 'pglz');
|
||||
CREATE TABLE cstore_truncate_test_regular (a int, b int);
|
||||
INSERT INTO cstore_truncate_test select a, a from generate_series(1, 10) a;
|
||||
INSERT INTO cstore_truncate_test_compressed select a, a from generate_series(1, 10) a;
|
||||
INSERT INTO cstore_truncate_test_compressed select a, a from generate_series(1, 10) a;
|
||||
-- query rows
|
||||
SELECT * FROM cstore_truncate_test;
|
||||
a | b
|
||||
----+----
|
||||
1 | 1
|
||||
2 | 2
|
||||
3 | 3
|
||||
4 | 4
|
||||
5 | 5
|
||||
6 | 6
|
||||
7 | 7
|
||||
8 | 8
|
||||
9 | 9
|
||||
10 | 10
|
||||
(10 rows)
|
||||
|
||||
TRUNCATE TABLE cstore_truncate_test;
|
||||
SELECT * FROM cstore_truncate_test;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
SELECT COUNT(*) from cstore_truncate_test;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM cstore_truncate_test_compressed;
|
||||
count
|
||||
-------
|
||||
20
|
||||
(1 row)
|
||||
|
||||
TRUNCATE TABLE cstore_truncate_test_compressed;
|
||||
SELECT count(*) FROM cstore_truncate_test_compressed;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT cstore_table_size('cstore_truncate_test_compressed');
|
||||
cstore_table_size
|
||||
-------------------
|
||||
26
|
||||
(1 row)
|
||||
|
||||
-- make sure data files still present
|
||||
SELECT count(*) FROM (
|
||||
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
|
||||
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
|
||||
) AS q1) AS q2;
|
||||
count
|
||||
-------
|
||||
6
|
||||
(1 row)
|
||||
|
||||
INSERT INTO cstore_truncate_test select a, a from generate_series(1, 10) a;
|
||||
INSERT INTO cstore_truncate_test_regular select a, a from generate_series(10, 20) a;
|
||||
INSERT INTO cstore_truncate_test_second select a, a from generate_series(20, 30) a;
|
||||
SELECT * from cstore_truncate_test;
|
||||
a | b
|
||||
----+----
|
||||
1 | 1
|
||||
2 | 2
|
||||
3 | 3
|
||||
4 | 4
|
||||
5 | 5
|
||||
6 | 6
|
||||
7 | 7
|
||||
8 | 8
|
||||
9 | 9
|
||||
10 | 10
|
||||
(10 rows)
|
||||
|
||||
SELECT * from cstore_truncate_test_second;
|
||||
a | b
|
||||
----+----
|
||||
20 | 20
|
||||
21 | 21
|
||||
22 | 22
|
||||
23 | 23
|
||||
24 | 24
|
||||
25 | 25
|
||||
26 | 26
|
||||
27 | 27
|
||||
28 | 28
|
||||
29 | 29
|
||||
30 | 30
|
||||
(11 rows)
|
||||
|
||||
SELECT * from cstore_truncate_test_regular;
|
||||
a | b
|
||||
----+----
|
||||
10 | 10
|
||||
11 | 11
|
||||
12 | 12
|
||||
13 | 13
|
||||
14 | 14
|
||||
15 | 15
|
||||
16 | 16
|
||||
17 | 17
|
||||
18 | 18
|
||||
19 | 19
|
||||
20 | 20
|
||||
(11 rows)
|
||||
|
||||
-- make sure multi truncate works
|
||||
-- notice that the same table might be repeated
|
||||
TRUNCATE TABLE cstore_truncate_test,
|
||||
cstore_truncate_test_regular,
|
||||
cstore_truncate_test_second,
|
||||
cstore_truncate_test;
|
||||
SELECT * from cstore_truncate_test;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
SELECT * from cstore_truncate_test_second;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
SELECT * from cstore_truncate_test_regular;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
-- test if truncate on empty table works
|
||||
TRUNCATE TABLE cstore_truncate_test;
|
||||
SELECT * from cstore_truncate_test;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
-- test if a cached truncate from a pl/pgsql function works
|
||||
CREATE FUNCTION cstore_truncate_test_regular_func() RETURNS void AS $$
|
||||
BEGIN
|
||||
INSERT INTO cstore_truncate_test_regular select a, a from generate_series(1, 10) a;
|
||||
TRUNCATE TABLE cstore_truncate_test_regular;
|
||||
END;$$
|
||||
LANGUAGE plpgsql;
|
||||
SELECT cstore_truncate_test_regular_func();
|
||||
cstore_truncate_test_regular_func
|
||||
-----------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- the cached plans are used stating from the second call
|
||||
SELECT cstore_truncate_test_regular_func();
|
||||
cstore_truncate_test_regular_func
|
||||
-----------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP FUNCTION cstore_truncate_test_regular_func();
|
||||
DROP FOREIGN TABLE cstore_truncate_test, cstore_truncate_test_second;
|
||||
DROP TABLE cstore_truncate_test_regular;
|
||||
DROP FOREIGN TABLE cstore_truncate_test_compressed;
|
||||
-- test truncate with schema
|
||||
CREATE SCHEMA truncate_schema;
|
||||
CREATE FOREIGN TABLE truncate_schema.truncate_tbl (id int) SERVER cstore_server OPTIONS(compression 'pglz');
|
||||
INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100);
|
||||
SELECT COUNT(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
100
|
||||
(1 row)
|
||||
|
||||
TRUNCATE TABLE truncate_schema.truncate_tbl;
|
||||
SELECT COUNT(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100);
|
||||
-- create a user that can not truncate
|
||||
CREATE USER truncate_user;
|
||||
GRANT USAGE ON SCHEMA truncate_schema TO truncate_user;
|
||||
GRANT SELECT ON TABLE truncate_schema.truncate_tbl TO truncate_user;
|
||||
REVOKE TRUNCATE ON TABLE truncate_schema.truncate_tbl FROM truncate_user;
|
||||
SELECT current_user \gset
|
||||
\c - truncate_user
|
||||
-- verify truncate command fails and check number of rows
|
||||
SELECT count(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
100
|
||||
(1 row)
|
||||
|
||||
TRUNCATE TABLE truncate_schema.truncate_tbl;
|
||||
ERROR: permission denied for relation truncate_tbl
|
||||
SELECT count(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
100
|
||||
(1 row)
|
||||
|
||||
-- switch to super user, grant truncate to truncate_user
|
||||
\c - :current_user
|
||||
GRANT TRUNCATE ON TABLE truncate_schema.truncate_tbl TO truncate_user;
|
||||
-- verify truncate_user can truncate now
|
||||
\c - truncate_user
|
||||
SELECT count(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
100
|
||||
(1 row)
|
||||
|
||||
TRUNCATE TABLE truncate_schema.truncate_tbl;
|
||||
SELECT count(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
\c - :current_user
|
||||
-- cleanup
|
||||
DROP SCHEMA truncate_schema CASCADE;
|
||||
NOTICE: drop cascades to foreign table truncate_schema.truncate_tbl
|
||||
DROP USER truncate_user;
|
||||
-- verify files are removed
|
||||
SELECT count(*) FROM (
|
||||
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
|
||||
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
|
||||
) AS q1) AS q2;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
|
@ -0,0 +1,234 @@
|
|||
SELECT count(*) AS columnar_table_count FROM cstore.cstore_data_files \gset
|
||||
CREATE TABLE t(a int, b int) USING cstore_tableam;
|
||||
SELECT count(*) FROM cstore.cstore_stripes a, pg_class b WHERE a.relfilenode=b.relfilenode AND b.relname='t';
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
INSERT INTO t SELECT i, i * i FROM generate_series(1, 10) i;
|
||||
INSERT INTO t SELECT i, i * i FROM generate_series(11, 20) i;
|
||||
INSERT INTO t SELECT i, i * i FROM generate_series(21, 30) i;
|
||||
SELECT sum(a), sum(b) FROM t;
|
||||
sum | sum
|
||||
-----+------
|
||||
465 | 9455
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM cstore.cstore_stripes a, pg_class b WHERE a.relfilenode=b.relfilenode AND b.relname='t';
|
||||
count
|
||||
-------
|
||||
3
|
||||
(1 row)
|
||||
|
||||
-- vacuum full should merge stripes together
|
||||
VACUUM FULL t;
|
||||
SELECT sum(a), sum(b) FROM t;
|
||||
sum | sum
|
||||
-----+------
|
||||
465 | 9455
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM cstore.cstore_stripes a, pg_class b WHERE a.relfilenode=b.relfilenode AND b.relname='t';
|
||||
count
|
||||
-------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- test the case when all data cannot fit into a single stripe
|
||||
SELECT alter_cstore_table_set('t', stripe_row_count => 1000);
|
||||
alter_cstore_table_set
|
||||
------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO t SELECT i, 2 * i FROM generate_series(1,2500) i;
|
||||
SELECT sum(a), sum(b) FROM t;
|
||||
sum | sum
|
||||
---------+---------
|
||||
3126715 | 6261955
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM cstore.cstore_stripes a, pg_class b WHERE a.relfilenode=b.relfilenode AND b.relname='t';
|
||||
count
|
||||
-------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
VACUUM FULL t;
|
||||
SELECT sum(a), sum(b) FROM t;
|
||||
sum | sum
|
||||
---------+---------
|
||||
3126715 | 6261955
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM cstore.cstore_stripes a, pg_class b WHERE a.relfilenode=b.relfilenode AND b.relname='t';
|
||||
count
|
||||
-------
|
||||
3
|
||||
(1 row)
|
||||
|
||||
-- VACUUM FULL doesn't reclaim dropped columns, but converts them to NULLs
|
||||
ALTER TABLE t DROP COLUMN a;
|
||||
SELECT stripe, attr, block, minimum_value IS NULL, maximum_value IS NULL FROM cstore.cstore_skipnodes a, pg_class b WHERE a.relfilenode=b.relfilenode AND b.relname='t' ORDER BY 1, 2, 3;
|
||||
stripe | attr | block | ?column? | ?column?
|
||||
--------+------+-------+----------+----------
|
||||
1 | 1 | 0 | f | f
|
||||
1 | 2 | 0 | f | f
|
||||
2 | 1 | 0 | f | f
|
||||
2 | 2 | 0 | f | f
|
||||
3 | 1 | 0 | f | f
|
||||
3 | 2 | 0 | f | f
|
||||
(6 rows)
|
||||
|
||||
VACUUM FULL t;
|
||||
SELECT stripe, attr, block, minimum_value IS NULL, maximum_value IS NULL FROM cstore.cstore_skipnodes a, pg_class b WHERE a.relfilenode=b.relfilenode AND b.relname='t' ORDER BY 1, 2, 3;
|
||||
stripe | attr | block | ?column? | ?column?
|
||||
--------+------+-------+----------+----------
|
||||
1 | 1 | 0 | t | t
|
||||
1 | 2 | 0 | f | f
|
||||
2 | 1 | 0 | t | t
|
||||
2 | 2 | 0 | f | f
|
||||
3 | 1 | 0 | t | t
|
||||
3 | 2 | 0 | f | f
|
||||
(6 rows)
|
||||
|
||||
-- Make sure we cleaned-up the transient table metadata after VACUUM FULL commands
|
||||
SELECT count(*) - :columnar_table_count FROM cstore.cstore_data_files;
|
||||
?column?
|
||||
----------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- do this in a transaction so concurrent autovacuum doesn't interfere with results
|
||||
BEGIN;
|
||||
SAVEPOINT s1;
|
||||
SELECT count(*) FROM t;
|
||||
count
|
||||
-------
|
||||
2530
|
||||
(1 row)
|
||||
|
||||
SELECT pg_size_pretty(pg_relation_size('t'));
|
||||
pg_size_pretty
|
||||
----------------
|
||||
32 kB
|
||||
(1 row)
|
||||
|
||||
INSERT INTO t SELECT i FROM generate_series(1, 10000) i;
|
||||
SELECT pg_size_pretty(pg_relation_size('t'));
|
||||
pg_size_pretty
|
||||
----------------
|
||||
112 kB
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM t;
|
||||
count
|
||||
-------
|
||||
12530
|
||||
(1 row)
|
||||
|
||||
ROLLBACK TO SAVEPOINT s1;
|
||||
-- not truncated by VACUUM or autovacuum yet (being in transaction ensures this),
|
||||
-- so relation size should be same as before.
|
||||
SELECT pg_size_pretty(pg_relation_size('t'));
|
||||
pg_size_pretty
|
||||
----------------
|
||||
112 kB
|
||||
(1 row)
|
||||
|
||||
COMMIT;
|
||||
-- vacuum should truncate the relation to the usable space
|
||||
VACUUM VERBOSE t;
|
||||
INFO: statistics for "t":
|
||||
total file size: 114688, total data size: 10754
|
||||
total row count: 2530, stripe count: 3, average rows per stripe: 843
|
||||
block count: 3, containing data for dropped columns: 0, none compressed: 3, pglz compressed: 0
|
||||
|
||||
INFO: "t": truncated 14 to 4 pages
|
||||
DETAIL: CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s
|
||||
SELECT pg_size_pretty(pg_relation_size('t'));
|
||||
pg_size_pretty
|
||||
----------------
|
||||
32 kB
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM t;
|
||||
count
|
||||
-------
|
||||
2530
|
||||
(1 row)
|
||||
|
||||
-- add some stripes with different compression types and create some gaps,
|
||||
-- then vacuum to print stats
|
||||
BEGIN;
|
||||
SELECT alter_cstore_table_set('t',
|
||||
block_row_count => 1000,
|
||||
stripe_row_count => 2000,
|
||||
compression => 'pglz');
|
||||
alter_cstore_table_set
|
||||
------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SAVEPOINT s1;
|
||||
INSERT INTO t SELECT i FROM generate_series(1, 1500) i;
|
||||
ROLLBACK TO SAVEPOINT s1;
|
||||
INSERT INTO t SELECT i / 5 FROM generate_series(1, 1500) i;
|
||||
SELECT alter_cstore_table_set('t', compression => 'none');
|
||||
alter_cstore_table_set
|
||||
------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SAVEPOINT s2;
|
||||
INSERT INTO t SELECT i FROM generate_series(1, 1500) i;
|
||||
ROLLBACK TO SAVEPOINT s2;
|
||||
INSERT INTO t SELECT i / 5 FROM generate_series(1, 1500) i;
|
||||
COMMIT;
|
||||
VACUUM VERBOSE t;
|
||||
INFO: statistics for "t":
|
||||
total file size: 49152, total data size: 18808
|
||||
total row count: 5530, stripe count: 5, average rows per stripe: 1106
|
||||
block count: 7, containing data for dropped columns: 0, none compressed: 5, pglz compressed: 2
|
||||
|
||||
SELECT count(*) FROM t;
|
||||
count
|
||||
-------
|
||||
5530
|
||||
(1 row)
|
||||
|
||||
-- check that we report blocks with data for dropped columns
|
||||
ALTER TABLE t ADD COLUMN c int;
|
||||
INSERT INTO t SELECT 1, i / 5 FROM generate_series(1, 1500) i;
|
||||
ALTER TABLE t DROP COLUMN c;
|
||||
VACUUM VERBOSE t;
|
||||
INFO: statistics for "t":
|
||||
total file size: 65536, total data size: 31372
|
||||
total row count: 7030, stripe count: 6, average rows per stripe: 1171
|
||||
block count: 11, containing data for dropped columns: 2, none compressed: 9, pglz compressed: 2
|
||||
|
||||
-- vacuum full should remove blocks for dropped columns
|
||||
-- note that, a block will be stored in non-compressed for if compression
|
||||
-- doesn't reduce its size.
|
||||
SELECT alter_cstore_table_set('t', compression => 'pglz');
|
||||
alter_cstore_table_set
|
||||
------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
VACUUM FULL t;
|
||||
VACUUM VERBOSE t;
|
||||
INFO: statistics for "t":
|
||||
total file size: 49152, total data size: 15728
|
||||
total row count: 7030, stripe count: 4, average rows per stripe: 1757
|
||||
block count: 8, containing data for dropped columns: 0, none compressed: 2, pglz compressed: 6
|
||||
|
||||
DROP TABLE t;
|
||||
-- Make sure we cleaned the metadata for t too
|
||||
SELECT count(*) - :columnar_table_count FROM cstore.cstore_data_files;
|
||||
?column?
|
||||
----------
|
||||
0
|
||||
(1 row)
|
||||
|
|
@ -0,0 +1,68 @@
|
|||
Parsed test spec with 2 sessions
|
||||
|
||||
starting permutation: s1-insert s1-begin s1-insert s2-vacuum s1-commit s2-select
|
||||
step s1-insert:
|
||||
INSERT INTO test_vacuum_vs_insert SELECT i, 2 * i FROM generate_series(1, 3) i;
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-insert:
|
||||
INSERT INTO test_vacuum_vs_insert SELECT i, 2 * i FROM generate_series(1, 3) i;
|
||||
|
||||
s2: INFO: statistics for "test_vacuum_vs_insert":
|
||||
total file size: 24576, total data size: 26
|
||||
total row count: 3, stripe count: 1, average rows per stripe: 3
|
||||
block count: 2, containing data for dropped columns: 0, none compressed: 2, pglz compressed: 0
|
||||
|
||||
s2: INFO: "test_vacuum_vs_insert": stopping truncate due to conflicting lock request
|
||||
step s2-vacuum:
|
||||
VACUUM VERBOSE test_vacuum_vs_insert;
|
||||
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-select:
|
||||
SELECT * FROM test_vacuum_vs_insert;
|
||||
|
||||
a b
|
||||
|
||||
1 2
|
||||
2 4
|
||||
3 6
|
||||
1 2
|
||||
2 4
|
||||
3 6
|
||||
|
||||
starting permutation: s1-insert s1-begin s1-insert s2-vacuum-full s1-commit s2-select
|
||||
step s1-insert:
|
||||
INSERT INTO test_vacuum_vs_insert SELECT i, 2 * i FROM generate_series(1, 3) i;
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-insert:
|
||||
INSERT INTO test_vacuum_vs_insert SELECT i, 2 * i FROM generate_series(1, 3) i;
|
||||
|
||||
step s2-vacuum-full:
|
||||
VACUUM FULL VERBOSE test_vacuum_vs_insert;
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
s2: INFO: vacuuming "public.test_vacuum_vs_insert"
|
||||
s2: INFO: "test_vacuum_vs_insert": found 0 removable, 6 nonremovable row versions in 3 pages
|
||||
DETAIL: 0 dead row versions cannot be removed yet.
|
||||
CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s.
|
||||
step s2-vacuum-full: <... completed>
|
||||
step s2-select:
|
||||
SELECT * FROM test_vacuum_vs_insert;
|
||||
|
||||
a b
|
||||
|
||||
1 2
|
||||
2 4
|
||||
3 6
|
||||
1 2
|
||||
2 4
|
||||
3 6
|
|
@ -0,0 +1,142 @@
|
|||
Parsed test spec with 2 sessions
|
||||
|
||||
starting permutation: s1-begin s2-begin s1-insert s2-insert s1-select s2-select s1-commit s2-commit s1-select
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-insert:
|
||||
INSERT INTO test_insert_concurrency SELECT i, 2 * i FROM generate_series(1, 3) i;
|
||||
|
||||
step s2-insert:
|
||||
INSERT INTO test_insert_concurrency SELECT i, 2 * i FROM generate_series(4, 6) i;
|
||||
|
||||
step s1-select:
|
||||
SELECT * FROM test_insert_concurrency ORDER BY a;
|
||||
|
||||
a b
|
||||
|
||||
1 2
|
||||
2 4
|
||||
3 6
|
||||
step s2-select:
|
||||
SELECT * FROM test_insert_concurrency ORDER BY a;
|
||||
|
||||
a b
|
||||
|
||||
4 8
|
||||
5 10
|
||||
6 12
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
step s1-select:
|
||||
SELECT * FROM test_insert_concurrency ORDER BY a;
|
||||
|
||||
a b
|
||||
|
||||
1 2
|
||||
2 4
|
||||
3 6
|
||||
4 8
|
||||
5 10
|
||||
6 12
|
||||
|
||||
starting permutation: s1-begin s2-begin s1-copy s2-insert s1-select s2-select s1-commit s2-commit s1-select
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-copy:
|
||||
COPY test_insert_concurrency(a) FROM PROGRAM 'seq 11 13';
|
||||
|
||||
step s2-insert:
|
||||
INSERT INTO test_insert_concurrency SELECT i, 2 * i FROM generate_series(4, 6) i;
|
||||
|
||||
step s1-select:
|
||||
SELECT * FROM test_insert_concurrency ORDER BY a;
|
||||
|
||||
a b
|
||||
|
||||
11
|
||||
12
|
||||
13
|
||||
step s2-select:
|
||||
SELECT * FROM test_insert_concurrency ORDER BY a;
|
||||
|
||||
a b
|
||||
|
||||
4 8
|
||||
5 10
|
||||
6 12
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
step s1-select:
|
||||
SELECT * FROM test_insert_concurrency ORDER BY a;
|
||||
|
||||
a b
|
||||
|
||||
4 8
|
||||
5 10
|
||||
6 12
|
||||
11
|
||||
12
|
||||
13
|
||||
|
||||
starting permutation: s1-begin s2-begin s2-insert s1-copy s1-select s2-select s1-commit s2-commit s1-select
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-insert:
|
||||
INSERT INTO test_insert_concurrency SELECT i, 2 * i FROM generate_series(4, 6) i;
|
||||
|
||||
step s1-copy:
|
||||
COPY test_insert_concurrency(a) FROM PROGRAM 'seq 11 13';
|
||||
|
||||
step s1-select:
|
||||
SELECT * FROM test_insert_concurrency ORDER BY a;
|
||||
|
||||
a b
|
||||
|
||||
11
|
||||
12
|
||||
13
|
||||
step s2-select:
|
||||
SELECT * FROM test_insert_concurrency ORDER BY a;
|
||||
|
||||
a b
|
||||
|
||||
4 8
|
||||
5 10
|
||||
6 12
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
step s1-select:
|
||||
SELECT * FROM test_insert_concurrency ORDER BY a;
|
||||
|
||||
a b
|
||||
|
||||
4 8
|
||||
5 10
|
||||
6 12
|
||||
11
|
||||
12
|
||||
13
|
|
@ -0,0 +1,6 @@
|
|||
Parsed test spec with 1 sessions
|
||||
|
||||
starting permutation: s1a
|
||||
step s1a:
|
||||
CREATE EXTENSION cstore_fdw;
|
||||
|
|
@ -0,0 +1,2 @@
|
|||
-- Install cstore_fdw
|
||||
CREATE EXTENSION cstore_fdw;
|
|
@ -0,0 +1,178 @@
|
|||
--
|
||||
-- Testing ALTER TABLE on cstore_fdw tables.
|
||||
--
|
||||
CREATE FOREIGN TABLE test_alter_table (a int, b int, c int) SERVER cstore_server;
|
||||
WITH sample_data AS (VALUES
|
||||
(1, 2, 3),
|
||||
(4, 5, 6),
|
||||
(7, 8, 9)
|
||||
)
|
||||
INSERT INTO test_alter_table SELECT * FROM sample_data;
|
||||
-- drop a column
|
||||
ALTER FOREIGN TABLE test_alter_table DROP COLUMN a;
|
||||
-- test analyze
|
||||
ANALYZE test_alter_table;
|
||||
-- verify select queries run as expected
|
||||
SELECT * FROM test_alter_table;
|
||||
b | c
|
||||
---+---
|
||||
2 | 3
|
||||
5 | 6
|
||||
8 | 9
|
||||
(3 rows)
|
||||
|
||||
SELECT a FROM test_alter_table;
|
||||
ERROR: column "a" does not exist
|
||||
LINE 1: SELECT a FROM test_alter_table;
|
||||
^
|
||||
SELECT b FROM test_alter_table;
|
||||
b
|
||||
---
|
||||
2
|
||||
5
|
||||
8
|
||||
(3 rows)
|
||||
|
||||
-- verify insert runs as expected
|
||||
INSERT INTO test_alter_table (SELECT 3, 5, 8);
|
||||
ERROR: INSERT has more expressions than target columns
|
||||
LINE 1: INSERT INTO test_alter_table (SELECT 3, 5, 8);
|
||||
^
|
||||
INSERT INTO test_alter_table (SELECT 5, 8);
|
||||
-- add a column with no defaults
|
||||
ALTER FOREIGN TABLE test_alter_table ADD COLUMN d int;
|
||||
SELECT * FROM test_alter_table;
|
||||
b | c | d
|
||||
---+---+---
|
||||
2 | 3 |
|
||||
5 | 6 |
|
||||
8 | 9 |
|
||||
5 | 8 |
|
||||
(4 rows)
|
||||
|
||||
INSERT INTO test_alter_table (SELECT 3, 5, 8);
|
||||
SELECT * FROM test_alter_table;
|
||||
b | c | d
|
||||
---+---+---
|
||||
2 | 3 |
|
||||
5 | 6 |
|
||||
8 | 9 |
|
||||
5 | 8 |
|
||||
3 | 5 | 8
|
||||
(5 rows)
|
||||
|
||||
-- add a fixed-length column with default value
|
||||
ALTER FOREIGN TABLE test_alter_table ADD COLUMN e int default 3;
|
||||
SELECT * from test_alter_table;
|
||||
b | c | d | e
|
||||
---+---+---+---
|
||||
2 | 3 | | 3
|
||||
5 | 6 | | 3
|
||||
8 | 9 | | 3
|
||||
5 | 8 | | 3
|
||||
3 | 5 | 8 | 3
|
||||
(5 rows)
|
||||
|
||||
INSERT INTO test_alter_table (SELECT 1, 2, 4, 8);
|
||||
SELECT * from test_alter_table;
|
||||
b | c | d | e
|
||||
---+---+---+---
|
||||
2 | 3 | | 3
|
||||
5 | 6 | | 3
|
||||
8 | 9 | | 3
|
||||
5 | 8 | | 3
|
||||
3 | 5 | 8 | 3
|
||||
1 | 2 | 4 | 8
|
||||
(6 rows)
|
||||
|
||||
-- add a variable-length column with default value
|
||||
ALTER FOREIGN TABLE test_alter_table ADD COLUMN f text DEFAULT 'TEXT ME';
|
||||
SELECT * from test_alter_table;
|
||||
b | c | d | e | f
|
||||
---+---+---+---+---------
|
||||
2 | 3 | | 3 | TEXT ME
|
||||
5 | 6 | | 3 | TEXT ME
|
||||
8 | 9 | | 3 | TEXT ME
|
||||
5 | 8 | | 3 | TEXT ME
|
||||
3 | 5 | 8 | 3 | TEXT ME
|
||||
1 | 2 | 4 | 8 | TEXT ME
|
||||
(6 rows)
|
||||
|
||||
INSERT INTO test_alter_table (SELECT 1, 2, 4, 8, 'ABCDEF');
|
||||
SELECT * from test_alter_table;
|
||||
b | c | d | e | f
|
||||
---+---+---+---+---------
|
||||
2 | 3 | | 3 | TEXT ME
|
||||
5 | 6 | | 3 | TEXT ME
|
||||
8 | 9 | | 3 | TEXT ME
|
||||
5 | 8 | | 3 | TEXT ME
|
||||
3 | 5 | 8 | 3 | TEXT ME
|
||||
1 | 2 | 4 | 8 | TEXT ME
|
||||
1 | 2 | 4 | 8 | ABCDEF
|
||||
(7 rows)
|
||||
|
||||
-- drop couple of columns
|
||||
ALTER FOREIGN TABLE test_alter_table DROP COLUMN c;
|
||||
ALTER FOREIGN TABLE test_alter_table DROP COLUMN e;
|
||||
ANALYZE test_alter_table;
|
||||
SELECT * from test_alter_table;
|
||||
b | d | f
|
||||
---+---+---------
|
||||
2 | | TEXT ME
|
||||
5 | | TEXT ME
|
||||
8 | | TEXT ME
|
||||
5 | | TEXT ME
|
||||
3 | 8 | TEXT ME
|
||||
1 | 4 | TEXT ME
|
||||
1 | 4 | ABCDEF
|
||||
(7 rows)
|
||||
|
||||
SELECT count(*) from test_alter_table;
|
||||
count
|
||||
-------
|
||||
7
|
||||
(1 row)
|
||||
|
||||
SELECT count(t.*) from test_alter_table t;
|
||||
count
|
||||
-------
|
||||
7
|
||||
(1 row)
|
||||
|
||||
-- unsupported default values
|
||||
ALTER FOREIGN TABLE test_alter_table ADD COLUMN g boolean DEFAULT isfinite(current_date);
|
||||
ALTER FOREIGN TABLE test_alter_table ADD COLUMN h DATE DEFAULT current_date;
|
||||
SELECT * FROM test_alter_table;
|
||||
ERROR: unsupported default value for column "g"
|
||||
HINT: Expression is either mutable or does not evaluate to constant value
|
||||
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN g DROP DEFAULT;
|
||||
SELECT * FROM test_alter_table;
|
||||
ERROR: unsupported default value for column "h"
|
||||
HINT: Expression is either mutable or does not evaluate to constant value
|
||||
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN h DROP DEFAULT;
|
||||
ANALYZE test_alter_table;
|
||||
SELECT * FROM test_alter_table;
|
||||
b | d | f | g | h
|
||||
---+---+---------+---+---
|
||||
2 | | TEXT ME | |
|
||||
5 | | TEXT ME | |
|
||||
8 | | TEXT ME | |
|
||||
5 | | TEXT ME | |
|
||||
3 | 8 | TEXT ME | |
|
||||
1 | 4 | TEXT ME | |
|
||||
1 | 4 | ABCDEF | |
|
||||
(7 rows)
|
||||
|
||||
-- unsupported type change
|
||||
ALTER FOREIGN TABLE test_alter_table ADD COLUMN i int;
|
||||
ALTER FOREIGN TABLE test_alter_table ADD COLUMN j float;
|
||||
ALTER FOREIGN TABLE test_alter_table ADD COLUMN k text;
|
||||
-- this is valid type change
|
||||
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN i TYPE float;
|
||||
-- this is not valid
|
||||
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN j TYPE int;
|
||||
ERROR: Column j cannot be cast automatically to type pg_catalog.int4
|
||||
-- text / varchar conversion is valid both ways
|
||||
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN k TYPE varchar(20);
|
||||
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN k TYPE text;
|
||||
DROP FOREIGN TABLE test_alter_table;
|
|
@ -0,0 +1,19 @@
|
|||
--
|
||||
-- Test the ANALYZE command for cstore_fdw tables.
|
||||
--
|
||||
-- ANALYZE uncompressed table
|
||||
ANALYZE contestant;
|
||||
SELECT count(*) FROM pg_stats WHERE tablename='contestant';
|
||||
count
|
||||
-------
|
||||
6
|
||||
(1 row)
|
||||
|
||||
-- ANALYZE compressed table
|
||||
ANALYZE contestant_compressed;
|
||||
SELECT count(*) FROM pg_stats WHERE tablename='contestant_compressed';
|
||||
count
|
||||
-------
|
||||
6
|
||||
(1 row)
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
DROP FOREIGN TABLE collation_block_filtering_test;
|
||||
DROP FOREIGN TABLE test_block_filtering;
|
||||
DROP FOREIGN TABLE test_null_values;
|
||||
DROP FOREIGN TABLE test_other_types;
|
||||
DROP FOREIGN TABLE test_range_types;
|
||||
DROP FOREIGN TABLE test_enum_and_composite_types;
|
||||
DROP TYPE composite_type;
|
||||
DROP TYPE enum_type;
|
||||
DROP FOREIGN TABLE test_datetime_types;
|
||||
DROP FOREIGN TABLE test_array_types;
|
|
@ -0,0 +1,55 @@
|
|||
--
|
||||
-- Tests the different DROP commands for cstore_fdw tables.
|
||||
--
|
||||
-- DROP FOREIGN TABL
|
||||
-- DROP SCHEMA
|
||||
-- DROP EXTENSION
|
||||
-- DROP DATABASE
|
||||
--
|
||||
-- Note that travis does not create
|
||||
-- cstore_fdw extension in default database (postgres). This has caused
|
||||
-- different behavior between travis tests and local tests. Thus
|
||||
-- 'postgres' directory is excluded from comparison to have the same result.
|
||||
-- store postgres database oid
|
||||
SELECT oid postgres_oid FROM pg_database WHERE datname = 'postgres' \gset
|
||||
SELECT count(*) AS cstore_data_files_before_drop FROM cstore.cstore_data_files \gset
|
||||
-- DROP cstore_fdw tables
|
||||
DROP FOREIGN TABLE contestant;
|
||||
DROP FOREIGN TABLE contestant_compressed;
|
||||
-- make sure DROP deletes metadata
|
||||
SELECT :cstore_data_files_before_drop - count(*) FROM cstore.cstore_data_files;
|
||||
?column?
|
||||
----------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
-- Create a cstore_fdw table under a schema and drop it.
|
||||
CREATE SCHEMA test_schema;
|
||||
CREATE FOREIGN TABLE test_schema.test_table(data int) SERVER cstore_server;
|
||||
SELECT count(*) AS cstore_data_files_before_drop FROM cstore.cstore_data_files \gset
|
||||
DROP SCHEMA test_schema CASCADE;
|
||||
NOTICE: drop cascades to foreign table test_schema.test_table
|
||||
SELECT :cstore_data_files_before_drop - count(*) FROM cstore.cstore_data_files;
|
||||
?column?
|
||||
----------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT current_database() datname \gset
|
||||
CREATE DATABASE db_to_drop;
|
||||
\c db_to_drop
|
||||
CREATE EXTENSION cstore_fdw;
|
||||
CREATE SERVER cstore_server FOREIGN DATA WRAPPER cstore_fdw;
|
||||
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database() \gset
|
||||
CREATE FOREIGN TABLE test_table(data int) SERVER cstore_server;
|
||||
DROP EXTENSION cstore_fdw CASCADE;
|
||||
NOTICE: drop cascades to 2 other objects
|
||||
DETAIL: drop cascades to server cstore_server
|
||||
drop cascades to foreign table test_table
|
||||
-- test database drop
|
||||
CREATE EXTENSION cstore_fdw;
|
||||
CREATE SERVER cstore_server FOREIGN DATA WRAPPER cstore_fdw;
|
||||
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database() \gset
|
||||
CREATE FOREIGN TABLE test_table(data int) SERVER cstore_server;
|
||||
\c :datname
|
||||
DROP DATABASE db_to_drop;
|
|
@ -0,0 +1,18 @@
|
|||
--
|
||||
-- Test utility functions for cstore_fdw tables.
|
||||
--
|
||||
CREATE FOREIGN TABLE empty_table (a int) SERVER cstore_server;
|
||||
CREATE FOREIGN TABLE table_with_data (a int) SERVER cstore_server;
|
||||
CREATE TABLE non_cstore_table (a int);
|
||||
COPY table_with_data FROM STDIN;
|
||||
SELECT cstore_table_size('empty_table') < cstore_table_size('table_with_data');
|
||||
?column?
|
||||
----------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT cstore_table_size('non_cstore_table');
|
||||
ERROR: relation is not a cstore table
|
||||
DROP FOREIGN TABLE empty_table;
|
||||
DROP FOREIGN TABLE table_with_data;
|
||||
DROP TABLE non_cstore_table;
|
|
@ -0,0 +1,88 @@
|
|||
--
|
||||
-- Testing insert on cstore_fdw tables.
|
||||
--
|
||||
CREATE FOREIGN TABLE test_insert_command (a int) SERVER cstore_server;
|
||||
-- test single row inserts fail
|
||||
select count(*) from test_insert_command;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
insert into test_insert_command values(1);
|
||||
ERROR: operation is not supported
|
||||
select count(*) from test_insert_command;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
insert into test_insert_command default values;
|
||||
ERROR: operation is not supported
|
||||
select count(*) from test_insert_command;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- test inserting from another table succeed
|
||||
CREATE TABLE test_insert_command_data (a int);
|
||||
select count(*) from test_insert_command_data;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
insert into test_insert_command_data values(1);
|
||||
select count(*) from test_insert_command_data;
|
||||
count
|
||||
-------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
insert into test_insert_command select * from test_insert_command_data;
|
||||
select count(*) from test_insert_command;
|
||||
count
|
||||
-------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
drop table test_insert_command_data;
|
||||
drop foreign table test_insert_command;
|
||||
-- test long attribute value insertion
|
||||
-- create sufficiently long text so that data is stored in toast
|
||||
CREATE TABLE test_long_text AS
|
||||
SELECT a as int_val, string_agg(random()::text, '') as text_val
|
||||
FROM generate_series(1, 10) a, generate_series(1, 1000) b
|
||||
GROUP BY a ORDER BY a;
|
||||
-- store hash values of text for later comparison
|
||||
CREATE TABLE test_long_text_hash AS
|
||||
SELECT int_val, md5(text_val) AS hash
|
||||
FROM test_long_text;
|
||||
CREATE FOREIGN TABLE test_cstore_long_text(int_val int, text_val text)
|
||||
SERVER cstore_server;
|
||||
-- store long text in cstore table
|
||||
INSERT INTO test_cstore_long_text SELECT * FROM test_long_text;
|
||||
-- drop source table to remove original text from toast
|
||||
DROP TABLE test_long_text;
|
||||
-- check if text data is still available in cstore table
|
||||
-- by comparing previously stored hash.
|
||||
SELECT a.int_val
|
||||
FROM test_long_text_hash a, test_cstore_long_text c
|
||||
WHERE a.int_val = c.int_val AND a.hash = md5(c.text_val);
|
||||
int_val
|
||||
---------
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
5
|
||||
6
|
||||
7
|
||||
8
|
||||
9
|
||||
10
|
||||
(10 rows)
|
||||
|
||||
DROP TABLE test_long_text_hash;
|
||||
DROP FOREIGN TABLE test_cstore_long_text;
|
|
@ -0,0 +1,105 @@
|
|||
--
|
||||
-- Test querying cstore_fdw tables.
|
||||
--
|
||||
-- Settings to make the result deterministic
|
||||
SET datestyle = "ISO, YMD";
|
||||
-- Query uncompressed data
|
||||
SELECT count(*) FROM contestant;
|
||||
count
|
||||
-------
|
||||
8
|
||||
(1 row)
|
||||
|
||||
SELECT avg(rating), stddev_samp(rating) FROM contestant;
|
||||
avg | stddev_samp
|
||||
-----------------------+------------------
|
||||
2344.3750000000000000 | 433.746119785032
|
||||
(1 row)
|
||||
|
||||
SELECT country, avg(rating) FROM contestant WHERE rating > 2200
|
||||
GROUP BY country ORDER BY country;
|
||||
country | avg
|
||||
---------+-----------------------
|
||||
XA | 2203.0000000000000000
|
||||
XB | 2610.5000000000000000
|
||||
XC | 2236.0000000000000000
|
||||
XD | 3090.0000000000000000
|
||||
(4 rows)
|
||||
|
||||
SELECT * FROM contestant ORDER BY handle;
|
||||
handle | birthdate | rating | percentile | country | achievements
|
||||
--------+------------+--------+------------+---------+--------------
|
||||
a | 1990-01-10 | 2090 | 97.1 | XA | {a}
|
||||
b | 1990-11-01 | 2203 | 98.1 | XA | {a,b}
|
||||
c | 1988-11-01 | 2907 | 99.4 | XB | {w,y}
|
||||
d | 1985-05-05 | 2314 | 98.3 | XB | {}
|
||||
e | 1995-05-05 | 2236 | 98.2 | XC | {a}
|
||||
f | 1983-04-02 | 3090 | 99.6 | XD | {a,b,c,y}
|
||||
g | 1991-12-13 | 1803 | 85.1 | XD | {a,c}
|
||||
h | 1987-10-26 | 2112 | 95.4 | XD | {w,a}
|
||||
(8 rows)
|
||||
|
||||
-- Query compressed data
|
||||
SELECT count(*) FROM contestant_compressed;
|
||||
count
|
||||
-------
|
||||
8
|
||||
(1 row)
|
||||
|
||||
SELECT avg(rating), stddev_samp(rating) FROM contestant_compressed;
|
||||
avg | stddev_samp
|
||||
-----------------------+------------------
|
||||
2344.3750000000000000 | 433.746119785032
|
||||
(1 row)
|
||||
|
||||
SELECT country, avg(rating) FROM contestant_compressed WHERE rating > 2200
|
||||
GROUP BY country ORDER BY country;
|
||||
country | avg
|
||||
---------+-----------------------
|
||||
XA | 2203.0000000000000000
|
||||
XB | 2610.5000000000000000
|
||||
XC | 2236.0000000000000000
|
||||
XD | 3090.0000000000000000
|
||||
(4 rows)
|
||||
|
||||
SELECT * FROM contestant_compressed ORDER BY handle;
|
||||
handle | birthdate | rating | percentile | country | achievements
|
||||
--------+------------+--------+------------+---------+--------------
|
||||
a | 1990-01-10 | 2090 | 97.1 | XA | {a}
|
||||
b | 1990-11-01 | 2203 | 98.1 | XA | {a,b}
|
||||
c | 1988-11-01 | 2907 | 99.4 | XB | {w,y}
|
||||
d | 1985-05-05 | 2314 | 98.3 | XB | {}
|
||||
e | 1995-05-05 | 2236 | 98.2 | XC | {a}
|
||||
f | 1983-04-02 | 3090 | 99.6 | XD | {a,b,c,y}
|
||||
g | 1991-12-13 | 1803 | 85.1 | XD | {a,c}
|
||||
h | 1987-10-26 | 2112 | 95.4 | XD | {w,a}
|
||||
(8 rows)
|
||||
|
||||
-- Verify that we handle whole-row references correctly
|
||||
SELECT to_json(v) FROM contestant v ORDER BY rating LIMIT 1;
|
||||
to_json
|
||||
------------------------------------------------------------------------------------------------------------------
|
||||
{"handle":"g","birthdate":"1991-12-13","rating":1803,"percentile":85.1,"country":"XD ","achievements":["a","c"]}
|
||||
(1 row)
|
||||
|
||||
-- Test variables used in expressions
|
||||
CREATE FOREIGN TABLE union_first (a int, b int) SERVER cstore_server;
|
||||
CREATE FOREIGN TABLE union_second (a int, b int) SERVER cstore_server;
|
||||
INSERT INTO union_first SELECT a, a FROM generate_series(1, 5) a;
|
||||
INSERT INTO union_second SELECT a, a FROM generate_series(11, 15) a;
|
||||
(SELECT a*1, b FROM union_first) union all (SELECT a*1, b FROM union_second);
|
||||
?column? | b
|
||||
----------+----
|
||||
1 | 1
|
||||
2 | 2
|
||||
3 | 3
|
||||
4 | 4
|
||||
5 | 5
|
||||
11 | 11
|
||||
12 | 12
|
||||
13 | 13
|
||||
14 | 14
|
||||
15 | 15
|
||||
(10 rows)
|
||||
|
||||
DROP FOREIGN TABLE union_first, union_second;
|
|
@ -0,0 +1,77 @@
|
|||
--
|
||||
-- Testing we handle rollbacks properly
|
||||
--
|
||||
CREATE FOREIGN TABLE t(a int, b int) SERVER cstore_server;
|
||||
BEGIN;
|
||||
INSERT INTO t SELECT i, i+1 FROM generate_series(1, 10) i;
|
||||
ROLLBACK;
|
||||
SELECT count(*) FROM t;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- check stripe metadata also have been rolled-back
|
||||
SELECT count(*) FROM cstore.cstore_stripes a, pg_class b
|
||||
WHERE a.relfilenode = b.relfilenode AND b.relname = 't';
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
INSERT INTO t SELECT i, i+1 FROM generate_series(1, 10) i;
|
||||
SELECT count(*) FROM t;
|
||||
count
|
||||
-------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM cstore.cstore_stripes a, pg_class b
|
||||
WHERE a.relfilenode = b.relfilenode AND b.relname = 't';
|
||||
count
|
||||
-------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- savepoint rollback
|
||||
BEGIN;
|
||||
SAVEPOINT s0;
|
||||
INSERT INTO t SELECT i, i+1 FROM generate_series(1, 10) i;
|
||||
SAVEPOINT s1;
|
||||
INSERT INTO t SELECT i, i+1 FROM generate_series(1, 10) i;
|
||||
SELECT count(*) FROM t;
|
||||
count
|
||||
-------
|
||||
30
|
||||
(1 row)
|
||||
|
||||
ROLLBACK TO SAVEPOINT s1;
|
||||
SELECT count(*) FROM t;
|
||||
count
|
||||
-------
|
||||
20
|
||||
(1 row)
|
||||
|
||||
ROLLBACK TO SAVEPOINT s0;
|
||||
SELECT count(*) FROM t;
|
||||
count
|
||||
-------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
INSERT INTO t SELECT i, i+1 FROM generate_series(1, 10) i;
|
||||
COMMIT;
|
||||
SELECT count(*) FROM t;
|
||||
count
|
||||
-------
|
||||
20
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM cstore.cstore_stripes a, pg_class b
|
||||
WHERE a.relfilenode = b.relfilenode AND b.relname = 't';
|
||||
count
|
||||
-------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
DROP FOREIGN TABLE t;
|
|
@ -0,0 +1,263 @@
|
|||
--
|
||||
-- Test the TRUNCATE TABLE command for cstore_fdw tables.
|
||||
--
|
||||
-- print whether we're using version > 10 to make version-specific tests clear
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
|
||||
version_above_ten
|
||||
-------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
-- CREATE a cstore_fdw table, fill with some data --
|
||||
CREATE FOREIGN TABLE cstore_truncate_test (a int, b int) SERVER cstore_server;
|
||||
CREATE FOREIGN TABLE cstore_truncate_test_second (a int, b int) SERVER cstore_server;
|
||||
CREATE FOREIGN TABLE cstore_truncate_test_compressed (a int, b int) SERVER cstore_server OPTIONS (compression 'pglz');
|
||||
CREATE TABLE cstore_truncate_test_regular (a int, b int);
|
||||
SELECT count(*) AS cstore_data_files_before_truncate FROM cstore.cstore_data_files \gset
|
||||
INSERT INTO cstore_truncate_test select a, a from generate_series(1, 10) a;
|
||||
INSERT INTO cstore_truncate_test_compressed select a, a from generate_series(1, 10) a;
|
||||
INSERT INTO cstore_truncate_test_compressed select a, a from generate_series(1, 10) a;
|
||||
-- query rows
|
||||
SELECT * FROM cstore_truncate_test;
|
||||
a | b
|
||||
----+----
|
||||
1 | 1
|
||||
2 | 2
|
||||
3 | 3
|
||||
4 | 4
|
||||
5 | 5
|
||||
6 | 6
|
||||
7 | 7
|
||||
8 | 8
|
||||
9 | 9
|
||||
10 | 10
|
||||
(10 rows)
|
||||
|
||||
TRUNCATE TABLE cstore_truncate_test;
|
||||
SELECT * FROM cstore_truncate_test;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
SELECT COUNT(*) from cstore_truncate_test;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM cstore_truncate_test_compressed;
|
||||
count
|
||||
-------
|
||||
20
|
||||
(1 row)
|
||||
|
||||
TRUNCATE TABLE cstore_truncate_test_compressed;
|
||||
SELECT count(*) FROM cstore_truncate_test_compressed;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT cstore_table_size('cstore_truncate_test_compressed');
|
||||
cstore_table_size
|
||||
-------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
INSERT INTO cstore_truncate_test select a, a from generate_series(1, 10) a;
|
||||
INSERT INTO cstore_truncate_test_regular select a, a from generate_series(10, 20) a;
|
||||
INSERT INTO cstore_truncate_test_second select a, a from generate_series(20, 30) a;
|
||||
SELECT * from cstore_truncate_test;
|
||||
a | b
|
||||
----+----
|
||||
1 | 1
|
||||
2 | 2
|
||||
3 | 3
|
||||
4 | 4
|
||||
5 | 5
|
||||
6 | 6
|
||||
7 | 7
|
||||
8 | 8
|
||||
9 | 9
|
||||
10 | 10
|
||||
(10 rows)
|
||||
|
||||
SELECT * from cstore_truncate_test_second;
|
||||
a | b
|
||||
----+----
|
||||
20 | 20
|
||||
21 | 21
|
||||
22 | 22
|
||||
23 | 23
|
||||
24 | 24
|
||||
25 | 25
|
||||
26 | 26
|
||||
27 | 27
|
||||
28 | 28
|
||||
29 | 29
|
||||
30 | 30
|
||||
(11 rows)
|
||||
|
||||
SELECT * from cstore_truncate_test_regular;
|
||||
a | b
|
||||
----+----
|
||||
10 | 10
|
||||
11 | 11
|
||||
12 | 12
|
||||
13 | 13
|
||||
14 | 14
|
||||
15 | 15
|
||||
16 | 16
|
||||
17 | 17
|
||||
18 | 18
|
||||
19 | 19
|
||||
20 | 20
|
||||
(11 rows)
|
||||
|
||||
-- make sure multi truncate works
|
||||
-- notice that the same table might be repeated
|
||||
TRUNCATE TABLE cstore_truncate_test,
|
||||
cstore_truncate_test_regular,
|
||||
cstore_truncate_test_second,
|
||||
cstore_truncate_test;
|
||||
SELECT * from cstore_truncate_test;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
SELECT * from cstore_truncate_test_second;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
SELECT * from cstore_truncate_test_regular;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
-- test if truncate on empty table works
|
||||
TRUNCATE TABLE cstore_truncate_test;
|
||||
SELECT * from cstore_truncate_test;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
-- make sure TRUNATE deletes metadata for old relfilenode
|
||||
SELECT :cstore_data_files_before_truncate - count(*) FROM cstore.cstore_data_files;
|
||||
?column?
|
||||
----------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- test if truncation in the same transaction that created the table works properly
|
||||
BEGIN;
|
||||
CREATE FOREIGN TABLE cstore_same_transaction_truncate(a int) SERVER cstore_server;
|
||||
INSERT INTO cstore_same_transaction_truncate SELECT * FROM generate_series(1, 100);
|
||||
TRUNCATE cstore_same_transaction_truncate;
|
||||
INSERT INTO cstore_same_transaction_truncate SELECT * FROM generate_series(20, 23);
|
||||
COMMIT;
|
||||
-- should output "1" for the newly created relation
|
||||
SELECT count(*) - :cstore_data_files_before_truncate FROM cstore.cstore_data_files;
|
||||
?column?
|
||||
----------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM cstore_same_transaction_truncate;
|
||||
a
|
||||
----
|
||||
20
|
||||
21
|
||||
22
|
||||
23
|
||||
(4 rows)
|
||||
|
||||
DROP FOREIGN TABLE cstore_same_transaction_truncate;
|
||||
-- test if a cached truncate from a pl/pgsql function works
|
||||
CREATE FUNCTION cstore_truncate_test_regular_func() RETURNS void AS $$
|
||||
BEGIN
|
||||
INSERT INTO cstore_truncate_test_regular select a, a from generate_series(1, 10) a;
|
||||
TRUNCATE TABLE cstore_truncate_test_regular;
|
||||
END;$$
|
||||
LANGUAGE plpgsql;
|
||||
SELECT cstore_truncate_test_regular_func();
|
||||
cstore_truncate_test_regular_func
|
||||
-----------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- the cached plans are used stating from the second call
|
||||
SELECT cstore_truncate_test_regular_func();
|
||||
cstore_truncate_test_regular_func
|
||||
-----------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP FUNCTION cstore_truncate_test_regular_func();
|
||||
DROP FOREIGN TABLE cstore_truncate_test, cstore_truncate_test_second;
|
||||
DROP TABLE cstore_truncate_test_regular;
|
||||
DROP FOREIGN TABLE cstore_truncate_test_compressed;
|
||||
-- test truncate with schema
|
||||
CREATE SCHEMA truncate_schema;
|
||||
CREATE FOREIGN TABLE truncate_schema.truncate_tbl (id int) SERVER cstore_server OPTIONS(compression 'pglz');
|
||||
INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100);
|
||||
SELECT COUNT(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
100
|
||||
(1 row)
|
||||
|
||||
TRUNCATE TABLE truncate_schema.truncate_tbl;
|
||||
SELECT COUNT(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100);
|
||||
-- create a user that can not truncate
|
||||
CREATE USER truncate_user;
|
||||
GRANT USAGE ON SCHEMA truncate_schema TO truncate_user;
|
||||
GRANT SELECT ON TABLE truncate_schema.truncate_tbl TO truncate_user;
|
||||
REVOKE TRUNCATE ON TABLE truncate_schema.truncate_tbl FROM truncate_user;
|
||||
SELECT current_user \gset
|
||||
\c - truncate_user
|
||||
-- verify truncate command fails and check number of rows
|
||||
SELECT count(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
100
|
||||
(1 row)
|
||||
|
||||
TRUNCATE TABLE truncate_schema.truncate_tbl;
|
||||
ERROR: permission denied for table truncate_tbl
|
||||
SELECT count(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
100
|
||||
(1 row)
|
||||
|
||||
-- switch to super user, grant truncate to truncate_user
|
||||
\c - :current_user
|
||||
GRANT TRUNCATE ON TABLE truncate_schema.truncate_tbl TO truncate_user;
|
||||
-- verify truncate_user can truncate now
|
||||
\c - truncate_user
|
||||
SELECT count(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
100
|
||||
(1 row)
|
||||
|
||||
TRUNCATE TABLE truncate_schema.truncate_tbl;
|
||||
SELECT count(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
\c - :current_user
|
||||
-- cleanup
|
||||
DROP SCHEMA truncate_schema CASCADE;
|
||||
NOTICE: drop cascades to foreign table truncate_schema.truncate_tbl
|
||||
DROP USER truncate_user;
|
|
@ -0,0 +1,262 @@
|
|||
--
|
||||
-- Test the TRUNCATE TABLE command for cstore_fdw tables.
|
||||
--
|
||||
-- print whether we're using version > 10 to make version-specific tests clear
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
|
||||
version_above_ten
|
||||
-------------------
|
||||
f
|
||||
(1 row)
|
||||
|
||||
-- Check that files for the automatically managed table exist in the
|
||||
-- cstore_fdw/{databaseoid} directory.
|
||||
SELECT count(*) FROM (
|
||||
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
|
||||
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
|
||||
) AS q1) AS q2;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- CREATE a cstore_fdw table, fill with some data --
|
||||
CREATE FOREIGN TABLE cstore_truncate_test (a int, b int) SERVER cstore_server;
|
||||
CREATE FOREIGN TABLE cstore_truncate_test_second (a int, b int) SERVER cstore_server;
|
||||
CREATE FOREIGN TABLE cstore_truncate_test_compressed (a int, b int) SERVER cstore_server OPTIONS (compression 'pglz');
|
||||
CREATE TABLE cstore_truncate_test_regular (a int, b int);
|
||||
INSERT INTO cstore_truncate_test select a, a from generate_series(1, 10) a;
|
||||
INSERT INTO cstore_truncate_test_compressed select a, a from generate_series(1, 10) a;
|
||||
INSERT INTO cstore_truncate_test_compressed select a, a from generate_series(1, 10) a;
|
||||
-- query rows
|
||||
SELECT * FROM cstore_truncate_test;
|
||||
a | b
|
||||
----+----
|
||||
1 | 1
|
||||
2 | 2
|
||||
3 | 3
|
||||
4 | 4
|
||||
5 | 5
|
||||
6 | 6
|
||||
7 | 7
|
||||
8 | 8
|
||||
9 | 9
|
||||
10 | 10
|
||||
(10 rows)
|
||||
|
||||
TRUNCATE TABLE cstore_truncate_test;
|
||||
SELECT * FROM cstore_truncate_test;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
SELECT COUNT(*) from cstore_truncate_test;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM cstore_truncate_test_compressed;
|
||||
count
|
||||
-------
|
||||
20
|
||||
(1 row)
|
||||
|
||||
TRUNCATE TABLE cstore_truncate_test_compressed;
|
||||
SELECT count(*) FROM cstore_truncate_test_compressed;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT cstore_table_size('cstore_truncate_test_compressed');
|
||||
cstore_table_size
|
||||
-------------------
|
||||
26
|
||||
(1 row)
|
||||
|
||||
-- make sure data files still present
|
||||
SELECT count(*) FROM (
|
||||
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
|
||||
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
|
||||
) AS q1) AS q2;
|
||||
count
|
||||
-------
|
||||
6
|
||||
(1 row)
|
||||
|
||||
INSERT INTO cstore_truncate_test select a, a from generate_series(1, 10) a;
|
||||
INSERT INTO cstore_truncate_test_regular select a, a from generate_series(10, 20) a;
|
||||
INSERT INTO cstore_truncate_test_second select a, a from generate_series(20, 30) a;
|
||||
SELECT * from cstore_truncate_test;
|
||||
a | b
|
||||
----+----
|
||||
1 | 1
|
||||
2 | 2
|
||||
3 | 3
|
||||
4 | 4
|
||||
5 | 5
|
||||
6 | 6
|
||||
7 | 7
|
||||
8 | 8
|
||||
9 | 9
|
||||
10 | 10
|
||||
(10 rows)
|
||||
|
||||
SELECT * from cstore_truncate_test_second;
|
||||
a | b
|
||||
----+----
|
||||
20 | 20
|
||||
21 | 21
|
||||
22 | 22
|
||||
23 | 23
|
||||
24 | 24
|
||||
25 | 25
|
||||
26 | 26
|
||||
27 | 27
|
||||
28 | 28
|
||||
29 | 29
|
||||
30 | 30
|
||||
(11 rows)
|
||||
|
||||
SELECT * from cstore_truncate_test_regular;
|
||||
a | b
|
||||
----+----
|
||||
10 | 10
|
||||
11 | 11
|
||||
12 | 12
|
||||
13 | 13
|
||||
14 | 14
|
||||
15 | 15
|
||||
16 | 16
|
||||
17 | 17
|
||||
18 | 18
|
||||
19 | 19
|
||||
20 | 20
|
||||
(11 rows)
|
||||
|
||||
-- make sure multi truncate works
|
||||
-- notice that the same table might be repeated
|
||||
TRUNCATE TABLE cstore_truncate_test,
|
||||
cstore_truncate_test_regular,
|
||||
cstore_truncate_test_second,
|
||||
cstore_truncate_test;
|
||||
SELECT * from cstore_truncate_test;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
SELECT * from cstore_truncate_test_second;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
SELECT * from cstore_truncate_test_regular;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
-- test if truncate on empty table works
|
||||
TRUNCATE TABLE cstore_truncate_test;
|
||||
SELECT * from cstore_truncate_test;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
-- test if a cached truncate from a pl/pgsql function works
|
||||
CREATE FUNCTION cstore_truncate_test_regular_func() RETURNS void AS $$
|
||||
BEGIN
|
||||
INSERT INTO cstore_truncate_test_regular select a, a from generate_series(1, 10) a;
|
||||
TRUNCATE TABLE cstore_truncate_test_regular;
|
||||
END;$$
|
||||
LANGUAGE plpgsql;
|
||||
SELECT cstore_truncate_test_regular_func();
|
||||
cstore_truncate_test_regular_func
|
||||
-----------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- the cached plans are used stating from the second call
|
||||
SELECT cstore_truncate_test_regular_func();
|
||||
cstore_truncate_test_regular_func
|
||||
-----------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP FUNCTION cstore_truncate_test_regular_func();
|
||||
DROP FOREIGN TABLE cstore_truncate_test, cstore_truncate_test_second;
|
||||
DROP TABLE cstore_truncate_test_regular;
|
||||
DROP FOREIGN TABLE cstore_truncate_test_compressed;
|
||||
-- test truncate with schema
|
||||
CREATE SCHEMA truncate_schema;
|
||||
CREATE FOREIGN TABLE truncate_schema.truncate_tbl (id int) SERVER cstore_server OPTIONS(compression 'pglz');
|
||||
INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100);
|
||||
SELECT COUNT(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
100
|
||||
(1 row)
|
||||
|
||||
TRUNCATE TABLE truncate_schema.truncate_tbl;
|
||||
SELECT COUNT(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100);
|
||||
-- create a user that can not truncate
|
||||
CREATE USER truncate_user;
|
||||
GRANT USAGE ON SCHEMA truncate_schema TO truncate_user;
|
||||
GRANT SELECT ON TABLE truncate_schema.truncate_tbl TO truncate_user;
|
||||
REVOKE TRUNCATE ON TABLE truncate_schema.truncate_tbl FROM truncate_user;
|
||||
SELECT current_user \gset
|
||||
\c - truncate_user
|
||||
-- verify truncate command fails and check number of rows
|
||||
SELECT count(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
100
|
||||
(1 row)
|
||||
|
||||
TRUNCATE TABLE truncate_schema.truncate_tbl;
|
||||
ERROR: permission denied for relation truncate_tbl
|
||||
SELECT count(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
100
|
||||
(1 row)
|
||||
|
||||
-- switch to super user, grant truncate to truncate_user
|
||||
\c - :current_user
|
||||
GRANT TRUNCATE ON TABLE truncate_schema.truncate_tbl TO truncate_user;
|
||||
-- verify truncate_user can truncate now
|
||||
\c - truncate_user
|
||||
SELECT count(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
100
|
||||
(1 row)
|
||||
|
||||
TRUNCATE TABLE truncate_schema.truncate_tbl;
|
||||
SELECT count(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
\c - :current_user
|
||||
-- cleanup
|
||||
DROP SCHEMA truncate_schema CASCADE;
|
||||
NOTICE: drop cascades to foreign table truncate_schema.truncate_tbl
|
||||
DROP USER truncate_user;
|
||||
-- verify files are removed
|
||||
SELECT count(*) FROM (
|
||||
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
|
||||
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
|
||||
) AS q1) AS q2;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
|
@ -0,0 +1,73 @@
|
|||
--
|
||||
-- Test block filtering in cstore_fdw using min/max values in stripe skip lists.
|
||||
--
|
||||
|
||||
|
||||
--
|
||||
-- filtered_row_count returns number of rows filtered by the WHERE clause.
|
||||
-- If blocks get filtered by cstore_fdw, less rows are passed to WHERE
|
||||
-- clause, so this function should return a lower number.
|
||||
--
|
||||
CREATE OR REPLACE FUNCTION filtered_row_count (query text) RETURNS bigint AS
|
||||
$$
|
||||
DECLARE
|
||||
result bigint;
|
||||
rec text;
|
||||
BEGIN
|
||||
result := 0;
|
||||
|
||||
FOR rec IN EXECUTE 'EXPLAIN ANALYZE ' || query LOOP
|
||||
IF rec ~ '^\s+Rows Removed by Filter' then
|
||||
result := regexp_replace(rec, '[^0-9]*', '', 'g');
|
||||
END IF;
|
||||
END LOOP;
|
||||
|
||||
RETURN result;
|
||||
END;
|
||||
$$ LANGUAGE PLPGSQL;
|
||||
|
||||
|
||||
-- Create and load data
|
||||
-- block_row_count '1000', stripe_row_count '2000'
|
||||
set cstore.stripe_row_count = 2000;
|
||||
set cstore.block_row_count = 1000;
|
||||
CREATE TABLE test_block_filtering (a int)
|
||||
USING cstore_tableam;
|
||||
|
||||
COPY test_block_filtering FROM '@abs_srcdir@/data/block_filtering.csv' WITH CSV;
|
||||
|
||||
|
||||
-- Verify that filtered_row_count is less than 1000 for the following queries
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering');
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 200');
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a > 200');
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 9900');
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a > 9900');
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 0');
|
||||
|
||||
|
||||
-- Verify that filtered_row_count is less than 2000 for the following queries
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN 1 AND 10');
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN 990 AND 2010');
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN -10 AND 0');
|
||||
|
||||
|
||||
-- Load data for second time and verify that filtered_row_count is exactly twice as before
|
||||
COPY test_block_filtering FROM '@abs_srcdir@/data/block_filtering.csv' WITH CSV;
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 200');
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 0');
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN 990 AND 2010');
|
||||
|
||||
set cstore.stripe_row_count to default;
|
||||
set cstore.block_row_count to default;
|
||||
|
||||
-- Verify that we are fine with collations which use a different alphabet order
|
||||
CREATE TABLE collation_block_filtering_test(A text collate "da_DK")
|
||||
USING cstore_tableam;
|
||||
COPY collation_block_filtering_test FROM STDIN;
|
||||
A
|
||||
Å
|
||||
B
|
||||
\.
|
||||
|
||||
SELECT * FROM collation_block_filtering_test WHERE A > 'B';
|
|
@ -0,0 +1,17 @@
|
|||
--
|
||||
-- Test copying data from cstore_fdw tables.
|
||||
--
|
||||
CREATE TABLE test_contestant(handle TEXT, birthdate DATE, rating INT,
|
||||
percentile FLOAT, country CHAR(3), achievements TEXT[])
|
||||
USING cstore_tableam;
|
||||
|
||||
-- load table data from file
|
||||
COPY test_contestant FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;
|
||||
|
||||
-- export using COPY table TO ...
|
||||
COPY test_contestant TO STDOUT;
|
||||
|
||||
-- export using COPY (SELECT * FROM table) TO ...
|
||||
COPY (select * from test_contestant) TO STDOUT;
|
||||
|
||||
DROP TABLE test_contestant CASCADE;
|
|
@ -0,0 +1,20 @@
|
|||
--
|
||||
-- Test the CREATE statements related to cstore.
|
||||
--
|
||||
|
||||
|
||||
-- Create uncompressed table
|
||||
CREATE TABLE contestant (handle TEXT, birthdate DATE, rating INT,
|
||||
percentile FLOAT, country CHAR(3), achievements TEXT[])
|
||||
USING cstore_tableam;
|
||||
|
||||
|
||||
-- Create compressed table with automatically determined file path
|
||||
-- COMPRESSED
|
||||
CREATE TABLE contestant_compressed (handle TEXT, birthdate DATE, rating INT,
|
||||
percentile FLOAT, country CHAR(3), achievements TEXT[])
|
||||
USING cstore_tableam;
|
||||
|
||||
-- Test that querying an empty table works
|
||||
ANALYZE contestant;
|
||||
SELECT count(*) FROM contestant;
|
|
@ -0,0 +1,68 @@
|
|||
--
|
||||
-- Test loading and reading different data types to/from cstore_fdw foreign tables.
|
||||
--
|
||||
|
||||
|
||||
-- Settings to make the result deterministic
|
||||
SET datestyle = "ISO, YMD";
|
||||
SET timezone to 'GMT';
|
||||
SET intervalstyle TO 'POSTGRES_VERBOSE';
|
||||
|
||||
|
||||
-- Test array types
|
||||
CREATE TABLE test_array_types (int_array int[], bigint_array bigint[],
|
||||
text_array text[]) USING cstore_tableam;
|
||||
|
||||
COPY test_array_types FROM '@abs_srcdir@/data/array_types.csv' WITH CSV;
|
||||
|
||||
SELECT * FROM test_array_types;
|
||||
|
||||
|
||||
-- Test date/time types
|
||||
CREATE TABLE test_datetime_types (timestamp timestamp,
|
||||
timestamp_with_timezone timestamp with time zone, date date, time time,
|
||||
interval interval) USING cstore_tableam;
|
||||
|
||||
COPY test_datetime_types FROM '@abs_srcdir@/data/datetime_types.csv' WITH CSV;
|
||||
|
||||
SELECT * FROM test_datetime_types;
|
||||
|
||||
|
||||
-- Test enum and composite types
|
||||
CREATE TYPE enum_type AS ENUM ('a', 'b', 'c');
|
||||
CREATE TYPE composite_type AS (a int, b text);
|
||||
|
||||
CREATE TABLE test_enum_and_composite_types (enum enum_type,
|
||||
composite composite_type) USING cstore_tableam;
|
||||
|
||||
COPY test_enum_and_composite_types FROM
|
||||
'@abs_srcdir@/data/enum_and_composite_types.csv' WITH CSV;
|
||||
|
||||
SELECT * FROM test_enum_and_composite_types;
|
||||
|
||||
|
||||
-- Test range types
|
||||
CREATE TABLE test_range_types (int4range int4range, int8range int8range,
|
||||
numrange numrange, tsrange tsrange) USING cstore_tableam;
|
||||
|
||||
COPY test_range_types FROM '@abs_srcdir@/data/range_types.csv' WITH CSV;
|
||||
|
||||
SELECT * FROM test_range_types;
|
||||
|
||||
|
||||
-- Test other types
|
||||
CREATE TABLE test_other_types (bool boolean, bytea bytea, money money,
|
||||
inet inet, bitstring bit varying(5), uuid uuid, json json) USING cstore_tableam;
|
||||
|
||||
COPY test_other_types FROM '@abs_srcdir@/data/other_types.csv' WITH CSV;
|
||||
|
||||
SELECT * FROM test_other_types;
|
||||
|
||||
|
||||
-- Test null values
|
||||
CREATE TABLE test_null_values (a int, b int[], c composite_type)
|
||||
USING cstore_tableam;
|
||||
|
||||
COPY test_null_values FROM '@abs_srcdir@/data/null_values.csv' WITH CSV;
|
||||
|
||||
SELECT * FROM test_null_values;
|
|
@ -0,0 +1,46 @@
|
|||
--
|
||||
-- Test loading data into cstore_fdw tables.
|
||||
--
|
||||
|
||||
-- COPY with incorrect delimiter
|
||||
COPY contestant FROM '@abs_srcdir@/data/contestants.1.csv'
|
||||
WITH DELIMITER '|'; -- ERROR
|
||||
|
||||
-- COPY with invalid program
|
||||
COPY contestant FROM PROGRAM 'invalid_program' WITH CSV; -- ERROR
|
||||
|
||||
-- COPY into uncompressed table from file
|
||||
COPY contestant FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;
|
||||
|
||||
-- COPY into uncompressed table from program
|
||||
COPY contestant FROM PROGRAM 'cat @abs_srcdir@/data/contestants.2.csv' WITH CSV;
|
||||
|
||||
-- COPY into compressed table
|
||||
set cstore.compression = 'pglz';
|
||||
COPY contestant_compressed FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;
|
||||
|
||||
-- COPY into uncompressed table from program
|
||||
COPY contestant_compressed FROM PROGRAM 'cat @abs_srcdir@/data/contestants.2.csv'
|
||||
WITH CSV;
|
||||
set cstore.compression to default;
|
||||
|
||||
-- Test column list
|
||||
CREATE TABLE famous_constants (id int, name text, value real)
|
||||
USING cstore_tableam;
|
||||
COPY famous_constants (value, name, id) FROM STDIN WITH CSV;
|
||||
3.141,pi,1
|
||||
2.718,e,2
|
||||
0.577,gamma,3
|
||||
5.291e-11,bohr radius,4
|
||||
\.
|
||||
|
||||
COPY famous_constants (name, value) FROM STDIN WITH CSV;
|
||||
avagadro,6.022e23
|
||||
electron mass,9.109e-31
|
||||
proton mass,1.672e-27
|
||||
speed of light,2.997e8
|
||||
\.
|
||||
|
||||
SELECT * FROM famous_constants ORDER BY id, name;
|
||||
|
||||
DROP TABLE famous_constants;
|
|
@ -0,0 +1,69 @@
|
|||
--
|
||||
-- Test block filtering in cstore_fdw using min/max values in stripe skip lists.
|
||||
--
|
||||
|
||||
|
||||
--
|
||||
-- filtered_row_count returns number of rows filtered by the WHERE clause.
|
||||
-- If blocks get filtered by cstore_fdw, less rows are passed to WHERE
|
||||
-- clause, so this function should return a lower number.
|
||||
--
|
||||
CREATE OR REPLACE FUNCTION filtered_row_count (query text) RETURNS bigint AS
|
||||
$$
|
||||
DECLARE
|
||||
result bigint;
|
||||
rec text;
|
||||
BEGIN
|
||||
result := 0;
|
||||
|
||||
FOR rec IN EXECUTE 'EXPLAIN ANALYZE ' || query LOOP
|
||||
IF rec ~ '^\s+Rows Removed by Filter' then
|
||||
result := regexp_replace(rec, '[^0-9]*', '', 'g');
|
||||
END IF;
|
||||
END LOOP;
|
||||
|
||||
RETURN result;
|
||||
END;
|
||||
$$ LANGUAGE PLPGSQL;
|
||||
|
||||
|
||||
-- Create and load data
|
||||
CREATE FOREIGN TABLE test_block_filtering (a int)
|
||||
SERVER cstore_server
|
||||
OPTIONS(block_row_count '1000', stripe_row_count '2000');
|
||||
|
||||
COPY test_block_filtering FROM '@abs_srcdir@/data/block_filtering.csv' WITH CSV;
|
||||
|
||||
|
||||
-- Verify that filtered_row_count is less than 1000 for the following queries
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering');
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 200');
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a > 200');
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 9900');
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a > 9900');
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 0');
|
||||
|
||||
|
||||
-- Verify that filtered_row_count is less than 2000 for the following queries
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN 1 AND 10');
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN 990 AND 2010');
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN -10 AND 0');
|
||||
|
||||
|
||||
-- Load data for second time and verify that filtered_row_count is exactly twice as before
|
||||
COPY test_block_filtering FROM '@abs_srcdir@/data/block_filtering.csv' WITH CSV;
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 200');
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 0');
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN 990 AND 2010');
|
||||
|
||||
|
||||
-- Verify that we are fine with collations which use a different alphabet order
|
||||
CREATE FOREIGN TABLE collation_block_filtering_test(A text collate "da_DK")
|
||||
SERVER cstore_server;
|
||||
COPY collation_block_filtering_test FROM STDIN;
|
||||
A
|
||||
Å
|
||||
B
|
||||
\.
|
||||
|
||||
SELECT * FROM collation_block_filtering_test WHERE A > 'B';
|
|
@ -0,0 +1,17 @@
|
|||
--
|
||||
-- Test copying data from cstore_fdw tables.
|
||||
--
|
||||
CREATE FOREIGN TABLE test_contestant(handle TEXT, birthdate DATE, rating INT,
|
||||
percentile FLOAT, country CHAR(3), achievements TEXT[])
|
||||
SERVER cstore_server;
|
||||
|
||||
-- load table data from file
|
||||
COPY test_contestant FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;
|
||||
|
||||
-- export using COPY table TO ...
|
||||
COPY test_contestant TO STDOUT;
|
||||
|
||||
-- export using COPY (SELECT * FROM table) TO ...
|
||||
COPY (select * from test_contestant) TO STDOUT;
|
||||
|
||||
DROP FOREIGN TABLE test_contestant CASCADE;
|
|
@ -0,0 +1,39 @@
|
|||
--
|
||||
-- Test the CREATE statements related to cstore_fdw.
|
||||
--
|
||||
|
||||
CREATE SERVER cstore_server FOREIGN DATA WRAPPER cstore_fdw;
|
||||
|
||||
|
||||
-- Validator tests
|
||||
CREATE FOREIGN TABLE test_validator_invalid_option ()
|
||||
SERVER cstore_server
|
||||
OPTIONS(bad_option_name '1'); -- ERROR
|
||||
|
||||
CREATE FOREIGN TABLE test_validator_invalid_stripe_row_count ()
|
||||
SERVER cstore_server
|
||||
OPTIONS(stripe_row_count '0'); -- ERROR
|
||||
|
||||
CREATE FOREIGN TABLE test_validator_invalid_block_row_count ()
|
||||
SERVER cstore_server
|
||||
OPTIONS(block_row_count '0'); -- ERROR
|
||||
|
||||
CREATE FOREIGN TABLE test_validator_invalid_compression_type ()
|
||||
SERVER cstore_server
|
||||
OPTIONS(compression 'invalid_compression'); -- ERROR
|
||||
|
||||
-- Create uncompressed table
|
||||
CREATE FOREIGN TABLE contestant (handle TEXT, birthdate DATE, rating INT,
|
||||
percentile FLOAT, country CHAR(3), achievements TEXT[])
|
||||
SERVER cstore_server;
|
||||
|
||||
|
||||
-- Create compressed table with automatically determined file path
|
||||
CREATE FOREIGN TABLE contestant_compressed (handle TEXT, birthdate DATE, rating INT,
|
||||
percentile FLOAT, country CHAR(3), achievements TEXT[])
|
||||
SERVER cstore_server
|
||||
OPTIONS(compression 'pglz');
|
||||
|
||||
-- Test that querying an empty table works
|
||||
ANALYZE contestant;
|
||||
SELECT count(*) FROM contestant;
|
|
@ -0,0 +1,68 @@
|
|||
--
|
||||
-- Test loading and reading different data types to/from cstore_fdw foreign tables.
|
||||
--
|
||||
|
||||
|
||||
-- Settings to make the result deterministic
|
||||
SET datestyle = "ISO, YMD";
|
||||
SET timezone to 'GMT';
|
||||
SET intervalstyle TO 'POSTGRES_VERBOSE';
|
||||
|
||||
|
||||
-- Test array types
|
||||
CREATE FOREIGN TABLE test_array_types (int_array int[], bigint_array bigint[],
|
||||
text_array text[]) SERVER cstore_server;
|
||||
|
||||
COPY test_array_types FROM '@abs_srcdir@/data/array_types.csv' WITH CSV;
|
||||
|
||||
SELECT * FROM test_array_types;
|
||||
|
||||
|
||||
-- Test date/time types
|
||||
CREATE FOREIGN TABLE test_datetime_types (timestamp timestamp,
|
||||
timestamp_with_timezone timestamp with time zone, date date, time time,
|
||||
interval interval) SERVER cstore_server;
|
||||
|
||||
COPY test_datetime_types FROM '@abs_srcdir@/data/datetime_types.csv' WITH CSV;
|
||||
|
||||
SELECT * FROM test_datetime_types;
|
||||
|
||||
|
||||
-- Test enum and composite types
|
||||
CREATE TYPE enum_type AS ENUM ('a', 'b', 'c');
|
||||
CREATE TYPE composite_type AS (a int, b text);
|
||||
|
||||
CREATE FOREIGN TABLE test_enum_and_composite_types (enum enum_type,
|
||||
composite composite_type) SERVER cstore_server;
|
||||
|
||||
COPY test_enum_and_composite_types FROM
|
||||
'@abs_srcdir@/data/enum_and_composite_types.csv' WITH CSV;
|
||||
|
||||
SELECT * FROM test_enum_and_composite_types;
|
||||
|
||||
|
||||
-- Test range types
|
||||
CREATE FOREIGN TABLE test_range_types (int4range int4range, int8range int8range,
|
||||
numrange numrange, tsrange tsrange) SERVER cstore_server;
|
||||
|
||||
COPY test_range_types FROM '@abs_srcdir@/data/range_types.csv' WITH CSV;
|
||||
|
||||
SELECT * FROM test_range_types;
|
||||
|
||||
|
||||
-- Test other types
|
||||
CREATE FOREIGN TABLE test_other_types (bool boolean, bytea bytea, money money,
|
||||
inet inet, bitstring bit varying(5), uuid uuid, json json) SERVER cstore_server;
|
||||
|
||||
COPY test_other_types FROM '@abs_srcdir@/data/other_types.csv' WITH CSV;
|
||||
|
||||
SELECT * FROM test_other_types;
|
||||
|
||||
|
||||
-- Test null values
|
||||
CREATE FOREIGN TABLE test_null_values (a int, b int[], c composite_type)
|
||||
SERVER cstore_server;
|
||||
|
||||
COPY test_null_values FROM '@abs_srcdir@/data/null_values.csv' WITH CSV;
|
||||
|
||||
SELECT * FROM test_null_values;
|
|
@ -0,0 +1,44 @@
|
|||
--
|
||||
-- Test loading data into cstore_fdw tables.
|
||||
--
|
||||
|
||||
-- COPY with incorrect delimiter
|
||||
COPY contestant FROM '@abs_srcdir@/data/contestants.1.csv'
|
||||
WITH DELIMITER '|'; -- ERROR
|
||||
|
||||
-- COPY with invalid program
|
||||
COPY contestant FROM PROGRAM 'invalid_program' WITH CSV; -- ERROR
|
||||
|
||||
-- COPY into uncompressed table from file
|
||||
COPY contestant FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;
|
||||
|
||||
-- COPY into uncompressed table from program
|
||||
COPY contestant FROM PROGRAM 'cat @abs_srcdir@/data/contestants.2.csv' WITH CSV;
|
||||
|
||||
-- COPY into compressed table
|
||||
COPY contestant_compressed FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;
|
||||
|
||||
-- COPY into uncompressed table from program
|
||||
COPY contestant_compressed FROM PROGRAM 'cat @abs_srcdir@/data/contestants.2.csv'
|
||||
WITH CSV;
|
||||
|
||||
-- Test column list
|
||||
CREATE FOREIGN TABLE famous_constants (id int, name text, value real)
|
||||
SERVER cstore_server;
|
||||
COPY famous_constants (value, name, id) FROM STDIN WITH CSV;
|
||||
3.141,pi,1
|
||||
2.718,e,2
|
||||
0.577,gamma,3
|
||||
5.291e-11,bohr radius,4
|
||||
\.
|
||||
|
||||
COPY famous_constants (name, value) FROM STDIN WITH CSV;
|
||||
avagadro,6.022e23
|
||||
electron mass,9.109e-31
|
||||
proton mass,1.672e-27
|
||||
speed of light,2.997e8
|
||||
\.
|
||||
|
||||
SELECT * FROM famous_constants ORDER BY id, name;
|
||||
|
||||
DROP FOREIGN TABLE famous_constants;
|
|
@ -0,0 +1,56 @@
|
|||
/*-------------------------------------------------------------------------
|
||||
*
|
||||
* mod.c
|
||||
*
|
||||
* This file contains module-level definitions.
|
||||
*
|
||||
* Copyright (c) 2016, Citus Data, Inc.
|
||||
*
|
||||
* $Id$
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
#include "postgres.h"
|
||||
|
||||
#include "fmgr.h"
|
||||
|
||||
#include "cstore.h"
|
||||
#include "mod.h"
|
||||
|
||||
#ifdef USE_TABLEAM
|
||||
#include "cstore_tableam.h"
|
||||
#endif
|
||||
|
||||
#ifdef USE_FDW
|
||||
#include "cstore_fdw.h"
|
||||
#endif
|
||||
|
||||
PG_MODULE_MAGIC;
|
||||
|
||||
void
|
||||
_PG_init(void)
|
||||
{
|
||||
cstore_init();
|
||||
|
||||
#ifdef USE_TABLEAM
|
||||
cstore_tableam_init();
|
||||
#endif
|
||||
|
||||
#ifdef USE_FDW
|
||||
cstore_fdw_init();
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
_PG_fini(void)
|
||||
{
|
||||
#if USE_TABLEAM
|
||||
cstore_tableam_finish();
|
||||
#endif
|
||||
|
||||
#ifdef USE_FDW
|
||||
cstore_fdw_finish();
|
||||
#endif
|
||||
}
|
|
@ -0,0 +1,21 @@
|
|||
/*-------------------------------------------------------------------------
|
||||
*
|
||||
* mod.h
|
||||
*
|
||||
* Type and function declarations for CStore
|
||||
*
|
||||
* Copyright (c) 2016, Citus Data, Inc.
|
||||
*
|
||||
* $Id$
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
#ifndef MOD_H
|
||||
#define MOD_H
|
||||
|
||||
/* Function declarations for extension loading and unloading */
|
||||
extern void _PG_init(void);
|
||||
extern void _PG_fini(void);
|
||||
|
||||
#endif /* MOD_H */
|
|
@ -0,0 +1,120 @@
|
|||
--
|
||||
-- Test block filtering in cstore_fdw using min/max values in stripe skip lists.
|
||||
--
|
||||
--
|
||||
-- filtered_row_count returns number of rows filtered by the WHERE clause.
|
||||
-- If blocks get filtered by cstore_fdw, less rows are passed to WHERE
|
||||
-- clause, so this function should return a lower number.
|
||||
--
|
||||
CREATE OR REPLACE FUNCTION filtered_row_count (query text) RETURNS bigint AS
|
||||
$$
|
||||
DECLARE
|
||||
result bigint;
|
||||
rec text;
|
||||
BEGIN
|
||||
result := 0;
|
||||
|
||||
FOR rec IN EXECUTE 'EXPLAIN ANALYZE ' || query LOOP
|
||||
IF rec ~ '^\s+Rows Removed by Filter' then
|
||||
result := regexp_replace(rec, '[^0-9]*', '', 'g');
|
||||
END IF;
|
||||
END LOOP;
|
||||
|
||||
RETURN result;
|
||||
END;
|
||||
$$ LANGUAGE PLPGSQL;
|
||||
-- Create and load data
|
||||
-- block_row_count '1000', stripe_row_count '2000'
|
||||
set cstore.stripe_row_count = 2000;
|
||||
set cstore.block_row_count = 1000;
|
||||
CREATE TABLE test_block_filtering (a int)
|
||||
USING cstore_tableam;
|
||||
COPY test_block_filtering FROM '@abs_srcdir@/data/block_filtering.csv' WITH CSV;
|
||||
-- Verify that filtered_row_count is less than 1000 for the following queries
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 200');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
801
|
||||
(1 row)
|
||||
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a > 200');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
200
|
||||
(1 row)
|
||||
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 9900');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
101
|
||||
(1 row)
|
||||
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a > 9900');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
900
|
||||
(1 row)
|
||||
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 0');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- Verify that filtered_row_count is less than 2000 for the following queries
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN 1 AND 10');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
990
|
||||
(1 row)
|
||||
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN 990 AND 2010');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
1979
|
||||
(1 row)
|
||||
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN -10 AND 0');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- Load data for second time and verify that filtered_row_count is exactly twice as before
|
||||
COPY test_block_filtering FROM '@abs_srcdir@/data/block_filtering.csv' WITH CSV;
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 200');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
1602
|
||||
(1 row)
|
||||
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 0');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN 990 AND 2010');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
3958
|
||||
(1 row)
|
||||
|
||||
set cstore.stripe_row_count to default;
|
||||
set cstore.block_row_count to default;
|
||||
-- Verify that we are fine with collations which use a different alphabet order
|
||||
CREATE TABLE collation_block_filtering_test(A text collate "da_DK")
|
||||
USING cstore_tableam;
|
||||
COPY collation_block_filtering_test FROM STDIN;
|
||||
SELECT * FROM collation_block_filtering_test WHERE A > 'B';
|
||||
a
|
||||
---
|
||||
Å
|
||||
(1 row)
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
--
|
||||
-- Test copying data from cstore_fdw tables.
|
||||
--
|
||||
CREATE TABLE test_contestant(handle TEXT, birthdate DATE, rating INT,
|
||||
percentile FLOAT, country CHAR(3), achievements TEXT[])
|
||||
USING cstore_tableam;
|
||||
-- load table data from file
|
||||
COPY test_contestant FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;
|
||||
-- export using COPY table TO ...
|
||||
COPY test_contestant TO STDOUT;
|
||||
a 01-10-1990 2090 97.1 XA {a}
|
||||
b 11-01-1990 2203 98.1 XA {a,b}
|
||||
c 11-01-1988 2907 99.4 XB {w,y}
|
||||
d 05-05-1985 2314 98.3 XB {}
|
||||
e 05-05-1995 2236 98.2 XC {a}
|
||||
-- export using COPY (SELECT * FROM table) TO ...
|
||||
COPY (select * from test_contestant) TO STDOUT;
|
||||
a 01-10-1990 2090 97.1 XA {a}
|
||||
b 11-01-1990 2203 98.1 XA {a,b}
|
||||
c 11-01-1988 2907 99.4 XB {w,y}
|
||||
d 05-05-1985 2314 98.3 XB {}
|
||||
e 05-05-1995 2236 98.2 XC {a}
|
||||
DROP TABLE test_contestant CASCADE;
|
|
@ -0,0 +1,20 @@
|
|||
--
|
||||
-- Test the CREATE statements related to cstore.
|
||||
--
|
||||
-- Create uncompressed table
|
||||
CREATE TABLE contestant (handle TEXT, birthdate DATE, rating INT,
|
||||
percentile FLOAT, country CHAR(3), achievements TEXT[])
|
||||
USING cstore_tableam;
|
||||
-- Create compressed table with automatically determined file path
|
||||
-- COMPRESSED
|
||||
CREATE TABLE contestant_compressed (handle TEXT, birthdate DATE, rating INT,
|
||||
percentile FLOAT, country CHAR(3), achievements TEXT[])
|
||||
USING cstore_tableam;
|
||||
-- Test that querying an empty table works
|
||||
ANALYZE contestant;
|
||||
SELECT count(*) FROM contestant;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
|
@ -0,0 +1,78 @@
|
|||
--
|
||||
-- Test loading and reading different data types to/from cstore_fdw foreign tables.
|
||||
--
|
||||
-- Settings to make the result deterministic
|
||||
SET datestyle = "ISO, YMD";
|
||||
SET timezone to 'GMT';
|
||||
SET intervalstyle TO 'POSTGRES_VERBOSE';
|
||||
-- Test array types
|
||||
CREATE TABLE test_array_types (int_array int[], bigint_array bigint[],
|
||||
text_array text[]) USING cstore_tableam;
|
||||
COPY test_array_types FROM '@abs_srcdir@/data/array_types.csv' WITH CSV;
|
||||
SELECT * FROM test_array_types;
|
||||
int_array | bigint_array | text_array
|
||||
--------------------------+--------------------------------------------+------------
|
||||
{1,2,3} | {1,2,3} | {a,b,c}
|
||||
{} | {} | {}
|
||||
{-2147483648,2147483647} | {-9223372036854775808,9223372036854775807} | {""}
|
||||
(3 rows)
|
||||
|
||||
-- Test date/time types
|
||||
CREATE TABLE test_datetime_types (timestamp timestamp,
|
||||
timestamp_with_timezone timestamp with time zone, date date, time time,
|
||||
interval interval) USING cstore_tableam;
|
||||
COPY test_datetime_types FROM '@abs_srcdir@/data/datetime_types.csv' WITH CSV;
|
||||
SELECT * FROM test_datetime_types;
|
||||
timestamp | timestamp_with_timezone | date | time | interval
|
||||
---------------------+-------------------------+------------+----------+-----------
|
||||
2000-01-02 04:05:06 | 1999-01-08 12:05:06+00 | 2000-01-02 | 04:05:06 | @ 4 hours
|
||||
1970-01-01 00:00:00 | infinity | -infinity | 00:00:00 | @ 0
|
||||
(2 rows)
|
||||
|
||||
-- Test enum and composite types
|
||||
CREATE TYPE enum_type AS ENUM ('a', 'b', 'c');
|
||||
CREATE TYPE composite_type AS (a int, b text);
|
||||
CREATE TABLE test_enum_and_composite_types (enum enum_type,
|
||||
composite composite_type) USING cstore_tableam;
|
||||
COPY test_enum_and_composite_types FROM
|
||||
'@abs_srcdir@/data/enum_and_composite_types.csv' WITH CSV;
|
||||
SELECT * FROM test_enum_and_composite_types;
|
||||
enum | composite
|
||||
------+-----------
|
||||
a | (2,b)
|
||||
b | (3,c)
|
||||
(2 rows)
|
||||
|
||||
-- Test range types
|
||||
CREATE TABLE test_range_types (int4range int4range, int8range int8range,
|
||||
numrange numrange, tsrange tsrange) USING cstore_tableam;
|
||||
COPY test_range_types FROM '@abs_srcdir@/data/range_types.csv' WITH CSV;
|
||||
SELECT * FROM test_range_types;
|
||||
int4range | int8range | numrange | tsrange
|
||||
-----------+-----------+----------+-----------------------------------------------
|
||||
[1,3) | [1,3) | [1,3) | ["2000-01-02 00:30:00","2010-02-03 12:30:00")
|
||||
empty | [1,) | (,) | empty
|
||||
(2 rows)
|
||||
|
||||
-- Test other types
|
||||
CREATE TABLE test_other_types (bool boolean, bytea bytea, money money,
|
||||
inet inet, bitstring bit varying(5), uuid uuid, json json) USING cstore_tableam;
|
||||
COPY test_other_types FROM '@abs_srcdir@/data/other_types.csv' WITH CSV;
|
||||
SELECT * FROM test_other_types;
|
||||
bool | bytea | money | inet | bitstring | uuid | json
|
||||
------+------------+-------+-------------+-----------+--------------------------------------+------------------
|
||||
f | \xdeadbeef | $1.00 | 192.168.1.2 | 10101 | a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11 | {"key": "value"}
|
||||
t | \xcdb0 | $1.50 | 127.0.0.1 | | a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11 | []
|
||||
(2 rows)
|
||||
|
||||
-- Test null values
|
||||
CREATE TABLE test_null_values (a int, b int[], c composite_type)
|
||||
USING cstore_tableam;
|
||||
COPY test_null_values FROM '@abs_srcdir@/data/null_values.csv' WITH CSV;
|
||||
SELECT * FROM test_null_values;
|
||||
a | b | c
|
||||
---+--------+-----
|
||||
| {NULL} | (,)
|
||||
| |
|
||||
(2 rows)
|
||||
|
|
@ -0,0 +1,42 @@
|
|||
--
|
||||
-- Test loading data into cstore_fdw tables.
|
||||
--
|
||||
-- COPY with incorrect delimiter
|
||||
COPY contestant FROM '@abs_srcdir@/data/contestants.1.csv'
|
||||
WITH DELIMITER '|'; -- ERROR
|
||||
ERROR: missing data for column "birthdate"
|
||||
CONTEXT: COPY contestant, line 1: "a,1990-01-10,2090,97.1,XA ,{a}"
|
||||
-- COPY with invalid program
|
||||
COPY contestant FROM PROGRAM 'invalid_program' WITH CSV; -- ERROR
|
||||
ERROR: program "invalid_program" failed
|
||||
DETAIL: command not found
|
||||
-- COPY into uncompressed table from file
|
||||
COPY contestant FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;
|
||||
-- COPY into uncompressed table from program
|
||||
COPY contestant FROM PROGRAM 'cat @abs_srcdir@/data/contestants.2.csv' WITH CSV;
|
||||
-- COPY into compressed table
|
||||
set cstore.compression = 'pglz';
|
||||
COPY contestant_compressed FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;
|
||||
-- COPY into uncompressed table from program
|
||||
COPY contestant_compressed FROM PROGRAM 'cat @abs_srcdir@/data/contestants.2.csv'
|
||||
WITH CSV;
|
||||
set cstore.compression to default;
|
||||
-- Test column list
|
||||
CREATE TABLE famous_constants (id int, name text, value real)
|
||||
USING cstore_tableam;
|
||||
COPY famous_constants (value, name, id) FROM STDIN WITH CSV;
|
||||
COPY famous_constants (name, value) FROM STDIN WITH CSV;
|
||||
SELECT * FROM famous_constants ORDER BY id, name;
|
||||
id | name | value
|
||||
----+----------------+-----------
|
||||
1 | pi | 3.141
|
||||
2 | e | 2.718
|
||||
3 | gamma | 0.577
|
||||
4 | bohr radius | 5.291e-11
|
||||
| avagadro | 6.022e+23
|
||||
| electron mass | 9.109e-31
|
||||
| proton mass | 1.672e-27
|
||||
| speed of light | 2.997e+08
|
||||
(8 rows)
|
||||
|
||||
DROP TABLE famous_constants;
|
|
@ -0,0 +1,116 @@
|
|||
--
|
||||
-- Test block filtering in cstore_fdw using min/max values in stripe skip lists.
|
||||
--
|
||||
--
|
||||
-- filtered_row_count returns number of rows filtered by the WHERE clause.
|
||||
-- If blocks get filtered by cstore_fdw, less rows are passed to WHERE
|
||||
-- clause, so this function should return a lower number.
|
||||
--
|
||||
CREATE OR REPLACE FUNCTION filtered_row_count (query text) RETURNS bigint AS
|
||||
$$
|
||||
DECLARE
|
||||
result bigint;
|
||||
rec text;
|
||||
BEGIN
|
||||
result := 0;
|
||||
|
||||
FOR rec IN EXECUTE 'EXPLAIN ANALYZE ' || query LOOP
|
||||
IF rec ~ '^\s+Rows Removed by Filter' then
|
||||
result := regexp_replace(rec, '[^0-9]*', '', 'g');
|
||||
END IF;
|
||||
END LOOP;
|
||||
|
||||
RETURN result;
|
||||
END;
|
||||
$$ LANGUAGE PLPGSQL;
|
||||
-- Create and load data
|
||||
CREATE FOREIGN TABLE test_block_filtering (a int)
|
||||
SERVER cstore_server
|
||||
OPTIONS(block_row_count '1000', stripe_row_count '2000');
|
||||
COPY test_block_filtering FROM '@abs_srcdir@/data/block_filtering.csv' WITH CSV;
|
||||
-- Verify that filtered_row_count is less than 1000 for the following queries
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 200');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
801
|
||||
(1 row)
|
||||
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a > 200');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
200
|
||||
(1 row)
|
||||
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 9900');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
101
|
||||
(1 row)
|
||||
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a > 9900');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
900
|
||||
(1 row)
|
||||
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 0');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- Verify that filtered_row_count is less than 2000 for the following queries
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN 1 AND 10');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
990
|
||||
(1 row)
|
||||
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN 990 AND 2010');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
1979
|
||||
(1 row)
|
||||
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN -10 AND 0');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- Load data for second time and verify that filtered_row_count is exactly twice as before
|
||||
COPY test_block_filtering FROM '@abs_srcdir@/data/block_filtering.csv' WITH CSV;
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 200');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
1602
|
||||
(1 row)
|
||||
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 0');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN 990 AND 2010');
|
||||
filtered_row_count
|
||||
--------------------
|
||||
3958
|
||||
(1 row)
|
||||
|
||||
-- Verify that we are fine with collations which use a different alphabet order
|
||||
CREATE FOREIGN TABLE collation_block_filtering_test(A text collate "da_DK")
|
||||
SERVER cstore_server;
|
||||
COPY collation_block_filtering_test FROM STDIN;
|
||||
SELECT * FROM collation_block_filtering_test WHERE A > 'B';
|
||||
a
|
||||
---
|
||||
Å
|
||||
(1 row)
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
--
|
||||
-- Test copying data from cstore_fdw tables.
|
||||
--
|
||||
CREATE FOREIGN TABLE test_contestant(handle TEXT, birthdate DATE, rating INT,
|
||||
percentile FLOAT, country CHAR(3), achievements TEXT[])
|
||||
SERVER cstore_server;
|
||||
-- load table data from file
|
||||
COPY test_contestant FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;
|
||||
-- export using COPY table TO ...
|
||||
COPY test_contestant TO STDOUT;
|
||||
a 01-10-1990 2090 97.1 XA {a}
|
||||
b 11-01-1990 2203 98.1 XA {a,b}
|
||||
c 11-01-1988 2907 99.4 XB {w,y}
|
||||
d 05-05-1985 2314 98.3 XB {}
|
||||
e 05-05-1995 2236 98.2 XC {a}
|
||||
-- export using COPY (SELECT * FROM table) TO ...
|
||||
COPY (select * from test_contestant) TO STDOUT;
|
||||
a 01-10-1990 2090 97.1 XA {a}
|
||||
b 11-01-1990 2203 98.1 XA {a,b}
|
||||
c 11-01-1988 2907 99.4 XB {w,y}
|
||||
d 05-05-1985 2314 98.3 XB {}
|
||||
e 05-05-1995 2236 98.2 XC {a}
|
||||
DROP FOREIGN TABLE test_contestant CASCADE;
|
|
@ -0,0 +1,42 @@
|
|||
--
|
||||
-- Test the CREATE statements related to cstore_fdw.
|
||||
--
|
||||
CREATE SERVER cstore_server FOREIGN DATA WRAPPER cstore_fdw;
|
||||
-- Validator tests
|
||||
CREATE FOREIGN TABLE test_validator_invalid_option ()
|
||||
SERVER cstore_server
|
||||
OPTIONS(bad_option_name '1'); -- ERROR
|
||||
ERROR: invalid option "bad_option_name"
|
||||
HINT: Valid options in this context are: compression, stripe_row_count, block_row_count
|
||||
CREATE FOREIGN TABLE test_validator_invalid_stripe_row_count ()
|
||||
SERVER cstore_server
|
||||
OPTIONS(stripe_row_count '0'); -- ERROR
|
||||
ERROR: invalid stripe row count
|
||||
HINT: Stripe row count must be an integer between 1000 and 10000000
|
||||
CREATE FOREIGN TABLE test_validator_invalid_block_row_count ()
|
||||
SERVER cstore_server
|
||||
OPTIONS(block_row_count '0'); -- ERROR
|
||||
ERROR: invalid block row count
|
||||
HINT: Block row count must be an integer between 1000 and 100000
|
||||
CREATE FOREIGN TABLE test_validator_invalid_compression_type ()
|
||||
SERVER cstore_server
|
||||
OPTIONS(compression 'invalid_compression'); -- ERROR
|
||||
ERROR: invalid compression type
|
||||
HINT: Valid options are: none, pglz
|
||||
-- Create uncompressed table
|
||||
CREATE FOREIGN TABLE contestant (handle TEXT, birthdate DATE, rating INT,
|
||||
percentile FLOAT, country CHAR(3), achievements TEXT[])
|
||||
SERVER cstore_server;
|
||||
-- Create compressed table with automatically determined file path
|
||||
CREATE FOREIGN TABLE contestant_compressed (handle TEXT, birthdate DATE, rating INT,
|
||||
percentile FLOAT, country CHAR(3), achievements TEXT[])
|
||||
SERVER cstore_server
|
||||
OPTIONS(compression 'pglz');
|
||||
-- Test that querying an empty table works
|
||||
ANALYZE contestant;
|
||||
SELECT count(*) FROM contestant;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
|
@ -0,0 +1,78 @@
|
|||
--
|
||||
-- Test loading and reading different data types to/from cstore_fdw foreign tables.
|
||||
--
|
||||
-- Settings to make the result deterministic
|
||||
SET datestyle = "ISO, YMD";
|
||||
SET timezone to 'GMT';
|
||||
SET intervalstyle TO 'POSTGRES_VERBOSE';
|
||||
-- Test array types
|
||||
CREATE FOREIGN TABLE test_array_types (int_array int[], bigint_array bigint[],
|
||||
text_array text[]) SERVER cstore_server;
|
||||
COPY test_array_types FROM '@abs_srcdir@/data/array_types.csv' WITH CSV;
|
||||
SELECT * FROM test_array_types;
|
||||
int_array | bigint_array | text_array
|
||||
--------------------------+--------------------------------------------+------------
|
||||
{1,2,3} | {1,2,3} | {a,b,c}
|
||||
{} | {} | {}
|
||||
{-2147483648,2147483647} | {-9223372036854775808,9223372036854775807} | {""}
|
||||
(3 rows)
|
||||
|
||||
-- Test date/time types
|
||||
CREATE FOREIGN TABLE test_datetime_types (timestamp timestamp,
|
||||
timestamp_with_timezone timestamp with time zone, date date, time time,
|
||||
interval interval) SERVER cstore_server;
|
||||
COPY test_datetime_types FROM '@abs_srcdir@/data/datetime_types.csv' WITH CSV;
|
||||
SELECT * FROM test_datetime_types;
|
||||
timestamp | timestamp_with_timezone | date | time | interval
|
||||
---------------------+-------------------------+------------+----------+-----------
|
||||
2000-01-02 04:05:06 | 1999-01-08 12:05:06+00 | 2000-01-02 | 04:05:06 | @ 4 hours
|
||||
1970-01-01 00:00:00 | infinity | -infinity | 00:00:00 | @ 0
|
||||
(2 rows)
|
||||
|
||||
-- Test enum and composite types
|
||||
CREATE TYPE enum_type AS ENUM ('a', 'b', 'c');
|
||||
CREATE TYPE composite_type AS (a int, b text);
|
||||
CREATE FOREIGN TABLE test_enum_and_composite_types (enum enum_type,
|
||||
composite composite_type) SERVER cstore_server;
|
||||
COPY test_enum_and_composite_types FROM
|
||||
'@abs_srcdir@/data/enum_and_composite_types.csv' WITH CSV;
|
||||
SELECT * FROM test_enum_and_composite_types;
|
||||
enum | composite
|
||||
------+-----------
|
||||
a | (2,b)
|
||||
b | (3,c)
|
||||
(2 rows)
|
||||
|
||||
-- Test range types
|
||||
CREATE FOREIGN TABLE test_range_types (int4range int4range, int8range int8range,
|
||||
numrange numrange, tsrange tsrange) SERVER cstore_server;
|
||||
COPY test_range_types FROM '@abs_srcdir@/data/range_types.csv' WITH CSV;
|
||||
SELECT * FROM test_range_types;
|
||||
int4range | int8range | numrange | tsrange
|
||||
-----------+-----------+----------+-----------------------------------------------
|
||||
[1,3) | [1,3) | [1,3) | ["2000-01-02 00:30:00","2010-02-03 12:30:00")
|
||||
empty | [1,) | (,) | empty
|
||||
(2 rows)
|
||||
|
||||
-- Test other types
|
||||
CREATE FOREIGN TABLE test_other_types (bool boolean, bytea bytea, money money,
|
||||
inet inet, bitstring bit varying(5), uuid uuid, json json) SERVER cstore_server;
|
||||
COPY test_other_types FROM '@abs_srcdir@/data/other_types.csv' WITH CSV;
|
||||
SELECT * FROM test_other_types;
|
||||
bool | bytea | money | inet | bitstring | uuid | json
|
||||
------+------------+-------+-------------+-----------+--------------------------------------+------------------
|
||||
f | \xdeadbeef | $1.00 | 192.168.1.2 | 10101 | a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11 | {"key": "value"}
|
||||
t | \xcdb0 | $1.50 | 127.0.0.1 | | a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11 | []
|
||||
(2 rows)
|
||||
|
||||
-- Test null values
|
||||
CREATE FOREIGN TABLE test_null_values (a int, b int[], c composite_type)
|
||||
SERVER cstore_server;
|
||||
COPY test_null_values FROM '@abs_srcdir@/data/null_values.csv' WITH CSV;
|
||||
SELECT * FROM test_null_values;
|
||||
a | b | c
|
||||
---+--------+-----
|
||||
| {NULL} | (,)
|
||||
| |
|
||||
(2 rows)
|
||||
|
|
@ -0,0 +1,39 @@
|
|||
--
|
||||
-- Test loading data into cstore_fdw tables.
|
||||
--
|
||||
-- COPY with incorrect delimiter
|
||||
COPY contestant FROM '@abs_srcdir@/data/contestants.1.csv'
|
||||
WITH DELIMITER '|'; -- ERROR
|
||||
ERROR: missing data for column "birthdate"
|
||||
-- COPY with invalid program
|
||||
COPY contestant FROM PROGRAM 'invalid_program' WITH CSV; -- ERROR
|
||||
ERROR: program "invalid_program" failed
|
||||
DETAIL: command not found
|
||||
-- COPY into uncompressed table from file
|
||||
COPY contestant FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;
|
||||
-- COPY into uncompressed table from program
|
||||
COPY contestant FROM PROGRAM 'cat @abs_srcdir@/data/contestants.2.csv' WITH CSV;
|
||||
-- COPY into compressed table
|
||||
COPY contestant_compressed FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;
|
||||
-- COPY into uncompressed table from program
|
||||
COPY contestant_compressed FROM PROGRAM 'cat @abs_srcdir@/data/contestants.2.csv'
|
||||
WITH CSV;
|
||||
-- Test column list
|
||||
CREATE FOREIGN TABLE famous_constants (id int, name text, value real)
|
||||
SERVER cstore_server;
|
||||
COPY famous_constants (value, name, id) FROM STDIN WITH CSV;
|
||||
COPY famous_constants (name, value) FROM STDIN WITH CSV;
|
||||
SELECT * FROM famous_constants ORDER BY id, name;
|
||||
id | name | value
|
||||
----+----------------+-----------
|
||||
1 | pi | 3.141
|
||||
2 | e | 2.718
|
||||
3 | gamma | 0.577
|
||||
4 | bohr radius | 5.291e-11
|
||||
| avagadro | 6.022e+23
|
||||
| electron mass | 9.109e-31
|
||||
| proton mass | 1.672e-27
|
||||
| speed of light | 2.997e+08
|
||||
(8 rows)
|
||||
|
||||
DROP FOREIGN TABLE famous_constants;
|
|
@ -0,0 +1,46 @@
|
|||
setup
|
||||
{
|
||||
CREATE TABLE test_vacuum_vs_insert (a int, b int) USING cstore_tableam;
|
||||
}
|
||||
|
||||
teardown
|
||||
{
|
||||
DROP TABLE IF EXISTS test_vacuum_vs_insert CASCADE;
|
||||
}
|
||||
|
||||
session "s1"
|
||||
|
||||
step "s1-begin"
|
||||
{
|
||||
BEGIN;
|
||||
}
|
||||
|
||||
step "s1-insert"
|
||||
{
|
||||
INSERT INTO test_vacuum_vs_insert SELECT i, 2 * i FROM generate_series(1, 3) i;
|
||||
}
|
||||
|
||||
step "s1-commit"
|
||||
{
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
session "s2"
|
||||
|
||||
step "s2-vacuum"
|
||||
{
|
||||
VACUUM VERBOSE test_vacuum_vs_insert;
|
||||
}
|
||||
|
||||
step "s2-vacuum-full"
|
||||
{
|
||||
VACUUM FULL VERBOSE test_vacuum_vs_insert;
|
||||
}
|
||||
|
||||
step "s2-select"
|
||||
{
|
||||
SELECT * FROM test_vacuum_vs_insert;
|
||||
}
|
||||
|
||||
permutation "s1-insert" "s1-begin" "s1-insert" "s2-vacuum" "s1-commit" "s2-select"
|
||||
permutation "s1-insert" "s1-begin" "s1-insert" "s2-vacuum-full" "s1-commit" "s2-select"
|
|
@ -0,0 +1,67 @@
|
|||
setup
|
||||
{
|
||||
CREATE TABLE test_insert_concurrency (a int, b int) USING cstore_tableam;
|
||||
}
|
||||
|
||||
teardown
|
||||
{
|
||||
DROP TABLE IF EXISTS test_insert_concurrency CASCADE;
|
||||
}
|
||||
|
||||
session "s1"
|
||||
|
||||
step "s1-begin"
|
||||
{
|
||||
BEGIN;
|
||||
}
|
||||
|
||||
step "s1-insert"
|
||||
{
|
||||
INSERT INTO test_insert_concurrency SELECT i, 2 * i FROM generate_series(1, 3) i;
|
||||
}
|
||||
|
||||
step "s1-copy"
|
||||
{
|
||||
COPY test_insert_concurrency(a) FROM PROGRAM 'seq 11 13';
|
||||
}
|
||||
|
||||
step "s1-select"
|
||||
{
|
||||
SELECT * FROM test_insert_concurrency ORDER BY a;
|
||||
}
|
||||
|
||||
step "s1-commit"
|
||||
{
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
session "s2"
|
||||
|
||||
step "s2-begin"
|
||||
{
|
||||
BEGIN;
|
||||
}
|
||||
|
||||
step "s2-insert"
|
||||
{
|
||||
INSERT INTO test_insert_concurrency SELECT i, 2 * i FROM generate_series(4, 6) i;
|
||||
}
|
||||
|
||||
step "s2-select"
|
||||
{
|
||||
SELECT * FROM test_insert_concurrency ORDER BY a;
|
||||
}
|
||||
|
||||
step "s2-commit"
|
||||
{
|
||||
COMMIT;
|
||||
}
|
||||
|
||||
# writes shouldn't block writes or reads
|
||||
permutation "s1-begin" "s2-begin" "s1-insert" "s2-insert" "s1-select" "s2-select" "s1-commit" "s2-commit" "s1-select"
|
||||
|
||||
# copy vs insert
|
||||
permutation "s1-begin" "s2-begin" "s1-copy" "s2-insert" "s1-select" "s2-select" "s1-commit" "s2-commit" "s1-select"
|
||||
|
||||
# insert vs copy
|
||||
permutation "s1-begin" "s2-begin" "s2-insert" "s1-copy" "s1-select" "s2-select" "s1-commit" "s2-commit" "s1-select"
|
|
@ -0,0 +1,7 @@
|
|||
session "s1"
|
||||
step "s1a"
|
||||
{
|
||||
CREATE EXTENSION cstore_fdw;
|
||||
}
|
||||
|
||||
permutation "s1a"
|
|
@ -0,0 +1,85 @@
|
|||
--
|
||||
-- Testing ALTER TABLE on cstore_fdw tables.
|
||||
--
|
||||
|
||||
CREATE TABLE test_alter_table (a int, b int, c int) USING cstore_tableam;
|
||||
|
||||
WITH sample_data AS (VALUES
|
||||
(1, 2, 3),
|
||||
(4, 5, 6),
|
||||
(7, 8, 9)
|
||||
)
|
||||
INSERT INTO test_alter_table SELECT * FROM sample_data;
|
||||
|
||||
-- drop a column
|
||||
ALTER TABLE test_alter_table DROP COLUMN a;
|
||||
|
||||
-- test analyze
|
||||
ANALYZE test_alter_table;
|
||||
|
||||
-- verify select queries run as expected
|
||||
SELECT * FROM test_alter_table;
|
||||
SELECT a FROM test_alter_table;
|
||||
SELECT b FROM test_alter_table;
|
||||
|
||||
-- verify insert runs as expected
|
||||
INSERT INTO test_alter_table (SELECT 3, 5, 8);
|
||||
INSERT INTO test_alter_table (SELECT 5, 8);
|
||||
|
||||
|
||||
-- add a column with no defaults
|
||||
ALTER TABLE test_alter_table ADD COLUMN d int;
|
||||
SELECT * FROM test_alter_table;
|
||||
INSERT INTO test_alter_table (SELECT 3, 5, 8);
|
||||
SELECT * FROM test_alter_table;
|
||||
|
||||
|
||||
-- add a fixed-length column with default value
|
||||
ALTER TABLE test_alter_table ADD COLUMN e int default 3;
|
||||
SELECT * from test_alter_table;
|
||||
INSERT INTO test_alter_table (SELECT 1, 2, 4, 8);
|
||||
SELECT * from test_alter_table;
|
||||
|
||||
|
||||
-- add a variable-length column with default value
|
||||
ALTER TABLE test_alter_table ADD COLUMN f text DEFAULT 'TEXT ME';
|
||||
SELECT * from test_alter_table;
|
||||
INSERT INTO test_alter_table (SELECT 1, 2, 4, 8, 'ABCDEF');
|
||||
SELECT * from test_alter_table;
|
||||
|
||||
|
||||
-- drop couple of columns
|
||||
ALTER TABLE test_alter_table DROP COLUMN c;
|
||||
ALTER TABLE test_alter_table DROP COLUMN e;
|
||||
ANALYZE test_alter_table;
|
||||
SELECT * from test_alter_table;
|
||||
SELECT count(*) from test_alter_table;
|
||||
SELECT count(t.*) from test_alter_table t;
|
||||
|
||||
|
||||
-- unsupported default values
|
||||
ALTER TABLE test_alter_table ADD COLUMN g boolean DEFAULT isfinite(current_date);
|
||||
ALTER TABLE test_alter_table ADD COLUMN h DATE DEFAULT current_date;
|
||||
SELECT * FROM test_alter_table;
|
||||
ALTER TABLE test_alter_table ALTER COLUMN g DROP DEFAULT;
|
||||
SELECT * FROM test_alter_table;
|
||||
ALTER TABLE test_alter_table ALTER COLUMN h DROP DEFAULT;
|
||||
ANALYZE test_alter_table;
|
||||
SELECT * FROM test_alter_table;
|
||||
|
||||
-- unsupported type change
|
||||
ALTER TABLE test_alter_table ADD COLUMN i int;
|
||||
ALTER TABLE test_alter_table ADD COLUMN j float;
|
||||
ALTER TABLE test_alter_table ADD COLUMN k text;
|
||||
|
||||
-- this is valid type change
|
||||
ALTER TABLE test_alter_table ALTER COLUMN i TYPE float;
|
||||
|
||||
-- this is not valid
|
||||
ALTER TABLE test_alter_table ALTER COLUMN j TYPE int;
|
||||
|
||||
-- text / varchar conversion is valid both ways
|
||||
ALTER TABLE test_alter_table ALTER COLUMN k TYPE varchar(20);
|
||||
ALTER TABLE test_alter_table ALTER COLUMN k TYPE text;
|
||||
|
||||
DROP TABLE test_alter_table;
|
|
@ -0,0 +1,11 @@
--
-- Verify that the ANALYZE command collects planner statistics
-- for cstore_fdw tables.
--

-- ANALYZE the uncompressed table and confirm pg_stats entries appear
ANALYZE contestant;
SELECT COUNT(*) FROM pg_stats WHERE tablename = 'contestant';

-- repeat for the compressed table
ANALYZE contestant_compressed;
SELECT COUNT(*) FROM pg_stats WHERE tablename = 'contestant_compressed';
@ -0,0 +1,9 @@
-- Clean up objects created by the type tests.
-- NOTE: order matters — test_enum_and_composite_types must be dropped
-- before the composite/enum types it uses.
DROP TABLE test_null_values;
DROP TABLE test_other_types;
DROP TABLE test_range_types;
DROP TABLE test_enum_and_composite_types;
DROP TYPE composite_type;
DROP TYPE enum_type;
DROP TABLE test_datetime_types;
DROP TABLE test_array_types;
@ -0,0 +1,54 @@
--
-- Tests the different DROP commands for cstore_fdw tables.
--
-- DROP TABLE
-- DROP SCHEMA
-- DROP EXTENSION
-- DROP DATABASE
--

-- Note that travis does not create
-- cstore_fdw extension in default database (postgres). This has caused
-- different behavior between travis tests and local tests. Thus
-- 'postgres' directory is excluded from comparison to have the same result.

-- store postgres database oid
SELECT oid postgres_oid FROM pg_database WHERE datname = 'postgres' \gset

-- remember how many data files exist before any drops
SELECT count(*) AS cstore_data_files_before_drop FROM cstore.cstore_data_files \gset

-- DROP cstore_fdw tables
DROP TABLE contestant;
DROP TABLE contestant_compressed;

-- make sure DROP deletes metadata (expect 2: both tables' entries removed)
SELECT :cstore_data_files_before_drop - count(*) FROM cstore.cstore_data_files;

-- Create a cstore_fdw table under a schema and drop it.
CREATE SCHEMA test_schema;
CREATE TABLE test_schema.test_table(data int) USING cstore_tableam;

SELECT count(*) AS cstore_data_files_before_drop FROM cstore.cstore_data_files \gset
DROP SCHEMA test_schema CASCADE;
SELECT :cstore_data_files_before_drop - count(*) FROM cstore.cstore_data_files;

-- remember the current database so we can reconnect after the drop test
SELECT current_database() datname \gset

CREATE DATABASE db_to_drop;
\c db_to_drop
CREATE EXTENSION cstore_fdw;
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database() \gset

CREATE TABLE test_table(data int) USING cstore_tableam;

-- dropping the extension must cascade to the table
DROP EXTENSION cstore_fdw CASCADE;

-- test database drop
CREATE EXTENSION cstore_fdw;
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database() \gset

CREATE TABLE test_table(data int) USING cstore_tableam;

-- reconnect to the original database before dropping db_to_drop
\c :datname

DROP DATABASE db_to_drop;
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue