mirror of https://github.com/citusdata/citus.git
Squash of original cstore_fdw commit abc9fbe1c3

@@ -0,0 +1,57 @@
# =====
# = C =
# =====

# Object files
*.o
*.ko
*.obj
*.elf

# Libraries
*.lib
*.a

# Shared objects (inc. Windows DLLs)
*.dll
*.so
*.so.*
*.dylib

# Executables
*.exe
*.app
*.i*86
*.x86_64
*.hex

# ========
# = Gcov =
# ========

# gcc coverage testing tool files

*.gcno
*.gcda
*.gcov

# ====================
# = Project-Specific =
# ====================

/data/*.cstore
/data/*.footer

/sql/block_filtering.sql
/sql/copyto.sql
/sql/create.sql
/sql/data_types.sql
/sql/load.sql

/expected/block_filtering.out
/expected/copyto.out
/expected/create.out
/expected/data_types.out
/expected/load.out

*.pb-c.*

@@ -0,0 +1,42 @@
sudo: required
dist: bionic
language: c
cache:
  apt: true
  directories:
  - /home/travis/postgresql
env:
  global:
    - enable_coverage=yes
    - PG_PRELOAD=cstore_fdw
  matrix:
    - PGVERSION=9.3
    - PGVERSION=9.4
    - PGVERSION=9.5
    - PGVERSION=9.6
    - PGVERSION=10
    - PGVERSION=11
    - PGVERSION=12

before_install:
  - git clone -b v0.7.13 --depth 1 https://github.com/citusdata/tools.git
  - sudo make -C tools install
  - setup_apt
  - nuke_pg
install:
  - sudo apt-get install protobuf-c-compiler
  - sudo apt-get install libprotobuf-c0-dev
  - sudo locale-gen da_DK
  - sudo locale-gen da_DK.utf8
  - sudo pip install cpp-coveralls
  - install_pg
  - install_custom_pg
before_script:
  - chmod 777 .
  - chmod 777 data
  - chmod 666 data/*
  - config_and_start_cluster
script: pg_travis_test
after_success:
  - sudo chmod 666 *.gcda
  - coveralls --exclude cstore.pb-c.c --exclude cstore.pb-c.h

@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

@@ -0,0 +1,45 @@
{
  "name": "cstore_fdw",
  "abstract": "Columnar Store for PostgreSQL",
  "description": "PostgreSQL extension which implements a Columnar Store.",
  "version": "1.7.0",
  "maintainer": "Murat Tuncer <murat.tuncer@microsoft.com>",
  "license": "apache_2_0",
  "provides": {
    "cstore_fdw": {
      "abstract": "Foreign Data Wrapper for Columnar Store Tables",
      "file": "cstore_fdw--1.7.sql",
      "docfile": "README.md",
      "version": "1.7.0"
    }
  },
  "prereqs": {
    "runtime": {
      "requires": {
        "PostgreSQL": "9.3.0"
      }
    }
  },
  "resources": {
    "bugtracker": {
      "web": "http://github.com/citusdata/cstore_fdw/issues/"
    },
    "repository": {
      "url": "git://github.com/citusdata/cstore_fdw.git",
      "web": "https://github.com/citusdata/cstore_fdw/",
      "type": "git"
    }
  },
  "generated_by": "Murat Tuncer",
  "meta-spec": {
    "version": "1.0.0",
    "url": "http://pgxn.org/meta/spec.txt"
  },
  "tags": [
    "orc",
    "fdw",
    "foreign data wrapper",
    "cstore_fdw",
    "columnar store"
  ]
}

@@ -0,0 +1,59 @@
# cstore_fdw/Makefile
#
# Copyright (c) 2016 Citus Data, Inc.
#

MODULE_big = cstore_fdw

PG_CPPFLAGS = --std=c99
SHLIB_LINK = -lprotobuf-c
OBJS = cstore.pb-c.o cstore_fdw.o cstore_writer.o cstore_reader.o \
       cstore_metadata_serialization.o cstore_compression.o

EXTENSION = cstore_fdw
DATA = cstore_fdw--1.7.sql cstore_fdw--1.6--1.7.sql cstore_fdw--1.5--1.6.sql cstore_fdw--1.4--1.5.sql \
       cstore_fdw--1.3--1.4.sql cstore_fdw--1.2--1.3.sql cstore_fdw--1.1--1.2.sql \
       cstore_fdw--1.0--1.1.sql

REGRESS = create load query analyze data_types functions block_filtering drop \
          insert copyto alter truncate
EXTRA_CLEAN = cstore.pb-c.h cstore.pb-c.c data/*.cstore data/*.cstore.footer \
              sql/block_filtering.sql sql/create.sql sql/data_types.sql sql/load.sql \
              sql/copyto.sql expected/block_filtering.out expected/create.out \
              expected/data_types.out expected/load.out expected/copyto.out

ifeq ($(enable_coverage),yes)
    PG_CPPFLAGS += --coverage
    SHLIB_LINK += --coverage
    EXTRA_CLEAN += *.gcno
endif

UNAME_S := $(shell uname -s)
ifeq ($(UNAME_S),Darwin)
    PG_CPPFLAGS += -I/usr/local/include
endif

#
# Users need to specify their Postgres installation path through pg_config. For
# example: /usr/local/pgsql/bin/pg_config or /usr/lib/postgresql/9.3/bin/pg_config
#

PG_CONFIG = pg_config
PGXS := $(shell $(PG_CONFIG) --pgxs)
include $(PGXS)

ifndef MAJORVERSION
    MAJORVERSION := $(basename $(VERSION))
endif

ifeq (,$(findstring $(MAJORVERSION), 9.3 9.4 9.5 9.6 10 11 12))
    $(error PostgreSQL 9.3 to 12 is required to compile this extension)
endif

cstore.pb-c.c: cstore.proto
	protoc-c --c_out=. cstore.proto

installcheck: remove_cstore_files

remove_cstore_files:
	rm -f data/*.cstore data/*.cstore.footer

@@ -0,0 +1,373 @@
cstore_fdw
==========

[Build Status][status] [Coverage][coverage]

Cstore_fdw is an open source columnar store extension for PostgreSQL. Columnar stores provide notable benefits for analytics use cases where data is loaded in batches. Cstore_fdw's columnar nature delivers performance by only reading relevant data from disk, and it may compress data by 6x-10x to reduce space requirements for data archival.

Cstore_fdw is developed by [Citus Data](https://www.citusdata.com) and can be used in combination with [Citus](https://github.com/citusdata/citus), a PostgreSQL extension that intelligently distributes your data and queries across many nodes so your database can scale and your queries are fast. If you have any questions about how Citus can help you scale or how to use Citus in combination with cstore_fdw, [please let us know](https://www.citusdata.com/about/contact_us/).

Join the [Mailing List][mailing-list] to stay on top of the latest developments for Cstore_fdw.


Introduction
------------

This extension uses a format for its data layout that is inspired by ORC, the Optimized Row Columnar format. Like ORC, the cstore format improves upon RCFile, developed at Facebook, and brings the following benefits:

* Compression: Reduces in-memory and on-disk data size by 2-4x. Can be extended to support different codecs.
* Column projections: Only reads column data relevant to the query. Improves performance for I/O bound queries.
* Skip indexes: Stores min/max statistics for row groups, and uses them to skip over unrelated rows.

Further, we used the Postgres foreign data wrapper APIs and type representations with this extension. This brings:

* Support for 40+ Postgres data types. The user can also create new types and use them.
* Statistics collection. PostgreSQL's query optimizer uses these stats to evaluate different query plans and pick the best one.
* Simple setup. Create a foreign table and copy data. Run SQL.


Building
--------

cstore\_fdw depends on protobuf-c for serializing and deserializing table metadata, so we need to install these packages first:

    # Fedora 17+, CentOS, and Amazon Linux
    sudo yum install protobuf-c-devel

    # Ubuntu 10.04+
    sudo apt-get install protobuf-c-compiler
    sudo apt-get install libprotobuf-c0-dev

    # Ubuntu 18.04+
    sudo apt-get install protobuf-c-compiler
    sudo apt-get install libprotobuf-c-dev

    # Mac OS X
    brew install protobuf-c

**Note.** On CentOS 5, 6, and 7, you may need to install or update the EPEL 5, 6, or 7 repositories. See [this page](https://support.rackspace.com/how-to/install-epel-and-additional-repositories-on-centos-and-red-hat/) for instructions.

**Note.** On Amazon Linux, the EPEL repository is installed by default, but not enabled. See [these instructions](http://aws.amazon.com/amazon-linux-ami/faqs/#epel) for how to enable it.

Once you have protobuf-c installed on your machine, you are ready to build cstore\_fdw. For this, you need to include the pg\_config directory path in your make command. This path is typically the same as your PostgreSQL installation's bin/ directory path. For example:

    PATH=/usr/local/pgsql/bin/:$PATH make
    sudo PATH=/usr/local/pgsql/bin/:$PATH make install

**Note.** cstore_fdw requires PostgreSQL 9.3 to 12. It doesn't support earlier versions of PostgreSQL.


Usage
-----

Before using cstore\_fdw, you need to add it to ```shared_preload_libraries``` in your ```postgresql.conf``` and restart Postgres:

    shared_preload_libraries = 'cstore_fdw'    # (change requires restart)

The following parameters can be set on a cstore foreign table object.

* filename (optional): The absolute path to the location for storing table data. If you don't specify the filename option, cstore\_fdw will automatically choose the $PGDATA/cstore\_fdw directory to store the files. If specified, the value of this parameter is used as a prefix for all files created to store table data. For example, the value ```/cstore_fdw/my_table``` could result in the files ```/cstore_fdw/my_table``` and ```/cstore_fdw/my_table.footer``` being used to manage table data.
* compression (optional): The compression used for compressing value streams. Valid options are ```none``` and ```pglz```. The default is ```none```.
* stripe\_row\_count (optional): Number of rows per stripe. The default is ```150000```. Reducing this value decreases the amount of memory used for loading and querying data, but also decreases performance.
* block\_row\_count (optional): Number of rows per column block. The default is ```10000```. cstore\_fdw compresses, creates skip indexes, and reads from disk at the block granularity. Increasing this value helps with compression and results in fewer reads from disk. However, higher values also reduce the probability of skipping over unrelated row blocks. A foreign table that overrides these defaults is sketched just after this list.
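As a minimal sketch of how these options fit together (the table, columns, and option values below are purely illustrative assumptions, not part of the project's test data; it also assumes a server named ```cstore_server``` has already been created as in the Example section below):

```SQL
-- Illustrative only: assumes CREATE SERVER cstore_server FOREIGN DATA WRAPPER cstore_fdw; was run.
CREATE FOREIGN TABLE sensor_readings
(
    reading_time TIMESTAMPTZ,
    sensor_id INTEGER,
    reading_value DOUBLE PRECISION
)
SERVER cstore_server
OPTIONS(compression 'pglz',          -- compress value streams with pglz
        stripe_row_count '300000',   -- larger stripes use more memory while loading
        block_row_count '10000');    -- block granularity for skip indexes and disk reads
```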
To load or append data into a cstore table, you have two options (both are sketched just after this list):

* You can use the [```COPY``` command][copy-command] to load or append data from a file, a program, or STDIN.
* You can use the ```INSERT INTO cstore_table SELECT ...``` syntax to load or append data from another table.
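Continuing with the hypothetical ```sensor_readings``` table above (the CSV path and the ```staging_readings``` table are illustrative assumptions):

```SQL
-- Append rows from a CSV file; COPY ... FROM PROGRAM or FROM STDIN works the same way.
COPY sensor_readings FROM '/tmp/readings.csv' WITH CSV;

-- Append rows selected from another (row-oriented) table.
INSERT INTO sensor_readings
SELECT reading_time, sensor_id, reading_value
FROM staging_readings;
```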
You can use the [```ANALYZE``` command][analyze-command] to collect statistics about the table. These statistics help the query planner determine the most efficient execution plan for each query.
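For example, with the ```customer_reviews``` table created in the Example section below (the ```EXPLAIN``` is shown only to illustrate that the planner's row estimates are based on these statistics):

```SQL
ANALYZE customer_reviews;

-- The row count estimates in the resulting plan come from the collected statistics.
EXPLAIN SELECT count(*) FROM customer_reviews WHERE product_group = 'Book';
```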

**Note.** We currently don't support updating the table using DELETE and UPDATE commands. We also don't support single-row inserts.


Updating from earlier versions to 1.7
-------------------------------------

To update an existing cstore_fdw installation from versions earlier than 1.7, you can take the following steps:

* Download and install cstore_fdw version 1.7 using the instructions from the "Building" section,
* Restart the PostgreSQL server,
* Run ```ALTER EXTENSION cstore_fdw UPDATE;```


Example
-------

As an example, we demonstrate loading and querying data to/from a column store table from scratch here. Let's start with downloading and decompressing the data files.

    wget http://examples.citusdata.com/customer_reviews_1998.csv.gz
    wget http://examples.citusdata.com/customer_reviews_1999.csv.gz

    gzip -d customer_reviews_1998.csv.gz
    gzip -d customer_reviews_1999.csv.gz

Then, let's log into Postgres, and run the following commands to create a column store foreign table:

```SQL
-- load extension first time after install
CREATE EXTENSION cstore_fdw;

-- create server object
CREATE SERVER cstore_server FOREIGN DATA WRAPPER cstore_fdw;

-- create foreign table
CREATE FOREIGN TABLE customer_reviews
(
    customer_id TEXT,
    review_date DATE,
    review_rating INTEGER,
    review_votes INTEGER,
    review_helpful_votes INTEGER,
    product_id CHAR(10),
    product_title TEXT,
    product_sales_rank BIGINT,
    product_group TEXT,
    product_category TEXT,
    product_subcategory TEXT,
    similar_product_ids CHAR(10)[]
)
SERVER cstore_server
OPTIONS(compression 'pglz');
```

Next, we load data into the table:

```SQL
\COPY customer_reviews FROM 'customer_reviews_1998.csv' WITH CSV;
\COPY customer_reviews FROM 'customer_reviews_1999.csv' WITH CSV;
```

**Note.** If you are getting ```ERROR: cannot copy to foreign table "customer_reviews"``` when trying to run the COPY commands, double-check that you have added cstore\_fdw to ```shared_preload_libraries``` in ```postgresql.conf``` and restarted Postgres.

Next, we collect data distribution statistics about the table. This is optional, but usually very helpful:

```SQL
ANALYZE customer_reviews;
```

Finally, let's run some example SQL queries on the column store table.

```SQL
-- Find all reviews a particular customer made on the Dune series in 1998.
SELECT
    customer_id, review_date, review_rating, product_id, product_title
FROM
    customer_reviews
WHERE
    customer_id ='A27T7HVDXA3K2A' AND
    product_title LIKE '%Dune%' AND
    review_date >= '1998-01-01' AND
    review_date <= '1998-12-31';

-- Do we have a correlation between a book's title's length and its review ratings?
SELECT
    width_bucket(length(product_title), 1, 50, 5) title_length_bucket,
    round(avg(review_rating), 2) AS review_average,
    count(*)
FROM
    customer_reviews
WHERE
    product_group = 'Book'
GROUP BY
    title_length_bucket
ORDER BY
    title_length_bucket;
```


Usage with Citus
----------------

The example above illustrated how to load data into a PostgreSQL database running on a single host. However, sometimes your data is too large to analyze effectively on a single host. Citus is a product built by Citus Data that allows you to run a distributed PostgreSQL database to analyze your data using the power of multiple hosts. You can easily install and run other PostgreSQL extensions and foreign data wrappers, including cstore_fdw, alongside Citus.

You can create a cstore_fdw table and distribute it using the ```create_distributed_table()``` UDF just like any other table. You can load data using the ```copy``` command as you would do in single node PostgreSQL.
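A minimal sketch of that workflow, assuming a running Citus cluster and reusing the hypothetical ```sensor_readings``` table from the Usage section above:

```SQL
-- Distribute the cstore_fdw foreign table across the cluster on sensor_id.
SELECT create_distributed_table('sensor_readings', 'sensor_id');

-- Load data with COPY exactly as on a single node.
COPY sensor_readings FROM '/tmp/readings.csv' WITH CSV;
```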

Using Skip Indexes
------------------

cstore_fdw partitions each column into multiple blocks. Skip indexes store minimum and maximum values for each of these blocks. While scanning the table, if the min/max values of a block contradict the WHERE clause, then the block is skipped entirely. This way, the query processes less data and hence finishes faster.

To use skip indexes more efficiently, you should load the data after sorting it on a column that is commonly used in the WHERE clause. This ensures that there is minimal overlap between blocks, so the chance of them being skipped is higher.
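For example, a sorted load might look like the following sketch (again using the hypothetical ```sensor_readings``` and ```staging_readings``` tables; the sort column is whatever your queries usually filter on):

```SQL
-- Load data pre-sorted on the commonly filtered column (here: reading_time), so
-- each block covers a narrow min/max range and is more likely to be skipped.
INSERT INTO sensor_readings
SELECT reading_time, sensor_id, reading_value
FROM staging_readings
ORDER BY reading_time;
```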

In practice, the data generally has an inherent dimension (for example a time field) on which it is naturally sorted. Usually, the queries also have a filter clause on that column (for example, you want to query only the last week's data), and hence you don't need to sort the data in such cases.


Uninstalling cstore_fdw
-----------------------

Before uninstalling the extension, first you need to drop all the cstore tables:

    postgres=# DROP FOREIGN TABLE cstore_table_1;
    ...
    postgres=# DROP FOREIGN TABLE cstore_table_n;

Then, you should drop the cstore server and extension:

    postgres=# DROP SERVER cstore_server;
    postgres=# DROP EXTENSION cstore_fdw;

cstore\_fdw automatically creates some directories inside the PostgreSQL data directory to store its files. To remove them, you can run:

    $ rm -rf $PGDATA/cstore_fdw

Then, you should remove cstore\_fdw from ```shared_preload_libraries``` in your ```postgresql.conf```:

    shared_preload_libraries = ''    # (change requires restart)

Finally, to uninstall the extension you can run the following command in the extension's source code directory. This will clean up all the files copied during the installation:

    $ sudo PATH=/usr/local/pgsql/bin/:$PATH make uninstall


Changeset
---------

### Version 1.7.0

* (Fix) Add support for PostgreSQL 12
* (Fix) Support count(t.*) from t type queries
* (Fix) Build failures for MacOS 10.14+
* (Fix) Make foreign scan parallel safe
* (Fix) Add support for PostgreSQL 11 COPY

### Version 1.6.2

* (Fix) Add support for PostgreSQL 11

### Version 1.6.1

* (Fix) Fix crash during truncate (cstore crashing the server when enabled but not used)
* (Fix) "No such file or directory" warning when attempting to drop a database

### Version 1.6

* (Feature) Added support for PostgreSQL 10.
* (Fix) Removed table files when a schema, extension or database is dropped.
* (Fix) Removed unused code fragments.
* (Fix) Fixed incorrect initialization of stripe buffers.
* (Fix) Checked user access rights when executing truncate.
* (Fix) Made copy command cancellable.
* (Fix) Fixed namespace issue regarding drop table.

### Version 1.5.1

* (Fix) Verify cstore_fdw server on CREATE FOREIGN TABLE command

### Version 1.5

* (Feature) Added support for PostgreSQL 9.6.
* (Fix) Removed table data when a cstore_fdw table is indirectly dropped.
* (Fix) Removed unused code fragments.
* (Fix) Fixed column selection logic to return columns used in expressions.
* (Fix) Prevented the alter table command from changing a column type to an incompatible type.

### Version 1.4.1

* (Fix) Compatibility fix for Citus [copy command][copy-command].

### Version 1.4

* (Feature) Added support for ```TRUNCATE TABLE```
* (Fix) Added support for PostgreSQL 9.5

### Version 1.3

* (Feature) Added support for ```ALTER TABLE ADD COLUMN``` and ```ALTER TABLE DROP COLUMN```.
* (Feature) Added column list support in ```COPY FROM```.
* (Optimization) Improved row count estimation, which results in better plans.
* (Fix) Fixed the deadlock issue during concurrent inserts.
* (Fix) Return correct results when using whole row references.

### Version 1.2

* (Feature) Added support for ```COPY TO```.
* (Feature) Added support for ```INSERT INTO cstore_table SELECT ...```.
* (Optimization) Improved memory usage.
* (Fix) Dropping multiple cstore tables in a single command now cleans up the files of all of them.

### Version 1.1

* (Feature) Made the filename option optional; a default directory inside $PGDATA is used to manage cstore tables.
* (Feature) Automatically delete files on DROP FOREIGN TABLE.
* (Fix) Return an empty table if no data has been loaded. Previously, cstore_fdw errored out.
* (Fix) Fixed overestimation of relation column counts when planning.
* (Feature) Added cstore\_table\_size(tablename) for getting the size of a cstore table in bytes.


Copyright
---------

Copyright (c) 2017 Citus Data, Inc.

This module is free software; you can redistribute it and/or modify it under the Apache v2.0 License.

For all types of questions and comments about the wrapper, please contact us at engage @ citusdata.com.

[status]: https://travis-ci.org/citusdata/cstore_fdw
[mailing-list]: https://groups.google.com/forum/#!forum/cstore-users
[coverage]: https://coveralls.io/r/citusdata/cstore_fdw
[copy-command]: http://www.postgresql.org/docs/current/static/sql-copy.html
[analyze-command]: http://www.postgresql.org/docs/current/static/sql-analyze.html

@@ -0,0 +1,41 @@
To see the list of features and bug-fixes planned for next releases, see our [development roadmap][roadmap].

Requested Features
------------------

* Improve write performance
* Improve read performance
* Add checksum logic
* Add new compression methods
* Enable INSERT/DELETE/UPDATE
* Enable users other than superuser to safely create columnar tables (permissions)
* Transactional semantics
* Add config setting to make pg\_fsync() optional


Known Issues
------------

* Copy command ignores NOT NULL constraints.
* Planning functions don't take into account average column width.
* Planning functions don't correctly take into account block skipping benefits.
* On 32-bit platforms, when the file size is outside the 32-bit signed range, the EXPLAIN command prints an incorrect file size.
* If two different columnar tables are configured to point to the same file, writes to the underlying file aren't protected from each other.
* When a data load is in progress, concurrent reads on the table overestimate the page count.
* We have a minor memory leak in CStoreEndWrite. We need to also free the comparisonFunctionArray.
* The block\_filtering test fails on Ubuntu because the "da\_DK" locale is not enabled by default.
* We don't yet incorporate the compression method's impact on disk I/O into cost estimates.
* CitusDB integration errors:
  * Concurrently staging cstore\_fdw tables doesn't work.
  * Setting a default value for a column with ALTER TABLE has limited support for existing rows.

[roadmap]: https://github.com/citusdata/cstore_fdw/wiki/Roadmap

@@ -0,0 +1,51 @@
syntax = "proto2";

package protobuf;

enum CompressionType {
    // Values should match the corresponding enum in cstore_fdw.h
    NONE = 0;
    PG_LZ = 1;
};

message ColumnBlockSkipNode {
    optional uint64 rowCount = 1;
    optional bytes minimumValue = 2;
    optional bytes maximumValue = 3;
    optional uint64 valueBlockOffset = 4;
    optional uint64 valueLength = 5;
    optional CompressionType valueCompressionType = 6;
    optional uint64 existsBlockOffset = 7;
    optional uint64 existsLength = 8;
}

message ColumnBlockSkipList {
    repeated ColumnBlockSkipNode blockSkipNodeArray = 1;
}

message StripeFooter {
    repeated uint64 skipListSizeArray = 1;
    repeated uint64 existsSizeArray = 2;
    repeated uint64 valueSizeArray = 3;
}

message StripeMetadata {
    optional uint64 fileOffset = 1;
    optional uint64 skipListLength = 2;
    optional uint64 dataLength = 3;
    optional uint64 footerLength = 4;
}

message TableFooter {
    repeated StripeMetadata stripeMetadataArray = 1;
    optional uint32 blockRowCount = 2;
}

message PostScript {
    optional uint64 tableFooterLength = 1;
    optional uint64 versionMajor = 2;
    optional uint64 versionMinor = 3;

    // Leave this last in the record
    optional string magicNumber = 8000;
}

@@ -0,0 +1,171 @@
/*-------------------------------------------------------------------------
 *
 * cstore_compression.c
 *
 * This file contains compression/decompression function definitions
 * used in cstore_fdw.
 *
 * Copyright (c) 2016, Citus Data, Inc.
 *
 * $Id$
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"
#include "cstore_fdw.h"

#if PG_VERSION_NUM >= 90500
#include "common/pg_lzcompress.h"
#else
#include "utils/pg_lzcompress.h"
#endif


#if PG_VERSION_NUM >= 90500
/*
 * The information at the start of the compressed data. This description is
 * taken from pg_lzcompress in pre-9.5 versions of PostgreSQL.
 */
typedef struct CStoreCompressHeader
{
	int32 vl_len_; /* varlena header (do not touch directly!) */
	int32 rawsize;
} CStoreCompressHeader;

/*
 * Utilities for manipulation of header information for compressed data
 */

#define CSTORE_COMPRESS_HDRSZ ((int32) sizeof(CStoreCompressHeader))
#define CSTORE_COMPRESS_RAWSIZE(ptr) (((CStoreCompressHeader *) (ptr))->rawsize)
#define CSTORE_COMPRESS_RAWDATA(ptr) (((char *) (ptr)) + CSTORE_COMPRESS_HDRSZ)
#define CSTORE_COMPRESS_SET_RAWSIZE(ptr, len) (((CStoreCompressHeader *) (ptr))->rawsize = (len))

#else

#define CSTORE_COMPRESS_HDRSZ (0)
#define CSTORE_COMPRESS_RAWSIZE(ptr) (PGLZ_RAW_SIZE((PGLZ_Header *) (ptr)))
#define CSTORE_COMPRESS_RAWDATA(ptr) (((PGLZ_Header *) (ptr)))
#define CSTORE_COMPRESS_SET_RAWSIZE(ptr, len) (((CStoreCompressHeader *) (ptr))->rawsize = (len))

#endif


/*
 * CompressBuffer compresses the given buffer with the given compression type.
 * outputBuffer is enlarged to contain the compressed data. The function
 * returns true if compression is done, and false if it is not. outputBuffer
 * is valid only if the function returns true.
 */
bool
CompressBuffer(StringInfo inputBuffer, StringInfo outputBuffer,
			   CompressionType compressionType)
{
	uint64 maximumLength = PGLZ_MAX_OUTPUT(inputBuffer->len) + CSTORE_COMPRESS_HDRSZ;
	bool compressionResult = false;
#if PG_VERSION_NUM >= 90500
	int32 compressedByteCount = 0;
#endif

	if (compressionType != COMPRESSION_PG_LZ)
	{
		return false;
	}

	resetStringInfo(outputBuffer);
	enlargeStringInfo(outputBuffer, maximumLength);

#if PG_VERSION_NUM >= 90500
	compressedByteCount = pglz_compress((const char *) inputBuffer->data,
										inputBuffer->len,
										CSTORE_COMPRESS_RAWDATA(outputBuffer->data),
										PGLZ_strategy_always);
	if (compressedByteCount >= 0)
	{
		CSTORE_COMPRESS_SET_RAWSIZE(outputBuffer->data, inputBuffer->len);
		SET_VARSIZE_COMPRESSED(outputBuffer->data,
							   compressedByteCount + CSTORE_COMPRESS_HDRSZ);
		compressionResult = true;
	}
#else
	compressionResult = pglz_compress(inputBuffer->data, inputBuffer->len,
									  CSTORE_COMPRESS_RAWDATA(outputBuffer->data),
									  PGLZ_strategy_always);
#endif

	if (compressionResult)
	{
		outputBuffer->len = VARSIZE(outputBuffer->data);
	}

	return compressionResult;
}


/*
 * DecompressBuffer decompresses the given buffer with the given compression
 * type. This function returns the buffer as-is when no compression is applied.
 */
StringInfo
DecompressBuffer(StringInfo buffer, CompressionType compressionType)
{
	StringInfo decompressedBuffer = NULL;

	Assert(compressionType == COMPRESSION_NONE || compressionType == COMPRESSION_PG_LZ);

	if (compressionType == COMPRESSION_NONE)
	{
		/* in case of no compression, return buffer as-is */
		decompressedBuffer = buffer;
	}
	else if (compressionType == COMPRESSION_PG_LZ)
	{
		uint32 compressedDataSize = VARSIZE(buffer->data) - CSTORE_COMPRESS_HDRSZ;
		uint32 decompressedDataSize = CSTORE_COMPRESS_RAWSIZE(buffer->data);
		char *decompressedData = NULL;
#if PG_VERSION_NUM >= 90500
		int32 decompressedByteCount = 0;
#endif

		if (compressedDataSize + CSTORE_COMPRESS_HDRSZ != buffer->len)
		{
			ereport(ERROR, (errmsg("cannot decompress the buffer"),
							errdetail("Expected %u bytes, but received %u bytes",
									  compressedDataSize, buffer->len)));
		}

		decompressedData = palloc0(decompressedDataSize);

#if PG_VERSION_NUM >= 90500

#if PG_VERSION_NUM >= 120000
		decompressedByteCount = pglz_decompress(CSTORE_COMPRESS_RAWDATA(buffer->data),
												compressedDataSize, decompressedData,
												decompressedDataSize, true);
#else
		decompressedByteCount = pglz_decompress(CSTORE_COMPRESS_RAWDATA(buffer->data),
												compressedDataSize, decompressedData,
												decompressedDataSize);
#endif

		if (decompressedByteCount < 0)
		{
			ereport(ERROR, (errmsg("cannot decompress the buffer"),
							errdetail("compressed data is corrupted")));
		}
#else
		pglz_decompress((PGLZ_Header *) buffer->data, decompressedData);
#endif

		decompressedBuffer = palloc0(sizeof(StringInfoData));
		decompressedBuffer->data = decompressedData;
		decompressedBuffer->len = decompressedDataSize;
		decompressedBuffer->maxlen = decompressedDataSize;
	}

	return decompressedBuffer;
}

@@ -0,0 +1,26 @@
/* cstore_fdw/cstore_fdw--1.0--1.1.sql */

-- complain if script is sourced in psql, rather than via ALTER EXTENSION UPDATE
\echo Use "ALTER EXTENSION cstore_fdw UPDATE TO '1.1'" to load this file. \quit

CREATE FUNCTION cstore_ddl_event_end_trigger()
RETURNS event_trigger
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;

CREATE EVENT TRIGGER cstore_ddl_event_end
ON ddl_command_end
EXECUTE PROCEDURE cstore_ddl_event_end_trigger();

CREATE FUNCTION cstore_table_size(relation regclass)
RETURNS bigint
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;

-- cstore_fdw creates directories to store files for tables with automatically
-- determined filename during the CREATE SERVER statement. Since this feature
-- was newly added in v1.1, servers created with v1.0 did not create them. So,
-- we create a server with v1.1 to ensure that the required directories are
-- created to allow users to create automatically managed tables with old servers.
CREATE SERVER cstore_server_for_updating_1_0_to_1_1 FOREIGN DATA WRAPPER cstore_fdw;
DROP SERVER cstore_server_for_updating_1_0_to_1_1;

@@ -0,0 +1,3 @@
/* cstore_fdw/cstore_fdw--1.1--1.2.sql */

-- No new functions or definitions were added in 1.2

@@ -0,0 +1,3 @@
/* cstore_fdw/cstore_fdw--1.2--1.3.sql */

-- No new functions or definitions were added in 1.3

@@ -0,0 +1,3 @@
/* cstore_fdw/cstore_fdw--1.3--1.4.sql */

-- No new functions or definitions were added in 1.4

@@ -0,0 +1,28 @@
/* cstore_fdw/cstore_fdw--1.4--1.5.sql */

CREATE FUNCTION cstore_clean_table_resources(oid)
RETURNS void
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;

CREATE OR REPLACE FUNCTION cstore_drop_trigger()
    RETURNS event_trigger
    LANGUAGE plpgsql
    AS $csdt$
DECLARE v_obj record;
BEGIN
    FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects() LOOP

        IF v_obj.object_type NOT IN ('table', 'foreign table') THEN
            CONTINUE;
        END IF;

        PERFORM cstore_clean_table_resources(v_obj.objid);

    END LOOP;
END;
$csdt$;

CREATE EVENT TRIGGER cstore_drop_event
    ON SQL_DROP
    EXECUTE PROCEDURE cstore_drop_trigger();

@@ -0,0 +1,19 @@
/* cstore_fdw/cstore_fdw--1.5--1.6.sql */

CREATE OR REPLACE FUNCTION cstore_drop_trigger()
    RETURNS event_trigger
    LANGUAGE plpgsql
    AS $csdt$
DECLARE v_obj record;
BEGIN
    FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects() LOOP

        IF v_obj.object_type NOT IN ('table', 'foreign table') THEN
            CONTINUE;
        END IF;

        PERFORM public.cstore_clean_table_resources(v_obj.objid);

    END LOOP;
END;
$csdt$;

@@ -0,0 +1,3 @@
/* cstore_fdw/cstore_fdw--1.6--1.7.sql */

-- No new functions or definitions were added in 1.7

@@ -0,0 +1,60 @@
/* cstore_fdw/cstore_fdw--1.7.sql */

-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION cstore_fdw" to load this file. \quit

CREATE FUNCTION cstore_fdw_handler()
RETURNS fdw_handler
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;

CREATE FUNCTION cstore_fdw_validator(text[], oid)
RETURNS void
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;

CREATE FOREIGN DATA WRAPPER cstore_fdw
HANDLER cstore_fdw_handler
VALIDATOR cstore_fdw_validator;

CREATE FUNCTION cstore_ddl_event_end_trigger()
RETURNS event_trigger
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;

CREATE EVENT TRIGGER cstore_ddl_event_end
ON ddl_command_end
EXECUTE PROCEDURE cstore_ddl_event_end_trigger();

CREATE FUNCTION cstore_table_size(relation regclass)
RETURNS bigint
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;

CREATE OR REPLACE FUNCTION cstore_clean_table_resources(oid)
RETURNS void
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;

CREATE OR REPLACE FUNCTION cstore_drop_trigger()
    RETURNS event_trigger
    LANGUAGE plpgsql
    AS $csdt$
DECLARE v_obj record;
BEGIN
    FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects() LOOP

        IF v_obj.object_type NOT IN ('table', 'foreign table') THEN
            CONTINUE;
        END IF;

        PERFORM public.cstore_clean_table_resources(v_obj.objid);

    END LOOP;
END;
$csdt$;

CREATE EVENT TRIGGER cstore_drop_event
    ON SQL_DROP
    EXECUTE PROCEDURE cstore_drop_trigger();

(File diff suppressed because it is too large.)

@@ -0,0 +1,5 @@
# cstore_fdw extension
comment = 'foreign-data wrapper for flat cstore access'
default_version = '1.7'
module_pathname = '$libdir/cstore_fdw'
relocatable = true
@ -0,0 +1,353 @@
|
|||
/*-------------------------------------------------------------------------
|
||||
*
|
||||
* cstore_fdw.h
|
||||
*
|
||||
* Type and function declarations for CStore foreign data wrapper.
|
||||
*
|
||||
* Copyright (c) 2016, Citus Data, Inc.
|
||||
*
|
||||
* $Id$
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
#ifndef CSTORE_FDW_H
|
||||
#define CSTORE_FDW_H
|
||||
|
||||
#include "access/tupdesc.h"
|
||||
#include "fmgr.h"
|
||||
#include "catalog/pg_am.h"
|
||||
#include "catalog/pg_foreign_server.h"
|
||||
#include "catalog/pg_foreign_table.h"
|
||||
#include "lib/stringinfo.h"
|
||||
#include "utils/rel.h"
|
||||
|
||||
|
||||
/* Defines for valid option names */
|
||||
#define OPTION_NAME_FILENAME "filename"
|
||||
#define OPTION_NAME_COMPRESSION_TYPE "compression"
|
||||
#define OPTION_NAME_STRIPE_ROW_COUNT "stripe_row_count"
|
||||
#define OPTION_NAME_BLOCK_ROW_COUNT "block_row_count"
|
||||
|
||||
/* Default values for option parameters */
|
||||
#define DEFAULT_COMPRESSION_TYPE COMPRESSION_NONE
|
||||
#define DEFAULT_STRIPE_ROW_COUNT 150000
|
||||
#define DEFAULT_BLOCK_ROW_COUNT 10000
|
||||
|
||||
/* Limits for option parameters */
|
||||
#define STRIPE_ROW_COUNT_MINIMUM 1000
|
||||
#define STRIPE_ROW_COUNT_MAXIMUM 10000000
|
||||
#define BLOCK_ROW_COUNT_MINIMUM 1000
|
||||
#define BLOCK_ROW_COUNT_MAXIMUM 100000
|
||||
|
||||
/* String representations of compression types */
|
||||
#define COMPRESSION_STRING_NONE "none"
|
||||
#define COMPRESSION_STRING_PG_LZ "pglz"
|
||||
#define COMPRESSION_STRING_DELIMITED_LIST "none, pglz"
|
||||
|
||||
/* CStore file signature */
|
||||
#define CSTORE_MAGIC_NUMBER "citus_cstore"
|
||||
#define CSTORE_VERSION_MAJOR 1
|
||||
#define CSTORE_VERSION_MINOR 7
|
||||
|
||||
/* miscellaneous defines */
|
||||
#define CSTORE_FDW_NAME "cstore_fdw"
|
||||
#define CSTORE_FOOTER_FILE_SUFFIX ".footer"
|
||||
#define CSTORE_TEMP_FILE_SUFFIX ".tmp"
|
||||
#define CSTORE_TUPLE_COST_MULTIPLIER 10
|
||||
#define CSTORE_POSTSCRIPT_SIZE_LENGTH 1
|
||||
#define CSTORE_POSTSCRIPT_SIZE_MAX 256
|
||||
|
||||
/* table containing information about how to partition distributed tables */
|
||||
#define CITUS_EXTENSION_NAME "citus"
|
||||
#define CITUS_PARTITION_TABLE_NAME "pg_dist_partition"
|
||||
|
||||
/* human-readable names for addressing columns of the pg_dist_partition table */
|
||||
#define ATTR_NUM_PARTITION_RELATION_ID 1
|
||||
#define ATTR_NUM_PARTITION_TYPE 2
|
||||
#define ATTR_NUM_PARTITION_KEY 3
|
||||
|
||||
|
||||
/*
|
||||
* CStoreValidOption keeps an option name and a context. When an option is passed
|
||||
* into cstore_fdw objects (server and foreign table), we compare this option's
|
||||
* name and context against those of valid options.
|
||||
*/
|
||||
typedef struct CStoreValidOption
|
||||
{
|
||||
const char *optionName;
|
||||
Oid optionContextId;
|
||||
|
||||
} CStoreValidOption;
|
||||
|
||||
|
||||
/* Array of options that are valid for cstore_fdw */
|
||||
static const uint32 ValidOptionCount = 4;
|
||||
static const CStoreValidOption ValidOptionArray[] =
|
||||
{
|
||||
/* foreign table options */
|
||||
{ OPTION_NAME_FILENAME, ForeignTableRelationId },
|
||||
{ OPTION_NAME_COMPRESSION_TYPE, ForeignTableRelationId },
|
||||
{ OPTION_NAME_STRIPE_ROW_COUNT, ForeignTableRelationId },
|
||||
{ OPTION_NAME_BLOCK_ROW_COUNT, ForeignTableRelationId }
|
||||
};
|
||||
|
||||
|
||||
/* Enumaration for cstore file's compression method */
|
||||
typedef enum
|
||||
{
|
||||
COMPRESSION_TYPE_INVALID = -1,
|
||||
COMPRESSION_NONE = 0,
|
||||
COMPRESSION_PG_LZ = 1,
|
||||
|
||||
COMPRESSION_COUNT
|
||||
|
||||
} CompressionType;
|
||||
|
||||
|
||||
/*
|
||||
* CStoreFdwOptions holds the option values to be used when reading or writing
|
||||
* a cstore file. To resolve these values, we first check foreign table's options,
|
||||
* and if not present, we then fall back to the default values specified above.
|
||||
*/
|
||||
typedef struct CStoreFdwOptions
|
||||
{
|
||||
char *filename;
|
||||
CompressionType compressionType;
|
||||
uint64 stripeRowCount;
|
||||
uint32 blockRowCount;
|
||||
|
||||
} CStoreFdwOptions;
|
||||
|
||||
|
||||
/*
|
||||
* StripeMetadata represents information about a stripe. This information is
|
||||
* stored in the cstore file's footer.
|
||||
*/
|
||||
typedef struct StripeMetadata
|
||||
{
|
||||
uint64 fileOffset;
|
||||
uint64 skipListLength;
|
||||
uint64 dataLength;
|
||||
uint64 footerLength;
|
||||
|
||||
} StripeMetadata;
|
||||
|
||||
|
||||
/* TableFooter represents the footer of a cstore file. */
|
||||
typedef struct TableFooter
|
||||
{
|
||||
List *stripeMetadataList;
|
||||
uint64 blockRowCount;
|
||||
|
||||
} TableFooter;
|
||||
|
||||
|
||||
/* ColumnBlockSkipNode contains statistics for a ColumnBlockData. */
|
||||
typedef struct ColumnBlockSkipNode
|
||||
{
|
||||
/* statistics about values of a column block */
|
||||
bool hasMinMax;
|
||||
Datum minimumValue;
|
||||
Datum maximumValue;
|
||||
uint64 rowCount;
|
||||
|
||||
/*
|
||||
* Offsets and sizes of value and exists streams in the column data.
|
||||
* These enable us to skip reading suppressed row blocks, and start reading
|
||||
* a block without reading previous blocks.
|
||||
*/
|
||||
uint64 valueBlockOffset;
|
||||
uint64 valueLength;
|
||||
uint64 existsBlockOffset;
|
||||
uint64 existsLength;
|
||||
|
||||
CompressionType valueCompressionType;
|
||||
|
||||
} ColumnBlockSkipNode;
|
||||
|
||||
|
||||
/*
|
||||
* StripeSkipList can be used for skipping row blocks. It contains a column block
|
||||
* skip node for each block of each column. blockSkipNodeArray[column][block]
|
||||
* is the entry for the specified column block.
|
||||
*/
|
||||
typedef struct StripeSkipList
|
||||
{
|
||||
ColumnBlockSkipNode **blockSkipNodeArray;
|
||||
uint32 columnCount;
|
||||
uint32 blockCount;
|
||||
|
||||
} StripeSkipList;
|
||||
|
||||
|
||||
/*
|
||||
* ColumnBlockData represents a block of data in a column. valueArray stores
|
||||
* the values of data, and existsArray stores whether a value is present.
|
||||
* valueBuffer is used to store (uncompressed) serialized values
|
||||
* referenced by Datum's in valueArray. It is only used for by-reference Datum's.
|
||||
* There is a one-to-one correspondence between valueArray and existsArray.
|
||||
*/
|
||||
typedef struct ColumnBlockData
|
||||
{
|
||||
bool *existsArray;
|
||||
Datum *valueArray;
|
||||
|
||||
/* valueBuffer keeps actual data for type-by-reference datums from valueArray. */
|
||||
StringInfo valueBuffer;
|
||||
|
||||
} ColumnBlockData;
|
||||
|
||||
|
||||
/*
|
||||
* ColumnBlockBuffers represents a block of serialized data in a column.
|
||||
* valueBuffer stores the serialized values of data, and existsBuffer stores
|
||||
* serialized value of presence information. valueCompressionType contains
|
||||
* compression type if valueBuffer is compressed. Finally rowCount has
|
||||
* the number of rows in this block.
|
||||
*/
|
||||
typedef struct ColumnBlockBuffers
|
||||
{
|
||||
StringInfo existsBuffer;
|
||||
StringInfo valueBuffer;
|
||||
CompressionType valueCompressionType;
|
||||
|
||||
} ColumnBlockBuffers;
|
||||
|
||||
|
||||
/*
|
||||
* ColumnBuffers represents data buffers for a column in a row stripe. Each
|
||||
* column is made of multiple column blocks.
|
||||
*/
|
||||
typedef struct ColumnBuffers
|
||||
{
|
||||
ColumnBlockBuffers **blockBuffersArray;
|
||||
|
||||
} ColumnBuffers;
|
||||
|
||||
|
||||
/* StripeBuffers represents data for a row stripe in a cstore file. */
|
||||
typedef struct StripeBuffers
|
||||
{
|
||||
uint32 columnCount;
|
||||
uint32 rowCount;
|
||||
ColumnBuffers **columnBuffersArray;
|
||||
|
||||
} StripeBuffers;
|
||||
|
||||
|
||||
/*
|
||||
* StripeFooter represents a stripe's footer. In this footer, we keep three
|
||||
* arrays of sizes. The number of elements in each of the arrays is equal
|
||||
* to the number of columns.
|
||||
*/
|
||||
typedef struct StripeFooter
|
||||
{
|
||||
uint32 columnCount;
|
||||
uint64 *skipListSizeArray;
|
||||
uint64 *existsSizeArray;
|
||||
uint64 *valueSizeArray;
|
||||
|
||||
} StripeFooter;
|
||||
|
||||
|
||||
/* TableReadState represents state of a cstore file read operation. */
|
||||
typedef struct TableReadState
|
||||
{
|
||||
FILE *tableFile;
|
||||
TableFooter *tableFooter;
|
||||
TupleDesc tupleDescriptor;
|
||||
|
||||
/*
|
||||
* List of Var pointers for columns in the query. We use this both for
|
||||
* getting vector of projected columns, and also when we want to build
|
||||
* base constraint to find selected row blocks.
|
||||
*/
|
||||
List *projectedColumnList;
|
||||
|
||||
List *whereClauseList;
|
||||
MemoryContext stripeReadContext;
|
||||
StripeBuffers *stripeBuffers;
|
||||
uint32 readStripeCount;
|
||||
uint64 stripeReadRowCount;
|
||||
ColumnBlockData **blockDataArray;
|
||||
int32 deserializedBlockIndex;
|
||||
|
||||
} TableReadState;
|
||||
|
||||
|
||||
/* TableWriteState represents state of a cstore file write operation. */
|
||||
typedef struct TableWriteState
|
||||
{
|
||||
FILE *tableFile;
|
||||
TableFooter *tableFooter;
|
||||
StringInfo tableFooterFilename;
|
||||
CompressionType compressionType;
|
||||
TupleDesc tupleDescriptor;
|
||||
FmgrInfo **comparisonFunctionArray;
|
||||
uint64 currentFileOffset;
|
||||
Relation relation;
|
||||
|
||||
MemoryContext stripeWriteContext;
|
||||
StripeBuffers *stripeBuffers;
|
||||
StripeSkipList *stripeSkipList;
|
||||
uint32 stripeMaxRowCount;
|
||||
ColumnBlockData **blockDataArray;
|
||||
/*
|
||||
	 * compressionBuffer is used as temporary storage during the
|
||||
* data value compression operation. It is kept here to minimize
|
||||
* memory allocations. It lives in stripeWriteContext and gets
|
||||
	 * deallocated when the memory context is reset.
|
||||
*/
|
||||
StringInfo compressionBuffer;
|
||||
|
||||
} TableWriteState;
|
||||
|
||||
/* Function declarations for extension loading and unloading */
|
||||
extern void _PG_init(void);
|
||||
extern void _PG_fini(void);
|
||||
|
||||
/* event trigger function declarations */
|
||||
extern Datum cstore_ddl_event_end_trigger(PG_FUNCTION_ARGS);
|
||||
|
||||
/* Function declarations for utility UDFs */
|
||||
extern Datum cstore_table_size(PG_FUNCTION_ARGS);
|
||||
extern Datum cstore_clean_table_resources(PG_FUNCTION_ARGS);
|
||||
|
||||
/* Function declarations for foreign data wrapper */
|
||||
extern Datum cstore_fdw_handler(PG_FUNCTION_ARGS);
|
||||
extern Datum cstore_fdw_validator(PG_FUNCTION_ARGS);
|
||||
|
||||
/* Function declarations for writing to a cstore file */
|
||||
extern TableWriteState * CStoreBeginWrite(const char *filename,
|
||||
CompressionType compressionType,
|
||||
uint64 stripeMaxRowCount,
|
||||
uint32 blockRowCount,
|
||||
TupleDesc tupleDescriptor);
|
||||
extern void CStoreWriteRow(TableWriteState *state, Datum *columnValues,
|
||||
bool *columnNulls);
|
||||
extern void CStoreEndWrite(TableWriteState * state);
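The write API is used in a begin/write/end sequence. A minimal usage sketch (the file name, compression choice, and the 150000/10000 stripe and block row counts are placeholder values; COMPRESSION_NONE is assumed to be the uncompressed member of CompressionType):

/* Illustrative sketch of the write call sequence (values are placeholders). */
static void
WriteExampleRows(TupleDesc tupleDescriptor, Datum *columnValues,
				 bool *columnNulls, uint32 rowCount)
{
	uint32 rowIndex = 0;
	TableWriteState *writeState =
		CStoreBeginWrite("/tmp/example.cstore", COMPRESSION_NONE,
						 150000, 10000, tupleDescriptor);

	for (rowIndex = 0; rowIndex < rowCount; rowIndex++)
	{
		CStoreWriteRow(writeState, columnValues, columnNulls);
	}

	CStoreEndWrite(writeState);
}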
|
||||
|
||||
/* Function declarations for reading from a cstore file */
|
||||
extern TableReadState * CStoreBeginRead(const char *filename, TupleDesc tupleDescriptor,
|
||||
List *projectedColumnList, List *qualConditions);
|
||||
extern TableFooter * CStoreReadFooter(StringInfo tableFooterFilename);
|
||||
extern bool CStoreReadFinished(TableReadState *state);
|
||||
extern bool CStoreReadNextRow(TableReadState *state, Datum *columnValues,
|
||||
bool *columnNulls);
|
||||
extern void CStoreEndRead(TableReadState *state);
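The read API mirrors the write API. A minimal usage sketch (the file name is a placeholder; it assumes CStoreReadNextRow returns false once the file is exhausted, and passes NIL for the qual condition list):

/* Illustrative sketch of the read call sequence. */
static uint64
ReadAllRows(TupleDesc tupleDescriptor, List *projectedColumnList,
			Datum *columnValues, bool *columnNulls)
{
	uint64 rowCount = 0;
	TableReadState *readState =
		CStoreBeginRead("/tmp/example.cstore", tupleDescriptor,
						projectedColumnList, NIL);

	while (CStoreReadNextRow(readState, columnValues, columnNulls))
	{
		rowCount++;
	}

	CStoreEndRead(readState);

	return rowCount;
}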
|
||||
|
||||
/* Function declarations for common functions */
|
||||
extern FmgrInfo * GetFunctionInfoOrNull(Oid typeId, Oid accessMethodId,
|
||||
int16 procedureId);
|
||||
extern ColumnBlockData ** CreateEmptyBlockDataArray(uint32 columnCount, bool *columnMask,
|
||||
uint32 blockRowCount);
|
||||
extern void FreeColumnBlockDataArray(ColumnBlockData **blockDataArray,
|
||||
uint32 columnCount);
|
||||
extern uint64 CStoreTableRowCount(const char *filename);
|
||||
extern bool CompressBuffer(StringInfo inputBuffer, StringInfo outputBuffer,
|
||||
CompressionType compressionType);
|
||||
extern StringInfo DecompressBuffer(StringInfo buffer, CompressionType compressionType);
|
||||
|
||||
|
||||
#endif /* CSTORE_FDW_H */
|
|
@ -0,0 +1,581 @@
|
|||
/*-------------------------------------------------------------------------
|
||||
*
|
||||
* cstore_metadata_serialization.c
|
||||
*
|
||||
* This file contains function definitions for serializing/deserializing cstore
|
||||
* metadata.
|
||||
*
|
||||
* Copyright (c) 2016, Citus Data, Inc.
|
||||
*
|
||||
* $Id$
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
|
||||
#include "postgres.h"
|
||||
#include "cstore_fdw.h"
|
||||
#include "cstore_metadata_serialization.h"
|
||||
#include "cstore.pb-c.h"
|
||||
#include "access/tupmacs.h"
|
||||
|
||||
|
||||
/* local functions forward declarations */
|
||||
static ProtobufCBinaryData DatumToProtobufBinary(Datum datum, bool typeByValue,
|
||||
int typeLength);
|
||||
static Datum ProtobufBinaryToDatum(ProtobufCBinaryData protobufBinary,
|
||||
bool typeByValue, int typeLength);
|
||||
|
||||
|
||||
/*
|
||||
* SerializePostScript serializes the given postscript and returns the result as
|
||||
* a StringInfo.
|
||||
*/
|
||||
StringInfo
|
||||
SerializePostScript(uint64 tableFooterLength)
|
||||
{
|
||||
StringInfo postscriptBuffer = NULL;
|
||||
Protobuf__PostScript protobufPostScript = PROTOBUF__POST_SCRIPT__INIT;
|
||||
uint8 *postscriptData = NULL;
|
||||
uint32 postscriptSize = 0;
|
||||
|
||||
protobufPostScript.has_tablefooterlength = true;
|
||||
protobufPostScript.tablefooterlength = tableFooterLength;
|
||||
protobufPostScript.has_versionmajor = true;
|
||||
protobufPostScript.versionmajor = CSTORE_VERSION_MAJOR;
|
||||
protobufPostScript.has_versionminor = true;
|
||||
protobufPostScript.versionminor = CSTORE_VERSION_MINOR;
|
||||
protobufPostScript.magicnumber = pstrdup(CSTORE_MAGIC_NUMBER);
|
||||
|
||||
postscriptSize = protobuf__post_script__get_packed_size(&protobufPostScript);
|
||||
postscriptData = palloc0(postscriptSize);
|
||||
protobuf__post_script__pack(&protobufPostScript, postscriptData);
|
||||
|
||||
postscriptBuffer = palloc0(sizeof(StringInfoData));
|
||||
postscriptBuffer->len = postscriptSize;
|
||||
postscriptBuffer->maxlen = postscriptSize;
|
||||
postscriptBuffer->data = (char *) postscriptData;
|
||||
|
||||
return postscriptBuffer;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* SerializeTableFooter serializes the given table footer and returns the result
|
||||
* as a StringInfo.
|
||||
*/
|
||||
StringInfo
|
||||
SerializeTableFooter(TableFooter *tableFooter)
|
||||
{
|
||||
StringInfo tableFooterBuffer = NULL;
|
||||
Protobuf__TableFooter protobufTableFooter = PROTOBUF__TABLE_FOOTER__INIT;
|
||||
Protobuf__StripeMetadata **stripeMetadataArray = NULL;
|
||||
ListCell *stripeMetadataCell = NULL;
|
||||
uint8 *tableFooterData = NULL;
|
||||
uint32 tableFooterSize = 0;
|
||||
uint32 stripeIndex = 0;
|
||||
|
||||
List *stripeMetadataList = tableFooter->stripeMetadataList;
|
||||
uint32 stripeCount = list_length(stripeMetadataList);
|
||||
stripeMetadataArray = palloc0(stripeCount * sizeof(Protobuf__StripeMetadata *));
|
||||
|
||||
foreach(stripeMetadataCell, stripeMetadataList)
|
||||
{
|
||||
StripeMetadata *stripeMetadata = lfirst(stripeMetadataCell);
|
||||
|
||||
Protobuf__StripeMetadata *protobufStripeMetadata = NULL;
|
||||
protobufStripeMetadata = palloc0(sizeof(Protobuf__StripeMetadata));
|
||||
protobuf__stripe_metadata__init(protobufStripeMetadata);
|
||||
protobufStripeMetadata->has_fileoffset = true;
|
||||
protobufStripeMetadata->fileoffset = stripeMetadata->fileOffset;
|
||||
protobufStripeMetadata->has_skiplistlength = true;
|
||||
protobufStripeMetadata->skiplistlength = stripeMetadata->skipListLength;
|
||||
protobufStripeMetadata->has_datalength = true;
|
||||
protobufStripeMetadata->datalength = stripeMetadata->dataLength;
|
||||
protobufStripeMetadata->has_footerlength = true;
|
||||
protobufStripeMetadata->footerlength = stripeMetadata->footerLength;
|
||||
|
||||
stripeMetadataArray[stripeIndex] = protobufStripeMetadata;
|
||||
stripeIndex++;
|
||||
}
|
||||
|
||||
protobufTableFooter.n_stripemetadataarray = stripeCount;
|
||||
protobufTableFooter.stripemetadataarray = stripeMetadataArray;
|
||||
protobufTableFooter.has_blockrowcount = true;
|
||||
protobufTableFooter.blockrowcount = tableFooter->blockRowCount;
|
||||
|
||||
tableFooterSize = protobuf__table_footer__get_packed_size(&protobufTableFooter);
|
||||
tableFooterData = palloc0(tableFooterSize);
|
||||
protobuf__table_footer__pack(&protobufTableFooter, tableFooterData);
|
||||
|
||||
tableFooterBuffer = palloc0(sizeof(StringInfoData));
|
||||
tableFooterBuffer->len = tableFooterSize;
|
||||
tableFooterBuffer->maxlen = tableFooterSize;
|
||||
tableFooterBuffer->data = (char *) tableFooterData;
|
||||
|
||||
return tableFooterBuffer;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* SerializeStripeFooter serializes given stripe footer and returns the result
|
||||
* as a StringInfo.
|
||||
*/
|
||||
StringInfo
|
||||
SerializeStripeFooter(StripeFooter *stripeFooter)
|
||||
{
|
||||
StringInfo stripeFooterBuffer = NULL;
|
||||
Protobuf__StripeFooter protobufStripeFooter = PROTOBUF__STRIPE_FOOTER__INIT;
|
||||
uint8 *stripeFooterData = NULL;
|
||||
uint32 stripeFooterSize = 0;
|
||||
|
||||
protobufStripeFooter.n_skiplistsizearray = stripeFooter->columnCount;
|
||||
protobufStripeFooter.skiplistsizearray = (uint64_t *) stripeFooter->skipListSizeArray;
|
||||
protobufStripeFooter.n_existssizearray = stripeFooter->columnCount;
|
||||
protobufStripeFooter.existssizearray = (uint64_t *) stripeFooter->existsSizeArray;
|
||||
protobufStripeFooter.n_valuesizearray = stripeFooter->columnCount;
|
||||
protobufStripeFooter.valuesizearray = (uint64_t *) stripeFooter->valueSizeArray;
|
||||
|
||||
stripeFooterSize = protobuf__stripe_footer__get_packed_size(&protobufStripeFooter);
|
||||
stripeFooterData = palloc0(stripeFooterSize);
|
||||
protobuf__stripe_footer__pack(&protobufStripeFooter, stripeFooterData);
|
||||
|
||||
stripeFooterBuffer = palloc0(sizeof(StringInfoData));
|
||||
stripeFooterBuffer->len = stripeFooterSize;
|
||||
stripeFooterBuffer->maxlen = stripeFooterSize;
|
||||
stripeFooterBuffer->data = (char *) stripeFooterData;
|
||||
|
||||
return stripeFooterBuffer;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
 * SerializeColumnSkipList serializes a column skip list, where the column skip
|
||||
* list includes all block skip nodes for that column. The function then returns
|
||||
* the result as a string info.
|
||||
*/
|
||||
StringInfo
|
||||
SerializeColumnSkipList(ColumnBlockSkipNode *blockSkipNodeArray, uint32 blockCount,
|
||||
bool typeByValue, int typeLength)
|
||||
{
|
||||
StringInfo blockSkipListBuffer = NULL;
|
||||
Protobuf__ColumnBlockSkipList protobufBlockSkipList =
|
||||
PROTOBUF__COLUMN_BLOCK_SKIP_LIST__INIT;
|
||||
Protobuf__ColumnBlockSkipNode **protobufBlockSkipNodeArray = NULL;
|
||||
uint32 blockIndex = 0;
|
||||
uint8 *blockSkipListData = NULL;
|
||||
uint32 blockSkipListSize = 0;
|
||||
|
||||
protobufBlockSkipNodeArray = palloc0(blockCount *
|
||||
sizeof(Protobuf__ColumnBlockSkipNode *));
|
||||
for (blockIndex = 0; blockIndex < blockCount; blockIndex++)
|
||||
{
|
||||
ColumnBlockSkipNode blockSkipNode = blockSkipNodeArray[blockIndex];
|
||||
Protobuf__ColumnBlockSkipNode *protobufBlockSkipNode = NULL;
|
||||
ProtobufCBinaryData binaryMinimumValue = {0, 0};
|
||||
ProtobufCBinaryData binaryMaximumValue = {0, 0};
|
||||
|
||||
if (blockSkipNode.hasMinMax)
|
||||
{
|
||||
binaryMinimumValue = DatumToProtobufBinary(blockSkipNode.minimumValue,
|
||||
typeByValue, typeLength);
|
||||
binaryMaximumValue = DatumToProtobufBinary(blockSkipNode.maximumValue,
|
||||
typeByValue, typeLength);
|
||||
}
|
||||
|
||||
protobufBlockSkipNode = palloc0(sizeof(Protobuf__ColumnBlockSkipNode));
|
||||
protobuf__column_block_skip_node__init(protobufBlockSkipNode);
|
||||
protobufBlockSkipNode->has_rowcount = true;
|
||||
protobufBlockSkipNode->rowcount = blockSkipNode.rowCount;
|
||||
protobufBlockSkipNode->has_minimumvalue = blockSkipNode.hasMinMax;
|
||||
protobufBlockSkipNode->minimumvalue = binaryMinimumValue;
|
||||
protobufBlockSkipNode->has_maximumvalue = blockSkipNode.hasMinMax;
|
||||
protobufBlockSkipNode->maximumvalue = binaryMaximumValue;
|
||||
protobufBlockSkipNode->has_valueblockoffset = true;
|
||||
protobufBlockSkipNode->valueblockoffset = blockSkipNode.valueBlockOffset;
|
||||
protobufBlockSkipNode->has_valuelength = true;
|
||||
protobufBlockSkipNode->valuelength = blockSkipNode.valueLength;
|
||||
protobufBlockSkipNode->has_existsblockoffset = true;
|
||||
protobufBlockSkipNode->existsblockoffset = blockSkipNode.existsBlockOffset;
|
||||
protobufBlockSkipNode->has_existslength = true;
|
||||
protobufBlockSkipNode->existslength = blockSkipNode.existsLength;
|
||||
protobufBlockSkipNode->has_valuecompressiontype = true;
|
||||
protobufBlockSkipNode->valuecompressiontype =
|
||||
(Protobuf__CompressionType) blockSkipNode.valueCompressionType;
|
||||
|
||||
protobufBlockSkipNodeArray[blockIndex] = protobufBlockSkipNode;
|
||||
}
|
||||
|
||||
protobufBlockSkipList.n_blockskipnodearray = blockCount;
|
||||
protobufBlockSkipList.blockskipnodearray = protobufBlockSkipNodeArray;
|
||||
|
||||
blockSkipListSize =
|
||||
protobuf__column_block_skip_list__get_packed_size(&protobufBlockSkipList);
|
||||
blockSkipListData = palloc0(blockSkipListSize);
|
||||
protobuf__column_block_skip_list__pack(&protobufBlockSkipList, blockSkipListData);
|
||||
|
||||
blockSkipListBuffer = palloc0(sizeof(StringInfoData));
|
||||
blockSkipListBuffer->len = blockSkipListSize;
|
||||
blockSkipListBuffer->maxlen = blockSkipListSize;
|
||||
blockSkipListBuffer->data = (char *) blockSkipListData;
|
||||
|
||||
return blockSkipListBuffer;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* DeserializePostScript deserializes the given postscript buffer and returns
|
||||
 * the size of the table footer through the tableFooterLength pointer.
|
||||
*/
|
||||
void
|
||||
DeserializePostScript(StringInfo buffer, uint64 *tableFooterLength)
|
||||
{
|
||||
Protobuf__PostScript *protobufPostScript = NULL;
|
||||
protobufPostScript = protobuf__post_script__unpack(NULL, buffer->len,
|
||||
(uint8 *) buffer->data);
|
||||
if (protobufPostScript == NULL)
|
||||
{
|
||||
ereport(ERROR, (errmsg("could not unpack column store"),
|
||||
errdetail("invalid postscript buffer")));
|
||||
}
|
||||
|
||||
if (protobufPostScript->versionmajor != CSTORE_VERSION_MAJOR ||
|
||||
protobufPostScript->versionminor > CSTORE_VERSION_MINOR)
|
||||
{
|
||||
ereport(ERROR, (errmsg("could not unpack column store"),
|
||||
errdetail("invalid column store version number")));
|
||||
}
|
||||
else if (strncmp(protobufPostScript->magicnumber, CSTORE_MAGIC_NUMBER,
|
||||
NAMEDATALEN) != 0)
|
||||
{
|
||||
ereport(ERROR, (errmsg("could not unpack column store"),
|
||||
errdetail("invalid magic number")));
|
||||
}
|
||||
|
||||
(*tableFooterLength) = protobufPostScript->tablefooterlength;
|
||||
|
||||
protobuf__post_script__free_unpacked(protobufPostScript, NULL);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* DeserializeTableFooter deserializes the given buffer and returns the result as
|
||||
* a TableFooter struct.
|
||||
*/
|
||||
TableFooter *
|
||||
DeserializeTableFooter(StringInfo buffer)
|
||||
{
|
||||
TableFooter *tableFooter = NULL;
|
||||
Protobuf__TableFooter *protobufTableFooter = NULL;
|
||||
List *stripeMetadataList = NIL;
|
||||
uint64 blockRowCount = 0;
|
||||
uint32 stripeCount = 0;
|
||||
uint32 stripeIndex = 0;
|
||||
|
||||
protobufTableFooter = protobuf__table_footer__unpack(NULL, buffer->len,
|
||||
(uint8 *) buffer->data);
|
||||
if (protobufTableFooter == NULL)
|
||||
{
|
||||
ereport(ERROR, (errmsg("could not unpack column store"),
|
||||
errdetail("invalid table footer buffer")));
|
||||
}
|
||||
|
||||
if (!protobufTableFooter->has_blockrowcount)
|
||||
{
|
||||
ereport(ERROR, (errmsg("could not unpack column store"),
|
||||
errdetail("missing required table footer metadata fields")));
|
||||
}
|
||||
else if (protobufTableFooter->blockrowcount < BLOCK_ROW_COUNT_MINIMUM ||
|
||||
protobufTableFooter->blockrowcount > BLOCK_ROW_COUNT_MAXIMUM)
|
||||
{
|
||||
ereport(ERROR, (errmsg("could not unpack column store"),
|
||||
errdetail("invalid block row count")));
|
||||
}
|
||||
blockRowCount = protobufTableFooter->blockrowcount;
|
||||
|
||||
stripeCount = protobufTableFooter->n_stripemetadataarray;
|
||||
for (stripeIndex = 0; stripeIndex < stripeCount; stripeIndex++)
|
||||
{
|
||||
StripeMetadata *stripeMetadata = NULL;
|
||||
Protobuf__StripeMetadata *protobufStripeMetadata = NULL;
|
||||
|
||||
protobufStripeMetadata = protobufTableFooter->stripemetadataarray[stripeIndex];
|
||||
if (!protobufStripeMetadata->has_fileoffset ||
|
||||
!protobufStripeMetadata->has_skiplistlength ||
|
||||
!protobufStripeMetadata->has_datalength ||
|
||||
!protobufStripeMetadata->has_footerlength)
|
||||
{
|
||||
ereport(ERROR, (errmsg("could not unpack column store"),
|
||||
errdetail("missing required stripe metadata fields")));
|
||||
}
|
||||
|
||||
stripeMetadata = palloc0(sizeof(StripeMetadata));
|
||||
stripeMetadata->fileOffset = protobufStripeMetadata->fileoffset;
|
||||
stripeMetadata->skipListLength = protobufStripeMetadata->skiplistlength;
|
||||
stripeMetadata->dataLength = protobufStripeMetadata->datalength;
|
||||
stripeMetadata->footerLength = protobufStripeMetadata->footerlength;
|
||||
|
||||
stripeMetadataList = lappend(stripeMetadataList, stripeMetadata);
|
||||
}
|
||||
|
||||
protobuf__table_footer__free_unpacked(protobufTableFooter, NULL);
|
||||
|
||||
tableFooter = palloc0(sizeof(TableFooter));
|
||||
tableFooter->stripeMetadataList = stripeMetadataList;
|
||||
tableFooter->blockRowCount = blockRowCount;
|
||||
|
||||
return tableFooter;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* DeserializeStripeFooter deserializes the given buffer and returns the result
|
||||
* as a StripeFooter struct.
|
||||
*/
|
||||
StripeFooter *
|
||||
DeserializeStripeFooter(StringInfo buffer)
|
||||
{
|
||||
StripeFooter *stripeFooter = NULL;
|
||||
Protobuf__StripeFooter *protobufStripeFooter = NULL;
|
||||
uint64 *skipListSizeArray = NULL;
|
||||
uint64 *existsSizeArray = NULL;
|
||||
uint64 *valueSizeArray = NULL;
|
||||
uint64 sizeArrayLength = 0;
|
||||
uint32 columnCount = 0;
|
||||
|
||||
protobufStripeFooter = protobuf__stripe_footer__unpack(NULL, buffer->len,
|
||||
(uint8 *) buffer->data);
|
||||
if (protobufStripeFooter == NULL)
|
||||
{
|
||||
ereport(ERROR, (errmsg("could not unpack column store"),
|
||||
errdetail("invalid stripe footer buffer")));
|
||||
}
|
||||
|
||||
columnCount = protobufStripeFooter->n_skiplistsizearray;
|
||||
if (protobufStripeFooter->n_existssizearray != columnCount ||
|
||||
protobufStripeFooter->n_valuesizearray != columnCount)
|
||||
{
|
||||
ereport(ERROR, (errmsg("could not unpack column store"),
|
||||
errdetail("stripe size array lengths don't match")));
|
||||
}
|
||||
|
||||
sizeArrayLength = columnCount * sizeof(uint64);
|
||||
|
||||
skipListSizeArray = palloc0(sizeArrayLength);
|
||||
existsSizeArray = palloc0(sizeArrayLength);
|
||||
valueSizeArray = palloc0(sizeArrayLength);
|
||||
|
||||
memcpy(skipListSizeArray, protobufStripeFooter->skiplistsizearray, sizeArrayLength);
|
||||
memcpy(existsSizeArray, protobufStripeFooter->existssizearray, sizeArrayLength);
|
||||
memcpy(valueSizeArray, protobufStripeFooter->valuesizearray, sizeArrayLength);
|
||||
|
||||
protobuf__stripe_footer__free_unpacked(protobufStripeFooter, NULL);
|
||||
|
||||
stripeFooter = palloc0(sizeof(StripeFooter));
|
||||
stripeFooter->skipListSizeArray = skipListSizeArray;
|
||||
stripeFooter->existsSizeArray = existsSizeArray;
|
||||
stripeFooter->valueSizeArray = valueSizeArray;
|
||||
stripeFooter->columnCount = columnCount;
|
||||
|
||||
return stripeFooter;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* DeserializeBlockCount deserializes the given column skip list buffer and
|
||||
 * returns the number of blocks in the column skip list.
|
||||
*/
|
||||
uint32
|
||||
DeserializeBlockCount(StringInfo buffer)
|
||||
{
|
||||
uint32 blockCount = 0;
|
||||
Protobuf__ColumnBlockSkipList *protobufBlockSkipList = NULL;
|
||||
|
||||
protobufBlockSkipList =
|
||||
protobuf__column_block_skip_list__unpack(NULL, buffer->len,
|
||||
(uint8 *) buffer->data);
|
||||
if (protobufBlockSkipList == NULL)
|
||||
{
|
||||
ereport(ERROR, (errmsg("could not unpack column store"),
|
||||
errdetail("invalid skip list buffer")));
|
||||
}
|
||||
|
||||
blockCount = protobufBlockSkipList->n_blockskipnodearray;
|
||||
|
||||
protobuf__column_block_skip_list__free_unpacked(protobufBlockSkipList, NULL);
|
||||
|
||||
return blockCount;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* DeserializeRowCount deserializes the given column skip list buffer and
|
||||
 * returns the total number of rows in the block skip list.
|
||||
*/
|
||||
uint32
|
||||
DeserializeRowCount(StringInfo buffer)
|
||||
{
|
||||
uint32 rowCount = 0;
|
||||
Protobuf__ColumnBlockSkipList *protobufBlockSkipList = NULL;
|
||||
uint32 blockIndex = 0;
|
||||
uint32 blockCount = 0;
|
||||
|
||||
protobufBlockSkipList =
|
||||
protobuf__column_block_skip_list__unpack(NULL, buffer->len,
|
||||
(uint8 *) buffer->data);
|
||||
if (protobufBlockSkipList == NULL)
|
||||
{
|
||||
ereport(ERROR, (errmsg("could not unpack column store"),
|
||||
errdetail("invalid skip list buffer")));
|
||||
}
|
||||
|
||||
blockCount = (uint32) protobufBlockSkipList->n_blockskipnodearray;
|
||||
for (blockIndex = 0; blockIndex < blockCount; blockIndex++)
|
||||
{
|
||||
Protobuf__ColumnBlockSkipNode *protobufBlockSkipNode =
|
||||
protobufBlockSkipList->blockskipnodearray[blockIndex];
|
||||
rowCount += protobufBlockSkipNode->rowcount;
|
||||
}
|
||||
|
||||
protobuf__column_block_skip_list__free_unpacked(protobufBlockSkipList, NULL);
|
||||
|
||||
return rowCount;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* DeserializeColumnSkipList deserializes the given buffer and returns the result as
|
||||
 * a ColumnBlockSkipNode array. If the number of unpacked block skip nodes is not
|
||||
 * equal to the given block count, the function errors out.
|
||||
*/
|
||||
ColumnBlockSkipNode *
|
||||
DeserializeColumnSkipList(StringInfo buffer, bool typeByValue, int typeLength,
|
||||
uint32 blockCount)
|
||||
{
|
||||
ColumnBlockSkipNode *blockSkipNodeArray = NULL;
|
||||
uint32 blockIndex = 0;
|
||||
Protobuf__ColumnBlockSkipList *protobufBlockSkipList = NULL;
|
||||
|
||||
protobufBlockSkipList =
|
||||
protobuf__column_block_skip_list__unpack(NULL, buffer->len,
|
||||
(uint8 *) buffer->data);
|
||||
if (protobufBlockSkipList == NULL)
|
||||
{
|
||||
ereport(ERROR, (errmsg("could not unpack column store"),
|
||||
errdetail("invalid skip list buffer")));
|
||||
}
|
||||
|
||||
if (protobufBlockSkipList->n_blockskipnodearray != blockCount)
|
||||
{
|
||||
ereport(ERROR, (errmsg("could not unpack column store"),
|
||||
errdetail("block skip node count and block count don't match")));
|
||||
}
|
||||
|
||||
blockSkipNodeArray = palloc0(blockCount * sizeof(ColumnBlockSkipNode));
|
||||
|
||||
for (blockIndex = 0; blockIndex < blockCount; blockIndex++)
|
||||
{
|
||||
Protobuf__ColumnBlockSkipNode *protobufBlockSkipNode = NULL;
|
||||
ColumnBlockSkipNode *blockSkipNode = NULL;
|
||||
bool hasMinMax = false;
|
||||
Datum minimumValue = 0;
|
||||
Datum maximumValue = 0;
|
||||
|
||||
protobufBlockSkipNode = protobufBlockSkipList->blockskipnodearray[blockIndex];
|
||||
if (!protobufBlockSkipNode->has_rowcount ||
|
||||
!protobufBlockSkipNode->has_existsblockoffset ||
|
||||
!protobufBlockSkipNode->has_valueblockoffset ||
|
||||
!protobufBlockSkipNode->has_existslength ||
|
||||
!protobufBlockSkipNode->has_valuelength ||
|
||||
!protobufBlockSkipNode->has_valuecompressiontype)
|
||||
{
|
||||
ereport(ERROR, (errmsg("could not unpack column store"),
|
||||
errdetail("missing required block skip node metadata")));
|
||||
}
|
||||
|
||||
if (protobufBlockSkipNode->has_minimumvalue !=
|
||||
protobufBlockSkipNode->has_maximumvalue)
|
||||
{
|
||||
ereport(ERROR, (errmsg("could not unpack column store"),
|
||||
errdetail("has minimum and has maximum fields "
|
||||
"don't match")));
|
||||
}
|
||||
|
||||
hasMinMax = protobufBlockSkipNode->has_minimumvalue;
|
||||
if (hasMinMax)
|
||||
{
|
||||
minimumValue = ProtobufBinaryToDatum(protobufBlockSkipNode->minimumvalue,
|
||||
typeByValue, typeLength);
|
||||
maximumValue = ProtobufBinaryToDatum(protobufBlockSkipNode->maximumvalue,
|
||||
typeByValue, typeLength);
|
||||
}
|
||||
|
||||
blockSkipNode = &blockSkipNodeArray[blockIndex];
|
||||
blockSkipNode->rowCount = protobufBlockSkipNode->rowcount;
|
||||
blockSkipNode->hasMinMax = hasMinMax;
|
||||
blockSkipNode->minimumValue = minimumValue;
|
||||
blockSkipNode->maximumValue = maximumValue;
|
||||
blockSkipNode->existsBlockOffset = protobufBlockSkipNode->existsblockoffset;
|
||||
blockSkipNode->valueBlockOffset = protobufBlockSkipNode->valueblockoffset;
|
||||
blockSkipNode->existsLength = protobufBlockSkipNode->existslength;
|
||||
blockSkipNode->valueLength = protobufBlockSkipNode->valuelength;
|
||||
blockSkipNode->valueCompressionType =
|
||||
(CompressionType) protobufBlockSkipNode->valuecompressiontype;
|
||||
}
|
||||
|
||||
protobuf__column_block_skip_list__free_unpacked(protobufBlockSkipList, NULL);
|
||||
|
||||
return blockSkipNodeArray;
|
||||
}
|
||||
|
||||
|
||||
/* Converts a datum to a ProtobufCBinaryData. */
|
||||
static ProtobufCBinaryData
|
||||
DatumToProtobufBinary(Datum datum, bool datumTypeByValue, int datumTypeLength)
|
||||
{
|
||||
ProtobufCBinaryData protobufBinary = {0, 0};
|
||||
|
||||
int datumLength = att_addlength_datum(0, datumTypeLength, datum);
|
||||
char *datumBuffer = palloc0(datumLength);
|
||||
|
||||
if (datumTypeLength > 0)
|
||||
{
|
||||
if (datumTypeByValue)
|
||||
{
|
||||
store_att_byval(datumBuffer, datum, datumTypeLength);
|
||||
}
|
||||
else
|
||||
{
|
||||
memcpy(datumBuffer, DatumGetPointer(datum), datumTypeLength);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
memcpy(datumBuffer, DatumGetPointer(datum), datumLength);
|
||||
}
|
||||
|
||||
protobufBinary.data = (uint8 *) datumBuffer;
|
||||
protobufBinary.len = datumLength;
|
||||
|
||||
return protobufBinary;
|
||||
}
|
||||
|
||||
|
||||
/* Converts the given ProtobufCBinaryData to a Datum. */
|
||||
static Datum
|
||||
ProtobufBinaryToDatum(ProtobufCBinaryData protobufBinary, bool datumTypeByValue,
|
||||
int datumTypeLength)
|
||||
{
|
||||
Datum datum = 0;
|
||||
|
||||
/*
|
||||
* We copy the protobuf data so the result of this function lives even
|
||||
* after the unpacked protobuf struct is freed.
|
||||
*/
|
||||
char *binaryDataCopy = palloc0(protobufBinary.len);
|
||||
memcpy(binaryDataCopy, protobufBinary.data, protobufBinary.len);
|
||||
|
||||
datum = fetch_att(binaryDataCopy, datumTypeByValue, datumTypeLength);
|
||||
|
||||
return datum;
|
||||
}
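The two static helpers above are intended to be inverses of each other. A minimal sketch of a round trip for a pass-by-value 4-byte type such as int4 (the check itself is illustrative, not part of the source):

/* Illustrative sketch: round-trip an int4 Datum through the helpers above. */
static void
CheckInt4RoundTrip(void)
{
	Datum original = Int32GetDatum(42);
	ProtobufCBinaryData binary = DatumToProtobufBinary(original, true, 4);
	Datum restored = ProtobufBinaryToDatum(binary, true, 4);

	Assert(DatumGetInt32(restored) == DatumGetInt32(original));
}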
|
|
@ -0,0 +1,42 @@
|
|||
/*-------------------------------------------------------------------------
|
||||
*
|
||||
* cstore_metadata_serialization.h
|
||||
*
|
||||
* Type and function declarations to serialize/deserialize cstore metadata.
|
||||
*
|
||||
* Copyright (c) 2016, Citus Data, Inc.
|
||||
*
|
||||
* $Id$
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
#ifndef CSTORE_SERIALIZATION_H
|
||||
#define CSTORE_SERIALIZATION_H
|
||||
|
||||
#include "catalog/pg_attribute.h"
|
||||
#include "nodes/pg_list.h"
|
||||
#include "lib/stringinfo.h"
|
||||
#include "cstore_fdw.h"
|
||||
|
||||
|
||||
/* Function declarations for metadata serialization */
|
||||
extern StringInfo SerializePostScript(uint64 tableFooterLength);
|
||||
extern StringInfo SerializeTableFooter(TableFooter *tableFooter);
|
||||
extern StringInfo SerializeStripeFooter(StripeFooter *stripeFooter);
|
||||
extern StringInfo SerializeColumnSkipList(ColumnBlockSkipNode *blockSkipNodeArray,
|
||||
uint32 blockCount, bool typeByValue,
|
||||
int typeLength);
|
||||
|
||||
/* Function declarations for metadata deserialization */
|
||||
extern void DeserializePostScript(StringInfo buffer, uint64 *tableFooterLength);
|
||||
extern TableFooter * DeserializeTableFooter(StringInfo buffer);
|
||||
extern uint32 DeserializeBlockCount(StringInfo buffer);
|
||||
extern uint32 DeserializeRowCount(StringInfo buffer);
|
||||
extern StripeFooter * DeserializeStripeFooter(StringInfo buffer);
|
||||
extern ColumnBlockSkipNode * DeserializeColumnSkipList(StringInfo buffer,
|
||||
bool typeByValue, int typeLength,
|
||||
uint32 blockCount);
|
||||
|
||||
|
||||
#endif /* CSTORE_SERIALIZATION_H */
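Each Serialize* function is paired with a Deserialize* counterpart operating on the same wire format. A minimal sketch of a round trip through the stripe footer pair (the check is illustrative, not part of the source):

/* Illustrative sketch: stripe footer round trip through the declarations above. */
static void
CheckStripeFooterRoundTrip(StripeFooter *stripeFooter)
{
	StringInfo buffer = SerializeStripeFooter(stripeFooter);
	StripeFooter *restored = DeserializeStripeFooter(buffer);

	Assert(restored->columnCount == stripeFooter->columnCount);
}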
|
File diff suppressed because it is too large
|
@ -0,0 +1,58 @@
|
|||
/*-------------------------------------------------------------------------
|
||||
*
|
||||
* cstore_version_compat.h
|
||||
*
|
||||
* Compatibility macros for writing code agnostic to PostgreSQL versions
|
||||
*
|
||||
* Copyright (c) 2018, Citus Data, Inc.
|
||||
*
|
||||
* $Id$
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
#ifndef CSTORE_COMPAT_H
|
||||
#define CSTORE_COMPAT_H
|
||||
|
||||
#if PG_VERSION_NUM < 100000
|
||||
|
||||
/* Accessor for the i'th attribute of tupdesc. */
|
||||
#define TupleDescAttr(tupdesc, i) ((tupdesc)->attrs[(i)])
|
||||
|
||||
#endif
|
||||
|
||||
#if PG_VERSION_NUM < 110000
|
||||
#define ALLOCSET_DEFAULT_SIZES ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE
|
||||
#define ACLCHECK_OBJECT_TABLE ACL_KIND_CLASS
|
||||
#else
|
||||
#define ACLCHECK_OBJECT_TABLE OBJECT_TABLE
|
||||
|
||||
#define ExplainPropertyLong(qlabel, value, es) \
|
||||
ExplainPropertyInteger(qlabel, NULL, value, es)
|
||||
#endif
|
||||
|
||||
#define PREVIOUS_UTILITY (PreviousProcessUtilityHook != NULL \
|
||||
? PreviousProcessUtilityHook : standard_ProcessUtility)
|
||||
#if PG_VERSION_NUM >= 100000
|
||||
#define CALL_PREVIOUS_UTILITY(parseTree, queryString, context, paramListInfo, \
|
||||
destReceiver, completionTag) \
|
||||
PREVIOUS_UTILITY(plannedStatement, queryString, context, paramListInfo, \
|
||||
queryEnvironment, destReceiver, completionTag)
|
||||
#else
|
||||
#define CALL_PREVIOUS_UTILITY(parseTree, queryString, context, paramListInfo, \
|
||||
destReceiver, completionTag) \
|
||||
PREVIOUS_UTILITY(parseTree, queryString, context, paramListInfo, destReceiver, \
|
||||
completionTag)
|
||||
#endif
|
||||
|
||||
#if PG_VERSION_NUM < 120000
|
||||
#define TTS_EMPTY(slot) ((slot)->tts_isempty)
|
||||
#define ExecForceStoreHeapTuple(tuple, slot, shouldFree) \
|
||||
ExecStoreTuple(newTuple, tupleSlot, InvalidBuffer, shouldFree);
|
||||
#define HeapScanDesc TableScanDesc
|
||||
#define table_beginscan heap_beginscan
|
||||
#define table_endscan heap_endscan
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* CSTORE_COMPAT_H */
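These macros let callers stay version-agnostic. A minimal sketch of the intended usage pattern (the function and context names are illustrative; the usual postgres.h, utils/memutils.h, and access/tupdesc.h includes are assumed):

/* Illustrative sketch: version-agnostic use of the compatibility macros. */
static MemoryContext
CreateExampleContext(TupleDesc tupleDescriptor, Form_pg_attribute *firstAttribute)
{
	/* TupleDescAttr works on both the old attrs[] layout and newer servers */
	*firstAttribute = TupleDescAttr(tupleDescriptor, 0);

	/* ALLOCSET_DEFAULT_SIZES keeps the allocation size arguments version-agnostic */
	return AllocSetContextCreate(CurrentMemoryContext, "Example Context",
								 ALLOCSET_DEFAULT_SIZES);
}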
|
File diff suppressed because it is too large
|
@ -0,0 +1,3 @@
|
|||
"{1,2,3}","{1,2,3}","{a,b,c}"
|
||||
{},{},{}
|
||||
"{-2147483648,2147483647}","{-9223372036854775808,9223372036854775807}","{""""}"
|
|
File diff suppressed because it is too large
|
@ -0,0 +1,5 @@
|
|||
a,1990-01-10,2090,97.1,XA ,{a}
|
||||
b,1990-11-01,2203,98.1,XA ,"{a,b}"
|
||||
c,1988-11-01,2907,99.4,XB ,"{w,y}"
|
||||
d,1985-05-05,2314,98.3,XB ,{}
|
||||
e,1995-05-05,2236,98.2,XC ,{a}
|
|
|
@ -0,0 +1,3 @@
|
|||
f,1983-04-02,3090,99.6,XD ,"{a,b,c,y}"
|
||||
g,1991-12-13,1803,85.1,XD ,"{a,c}"
|
||||
h,1987-10-26,2112,95.4,XD ,"{w,a}"
|
|
|
@ -0,0 +1,2 @@
|
|||
2000-01-02 04:05:06,1999-01-08 14:05:06+02,2000-01-02,04:05:06,04:00:00
|
||||
1970-01-01 00:00:00,infinity,-infinity,00:00:00,00:00:00
|
|
|
@ -0,0 +1,2 @@
|
|||
a,"(2,b)"
|
||||
b,"(3,c)"
|
|
|
@ -0,0 +1,2 @@
|
|||
,{NULL},"(,)"
|
||||
,,
|
|
|
@ -0,0 +1,2 @@
|
|||
f,\xdeadbeef,$1.00,192.168.1.2,10101,a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11,"{""key"": ""value""}"
|
||||
t,\xcdb0,$1.50,127.0.0.1,"",a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11,[]
|
|
|
@ -0,0 +1,2 @@
|
|||
"[1,3)","[1,3)","[1,3)","[""2000-01-02 00:30:00"",""2010-02-03 12:30:00"")"
|
||||
empty,"[1,)","(,)",empty
|
|
|
@ -0,0 +1,178 @@
|
|||
--
|
||||
-- Testing ALTER TABLE on cstore_fdw tables.
|
||||
--
|
||||
CREATE FOREIGN TABLE test_alter_table (a int, b int, c int) SERVER cstore_server;
|
||||
WITH sample_data AS (VALUES
|
||||
(1, 2, 3),
|
||||
(4, 5, 6),
|
||||
(7, 8, 9)
|
||||
)
|
||||
INSERT INTO test_alter_table SELECT * FROM sample_data;
|
||||
-- drop a column
|
||||
ALTER FOREIGN TABLE test_alter_table DROP COLUMN a;
|
||||
-- test analyze
|
||||
ANALYZE test_alter_table;
|
||||
-- verify select queries run as expected
|
||||
SELECT * FROM test_alter_table;
|
||||
b | c
|
||||
---+---
|
||||
2 | 3
|
||||
5 | 6
|
||||
8 | 9
|
||||
(3 rows)
|
||||
|
||||
SELECT a FROM test_alter_table;
|
||||
ERROR: column "a" does not exist
|
||||
LINE 1: SELECT a FROM test_alter_table;
|
||||
^
|
||||
SELECT b FROM test_alter_table;
|
||||
b
|
||||
---
|
||||
2
|
||||
5
|
||||
8
|
||||
(3 rows)
|
||||
|
||||
-- verify insert runs as expected
|
||||
INSERT INTO test_alter_table (SELECT 3, 5, 8);
|
||||
ERROR: INSERT has more expressions than target columns
|
||||
LINE 1: INSERT INTO test_alter_table (SELECT 3, 5, 8);
|
||||
^
|
||||
INSERT INTO test_alter_table (SELECT 5, 8);
|
||||
-- add a column with no defaults
|
||||
ALTER FOREIGN TABLE test_alter_table ADD COLUMN d int;
|
||||
SELECT * FROM test_alter_table;
|
||||
b | c | d
|
||||
---+---+---
|
||||
2 | 3 |
|
||||
5 | 6 |
|
||||
8 | 9 |
|
||||
5 | 8 |
|
||||
(4 rows)
|
||||
|
||||
INSERT INTO test_alter_table (SELECT 3, 5, 8);
|
||||
SELECT * FROM test_alter_table;
|
||||
b | c | d
|
||||
---+---+---
|
||||
2 | 3 |
|
||||
5 | 6 |
|
||||
8 | 9 |
|
||||
5 | 8 |
|
||||
3 | 5 | 8
|
||||
(5 rows)
|
||||
|
||||
-- add a fixed-length column with default value
|
||||
ALTER FOREIGN TABLE test_alter_table ADD COLUMN e int default 3;
|
||||
SELECT * from test_alter_table;
|
||||
b | c | d | e
|
||||
---+---+---+---
|
||||
2 | 3 | | 3
|
||||
5 | 6 | | 3
|
||||
8 | 9 | | 3
|
||||
5 | 8 | | 3
|
||||
3 | 5 | 8 | 3
|
||||
(5 rows)
|
||||
|
||||
INSERT INTO test_alter_table (SELECT 1, 2, 4, 8);
|
||||
SELECT * from test_alter_table;
|
||||
b | c | d | e
|
||||
---+---+---+---
|
||||
2 | 3 | | 3
|
||||
5 | 6 | | 3
|
||||
8 | 9 | | 3
|
||||
5 | 8 | | 3
|
||||
3 | 5 | 8 | 3
|
||||
1 | 2 | 4 | 8
|
||||
(6 rows)
|
||||
|
||||
-- add a variable-length column with default value
|
||||
ALTER FOREIGN TABLE test_alter_table ADD COLUMN f text DEFAULT 'TEXT ME';
|
||||
SELECT * from test_alter_table;
|
||||
b | c | d | e | f
|
||||
---+---+---+---+---------
|
||||
2 | 3 | | 3 | TEXT ME
|
||||
5 | 6 | | 3 | TEXT ME
|
||||
8 | 9 | | 3 | TEXT ME
|
||||
5 | 8 | | 3 | TEXT ME
|
||||
3 | 5 | 8 | 3 | TEXT ME
|
||||
1 | 2 | 4 | 8 | TEXT ME
|
||||
(6 rows)
|
||||
|
||||
INSERT INTO test_alter_table (SELECT 1, 2, 4, 8, 'ABCDEF');
|
||||
SELECT * from test_alter_table;
|
||||
b | c | d | e | f
|
||||
---+---+---+---+---------
|
||||
2 | 3 | | 3 | TEXT ME
|
||||
5 | 6 | | 3 | TEXT ME
|
||||
8 | 9 | | 3 | TEXT ME
|
||||
5 | 8 | | 3 | TEXT ME
|
||||
3 | 5 | 8 | 3 | TEXT ME
|
||||
1 | 2 | 4 | 8 | TEXT ME
|
||||
1 | 2 | 4 | 8 | ABCDEF
|
||||
(7 rows)
|
||||
|
||||
-- drop couple of columns
|
||||
ALTER FOREIGN TABLE test_alter_table DROP COLUMN c;
|
||||
ALTER FOREIGN TABLE test_alter_table DROP COLUMN e;
|
||||
ANALYZE test_alter_table;
|
||||
SELECT * from test_alter_table;
|
||||
b | d | f
|
||||
---+---+---------
|
||||
2 | | TEXT ME
|
||||
5 | | TEXT ME
|
||||
8 | | TEXT ME
|
||||
5 | | TEXT ME
|
||||
3 | 8 | TEXT ME
|
||||
1 | 4 | TEXT ME
|
||||
1 | 4 | ABCDEF
|
||||
(7 rows)
|
||||
|
||||
SELECT count(*) from test_alter_table;
|
||||
count
|
||||
-------
|
||||
7
|
||||
(1 row)
|
||||
|
||||
SELECT count(t.*) from test_alter_table t;
|
||||
count
|
||||
-------
|
||||
7
|
||||
(1 row)
|
||||
|
||||
-- unsupported default values
|
||||
ALTER FOREIGN TABLE test_alter_table ADD COLUMN g boolean DEFAULT isfinite(current_date);
|
||||
ALTER FOREIGN TABLE test_alter_table ADD COLUMN h DATE DEFAULT current_date;
|
||||
SELECT * FROM test_alter_table;
|
||||
ERROR: unsupported default value for column "g"
|
||||
HINT: Expression is either mutable or does not evaluate to constant value
|
||||
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN g DROP DEFAULT;
|
||||
SELECT * FROM test_alter_table;
|
||||
ERROR: unsupported default value for column "h"
|
||||
HINT: Expression is either mutable or does not evaluate to constant value
|
||||
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN h DROP DEFAULT;
|
||||
ANALYZE test_alter_table;
|
||||
SELECT * FROM test_alter_table;
|
||||
b | d | f | g | h
|
||||
---+---+---------+---+---
|
||||
2 | | TEXT ME | |
|
||||
5 | | TEXT ME | |
|
||||
8 | | TEXT ME | |
|
||||
5 | | TEXT ME | |
|
||||
3 | 8 | TEXT ME | |
|
||||
1 | 4 | TEXT ME | |
|
||||
1 | 4 | ABCDEF | |
|
||||
(7 rows)
|
||||
|
||||
-- unsupported type change
|
||||
ALTER FOREIGN TABLE test_alter_table ADD COLUMN i int;
|
||||
ALTER FOREIGN TABLE test_alter_table ADD COLUMN j float;
|
||||
ALTER FOREIGN TABLE test_alter_table ADD COLUMN k text;
|
||||
-- this is valid type change
|
||||
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN i TYPE float;
|
||||
-- this is not valid
|
||||
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN j TYPE int;
|
||||
ERROR: Column j cannot be cast automatically to type pg_catalog.int4
|
||||
-- text / varchar conversion is valid both ways
|
||||
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN k TYPE varchar(20);
|
||||
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN k TYPE text;
|
||||
DROP FOREIGN TABLE test_alter_table;
|
|
@ -0,0 +1,19 @@
|
|||
--
|
||||
-- Test the ANALYZE command for cstore_fdw tables.
|
||||
--
|
||||
-- ANALYZE uncompressed table
|
||||
ANALYZE contestant;
|
||||
SELECT count(*) FROM pg_stats WHERE tablename='contestant';
|
||||
count
|
||||
-------
|
||||
6
|
||||
(1 row)
|
||||
|
||||
-- ANALYZE compressed table
|
||||
ANALYZE contestant_compressed;
|
||||
SELECT count(*) FROM pg_stats WHERE tablename='contestant_compressed';
|
||||
count
|
||||
-------
|
||||
6
|
||||
(1 row)
|
||||
|
|
@ -0,0 +1,97 @@
|
|||
--
|
||||
-- Tests the different DROP commands for cstore_fdw tables.
|
||||
--
|
||||
-- DROP FOREIGN TABLE
|
||||
-- DROP SCHEMA
|
||||
-- DROP EXTENSION
|
||||
-- DROP DATABASE
|
||||
--
|
||||
-- Note that travis does not create
|
||||
-- the cstore_fdw extension in the default database (postgres). This has caused
|
||||
-- different behavior between travis tests and local tests. Thus
|
||||
-- the 'postgres' directory is excluded from the comparison so both give the same result.
|
||||
-- store postgres database oid
|
||||
SELECT oid postgres_oid FROM pg_database WHERE datname = 'postgres' \gset
|
||||
-- Check that files for the automatically managed table exist in the
|
||||
-- cstore_fdw/{databaseoid} directory.
|
||||
SELECT count(*) FROM (
|
||||
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
|
||||
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
|
||||
) AS q1) AS q2;
|
||||
count
|
||||
-------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
-- DROP cstore_fdw tables
|
||||
DROP FOREIGN TABLE contestant;
|
||||
DROP FOREIGN TABLE contestant_compressed;
|
||||
-- Create a cstore_fdw table under a schema and drop it.
|
||||
CREATE SCHEMA test_schema;
|
||||
CREATE FOREIGN TABLE test_schema.test_table(data int) SERVER cstore_server;
|
||||
DROP SCHEMA test_schema CASCADE;
|
||||
NOTICE: drop cascades to foreign table test_schema.test_table
|
||||
-- Check that the files have been deleted and the directory is empty after the
|
||||
-- DROP table command.
|
||||
SELECT count(*) FROM (
|
||||
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
|
||||
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
|
||||
) AS q1) AS q2;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT current_database() datname \gset
|
||||
CREATE DATABASE db_to_drop;
|
||||
\c db_to_drop
|
||||
CREATE EXTENSION cstore_fdw;
|
||||
CREATE SERVER cstore_server FOREIGN DATA WRAPPER cstore_fdw;
|
||||
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database() \gset
|
||||
CREATE FOREIGN TABLE test_table(data int) SERVER cstore_server;
|
||||
-- should see 2 files, data and footer file for single table
|
||||
SELECT count(*) FROM pg_ls_dir('cstore_fdw/' || :databaseoid);
|
||||
count
|
||||
-------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
-- should see 2 directories 1 for each database, excluding postgres database
|
||||
SELECT count(*) FROM pg_ls_dir('cstore_fdw') WHERE pg_ls_dir != :postgres_oid::text;
|
||||
count
|
||||
-------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
DROP EXTENSION cstore_fdw CASCADE;
|
||||
NOTICE: drop cascades to 2 other objects
|
||||
DETAIL: drop cascades to server cstore_server
|
||||
drop cascades to foreign table test_table
|
||||
-- should only see 1 directory here
|
||||
SELECT count(*) FROM pg_ls_dir('cstore_fdw') WHERE pg_ls_dir != :postgres_oid::text;
|
||||
count
|
||||
-------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- test database drop
|
||||
CREATE EXTENSION cstore_fdw;
|
||||
CREATE SERVER cstore_server FOREIGN DATA WRAPPER cstore_fdw;
|
||||
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database() \gset
|
||||
CREATE FOREIGN TABLE test_table(data int) SERVER cstore_server;
|
||||
-- should see 2 directories 1 for each database
|
||||
SELECT count(*) FROM pg_ls_dir('cstore_fdw') WHERE pg_ls_dir != :postgres_oid::text;
|
||||
count
|
||||
-------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
\c :datname
|
||||
DROP DATABASE db_to_drop;
|
||||
-- should only see 1 directory for the default database
|
||||
SELECT count(*) FROM pg_ls_dir('cstore_fdw') WHERE pg_ls_dir != :postgres_oid::text;
|
||||
count
|
||||
-------
|
||||
1
|
||||
(1 row)
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
--
|
||||
-- Test utility functions for cstore_fdw tables.
|
||||
--
|
||||
CREATE FOREIGN TABLE empty_table (a int) SERVER cstore_server;
|
||||
CREATE FOREIGN TABLE table_with_data (a int) SERVER cstore_server;
|
||||
CREATE TABLE non_cstore_table (a int);
|
||||
COPY table_with_data FROM STDIN;
|
||||
SELECT cstore_table_size('empty_table') < cstore_table_size('table_with_data');
|
||||
?column?
|
||||
----------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT cstore_table_size('non_cstore_table');
|
||||
ERROR: relation is not a cstore table
|
||||
DROP FOREIGN TABLE empty_table;
|
||||
DROP FOREIGN TABLE table_with_data;
|
||||
DROP TABLE non_cstore_table;
|
|
@ -0,0 +1,88 @@
|
|||
--
|
||||
-- Testing insert on cstore_fdw tables.
|
||||
--
|
||||
CREATE FOREIGN TABLE test_insert_command (a int) SERVER cstore_server;
|
||||
-- test single row inserts fail
|
||||
select count(*) from test_insert_command;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
insert into test_insert_command values(1);
|
||||
ERROR: operation is not supported
|
||||
select count(*) from test_insert_command;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
insert into test_insert_command default values;
|
||||
ERROR: operation is not supported
|
||||
select count(*) from test_insert_command;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- test that inserting from another table succeeds
|
||||
CREATE TABLE test_insert_command_data (a int);
|
||||
select count(*) from test_insert_command_data;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
insert into test_insert_command_data values(1);
|
||||
select count(*) from test_insert_command_data;
|
||||
count
|
||||
-------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
insert into test_insert_command select * from test_insert_command_data;
|
||||
select count(*) from test_insert_command;
|
||||
count
|
||||
-------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
drop table test_insert_command_data;
|
||||
drop foreign table test_insert_command;
|
||||
-- test long attribute value insertion
|
||||
-- create sufficiently long text so that data is stored in toast
|
||||
CREATE TABLE test_long_text AS
|
||||
SELECT a as int_val, string_agg(random()::text, '') as text_val
|
||||
FROM generate_series(1, 10) a, generate_series(1, 1000) b
|
||||
GROUP BY a ORDER BY a;
|
||||
-- store hash values of text for later comparison
|
||||
CREATE TABLE test_long_text_hash AS
|
||||
SELECT int_val, md5(text_val) AS hash
|
||||
FROM test_long_text;
|
||||
CREATE FOREIGN TABLE test_cstore_long_text(int_val int, text_val text)
|
||||
SERVER cstore_server;
|
||||
-- store long text in cstore table
|
||||
INSERT INTO test_cstore_long_text SELECT * FROM test_long_text;
|
||||
-- drop source table to remove original text from toast
|
||||
DROP TABLE test_long_text;
|
||||
-- check if text data is still available in cstore table
|
||||
-- by comparing previously stored hash.
|
||||
SELECT a.int_val
|
||||
FROM test_long_text_hash a, test_cstore_long_text c
|
||||
WHERE a.int_val = c.int_val AND a.hash = md5(c.text_val);
|
||||
int_val
|
||||
---------
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
5
|
||||
6
|
||||
7
|
||||
8
|
||||
9
|
||||
10
|
||||
(10 rows)
|
||||
|
||||
DROP TABLE test_long_text_hash;
|
||||
DROP FOREIGN TABLE test_cstore_long_text;
|
|
@ -0,0 +1,105 @@
|
|||
--
|
||||
-- Test querying cstore_fdw tables.
|
||||
--
|
||||
-- Settings to make the result deterministic
|
||||
SET datestyle = "ISO, YMD";
|
||||
-- Query uncompressed data
|
||||
SELECT count(*) FROM contestant;
|
||||
count
|
||||
-------
|
||||
8
|
||||
(1 row)
|
||||
|
||||
SELECT avg(rating), stddev_samp(rating) FROM contestant;
|
||||
avg | stddev_samp
|
||||
-----------------------+------------------
|
||||
2344.3750000000000000 | 433.746119785032
|
||||
(1 row)
|
||||
|
||||
SELECT country, avg(rating) FROM contestant WHERE rating > 2200
|
||||
GROUP BY country ORDER BY country;
|
||||
country | avg
|
||||
---------+-----------------------
|
||||
XA | 2203.0000000000000000
|
||||
XB | 2610.5000000000000000
|
||||
XC | 2236.0000000000000000
|
||||
XD | 3090.0000000000000000
|
||||
(4 rows)
|
||||
|
||||
SELECT * FROM contestant ORDER BY handle;
|
||||
handle | birthdate | rating | percentile | country | achievements
|
||||
--------+------------+--------+------------+---------+--------------
|
||||
a | 1990-01-10 | 2090 | 97.1 | XA | {a}
|
||||
b | 1990-11-01 | 2203 | 98.1 | XA | {a,b}
|
||||
c | 1988-11-01 | 2907 | 99.4 | XB | {w,y}
|
||||
d | 1985-05-05 | 2314 | 98.3 | XB | {}
|
||||
e | 1995-05-05 | 2236 | 98.2 | XC | {a}
|
||||
f | 1983-04-02 | 3090 | 99.6 | XD | {a,b,c,y}
|
||||
g | 1991-12-13 | 1803 | 85.1 | XD | {a,c}
|
||||
h | 1987-10-26 | 2112 | 95.4 | XD | {w,a}
|
||||
(8 rows)
|
||||
|
||||
-- Query compressed data
|
||||
SELECT count(*) FROM contestant_compressed;
|
||||
count
|
||||
-------
|
||||
8
|
||||
(1 row)
|
||||
|
||||
SELECT avg(rating), stddev_samp(rating) FROM contestant_compressed;
|
||||
avg | stddev_samp
|
||||
-----------------------+------------------
|
||||
2344.3750000000000000 | 433.746119785032
|
||||
(1 row)
|
||||
|
||||
SELECT country, avg(rating) FROM contestant_compressed WHERE rating > 2200
|
||||
GROUP BY country ORDER BY country;
|
||||
country | avg
|
||||
---------+-----------------------
|
||||
XA | 2203.0000000000000000
|
||||
XB | 2610.5000000000000000
|
||||
XC | 2236.0000000000000000
|
||||
XD | 3090.0000000000000000
|
||||
(4 rows)
|
||||
|
||||
SELECT * FROM contestant_compressed ORDER BY handle;
|
||||
handle | birthdate | rating | percentile | country | achievements
|
||||
--------+------------+--------+------------+---------+--------------
|
||||
a | 1990-01-10 | 2090 | 97.1 | XA | {a}
|
||||
b | 1990-11-01 | 2203 | 98.1 | XA | {a,b}
|
||||
c | 1988-11-01 | 2907 | 99.4 | XB | {w,y}
|
||||
d | 1985-05-05 | 2314 | 98.3 | XB | {}
|
||||
e | 1995-05-05 | 2236 | 98.2 | XC | {a}
|
||||
f | 1983-04-02 | 3090 | 99.6 | XD | {a,b,c,y}
|
||||
g | 1991-12-13 | 1803 | 85.1 | XD | {a,c}
|
||||
h | 1987-10-26 | 2112 | 95.4 | XD | {w,a}
|
||||
(8 rows)
|
||||
|
||||
-- Verify that we handle whole-row references correctly
|
||||
SELECT to_json(v) FROM contestant v ORDER BY rating LIMIT 1;
|
||||
to_json
|
||||
------------------------------------------------------------------------------------------------------------------
|
||||
{"handle":"g","birthdate":"1991-12-13","rating":1803,"percentile":85.1,"country":"XD ","achievements":["a","c"]}
|
||||
(1 row)
|
||||
|
||||
-- Test variables used in expressions
|
||||
CREATE FOREIGN TABLE union_first (a int, b int) SERVER cstore_server;
|
||||
CREATE FOREIGN TABLE union_second (a int, b int) SERVER cstore_server;
|
||||
INSERT INTO union_first SELECT a, a FROM generate_series(1, 5) a;
|
||||
INSERT INTO union_second SELECT a, a FROM generate_series(11, 15) a;
|
||||
(SELECT a*1, b FROM union_first) union all (SELECT a*1, b FROM union_second);
|
||||
?column? | b
|
||||
----------+----
|
||||
1 | 1
|
||||
2 | 2
|
||||
3 | 3
|
||||
4 | 4
|
||||
5 | 5
|
||||
11 | 11
|
||||
12 | 12
|
||||
13 | 13
|
||||
14 | 14
|
||||
15 | 15
|
||||
(10 rows)
|
||||
|
||||
DROP FOREIGN TABLE union_first, union_second;
|
|
@ -0,0 +1,262 @@
|
|||
--
|
||||
-- Test the TRUNCATE TABLE command for cstore_fdw tables.
|
||||
--
|
||||
-- print whether we're using version > 10 to make version-specific tests clear
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
|
||||
version_above_ten
|
||||
-------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
-- Check that files for the automatically managed table exist in the
|
||||
-- cstore_fdw/{databaseoid} directory.
|
||||
SELECT count(*) FROM (
|
||||
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
|
||||
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
|
||||
) AS q1) AS q2;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- CREATE a cstore_fdw table, fill with some data --
|
||||
CREATE FOREIGN TABLE cstore_truncate_test (a int, b int) SERVER cstore_server;
|
||||
CREATE FOREIGN TABLE cstore_truncate_test_second (a int, b int) SERVER cstore_server;
|
||||
CREATE FOREIGN TABLE cstore_truncate_test_compressed (a int, b int) SERVER cstore_server OPTIONS (compression 'pglz');
|
||||
CREATE TABLE cstore_truncate_test_regular (a int, b int);
|
||||
INSERT INTO cstore_truncate_test select a, a from generate_series(1, 10) a;
|
||||
INSERT INTO cstore_truncate_test_compressed select a, a from generate_series(1, 10) a;
|
||||
INSERT INTO cstore_truncate_test_compressed select a, a from generate_series(1, 10) a;
|
||||
-- query rows
|
||||
SELECT * FROM cstore_truncate_test;
|
||||
a | b
|
||||
----+----
|
||||
1 | 1
|
||||
2 | 2
|
||||
3 | 3
|
||||
4 | 4
|
||||
5 | 5
|
||||
6 | 6
|
||||
7 | 7
|
||||
8 | 8
|
||||
9 | 9
|
||||
10 | 10
|
||||
(10 rows)
|
||||
|
||||
TRUNCATE TABLE cstore_truncate_test;
|
||||
SELECT * FROM cstore_truncate_test;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
SELECT COUNT(*) from cstore_truncate_test;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM cstore_truncate_test_compressed;
|
||||
count
|
||||
-------
|
||||
20
|
||||
(1 row)
|
||||
|
||||
TRUNCATE TABLE cstore_truncate_test_compressed;
|
||||
SELECT count(*) FROM cstore_truncate_test_compressed;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT cstore_table_size('cstore_truncate_test_compressed');
|
||||
cstore_table_size
|
||||
-------------------
|
||||
26
|
||||
(1 row)
|
||||
|
||||
-- make sure data files still present
|
||||
SELECT count(*) FROM (
|
||||
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
|
||||
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
|
||||
) AS q1) AS q2;
|
||||
count
|
||||
-------
|
||||
6
|
||||
(1 row)
|
||||
|
||||
INSERT INTO cstore_truncate_test select a, a from generate_series(1, 10) a;
|
||||
INSERT INTO cstore_truncate_test_regular select a, a from generate_series(10, 20) a;
|
||||
INSERT INTO cstore_truncate_test_second select a, a from generate_series(20, 30) a;
|
||||
SELECT * from cstore_truncate_test;
|
||||
a | b
|
||||
----+----
|
||||
1 | 1
|
||||
2 | 2
|
||||
3 | 3
|
||||
4 | 4
|
||||
5 | 5
|
||||
6 | 6
|
||||
7 | 7
|
||||
8 | 8
|
||||
9 | 9
|
||||
10 | 10
|
||||
(10 rows)
|
||||
|
||||
SELECT * from cstore_truncate_test_second;
|
||||
a | b
|
||||
----+----
|
||||
20 | 20
|
||||
21 | 21
|
||||
22 | 22
|
||||
23 | 23
|
||||
24 | 24
|
||||
25 | 25
|
||||
26 | 26
|
||||
27 | 27
|
||||
28 | 28
|
||||
29 | 29
|
||||
30 | 30
|
||||
(11 rows)
|
||||
|
||||
SELECT * from cstore_truncate_test_regular;
|
||||
a | b
|
||||
----+----
|
||||
10 | 10
|
||||
11 | 11
|
||||
12 | 12
|
||||
13 | 13
|
||||
14 | 14
|
||||
15 | 15
|
||||
16 | 16
|
||||
17 | 17
|
||||
18 | 18
|
||||
19 | 19
|
||||
20 | 20
|
||||
(11 rows)
|
||||
|
||||
-- make sure multi truncate works
|
||||
-- notice that the same table might be repeated
|
||||
TRUNCATE TABLE cstore_truncate_test,
|
||||
cstore_truncate_test_regular,
|
||||
cstore_truncate_test_second,
|
||||
cstore_truncate_test;
|
||||
SELECT * from cstore_truncate_test;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
SELECT * from cstore_truncate_test_second;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
SELECT * from cstore_truncate_test_regular;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
-- test if truncate on empty table works
|
||||
TRUNCATE TABLE cstore_truncate_test;
|
||||
SELECT * from cstore_truncate_test;
|
||||
a | b
|
||||
---+---
|
||||
(0 rows)
|
||||
|
||||
-- test if a cached truncate from a pl/pgsql function works
|
||||
CREATE FUNCTION cstore_truncate_test_regular_func() RETURNS void AS $$
|
||||
BEGIN
|
||||
INSERT INTO cstore_truncate_test_regular select a, a from generate_series(1, 10) a;
|
||||
TRUNCATE TABLE cstore_truncate_test_regular;
|
||||
END;$$
|
||||
LANGUAGE plpgsql;
|
||||
SELECT cstore_truncate_test_regular_func();
|
||||
cstore_truncate_test_regular_func
|
||||
-----------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- the cached plans are used starting from the second call
|
||||
SELECT cstore_truncate_test_regular_func();
|
||||
cstore_truncate_test_regular_func
|
||||
-----------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP FUNCTION cstore_truncate_test_regular_func();
|
||||
DROP FOREIGN TABLE cstore_truncate_test, cstore_truncate_test_second;
|
||||
DROP TABLE cstore_truncate_test_regular;
|
||||
DROP FOREIGN TABLE cstore_truncate_test_compressed;
|
||||
-- test truncate with schema
|
||||
CREATE SCHEMA truncate_schema;
|
||||
CREATE FOREIGN TABLE truncate_schema.truncate_tbl (id int) SERVER cstore_server OPTIONS(compression 'pglz');
|
||||
INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100);
|
||||
SELECT COUNT(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
100
|
||||
(1 row)
|
||||
|
||||
TRUNCATE TABLE truncate_schema.truncate_tbl;
|
||||
SELECT COUNT(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100);
|
||||
-- create a user that can not truncate
|
||||
CREATE USER truncate_user;
|
||||
GRANT USAGE ON SCHEMA truncate_schema TO truncate_user;
|
||||
GRANT SELECT ON TABLE truncate_schema.truncate_tbl TO truncate_user;
|
||||
REVOKE TRUNCATE ON TABLE truncate_schema.truncate_tbl FROM truncate_user;
|
||||
SELECT current_user \gset
|
||||
\c - truncate_user
|
||||
-- verify truncate command fails and check number of rows
|
||||
SELECT count(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
100
|
||||
(1 row)
|
||||
|
||||
TRUNCATE TABLE truncate_schema.truncate_tbl;
|
||||
ERROR: permission denied for table truncate_tbl
|
||||
SELECT count(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
100
|
||||
(1 row)
|
||||
|
||||
-- switch to super user, grant truncate to truncate_user
|
||||
\c - :current_user
|
||||
GRANT TRUNCATE ON TABLE truncate_schema.truncate_tbl TO truncate_user;
|
||||
-- verify truncate_user can truncate now
|
||||
\c - truncate_user
|
||||
SELECT count(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
100
|
||||
(1 row)
|
||||
|
||||
TRUNCATE TABLE truncate_schema.truncate_tbl;
|
||||
SELECT count(*) FROM truncate_schema.truncate_tbl;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
\c - :current_user
|
||||
-- cleanup
|
||||
DROP SCHEMA truncate_schema CASCADE;
|
||||
NOTICE: drop cascades to foreign table truncate_schema.truncate_tbl
|
||||
DROP USER truncate_user;
|
||||
-- verify files are removed
|
||||
SELECT count(*) FROM (
|
||||
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
|
||||
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
|
||||
) AS q1) AS q2;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
|
@ -0,0 +1,262 @@
|
|||
--
-- Test the TRUNCATE TABLE command for cstore_fdw tables.
--
-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
-------------------
f
(1 row)

-- Check that files for the automatically managed table exist in the
-- cstore_fdw/{databaseoid} directory.
SELECT count(*) FROM (
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
) AS q1) AS q2;
count
-------
0
(1 row)

-- CREATE a cstore_fdw table, fill with some data --
CREATE FOREIGN TABLE cstore_truncate_test (a int, b int) SERVER cstore_server;
CREATE FOREIGN TABLE cstore_truncate_test_second (a int, b int) SERVER cstore_server;
CREATE FOREIGN TABLE cstore_truncate_test_compressed (a int, b int) SERVER cstore_server OPTIONS (compression 'pglz');
CREATE TABLE cstore_truncate_test_regular (a int, b int);
INSERT INTO cstore_truncate_test select a, a from generate_series(1, 10) a;
INSERT INTO cstore_truncate_test_compressed select a, a from generate_series(1, 10) a;
INSERT INTO cstore_truncate_test_compressed select a, a from generate_series(1, 10) a;
-- query rows
SELECT * FROM cstore_truncate_test;
a | b
----+----
1 | 1
2 | 2
3 | 3
4 | 4
5 | 5
6 | 6
7 | 7
8 | 8
9 | 9
10 | 10
(10 rows)

TRUNCATE TABLE cstore_truncate_test;
SELECT * FROM cstore_truncate_test;
a | b
---+---
(0 rows)

SELECT COUNT(*) from cstore_truncate_test;
count
-------
0
(1 row)

SELECT count(*) FROM cstore_truncate_test_compressed;
count
-------
20
(1 row)

TRUNCATE TABLE cstore_truncate_test_compressed;
SELECT count(*) FROM cstore_truncate_test_compressed;
count
-------
0
(1 row)

SELECT cstore_table_size('cstore_truncate_test_compressed');
cstore_table_size
-------------------
26
(1 row)

-- make sure data files still present
SELECT count(*) FROM (
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
) AS q1) AS q2;
count
-------
6
(1 row)

INSERT INTO cstore_truncate_test select a, a from generate_series(1, 10) a;
INSERT INTO cstore_truncate_test_regular select a, a from generate_series(10, 20) a;
INSERT INTO cstore_truncate_test_second select a, a from generate_series(20, 30) a;
SELECT * from cstore_truncate_test;
a | b
----+----
1 | 1
2 | 2
3 | 3
4 | 4
5 | 5
6 | 6
7 | 7
8 | 8
9 | 9
10 | 10
(10 rows)

SELECT * from cstore_truncate_test_second;
a | b
----+----
20 | 20
21 | 21
22 | 22
23 | 23
24 | 24
25 | 25
26 | 26
27 | 27
28 | 28
29 | 29
30 | 30
(11 rows)

SELECT * from cstore_truncate_test_regular;
a | b
----+----
10 | 10
11 | 11
12 | 12
13 | 13
14 | 14
15 | 15
16 | 16
17 | 17
18 | 18
19 | 19
20 | 20
(11 rows)

-- make sure multi truncate works
-- notice that the same table might be repeated
TRUNCATE TABLE cstore_truncate_test,
cstore_truncate_test_regular,
cstore_truncate_test_second,
cstore_truncate_test;
SELECT * from cstore_truncate_test;
a | b
---+---
(0 rows)

SELECT * from cstore_truncate_test_second;
a | b
---+---
(0 rows)

SELECT * from cstore_truncate_test_regular;
a | b
---+---
(0 rows)

-- test if truncate on empty table works
TRUNCATE TABLE cstore_truncate_test;
SELECT * from cstore_truncate_test;
a | b
---+---
(0 rows)

-- test if a cached truncate from a pl/pgsql function works
CREATE FUNCTION cstore_truncate_test_regular_func() RETURNS void AS $$
BEGIN
INSERT INTO cstore_truncate_test_regular select a, a from generate_series(1, 10) a;
TRUNCATE TABLE cstore_truncate_test_regular;
END;$$
LANGUAGE plpgsql;
SELECT cstore_truncate_test_regular_func();
cstore_truncate_test_regular_func
-----------------------------------

(1 row)

-- the cached plans are used starting from the second call
SELECT cstore_truncate_test_regular_func();
cstore_truncate_test_regular_func
-----------------------------------

(1 row)

DROP FUNCTION cstore_truncate_test_regular_func();
DROP FOREIGN TABLE cstore_truncate_test, cstore_truncate_test_second;
DROP TABLE cstore_truncate_test_regular;
DROP FOREIGN TABLE cstore_truncate_test_compressed;
-- test truncate with schema
CREATE SCHEMA truncate_schema;
CREATE FOREIGN TABLE truncate_schema.truncate_tbl (id int) SERVER cstore_server OPTIONS(compression 'pglz');
INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100);
SELECT COUNT(*) FROM truncate_schema.truncate_tbl;
count
-------
100
(1 row)

TRUNCATE TABLE truncate_schema.truncate_tbl;
SELECT COUNT(*) FROM truncate_schema.truncate_tbl;
count
-------
0
(1 row)

INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100);
-- create a user that cannot truncate
CREATE USER truncate_user;
GRANT USAGE ON SCHEMA truncate_schema TO truncate_user;
GRANT SELECT ON TABLE truncate_schema.truncate_tbl TO truncate_user;
REVOKE TRUNCATE ON TABLE truncate_schema.truncate_tbl FROM truncate_user;
SELECT current_user \gset
\c - truncate_user
-- verify truncate command fails and check number of rows
SELECT count(*) FROM truncate_schema.truncate_tbl;
count
-------
100
(1 row)

TRUNCATE TABLE truncate_schema.truncate_tbl;
ERROR: permission denied for relation truncate_tbl
SELECT count(*) FROM truncate_schema.truncate_tbl;
count
-------
100
(1 row)

-- switch to super user, grant truncate to truncate_user
\c - :current_user
GRANT TRUNCATE ON TABLE truncate_schema.truncate_tbl TO truncate_user;
-- verify truncate_user can truncate now
\c - truncate_user
SELECT count(*) FROM truncate_schema.truncate_tbl;
count
-------
100
(1 row)

TRUNCATE TABLE truncate_schema.truncate_tbl;
SELECT count(*) FROM truncate_schema.truncate_tbl;
count
-------
0
(1 row)

\c - :current_user
-- cleanup
DROP SCHEMA truncate_schema CASCADE;
NOTICE: drop cascades to foreign table truncate_schema.truncate_tbl
DROP USER truncate_user;
-- verify files are removed
SELECT count(*) FROM (
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
) AS q1) AS q2;
count
-------
0
(1 row)
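-- Illustrative note (not part of the original expected output): version_above_ten is
-- f here, i.e. this variant of the expected output targets PostgreSQL 10 and older,
-- which is why the permission error above reads "permission denied for relation"
-- rather than the "permission denied for table" wording produced by newer servers.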
|
@ -0,0 +1,71 @@
|
|||
--
-- Test block filtering in cstore_fdw using min/max values in stripe skip lists.
--

--
-- filtered_row_count returns number of rows filtered by the WHERE clause.
-- If blocks get filtered by cstore_fdw, fewer rows are passed to the WHERE
-- clause, so this function should return a lower number.
--
CREATE OR REPLACE FUNCTION filtered_row_count (query text) RETURNS bigint AS
$$
DECLARE
result bigint;
rec text;
BEGIN
result := 0;

FOR rec IN EXECUTE 'EXPLAIN ANALYZE ' || query LOOP
IF rec ~ '^\s+Rows Removed by Filter' then
result := regexp_replace(rec, '[^0-9]*', '', 'g');
END IF;
END LOOP;

RETURN result;
END;
$$ LANGUAGE PLPGSQL;

-- Create and load data
CREATE FOREIGN TABLE test_block_filtering (a int)
SERVER cstore_server
OPTIONS(filename '@abs_srcdir@/data/block_filtering.cstore',
block_row_count '1000', stripe_row_count '2000');

COPY test_block_filtering FROM '@abs_srcdir@/data/block_filtering.csv' WITH CSV;

-- Verify that filtered_row_count is less than 1000 for the following queries
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering');
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 200');
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a > 200');
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 9900');
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a > 9900');
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 0');

-- Verify that filtered_row_count is less than 2000 for the following queries
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN 1 AND 10');
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN 990 AND 2010');
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN -10 AND 0');

-- Load data for a second time and verify that filtered_row_count is exactly twice as before
COPY test_block_filtering FROM '@abs_srcdir@/data/block_filtering.csv' WITH CSV;
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 200');
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 0');
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN 990 AND 2010');

-- Verify that we are fine with collations which use a different alphabet order
CREATE FOREIGN TABLE collation_block_filtering_test(A text collate "da_DK")
SERVER cstore_server
OPTIONS(filename '@abs_srcdir@/data/collation_block_filtering.cstore');
COPY collation_block_filtering_test FROM STDIN;
A
Å
B
\.

SELECT * FROM collation_block_filtering_test WHERE A > 'B';
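-- A minimal illustrative sketch (not part of the original test): filtered_row_count
-- works by running EXPLAIN ANALYZE and stripping every non-digit character from the
-- "Rows Removed by Filter" line; the standalone regexp_replace call below shows that
-- parsing step on a sample line and returns 801.
SELECT regexp_replace('        Rows Removed by Filter: 801', '[^0-9]*', '', 'g') AS removed_rows;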
|
@ -0,0 +1,18 @@
|
|||
--
-- Test copying data from cstore_fdw tables.
--
CREATE FOREIGN TABLE test_contestant(handle TEXT, birthdate DATE, rating INT,
percentile FLOAT, country CHAR(3), achievements TEXT[])
SERVER cstore_server
OPTIONS(filename '@abs_srcdir@/data/test_contestant.cstore');

-- load table data from file
COPY test_contestant FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;

-- export using COPY table TO ...
COPY test_contestant TO STDOUT;

-- export using COPY (SELECT * FROM table) TO ...
COPY (select * from test_contestant) TO STDOUT;

DROP FOREIGN TABLE test_contestant CASCADE;
|
@ -0,0 +1,49 @@
|
|||
--
-- Test the CREATE statements related to cstore_fdw.
--

-- Install cstore_fdw
CREATE EXTENSION cstore_fdw;

CREATE SERVER cstore_server FOREIGN DATA WRAPPER cstore_fdw;

-- Validator tests
CREATE FOREIGN TABLE test_validator_invalid_option ()
SERVER cstore_server
OPTIONS(filename 'data.cstore', bad_option_name '1'); -- ERROR

CREATE FOREIGN TABLE test_validator_invalid_stripe_row_count ()
SERVER cstore_server
OPTIONS(filename 'data.cstore', stripe_row_count '0'); -- ERROR

CREATE FOREIGN TABLE test_validator_invalid_block_row_count ()
SERVER cstore_server
OPTIONS(filename 'data.cstore', block_row_count '0'); -- ERROR

CREATE FOREIGN TABLE test_validator_invalid_compression_type ()
SERVER cstore_server
OPTIONS(filename 'data.cstore', compression 'invalid_compression'); -- ERROR

-- Invalid file path test
CREATE FOREIGN TABLE test_invalid_file_path ()
SERVER cstore_server
OPTIONS(filename 'bad_directory_path/bad_file_path'); --ERROR

-- Create uncompressed table
CREATE FOREIGN TABLE contestant (handle TEXT, birthdate DATE, rating INT,
percentile FLOAT, country CHAR(3), achievements TEXT[])
SERVER cstore_server
OPTIONS(filename '@abs_srcdir@/data/contestant.cstore');

-- Create compressed table with automatically determined file path
CREATE FOREIGN TABLE contestant_compressed (handle TEXT, birthdate DATE, rating INT,
percentile FLOAT, country CHAR(3), achievements TEXT[])
SERVER cstore_server
OPTIONS(compression 'pglz');

-- Test that querying an empty table works
ANALYZE contestant;
SELECT count(*) FROM contestant;
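-- A minimal sketch (not part of the original test), using the hypothetical table name
-- test_validator_valid_options: an OPTIONS combination the validator accepts, with
-- stripe_row_count and block_row_count inside the ranges reported by the validator
-- hints in the expected output for this test.
CREATE FOREIGN TABLE test_validator_valid_options ()
SERVER cstore_server
OPTIONS(compression 'pglz', stripe_row_count '5000', block_row_count '2000');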
|
@ -0,0 +1,74 @@
|
|||
--
-- Test loading and reading different data types to/from cstore_fdw foreign tables.
--

-- Settings to make the result deterministic
SET datestyle = "ISO, YMD";
SET timezone to 'GMT';
SET intervalstyle TO 'POSTGRES_VERBOSE';

-- Test array types
CREATE FOREIGN TABLE test_array_types (int_array int[], bigint_array bigint[],
text_array text[]) SERVER cstore_server
OPTIONS(filename '@abs_srcdir@/data/array_types.cstore');

COPY test_array_types FROM '@abs_srcdir@/data/array_types.csv' WITH CSV;

SELECT * FROM test_array_types;

-- Test date/time types
CREATE FOREIGN TABLE test_datetime_types (timestamp timestamp,
timestamp_with_timezone timestamp with time zone, date date, time time,
interval interval) SERVER cstore_server
OPTIONS(filename '@abs_srcdir@/data/datetime_types.cstore');

COPY test_datetime_types FROM '@abs_srcdir@/data/datetime_types.csv' WITH CSV;

SELECT * FROM test_datetime_types;

-- Test enum and composite types
CREATE TYPE enum_type AS ENUM ('a', 'b', 'c');
CREATE TYPE composite_type AS (a int, b text);

CREATE FOREIGN TABLE test_enum_and_composite_types (enum enum_type,
composite composite_type) SERVER cstore_server
OPTIONS(filename '@abs_srcdir@/data/enum_and_composite_types.cstore');

COPY test_enum_and_composite_types FROM
'@abs_srcdir@/data/enum_and_composite_types.csv' WITH CSV;

SELECT * FROM test_enum_and_composite_types;

-- Test range types
CREATE FOREIGN TABLE test_range_types (int4range int4range, int8range int8range,
numrange numrange, tsrange tsrange) SERVER cstore_server
OPTIONS(filename '@abs_srcdir@/data/range_types.cstore');

COPY test_range_types FROM '@abs_srcdir@/data/range_types.csv' WITH CSV;

SELECT * FROM test_range_types;

-- Test other types
CREATE FOREIGN TABLE test_other_types (bool boolean, bytea bytea, money money,
inet inet, bitstring bit varying(5), uuid uuid, json json) SERVER cstore_server
OPTIONS(filename '@abs_srcdir@/data/other_types.cstore');

COPY test_other_types FROM '@abs_srcdir@/data/other_types.csv' WITH CSV;

SELECT * FROM test_other_types;

-- Test null values
CREATE FOREIGN TABLE test_null_values (a int, b int[], c composite_type)
SERVER cstore_server
OPTIONS(filename '@abs_srcdir@/data/null_values.cstore');

COPY test_null_values FROM '@abs_srcdir@/data/null_values.csv' WITH CSV;

SELECT * FROM test_null_values;
|
@ -0,0 +1,44 @@
|
|||
--
-- Test loading data into cstore_fdw tables.
--

-- COPY with incorrect delimiter
COPY contestant FROM '@abs_srcdir@/data/contestants.1.csv'
WITH DELIMITER '|'; -- ERROR

-- COPY with invalid program
COPY contestant FROM PROGRAM 'invalid_program' WITH CSV; -- ERROR

-- COPY into uncompressed table from file
COPY contestant FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;

-- COPY into uncompressed table from program
COPY contestant FROM PROGRAM 'cat @abs_srcdir@/data/contestants.2.csv' WITH CSV;

-- COPY into compressed table
COPY contestant_compressed FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;

-- COPY into compressed table from program
COPY contestant_compressed FROM PROGRAM 'cat @abs_srcdir@/data/contestants.2.csv'
WITH CSV;

-- Test column list
CREATE FOREIGN TABLE famous_constants (id int, name text, value real)
SERVER cstore_server;
COPY famous_constants (value, name, id) FROM STDIN WITH CSV;
3.141,pi,1
2.718,e,2
0.577,gamma,3
5.291e-11,bohr radius,4
\.

COPY famous_constants (name, value) FROM STDIN WITH CSV;
avagadro,6.022e23
electron mass,9.109e-31
proton mass,1.672e-27
speed of light,2.997e8
\.

SELECT * FROM famous_constants ORDER BY id, name;

DROP FOREIGN TABLE famous_constants;
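-- Illustrative note (not part of the original test): when COPY is given a column
-- list, any omitted column (id in the second COPY above) is filled with NULL, which
-- is why those rows appear after the explicitly numbered ones in the final
-- ORDER BY id, name query (NULLs sort last in ascending order by default).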
|
@ -0,0 +1,118 @@
|
|||
--
-- Test block filtering in cstore_fdw using min/max values in stripe skip lists.
--
--
-- filtered_row_count returns number of rows filtered by the WHERE clause.
-- If blocks get filtered by cstore_fdw, fewer rows are passed to the WHERE
-- clause, so this function should return a lower number.
--
CREATE OR REPLACE FUNCTION filtered_row_count (query text) RETURNS bigint AS
$$
DECLARE
result bigint;
rec text;
BEGIN
result := 0;

FOR rec IN EXECUTE 'EXPLAIN ANALYZE ' || query LOOP
IF rec ~ '^\s+Rows Removed by Filter' then
result := regexp_replace(rec, '[^0-9]*', '', 'g');
END IF;
END LOOP;

RETURN result;
END;
$$ LANGUAGE PLPGSQL;
-- Create and load data
CREATE FOREIGN TABLE test_block_filtering (a int)
SERVER cstore_server
OPTIONS(filename '@abs_srcdir@/data/block_filtering.cstore',
block_row_count '1000', stripe_row_count '2000');
COPY test_block_filtering FROM '@abs_srcdir@/data/block_filtering.csv' WITH CSV;
-- Verify that filtered_row_count is less than 1000 for the following queries
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering');
filtered_row_count
--------------------
0
(1 row)

SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 200');
filtered_row_count
--------------------
801
(1 row)

SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a > 200');
filtered_row_count
--------------------
200
(1 row)

SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 9900');
filtered_row_count
--------------------
101
(1 row)

SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a > 9900');
filtered_row_count
--------------------
900
(1 row)

SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 0');
filtered_row_count
--------------------
0
(1 row)

-- Verify that filtered_row_count is less than 2000 for the following queries
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN 1 AND 10');
filtered_row_count
--------------------
990
(1 row)

SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN 990 AND 2010');
filtered_row_count
--------------------
1979
(1 row)

SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN -10 AND 0');
filtered_row_count
--------------------
0
(1 row)

-- Load data for a second time and verify that filtered_row_count is exactly twice as before
COPY test_block_filtering FROM '@abs_srcdir@/data/block_filtering.csv' WITH CSV;
SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 200');
filtered_row_count
--------------------
1602
(1 row)

SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a < 0');
filtered_row_count
--------------------
0
(1 row)

SELECT filtered_row_count('SELECT count(*) FROM test_block_filtering WHERE a BETWEEN 990 AND 2010');
filtered_row_count
--------------------
3958
(1 row)

-- Verify that we are fine with collations which use a different alphabet order
CREATE FOREIGN TABLE collation_block_filtering_test(A text collate "da_DK")
SERVER cstore_server
OPTIONS(filename '@abs_srcdir@/data/collation_block_filtering.cstore');
COPY collation_block_filtering_test FROM STDIN;
SELECT * FROM collation_block_filtering_test WHERE A > 'B';
a
---
Å
(1 row)
|
@ -0,0 +1,24 @@
|
|||
--
-- Test copying data from cstore_fdw tables.
--
CREATE FOREIGN TABLE test_contestant(handle TEXT, birthdate DATE, rating INT,
percentile FLOAT, country CHAR(3), achievements TEXT[])
SERVER cstore_server
OPTIONS(filename '@abs_srcdir@/data/test_contestant.cstore');
-- load table data from file
COPY test_contestant FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;
-- export using COPY table TO ...
COPY test_contestant TO STDOUT;
a 01-10-1990 2090 97.1 XA {a}
b 11-01-1990 2203 98.1 XA {a,b}
c 11-01-1988 2907 99.4 XB {w,y}
d 05-05-1985 2314 98.3 XB {}
e 05-05-1995 2236 98.2 XC {a}
-- export using COPY (SELECT * FROM table) TO ...
COPY (select * from test_contestant) TO STDOUT;
a 01-10-1990 2090 97.1 XA {a}
b 11-01-1990 2203 98.1 XA {a,b}
c 11-01-1988 2907 99.4 XB {w,y}
d 05-05-1985 2314 98.3 XB {}
e 05-05-1995 2236 98.2 XC {a}
DROP FOREIGN TABLE test_contestant CASCADE;
|
@ -0,0 +1,50 @@
|
|||
--
-- Test the CREATE statements related to cstore_fdw.
--
-- Install cstore_fdw
CREATE EXTENSION cstore_fdw;
CREATE SERVER cstore_server FOREIGN DATA WRAPPER cstore_fdw;
-- Validator tests
CREATE FOREIGN TABLE test_validator_invalid_option ()
SERVER cstore_server
OPTIONS(filename 'data.cstore', bad_option_name '1'); -- ERROR
ERROR: invalid option "bad_option_name"
HINT: Valid options in this context are: filename, compression, stripe_row_count, block_row_count
CREATE FOREIGN TABLE test_validator_invalid_stripe_row_count ()
SERVER cstore_server
OPTIONS(filename 'data.cstore', stripe_row_count '0'); -- ERROR
ERROR: invalid stripe row count
HINT: Stripe row count must be an integer between 1000 and 10000000
CREATE FOREIGN TABLE test_validator_invalid_block_row_count ()
SERVER cstore_server
OPTIONS(filename 'data.cstore', block_row_count '0'); -- ERROR
ERROR: invalid block row count
HINT: Block row count must be an integer between 1000 and 100000
CREATE FOREIGN TABLE test_validator_invalid_compression_type ()
SERVER cstore_server
OPTIONS(filename 'data.cstore', compression 'invalid_compression'); -- ERROR
ERROR: invalid compression type
HINT: Valid options are: none, pglz
-- Invalid file path test
CREATE FOREIGN TABLE test_invalid_file_path ()
SERVER cstore_server
OPTIONS(filename 'bad_directory_path/bad_file_path'); --ERROR
ERROR: could not open file "bad_directory_path/bad_file_path" for writing: No such file or directory
-- Create uncompressed table
CREATE FOREIGN TABLE contestant (handle TEXT, birthdate DATE, rating INT,
percentile FLOAT, country CHAR(3), achievements TEXT[])
SERVER cstore_server
OPTIONS(filename '@abs_srcdir@/data/contestant.cstore');
-- Create compressed table with automatically determined file path
CREATE FOREIGN TABLE contestant_compressed (handle TEXT, birthdate DATE, rating INT,
percentile FLOAT, country CHAR(3), achievements TEXT[])
SERVER cstore_server
OPTIONS(compression 'pglz');
-- Test that querying an empty table works
ANALYZE contestant;
SELECT count(*) FROM contestant;
count
-------
0
(1 row)
|
@ -0,0 +1,84 @@
|
|||
--
-- Test loading and reading different data types to/from cstore_fdw foreign tables.
--
-- Settings to make the result deterministic
SET datestyle = "ISO, YMD";
SET timezone to 'GMT';
SET intervalstyle TO 'POSTGRES_VERBOSE';
-- Test array types
CREATE FOREIGN TABLE test_array_types (int_array int[], bigint_array bigint[],
text_array text[]) SERVER cstore_server
OPTIONS(filename '@abs_srcdir@/data/array_types.cstore');
COPY test_array_types FROM '@abs_srcdir@/data/array_types.csv' WITH CSV;
SELECT * FROM test_array_types;
int_array | bigint_array | text_array
--------------------------+--------------------------------------------+------------
{1,2,3} | {1,2,3} | {a,b,c}
{} | {} | {}
{-2147483648,2147483647} | {-9223372036854775808,9223372036854775807} | {""}
(3 rows)

-- Test date/time types
CREATE FOREIGN TABLE test_datetime_types (timestamp timestamp,
timestamp_with_timezone timestamp with time zone, date date, time time,
interval interval) SERVER cstore_server
OPTIONS(filename '@abs_srcdir@/data/datetime_types.cstore');
COPY test_datetime_types FROM '@abs_srcdir@/data/datetime_types.csv' WITH CSV;
SELECT * FROM test_datetime_types;
timestamp | timestamp_with_timezone | date | time | interval
---------------------+-------------------------+------------+----------+-----------
2000-01-02 04:05:06 | 1999-01-08 12:05:06+00 | 2000-01-02 | 04:05:06 | @ 4 hours
1970-01-01 00:00:00 | infinity | -infinity | 00:00:00 | @ 0
(2 rows)

-- Test enum and composite types
CREATE TYPE enum_type AS ENUM ('a', 'b', 'c');
CREATE TYPE composite_type AS (a int, b text);
CREATE FOREIGN TABLE test_enum_and_composite_types (enum enum_type,
composite composite_type) SERVER cstore_server
OPTIONS(filename '@abs_srcdir@/data/enum_and_composite_types.cstore');
COPY test_enum_and_composite_types FROM
'@abs_srcdir@/data/enum_and_composite_types.csv' WITH CSV;
SELECT * FROM test_enum_and_composite_types;
enum | composite
------+-----------
a | (2,b)
b | (3,c)
(2 rows)

-- Test range types
CREATE FOREIGN TABLE test_range_types (int4range int4range, int8range int8range,
numrange numrange, tsrange tsrange) SERVER cstore_server
OPTIONS(filename '@abs_srcdir@/data/range_types.cstore');
COPY test_range_types FROM '@abs_srcdir@/data/range_types.csv' WITH CSV;
SELECT * FROM test_range_types;
int4range | int8range | numrange | tsrange
-----------+-----------+----------+-----------------------------------------------
[1,3) | [1,3) | [1,3) | ["2000-01-02 00:30:00","2010-02-03 12:30:00")
empty | [1,) | (,) | empty
(2 rows)

-- Test other types
CREATE FOREIGN TABLE test_other_types (bool boolean, bytea bytea, money money,
inet inet, bitstring bit varying(5), uuid uuid, json json) SERVER cstore_server
OPTIONS(filename '@abs_srcdir@/data/other_types.cstore');
COPY test_other_types FROM '@abs_srcdir@/data/other_types.csv' WITH CSV;
SELECT * FROM test_other_types;
bool | bytea | money | inet | bitstring | uuid | json
------+------------+-------+-------------+-----------+--------------------------------------+------------------
f | \xdeadbeef | $1.00 | 192.168.1.2 | 10101 | a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11 | {"key": "value"}
t | \xcdb0 | $1.50 | 127.0.0.1 | | a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11 | []
(2 rows)

-- Test null values
CREATE FOREIGN TABLE test_null_values (a int, b int[], c composite_type)
SERVER cstore_server
OPTIONS(filename '@abs_srcdir@/data/null_values.cstore');
COPY test_null_values FROM '@abs_srcdir@/data/null_values.csv' WITH CSV;
SELECT * FROM test_null_values;
a | b | c
---+--------+-----
| {NULL} | (,)
| |
(2 rows)
|
@ -0,0 +1,39 @@
|
|||
--
-- Test loading data into cstore_fdw tables.
--
-- COPY with incorrect delimiter
COPY contestant FROM '@abs_srcdir@/data/contestants.1.csv'
WITH DELIMITER '|'; -- ERROR
ERROR: missing data for column "birthdate"
-- COPY with invalid program
COPY contestant FROM PROGRAM 'invalid_program' WITH CSV; -- ERROR
ERROR: program "invalid_program" failed
DETAIL: command not found
-- COPY into uncompressed table from file
COPY contestant FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;
-- COPY into uncompressed table from program
COPY contestant FROM PROGRAM 'cat @abs_srcdir@/data/contestants.2.csv' WITH CSV;
-- COPY into compressed table
COPY contestant_compressed FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;
-- COPY into compressed table from program
COPY contestant_compressed FROM PROGRAM 'cat @abs_srcdir@/data/contestants.2.csv'
WITH CSV;
-- Test column list
CREATE FOREIGN TABLE famous_constants (id int, name text, value real)
SERVER cstore_server;
COPY famous_constants (value, name, id) FROM STDIN WITH CSV;
COPY famous_constants (name, value) FROM STDIN WITH CSV;
SELECT * FROM famous_constants ORDER BY id, name;
id | name | value
----+----------------+-----------
1 | pi | 3.141
2 | e | 2.718
3 | gamma | 0.577
4 | bohr radius | 5.291e-11
| avagadro | 6.022e+23
| electron mass | 9.109e-31
| proton mass | 1.672e-27
| speed of light | 2.997e+08
(8 rows)

DROP FOREIGN TABLE famous_constants;
|
@ -0,0 +1,85 @@
|
|||
--
-- Testing ALTER TABLE on cstore_fdw tables.
--

CREATE FOREIGN TABLE test_alter_table (a int, b int, c int) SERVER cstore_server;

WITH sample_data AS (VALUES
(1, 2, 3),
(4, 5, 6),
(7, 8, 9)
)
INSERT INTO test_alter_table SELECT * FROM sample_data;

-- drop a column
ALTER FOREIGN TABLE test_alter_table DROP COLUMN a;

-- test analyze
ANALYZE test_alter_table;

-- verify select queries run as expected
SELECT * FROM test_alter_table;
SELECT a FROM test_alter_table;
SELECT b FROM test_alter_table;

-- verify insert runs as expected
INSERT INTO test_alter_table (SELECT 3, 5, 8);
INSERT INTO test_alter_table (SELECT 5, 8);

-- add a column with no defaults
ALTER FOREIGN TABLE test_alter_table ADD COLUMN d int;
SELECT * FROM test_alter_table;
INSERT INTO test_alter_table (SELECT 3, 5, 8);
SELECT * FROM test_alter_table;

-- add a fixed-length column with default value
ALTER FOREIGN TABLE test_alter_table ADD COLUMN e int default 3;
SELECT * from test_alter_table;
INSERT INTO test_alter_table (SELECT 1, 2, 4, 8);
SELECT * from test_alter_table;

-- add a variable-length column with default value
ALTER FOREIGN TABLE test_alter_table ADD COLUMN f text DEFAULT 'TEXT ME';
SELECT * from test_alter_table;
INSERT INTO test_alter_table (SELECT 1, 2, 4, 8, 'ABCDEF');
SELECT * from test_alter_table;

-- drop a couple of columns
ALTER FOREIGN TABLE test_alter_table DROP COLUMN c;
ALTER FOREIGN TABLE test_alter_table DROP COLUMN e;
ANALYZE test_alter_table;
SELECT * from test_alter_table;
SELECT count(*) from test_alter_table;
SELECT count(t.*) from test_alter_table t;

-- unsupported default values
ALTER FOREIGN TABLE test_alter_table ADD COLUMN g boolean DEFAULT isfinite(current_date);
ALTER FOREIGN TABLE test_alter_table ADD COLUMN h DATE DEFAULT current_date;
SELECT * FROM test_alter_table;
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN g DROP DEFAULT;
SELECT * FROM test_alter_table;
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN h DROP DEFAULT;
ANALYZE test_alter_table;
SELECT * FROM test_alter_table;

-- unsupported type change
ALTER FOREIGN TABLE test_alter_table ADD COLUMN i int;
ALTER FOREIGN TABLE test_alter_table ADD COLUMN j float;
ALTER FOREIGN TABLE test_alter_table ADD COLUMN k text;

-- this is a valid type change
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN i TYPE float;

-- this is not valid
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN j TYPE int;

-- text / varchar conversion is valid both ways
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN k TYPE varchar(20);
ALTER FOREIGN TABLE test_alter_table ALTER COLUMN k TYPE text;

DROP FOREIGN TABLE test_alter_table;
|
@ -0,0 +1,11 @@
|
|||
--
-- Test the ANALYZE command for cstore_fdw tables.
--

-- ANALYZE uncompressed table
ANALYZE contestant;
SELECT count(*) FROM pg_stats WHERE tablename='contestant';

-- ANALYZE compressed table
ANALYZE contestant_compressed;
SELECT count(*) FROM pg_stats WHERE tablename='contestant_compressed';
|
@ -0,0 +1,76 @@
|
|||
--
-- Tests the different DROP commands for cstore_fdw tables.
--
-- DROP FOREIGN TABLE
-- DROP SCHEMA
-- DROP EXTENSION
-- DROP DATABASE
--

-- Note that travis does not create
-- cstore_fdw extension in default database (postgres). This has caused
-- different behavior between travis tests and local tests. Thus
-- 'postgres' directory is excluded from comparison to have the same result.

-- store postgres database oid
SELECT oid postgres_oid FROM pg_database WHERE datname = 'postgres' \gset

-- Check that files for the automatically managed table exist in the
-- cstore_fdw/{databaseoid} directory.
SELECT count(*) FROM (
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
) AS q1) AS q2;

-- DROP cstore_fdw tables
DROP FOREIGN TABLE contestant;
DROP FOREIGN TABLE contestant_compressed;

-- Create a cstore_fdw table under a schema and drop it.
CREATE SCHEMA test_schema;
CREATE FOREIGN TABLE test_schema.test_table(data int) SERVER cstore_server;
DROP SCHEMA test_schema CASCADE;

-- Check that the files have been deleted and the directory is empty after the
-- DROP table command.
SELECT count(*) FROM (
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
) AS q1) AS q2;

SELECT current_database() datname \gset

CREATE DATABASE db_to_drop;
\c db_to_drop
CREATE EXTENSION cstore_fdw;
CREATE SERVER cstore_server FOREIGN DATA WRAPPER cstore_fdw;
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database() \gset

CREATE FOREIGN TABLE test_table(data int) SERVER cstore_server;
-- should see 2 files, data and footer file for single table
SELECT count(*) FROM pg_ls_dir('cstore_fdw/' || :databaseoid);

-- should see 2 directories 1 for each database, excluding postgres database
SELECT count(*) FROM pg_ls_dir('cstore_fdw') WHERE pg_ls_dir != :postgres_oid::text;

DROP EXTENSION cstore_fdw CASCADE;

-- should only see 1 directory here
SELECT count(*) FROM pg_ls_dir('cstore_fdw') WHERE pg_ls_dir != :postgres_oid::text;

-- test database drop
CREATE EXTENSION cstore_fdw;
CREATE SERVER cstore_server FOREIGN DATA WRAPPER cstore_fdw;
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database() \gset

CREATE FOREIGN TABLE test_table(data int) SERVER cstore_server;

-- should see 2 directories 1 for each database
SELECT count(*) FROM pg_ls_dir('cstore_fdw') WHERE pg_ls_dir != :postgres_oid::text;

\c :datname

DROP DATABASE db_to_drop;

-- should only see 1 directory for the default database
SELECT count(*) FROM pg_ls_dir('cstore_fdw') WHERE pg_ls_dir != :postgres_oid::text;
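-- A minimal sketch (not part of the original test) of how the directory checked above
-- is composed: automatically managed cstore_fdw files live under
-- 'cstore_fdw/<database oid>' inside the data directory, so the relative path can be
-- built explicitly like this.
SELECT 'cstore_fdw/' || oid::text AS cstore_directory
FROM pg_database WHERE datname = current_database();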
|
@ -0,0 +1,20 @@
|
|||
--
-- Test utility functions for cstore_fdw tables.
--

CREATE FOREIGN TABLE empty_table (a int) SERVER cstore_server;
CREATE FOREIGN TABLE table_with_data (a int) SERVER cstore_server;
CREATE TABLE non_cstore_table (a int);

COPY table_with_data FROM STDIN;
1
2
3
\.

SELECT cstore_table_size('empty_table') < cstore_table_size('table_with_data');
SELECT cstore_table_size('non_cstore_table');

DROP FOREIGN TABLE empty_table;
DROP FOREIGN TABLE table_with_data;
DROP TABLE non_cstore_table;
|
@ -0,0 +1,56 @@
|
|||
--
-- Testing insert on cstore_fdw tables.
--

CREATE FOREIGN TABLE test_insert_command (a int) SERVER cstore_server;

-- test single row inserts fail
select count(*) from test_insert_command;
insert into test_insert_command values(1);
select count(*) from test_insert_command;

insert into test_insert_command default values;
select count(*) from test_insert_command;

-- test inserting from another table succeeds
CREATE TABLE test_insert_command_data (a int);

select count(*) from test_insert_command_data;
insert into test_insert_command_data values(1);
select count(*) from test_insert_command_data;

insert into test_insert_command select * from test_insert_command_data;
select count(*) from test_insert_command;

drop table test_insert_command_data;
drop foreign table test_insert_command;

-- test long attribute value insertion
-- create sufficiently long text so that data is stored in toast
CREATE TABLE test_long_text AS
SELECT a as int_val, string_agg(random()::text, '') as text_val
FROM generate_series(1, 10) a, generate_series(1, 1000) b
GROUP BY a ORDER BY a;

-- store hash values of text for later comparison
CREATE TABLE test_long_text_hash AS
SELECT int_val, md5(text_val) AS hash
FROM test_long_text;

CREATE FOREIGN TABLE test_cstore_long_text(int_val int, text_val text)
SERVER cstore_server;

-- store long text in cstore table
INSERT INTO test_cstore_long_text SELECT * FROM test_long_text;

-- drop source table to remove original text from toast
DROP TABLE test_long_text;

-- check if text data is still available in cstore table
-- by comparing previously stored hash.
SELECT a.int_val
FROM test_long_text_hash a, test_cstore_long_text c
WHERE a.int_val = c.int_val AND a.hash = md5(c.text_val);

DROP TABLE test_long_text_hash;
DROP FOREIGN TABLE test_cstore_long_text;
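-- A minimal sketch (not part of the original test): the text built above with
-- string_agg(random()::text, '') over 1000 rows is far longer than the roughly 2 kB
-- TOAST threshold, which is what pushes the heap copy into toast storage; the
-- generated length can be inspected directly.
SELECT octet_length(string_agg(random()::text, '')) AS generated_text_length
FROM generate_series(1, 1000) b;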
|
@ -0,0 +1,34 @@
|
|||
--
-- Test querying cstore_fdw tables.
--

-- Settings to make the result deterministic
SET datestyle = "ISO, YMD";

-- Query uncompressed data
SELECT count(*) FROM contestant;
SELECT avg(rating), stddev_samp(rating) FROM contestant;
SELECT country, avg(rating) FROM contestant WHERE rating > 2200
GROUP BY country ORDER BY country;
SELECT * FROM contestant ORDER BY handle;

-- Query compressed data
SELECT count(*) FROM contestant_compressed;
SELECT avg(rating), stddev_samp(rating) FROM contestant_compressed;
SELECT country, avg(rating) FROM contestant_compressed WHERE rating > 2200
GROUP BY country ORDER BY country;
SELECT * FROM contestant_compressed ORDER BY handle;

-- Verify that we handle whole-row references correctly
SELECT to_json(v) FROM contestant v ORDER BY rating LIMIT 1;

-- Test variables used in expressions
CREATE FOREIGN TABLE union_first (a int, b int) SERVER cstore_server;
CREATE FOREIGN TABLE union_second (a int, b int) SERVER cstore_server;

INSERT INTO union_first SELECT a, a FROM generate_series(1, 5) a;
INSERT INTO union_second SELECT a, a FROM generate_series(11, 15) a;

(SELECT a*1, b FROM union_first) union all (SELECT a*1, b FROM union_second);

DROP FOREIGN TABLE union_first, union_second;
|
@ -0,0 +1,135 @@
|
|||
--
-- Test the TRUNCATE TABLE command for cstore_fdw tables.
--

-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;

-- Check that files for the automatically managed table exist in the
-- cstore_fdw/{databaseoid} directory.
SELECT count(*) FROM (
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
) AS q1) AS q2;

-- CREATE a cstore_fdw table, fill with some data --
CREATE FOREIGN TABLE cstore_truncate_test (a int, b int) SERVER cstore_server;
CREATE FOREIGN TABLE cstore_truncate_test_second (a int, b int) SERVER cstore_server;
CREATE FOREIGN TABLE cstore_truncate_test_compressed (a int, b int) SERVER cstore_server OPTIONS (compression 'pglz');
CREATE TABLE cstore_truncate_test_regular (a int, b int);

INSERT INTO cstore_truncate_test select a, a from generate_series(1, 10) a;

INSERT INTO cstore_truncate_test_compressed select a, a from generate_series(1, 10) a;
INSERT INTO cstore_truncate_test_compressed select a, a from generate_series(1, 10) a;

-- query rows
SELECT * FROM cstore_truncate_test;

TRUNCATE TABLE cstore_truncate_test;

SELECT * FROM cstore_truncate_test;

SELECT COUNT(*) from cstore_truncate_test;

SELECT count(*) FROM cstore_truncate_test_compressed;
TRUNCATE TABLE cstore_truncate_test_compressed;
SELECT count(*) FROM cstore_truncate_test_compressed;

SELECT cstore_table_size('cstore_truncate_test_compressed');

-- make sure data files still present
SELECT count(*) FROM (
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
) AS q1) AS q2;

INSERT INTO cstore_truncate_test select a, a from generate_series(1, 10) a;
INSERT INTO cstore_truncate_test_regular select a, a from generate_series(10, 20) a;
INSERT INTO cstore_truncate_test_second select a, a from generate_series(20, 30) a;

SELECT * from cstore_truncate_test;

SELECT * from cstore_truncate_test_second;

SELECT * from cstore_truncate_test_regular;

-- make sure multi truncate works
-- notice that the same table might be repeated
TRUNCATE TABLE cstore_truncate_test,
cstore_truncate_test_regular,
cstore_truncate_test_second,
cstore_truncate_test;

SELECT * from cstore_truncate_test;
SELECT * from cstore_truncate_test_second;
SELECT * from cstore_truncate_test_regular;

-- test if truncate on empty table works
TRUNCATE TABLE cstore_truncate_test;
SELECT * from cstore_truncate_test;

-- test if a cached truncate from a pl/pgsql function works
CREATE FUNCTION cstore_truncate_test_regular_func() RETURNS void AS $$
BEGIN
INSERT INTO cstore_truncate_test_regular select a, a from generate_series(1, 10) a;
TRUNCATE TABLE cstore_truncate_test_regular;
END;$$
LANGUAGE plpgsql;

SELECT cstore_truncate_test_regular_func();
-- the cached plans are used starting from the second call
SELECT cstore_truncate_test_regular_func();
DROP FUNCTION cstore_truncate_test_regular_func();

DROP FOREIGN TABLE cstore_truncate_test, cstore_truncate_test_second;
DROP TABLE cstore_truncate_test_regular;
DROP FOREIGN TABLE cstore_truncate_test_compressed;

-- test truncate with schema
CREATE SCHEMA truncate_schema;
CREATE FOREIGN TABLE truncate_schema.truncate_tbl (id int) SERVER cstore_server OPTIONS(compression 'pglz');
INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100);
SELECT COUNT(*) FROM truncate_schema.truncate_tbl;

TRUNCATE TABLE truncate_schema.truncate_tbl;
SELECT COUNT(*) FROM truncate_schema.truncate_tbl;

INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100);

-- create a user that cannot truncate
CREATE USER truncate_user;
GRANT USAGE ON SCHEMA truncate_schema TO truncate_user;
GRANT SELECT ON TABLE truncate_schema.truncate_tbl TO truncate_user;
REVOKE TRUNCATE ON TABLE truncate_schema.truncate_tbl FROM truncate_user;

SELECT current_user \gset

\c - truncate_user
-- verify truncate command fails and check number of rows
SELECT count(*) FROM truncate_schema.truncate_tbl;
TRUNCATE TABLE truncate_schema.truncate_tbl;
SELECT count(*) FROM truncate_schema.truncate_tbl;

-- switch to super user, grant truncate to truncate_user
\c - :current_user
GRANT TRUNCATE ON TABLE truncate_schema.truncate_tbl TO truncate_user;

-- verify truncate_user can truncate now
\c - truncate_user
SELECT count(*) FROM truncate_schema.truncate_tbl;
TRUNCATE TABLE truncate_schema.truncate_tbl;
SELECT count(*) FROM truncate_schema.truncate_tbl;

\c - :current_user

-- cleanup
DROP SCHEMA truncate_schema CASCADE;
DROP USER truncate_user;

-- verify files are removed
SELECT count(*) FROM (
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
) AS q1) AS q2;
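-- A minimal sketch (not part of the original test): before the cleanup above drops
-- truncate_user, the TRUNCATE privilege that the test revokes and re-grants could
-- also be inspected directly with the built-in has_table_privilege() function.
-- SELECT has_table_privilege('truncate_user', 'truncate_schema.truncate_tbl', 'TRUNCATE');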