mirror of https://github.com/citusdata/citus.git
Merge remote-tracking branch 'origin/remove_source_files' into pg15_support_tmp_onder
commit cab41c4fc9
@@ -17,12 +17,6 @@ trim_trailing_whitespace = true
insert_final_newline = unset
trim_trailing_whitespace = unset

# Don't change test/regress/output directory, this needs to be a separate rule
# for some reason
[/src/test/regress/output/**]
insert_final_newline = unset
trim_trailing_whitespace = unset

[*.{sql,sh,py}]
indent_style = space
indent_size = 4
@@ -16,7 +16,6 @@ README.* conflict-marker-size=32

# Test output files that contain extra whitespace
*.out -whitespace
src/test/regress/output/*.source -whitespace

# These files are maintained or generated elsewhere. We take them as is.
configure -whitespace
@@ -363,11 +363,8 @@ This was deemed to be error prone and not worth the effort.

This script checks and fixes issues with `.gitignore` rules:

1. Makes sure git ignores the `.sql` files and expected output files that are generated
from `.source` template files. If you created or deleted a `.source` file in a commit,
git ignore rules should be updated to reflect this change.

2. Makes sure we do not commit any generated files that should be ignored. If there is an
1. Makes sure we do not commit any generated files that should be ignored. If there is an
ignored file in the git tree, the user is expected to review the files that are removed
from the git tree and commit them.
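The remaining check described above (refusing to commit generated files that git should ignore) boils down to asking git for tracked files that match an ignore rule. A minimal sketch of such a check, assuming it runs from the repository root; the messages and exact flags are illustrative, not the actual CI script:

```bash
#!/bin/bash
set -euo pipefail

# Tracked (cached) files that are nevertheless covered by an ignore rule.
ignored_tracked_files=$(git ls-files --cached --ignored --exclude-standard)

if [ -n "$ignored_tracked_files" ]; then
    echo "The following tracked files match a .gitignore rule:"
    echo "$ignored_tracked_files"
    echo "Remove them from the git tree (e.g. git rm --cached <file>) and commit the result."
    exit 1
fi
```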
@@ -7,13 +7,12 @@ source ci/ci_helpers.sh

cd src/test/regress

# 1. Find all *.sql *.spec and *.source files in the sql, spec and input
# directories
# 1. Find all *.sql and *.spec files in the sql, and spec directories
# 2. Strip the extension and the directory
# 3. Ignore names that end with .include, those files are meant to be in an C
# preprocessor #include statement. They should not be in schedules.
test_names=$(
  find sql spec input -iname "*.sql" -o -iname "*.spec" -o -iname "*.source" |
  find sql spec -iname "*.sql" -o -iname "*.spec" |
  sed -E 's#^\w+/([^/]+)\.[^.]+$#\1#g' |
  grep -v '.include$'
)
@@ -1,24 +1,8 @@
#! /bin/bash
# shellcheck disable=SC2012

set -euo pipefail
# shellcheck disable=SC1091
source ci/ci_helpers.sh

# We list all the .source files in alphabetical order, and do a substitution
# before writing the resulting file names that are created by those templates in
# relevant .gitignore files
#
# 1. Capture the file name without the .source extension
# 2. Add the desired extension at the end
# 3. Add a / character at the beginning of each line to conform to .gitignore file format
#
# e.g. multi_copy.source -> /multi_copy.sql
ls -1 src/test/regress/input | sed -E "s#(.*)\.source#/\1.sql#" > src/test/regress/sql/.gitignore

# e.g. multi_copy.source -> /multi_copy.out
ls -1 src/test/regress/output | sed -E "s#(.*)\.source#/\1.out#" > src/test/regress/expected/.gitignore

# Remove all the ignored files from git tree, and error out
# find all ignored files in git tree, and use quotation marks to prevent word splitting on filenames with spaces in them
ignored_lines_in_git_tree=$(git ls-files --ignored --exclude-standard | sed 's/.*/"&"/')
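For reference, the substitution that this (now largely removed) script performed can be checked on a single template name, using the multi_copy.source example from the comments above:

```bash
# A .source template generates both a .sql input and a .out expected file;
# the leading / anchors the resulting .gitignore entry to that directory.
echo "multi_copy.source" | sed -E "s#(.*)\.source#/\1.sql#"   # prints /multi_copy.sql
echo "multi_copy.source" | sed -E "s#(.*)\.source#/\1.out#"   # prints /multi_copy.out
```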
@@ -38,7 +38,6 @@ create_function_2.sql
largeobject.sql
misc.sql
security_label.sql
tablespace.sql
constraints.out
copy.out
create_function_0.out
@@ -48,4 +47,3 @@ largeobject.out
largeobject_1.out
misc.out
security_label.out
tablespace.out
@@ -24,7 +24,7 @@ export PGISOLATIONTIMEOUT = 20
## Citus regression support
##
MULTI_INSTALLDIR=$(CURDIR)/tmp_check/install
pg_regress_multi_check = $(PERL) $(citus_abs_srcdir)/pg_regress_multi.pl --pgxsdir="$(pgxsdir)" --bindir="$(bindir)" --libdir="$(libdir)" --majorversion="$(MAJORVERSION)" --postgres-builddir="$(postgres_abs_builddir)" --postgres-srcdir="$(postgres_abs_srcdir)"
pg_regress_multi_check = $(PERL) $(citus_abs_srcdir)/pg_regress_multi.pl --pgxsdir="$(pgxsdir)" --bindir="$(bindir)" --libdir="$(libdir)" --majorversion="$(MAJORVERSION)" --postgres-builddir="$(postgres_abs_builddir)" --postgres-srcdir="$(postgres_abs_srcdir)" --citus_abs_srcdir="$(citus_abs_srcdir)"
MULTI_REGRESS_OPTS = --inputdir=$(citus_abs_srcdir) $(pg_regress_locale_flags) --launcher="$(citus_abs_srcdir)/log_test_times"

pg_upgrade_check = $(citus_abs_srcdir)/citus_tests/upgrade/pg_upgrade_test.py
@@ -34,11 +34,6 @@ arbitrary_config_check = $(citus_abs_srcdir)/citus_tests/arbitrary_configs/citus
template_isolation_files = $(shell find $(citus_abs_srcdir)/spec/ -name '*.spec')
generated_isolation_files = $(patsubst $(citus_abs_srcdir)/spec/%,$(citus_abs_srcdir)/build/specs/%,$(template_isolation_files))

# Test input and expected files. These are created by pg_regress itself, so we
# don't have a rule to create them. We do need rules to clean them however.
input_files := $(patsubst $(citus_abs_srcdir)/input/%.source,sql/%.sql, $(wildcard $(citus_abs_srcdir)/input/*.source))
output_files := $(patsubst $(citus_abs_srcdir)/output/%.source,expected/%.out, $(wildcard $(citus_abs_srcdir)/output/*.source))

# have make check actually run all tests, but keep check-full as an
# intermediate, for muscle memory backward compatibility.
check: check-full check-enterprise-full
@@ -268,7 +263,7 @@ clean-upgrade-artifacts:
	rm -rf $(citus_abs_srcdir)/tmp_citus_tarballs/ $(citus_abs_srcdir)/tmp_citus_upgrade/ /tmp/citus_copy/

clean distclean maintainer-clean:
	rm -f $(output_files) $(input_files)
	rm -rf input/ output/
	rm -rf tmp_check/
	rm -rf tmp_citus_test/
@@ -1,19 +0,0 @@
/columnar_copyto.out
/columnar_data_types.out
/columnar_load.out
/hyperscale_tutorial.out
/multi_agg_distinct.out
/multi_agg_type_conversion.out
/multi_alter_table_statements.out
/multi_behavioral_analytics_create_table.out
/multi_behavioral_analytics_create_table_superuser.out
/multi_complex_count_distinct.out
/multi_copy.out
/multi_load_data.out
/multi_load_data_superuser.out
/multi_load_more_data.out
/multi_multiuser_load_data.out
/multi_mx_copy_data.out
/multi_outer_join.out
/multi_outer_join_reference.out
/tablespace.out
@@ -5,7 +5,8 @@ CREATE TABLE test_contestant(handle TEXT, birthdate DATE, rating INT,
percentile FLOAT, country CHAR(3), achievements TEXT[])
USING columnar;
-- load table data from file
COPY test_contestant FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;
\set contestants_1_csv_file :abs_srcdir '/data/contestants.1.csv'
COPY test_contestant FROM :'contestants_1_csv_file' WITH CSV;
-- export using COPY table TO ...
COPY test_contestant TO STDOUT;
a 01-10-1990 2090 97.1 XA {a}
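This hunk shows the pattern the commit applies throughout the regression tests: instead of generating the test file from a .source template via @abs_srcdir@ substitution, the path is assembled from the abs_srcdir psql variable with \set and interpolated as a quoted literal. A minimal standalone sketch of the same mechanism, assuming a reachable PostgreSQL instance and an existing table named test_contestant (both only for illustration, not part of the commit):

```bash
abs_srcdir="$PWD/src/test/regress"

# Pass the directory in as a psql variable, mirroring what the test harness is
# expected to do for the real regression tests.
psql -v abs_srcdir="$abs_srcdir" <<'EOF'
-- \set concatenates its arguments, building the full path
\set contestants_1_csv_file :abs_srcdir '/data/contestants.1.csv'
-- :'variable' interpolates the value as a properly quoted SQL literal
COPY test_contestant FROM :'contestants_1_csv_file' WITH CSV;
-- \copy does not interpolate variables, so build the whole command first and
-- then execute it (the workaround used for client-side copies in this commit)
\set client_side_copy_command '\\copy test_contestant FROM ' :'contestants_1_csv_file' ' WITH CSV;'
:client_side_copy_command
EOF
```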
@ -8,10 +8,11 @@ SET intervalstyle TO 'POSTGRES_VERBOSE';
|
|||
-- Test array types
|
||||
CREATE TABLE test_array_types (int_array int[], bigint_array bigint[],
|
||||
text_array text[]) USING columnar;
|
||||
COPY test_array_types FROM '@abs_srcdir@/data/array_types.csv' WITH CSV;
|
||||
\set array_types_csv_file :abs_srcdir '/data/array_types.csv'
|
||||
COPY test_array_types FROM :'array_types_csv_file' WITH CSV;
|
||||
SELECT * FROM test_array_types;
|
||||
int_array | bigint_array | text_array
|
||||
--------------------------+--------------------------------------------+------------
|
||||
int_array | bigint_array | text_array
|
||||
---------------------------------------------------------------------
|
||||
{1,2,3} | {1,2,3} | {a,b,c}
|
||||
{} | {} | {}
|
||||
{-2147483648,2147483647} | {-9223372036854775808,9223372036854775807} | {""}
|
||||
|
@ -21,10 +22,11 @@ SELECT * FROM test_array_types;
|
|||
CREATE TABLE test_datetime_types (timestamp timestamp,
|
||||
timestamp_with_timezone timestamp with time zone, date date, time time,
|
||||
interval interval) USING columnar;
|
||||
COPY test_datetime_types FROM '@abs_srcdir@/data/datetime_types.csv' WITH CSV;
|
||||
\set datetime_types_csv_file :abs_srcdir '/data/datetime_types.csv'
|
||||
COPY test_datetime_types FROM :'datetime_types_csv_file' WITH CSV;
|
||||
SELECT * FROM test_datetime_types;
|
||||
timestamp | timestamp_with_timezone | date | time | interval
|
||||
---------------------+-------------------------+------------+----------+-----------
|
||||
timestamp | timestamp_with_timezone | date | time | interval
|
||||
---------------------------------------------------------------------
|
||||
2000-01-02 04:05:06 | 1999-01-08 12:05:06+00 | 2000-01-02 | 04:05:06 | @ 4 hours
|
||||
1970-01-01 00:00:00 | infinity | -infinity | 00:00:00 | @ 0
|
||||
(2 rows)
|
||||
|
@ -34,11 +36,12 @@ CREATE TYPE enum_type AS ENUM ('a', 'b', 'c');
|
|||
CREATE TYPE composite_type AS (a int, b text);
|
||||
CREATE TABLE test_enum_and_composite_types (enum enum_type,
|
||||
composite composite_type) USING columnar;
|
||||
\set enum_and_composite_types_csv_file :abs_srcdir '/data/enum_and_composite_types.csv'
|
||||
COPY test_enum_and_composite_types FROM
|
||||
'@abs_srcdir@/data/enum_and_composite_types.csv' WITH CSV;
|
||||
:'enum_and_composite_types_csv_file' WITH CSV;
|
||||
SELECT * FROM test_enum_and_composite_types;
|
||||
enum | composite
|
||||
------+-----------
|
||||
enum | composite
|
||||
---------------------------------------------------------------------
|
||||
a | (2,b)
|
||||
b | (3,c)
|
||||
(2 rows)
|
||||
|
@ -46,10 +49,11 @@ SELECT * FROM test_enum_and_composite_types;
|
|||
-- Test range types
|
||||
CREATE TABLE test_range_types (int4range int4range, int8range int8range,
|
||||
numrange numrange, tsrange tsrange) USING columnar;
|
||||
COPY test_range_types FROM '@abs_srcdir@/data/range_types.csv' WITH CSV;
|
||||
\set range_types_csv_file :abs_srcdir '/data/range_types.csv'
|
||||
COPY test_range_types FROM :'range_types_csv_file' WITH CSV;
|
||||
SELECT * FROM test_range_types;
|
||||
int4range | int8range | numrange | tsrange
|
||||
-----------+-----------+----------+-----------------------------------------------
|
||||
int4range | int8range | numrange | tsrange
|
||||
---------------------------------------------------------------------
|
||||
[1,3) | [1,3) | [1,3) | ["2000-01-02 00:30:00","2010-02-03 12:30:00")
|
||||
empty | [1,) | (,) | empty
|
||||
(2 rows)
|
||||
|
@ -57,10 +61,11 @@ SELECT * FROM test_range_types;
|
|||
-- Test other types
|
||||
CREATE TABLE test_other_types (bool boolean, bytea bytea, money money,
|
||||
inet inet, bitstring bit varying(5), uuid uuid, json json) USING columnar;
|
||||
COPY test_other_types FROM '@abs_srcdir@/data/other_types.csv' WITH CSV;
|
||||
\set other_types_csv_file :abs_srcdir '/data/other_types.csv'
|
||||
COPY test_other_types FROM :'other_types_csv_file' WITH CSV;
|
||||
SELECT * FROM test_other_types;
|
||||
bool | bytea | money | inet | bitstring | uuid | json
|
||||
------+------------+-------+-------------+-----------+--------------------------------------+------------------
|
||||
bool | bytea | money | inet | bitstring | uuid | json
|
||||
---------------------------------------------------------------------
|
||||
f | \xdeadbeef | $1.00 | 192.168.1.2 | 10101 | a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11 | {"key": "value"}
|
||||
t | \xcdb0 | $1.50 | 127.0.0.1 | | a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11 | []
|
||||
(2 rows)
|
||||
|
@ -68,12 +73,13 @@ SELECT * FROM test_other_types;
|
|||
-- Test null values
|
||||
CREATE TABLE test_null_values (a int, b int[], c composite_type)
|
||||
USING columnar;
|
||||
COPY test_null_values FROM '@abs_srcdir@/data/null_values.csv' WITH CSV;
|
||||
\set null_values_csv_file :abs_srcdir '/data/null_values.csv'
|
||||
COPY test_null_values FROM :'null_values_csv_file' WITH CSV;
|
||||
SELECT * FROM test_null_values;
|
||||
a | b | c
|
||||
---+--------+-----
|
||||
a | b | c
|
||||
---------------------------------------------------------------------
|
||||
| {NULL} | (,)
|
||||
| |
|
||||
| |
|
||||
(2 rows)
|
||||
|
||||
CREATE TABLE test_json(j json) USING columnar;
|
|
@ -2,7 +2,8 @@
|
|||
-- Test loading data into columnar tables.
|
||||
--
|
||||
-- COPY with incorrect delimiter
|
||||
COPY contestant FROM '@abs_srcdir@/data/contestants.1.csv'
|
||||
\set contestants_1_csv_file :abs_srcdir '/data/contestants.1.csv'
|
||||
COPY contestant FROM :'contestants_1_csv_file'
|
||||
WITH DELIMITER '|'; -- ERROR
|
||||
ERROR: missing data for column "birthdate"
|
||||
CONTEXT: COPY contestant, line 1: "a,1990-01-10,2090,97.1,XA ,{a}"
|
||||
|
@ -11,9 +12,10 @@ COPY contestant FROM PROGRAM 'invalid_program' WITH CSV; -- ERROR
|
|||
ERROR: program "invalid_program" failed
|
||||
DETAIL: command not found
|
||||
-- COPY into uncompressed table from file
|
||||
COPY contestant FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;
|
||||
COPY contestant FROM :'contestants_1_csv_file' WITH CSV;
|
||||
-- COPY into uncompressed table from program
|
||||
COPY contestant FROM PROGRAM 'cat @abs_srcdir@/data/contestants.2.csv' WITH CSV;
|
||||
\set cat_contestants_2_csv_file 'cat ' :abs_srcdir '/data/contestants.2.csv'
|
||||
COPY contestant FROM PROGRAM :'cat_contestants_2_csv_file' WITH CSV;
|
||||
select
|
||||
version_major, version_minor, reserved_stripe_id, reserved_row_number
|
||||
from columnar_test_helpers.columnar_storage_info('contestant');
|
||||
|
@ -23,9 +25,9 @@ select
|
|||
(1 row)
|
||||
|
||||
-- COPY into compressed table
|
||||
COPY contestant_compressed FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;
|
||||
COPY contestant_compressed FROM :'contestants_1_csv_file' WITH CSV;
|
||||
-- COPY into uncompressed table from program
|
||||
COPY contestant_compressed FROM PROGRAM 'cat @abs_srcdir@/data/contestants.2.csv'
|
||||
COPY contestant_compressed FROM PROGRAM :'cat_contestants_2_csv_file'
|
||||
WITH CSV;
|
||||
select
|
||||
version_major, version_minor, reserved_stripe_id, reserved_row_number
|
||||
|
@ -42,7 +44,7 @@ COPY famous_constants (value, name, id) FROM STDIN WITH CSV;
|
|||
COPY famous_constants (name, value) FROM STDIN WITH CSV;
|
||||
SELECT * FROM famous_constants ORDER BY id, name;
|
||||
id | name | value
|
||||
----+----------------+-----------
|
||||
---------------------------------------------------------------------
|
||||
1 | pi | 3.141
|
||||
2 | e | 2.718
|
||||
3 | gamma | 0.577
|
|
@ -62,53 +62,58 @@ CREATE TABLE impressions (
|
|||
);
|
||||
begin;
|
||||
SELECT create_distributed_table('companies', 'id');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('campaigns', 'company_id');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
rollback;
|
||||
SELECT create_distributed_table('companies', 'id');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('campaigns', 'company_id');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('ads', 'company_id');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('clicks', 'company_id');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('impressions', 'company_id');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
\copy companies from '@abs_srcdir@/data/companies.csv' with csv
|
||||
\copy campaigns from '@abs_srcdir@/data/campaigns.csv' with csv
|
||||
\copy ads from '@abs_srcdir@/data/ads.csv' with csv
|
||||
\copy clicks from '@abs_srcdir@/data/clicks.csv' with csv
|
||||
\copy impressions from '@abs_srcdir@/data/impressions.csv' with csv
|
||||
\set companies_csv_file :abs_srcdir '/data/companies.csv'
|
||||
\set campaigns_csv_file :abs_srcdir '/data/campaigns.csv'
|
||||
\set ads_csv_file :abs_srcdir '/data/ads.csv'
|
||||
\set clicks_csv_file :abs_srcdir '/data/clicks.csv'
|
||||
\set impressions_csv_file :abs_srcdir '/data/impressions.csv'
|
||||
COPY companies from :'companies_csv_file' with csv;
|
||||
COPY campaigns from :'campaigns_csv_file' with csv;
|
||||
COPY ads from :'ads_csv_file' with csv;
|
||||
COPY clicks from :'clicks_csv_file' with csv;
|
||||
COPY impressions from :'impressions_csv_file' with csv;
|
||||
SELECT a.campaign_id,
|
||||
RANK() OVER (
|
||||
PARTITION BY a.campaign_id
|
||||
|
@ -122,8 +127,8 @@ SELECT a.campaign_id,
|
|||
GROUP BY a.campaign_id, a.id
|
||||
ORDER BY a.campaign_id, n_impressions desc, a.id
|
||||
LIMIT 10;
|
||||
campaign_id | rank | n_impressions | id
|
||||
-------------+------+---------------+-----
|
||||
campaign_id | rank | n_impressions | id
|
||||
---------------------------------------------------------------------
|
||||
34 | 1 | 68 | 264
|
||||
34 | 2 | 56 | 266
|
||||
34 | 3 | 41 | 267
|
||||
|
@ -198,19 +203,19 @@ CREATE TABLE impressions (
|
|||
FOREIGN KEY (company_id, ad_id)
|
||||
REFERENCES ads (company_id, id)
|
||||
);
|
||||
\copy companies from '@abs_srcdir@/data/companies.csv' with csv
|
||||
\copy campaigns from '@abs_srcdir@/data/campaigns.csv' with csv
|
||||
\copy ads from '@abs_srcdir@/data/ads.csv' with csv
|
||||
\copy clicks from '@abs_srcdir@/data/clicks.csv' with csv
|
||||
\copy impressions from '@abs_srcdir@/data/impressions.csv' with csv
|
||||
COPY companies from :'companies_csv_file' with csv;
|
||||
COPY campaigns from :'campaigns_csv_file' with csv;
|
||||
COPY ads from :'ads_csv_file' with csv;
|
||||
COPY clicks from :'clicks_csv_file' with csv;
|
||||
COPY impressions from :'impressions_csv_file' with csv;
|
||||
SELECT create_distributed_table('companies', 'id');
|
||||
NOTICE: Copying data from local table...
|
||||
NOTICE: copying the data has completed
|
||||
DETAIL: The local data in the table is no longer visible, but is still on disk.
|
||||
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.companies$$)
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('campaigns', 'company_id');
|
||||
|
@ -218,9 +223,9 @@ NOTICE: Copying data from local table...
|
|||
NOTICE: copying the data has completed
|
||||
DETAIL: The local data in the table is no longer visible, but is still on disk.
|
||||
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.campaigns$$)
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('ads', 'company_id');
|
||||
|
@ -228,9 +233,9 @@ NOTICE: Copying data from local table...
|
|||
NOTICE: copying the data has completed
|
||||
DETAIL: The local data in the table is no longer visible, but is still on disk.
|
||||
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.ads$$)
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('clicks', 'company_id');
|
||||
|
@ -238,9 +243,9 @@ NOTICE: Copying data from local table...
|
|||
NOTICE: copying the data has completed
|
||||
DETAIL: The local data in the table is no longer visible, but is still on disk.
|
||||
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.clicks$$)
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('impressions', 'company_id');
|
||||
|
@ -248,9 +253,9 @@ NOTICE: Copying data from local table...
|
|||
NOTICE: copying the data has completed
|
||||
DETAIL: The local data in the table is no longer visible, but is still on disk.
|
||||
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.impressions$$)
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT a.campaign_id,
|
||||
|
@ -266,8 +271,8 @@ SELECT a.campaign_id,
|
|||
GROUP BY a.campaign_id, a.id
|
||||
ORDER BY a.campaign_id, n_impressions desc, a.id
|
||||
LIMIT 10;
|
||||
campaign_id | rank | n_impressions | id
|
||||
-------------+------+---------------+-----
|
||||
campaign_id | rank | n_impressions | id
|
||||
---------------------------------------------------------------------
|
||||
59 | 1 | 70 | 477
|
||||
59 | 2 | 69 | 479
|
||||
59 | 3 | 63 | 475
|
|
@ -21,9 +21,9 @@ CREATE TABLE lineitem_range (
|
|||
l_shipmode char(10) not null,
|
||||
l_comment varchar(44) not null );
|
||||
SELECT create_distributed_table('lineitem_range', 'l_orderkey', 'range');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT master_create_empty_shard('lineitem_range') AS new_shard_id
|
||||
|
@ -34,18 +34,20 @@ SELECT master_create_empty_shard('lineitem_range') AS new_shard_id
|
|||
\gset
|
||||
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
|
||||
WHERE shardid = :new_shard_id;
|
||||
\copy lineitem_range FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
|
||||
\copy lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
|
||||
\set lineitem_1_data_file :abs_srcdir '/data/lineitem.1.data'
|
||||
\set lineitem_2_data_file :abs_srcdir '/data/lineitem.2.data'
|
||||
COPY lineitem_range FROM :'lineitem_1_data_file' with delimiter '|';
|
||||
COPY lineitem_range FROM :'lineitem_2_data_file' with delimiter '|';
|
||||
-- Run aggregate(distinct) on partition column for range partitioned table
|
||||
SELECT count(distinct l_orderkey) FROM lineitem_range;
|
||||
count
|
||||
-------
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
2985
|
||||
(1 row)
|
||||
|
||||
SELECT avg(distinct l_orderkey) FROM lineitem_range;
|
||||
avg
|
||||
-----------------------
|
||||
avg
|
||||
---------------------------------------------------------------------
|
||||
7463.9474036850921273
|
||||
(1 row)
|
||||
|
||||
|
@ -56,8 +58,8 @@ SELECT p_partkey, count(distinct l_orderkey) FROM lineitem_range, part
|
|||
WHERE l_partkey = p_partkey
|
||||
GROUP BY p_partkey
|
||||
ORDER BY p_partkey LIMIT 10;
|
||||
p_partkey | count
|
||||
-----------+-------
|
||||
p_partkey | count
|
||||
---------------------------------------------------------------------
|
||||
18 | 1
|
||||
79 | 1
|
||||
91 | 1
|
||||
|
@ -72,53 +74,53 @@ SELECT p_partkey, count(distinct l_orderkey) FROM lineitem_range, part
|
|||
|
||||
-- Check that we support more complex expressions.
|
||||
SELECT count(distinct (l_orderkey)) FROM lineitem_range;
|
||||
count
|
||||
-------
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
2985
|
||||
(1 row)
|
||||
|
||||
SELECT count(distinct (l_orderkey + 1)) FROM lineitem_range;
|
||||
count
|
||||
-------
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
2985
|
||||
(1 row)
|
||||
|
||||
SELECT count(distinct (l_orderkey % 5)) FROM lineitem_range;
|
||||
count
|
||||
-------
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
5
|
||||
(1 row)
|
||||
|
||||
-- count(distinct) on non-partition column is allowed
|
||||
SELECT count(distinct l_partkey) FROM lineitem_range;
|
||||
count
|
||||
-------
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
11661
|
||||
(1 row)
|
||||
|
||||
SELECT count(distinct (l_partkey + 1)) FROM lineitem_range;
|
||||
count
|
||||
-------
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
11661
|
||||
(1 row)
|
||||
|
||||
SELECT count(distinct (l_partkey % 5)) FROM lineitem_range;
|
||||
count
|
||||
-------
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
5
|
||||
(1 row)
|
||||
|
||||
-- Now test append partitioned tables. First run count(distinct) on a single
|
||||
-- sharded table.
|
||||
SELECT count(distinct p_mfgr) FROM part;
|
||||
count
|
||||
-------
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
5
|
||||
(1 row)
|
||||
|
||||
SELECT p_mfgr, count(distinct p_partkey) FROM part GROUP BY p_mfgr ORDER BY p_mfgr;
|
||||
p_mfgr | count
|
||||
---------------------------+-------
|
||||
p_mfgr | count
|
||||
---------------------------------------------------------------------
|
||||
Manufacturer#1 | 193
|
||||
Manufacturer#2 | 190
|
||||
Manufacturer#3 | 228
|
||||
|
@ -129,8 +131,8 @@ SELECT p_mfgr, count(distinct p_partkey) FROM part GROUP BY p_mfgr ORDER BY p_mf
|
|||
-- We support count(distinct) queries on append partitioned tables
|
||||
-- both on partition column, and non-partition column.
|
||||
SELECT count(distinct o_orderkey), count(distinct o_custkey) FROM orders;
|
||||
count | count
|
||||
-------+-------
|
||||
count | count
|
||||
---------------------------------------------------------------------
|
||||
2985 | 923
|
||||
(1 row)
|
||||
|
||||
|
@ -154,61 +156,61 @@ CREATE TABLE lineitem_hash (
|
|||
l_comment varchar(44) not null );
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
|
||||
\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
|
||||
COPY lineitem_hash FROM :'lineitem_1_data_file' with delimiter '|';
|
||||
COPY lineitem_hash FROM :'lineitem_2_data_file' with delimiter '|';
|
||||
-- aggregate(distinct) on partition column is allowed
|
||||
SELECT count(distinct l_orderkey) FROM lineitem_hash;
|
||||
count
|
||||
-------
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
2985
|
||||
(1 row)
|
||||
|
||||
SELECT avg(distinct l_orderkey) FROM lineitem_hash;
|
||||
avg
|
||||
-----------------------
|
||||
avg
|
||||
---------------------------------------------------------------------
|
||||
7463.9474036850921273
|
||||
(1 row)
|
||||
|
||||
-- Check that we support more complex expressions.
|
||||
SELECT count(distinct (l_orderkey)) FROM lineitem_hash;
|
||||
count
|
||||
-------
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
2985
|
||||
(1 row)
|
||||
|
||||
SELECT count(distinct (l_orderkey + 1)) FROM lineitem_hash;
|
||||
count
|
||||
-------
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
2985
|
||||
(1 row)
|
||||
|
||||
SELECT count(distinct (l_orderkey % 5)) FROM lineitem_hash;
|
||||
count
|
||||
-------
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
5
|
||||
(1 row)
|
||||
|
||||
-- count(distinct) on non-partition column is allowed
|
||||
SELECT count(distinct l_partkey) FROM lineitem_hash;
|
||||
count
|
||||
-------
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
11661
|
||||
(1 row)
|
||||
|
||||
SELECT count(distinct (l_partkey + 1)) FROM lineitem_hash;
|
||||
count
|
||||
-------
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
11661
|
||||
(1 row)
|
||||
|
||||
SELECT count(distinct (l_partkey % 5)) FROM lineitem_hash;
|
||||
count
|
||||
-------
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
5
|
||||
(1 row)
|
||||
|
||||
|
@ -217,8 +219,8 @@ SELECT l_orderkey, count(distinct l_partkey) INTO hash_results FROM lineitem_has
|
|||
SELECT l_orderkey, count(distinct l_partkey) INTO range_results FROM lineitem_range GROUP BY l_orderkey;
|
||||
-- they should return the same results
|
||||
SELECT * FROM hash_results h, range_results r WHERE h.l_orderkey = r.l_orderkey AND h.count != r.count;
|
||||
l_orderkey | count | l_orderkey | count
|
||||
------------+-------+------------+-------
|
||||
l_orderkey | count | l_orderkey | count
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
-- count(distinct) is allowed if we group by non-partition column
|
||||
|
@ -226,8 +228,8 @@ SELECT l_partkey, count(distinct l_orderkey) INTO hash_results_np FROM lineitem_
|
|||
SELECT l_partkey, count(distinct l_orderkey) INTO range_results_np FROM lineitem_range GROUP BY l_partkey;
|
||||
-- they should return the same results
|
||||
SELECT * FROM hash_results_np h, range_results_np r WHERE h.l_partkey = r.l_partkey AND h.count != r.count;
|
||||
l_partkey | count | l_partkey | count
|
||||
-----------+-------+-----------+-------
|
||||
l_partkey | count | l_partkey | count
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
-- other agg(distinct) are not allowed on non-partition columns even they are grouped
|
|
@ -3,26 +3,26 @@
|
|||
--
|
||||
-- Test aggregate type conversions using sums of integers and division operator
|
||||
SELECT sum(l_suppkey) FROM lineitem;
|
||||
sum
|
||||
----------
|
||||
sum
|
||||
---------------------------------------------------------------------
|
||||
60617976
|
||||
(1 row)
|
||||
|
||||
SELECT sum(l_suppkey) / 2 FROM lineitem;
|
||||
?column?
|
||||
----------
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
30308988
|
||||
(1 row)
|
||||
|
||||
SELECT sum(l_suppkey) / 2::numeric FROM lineitem;
|
||||
?column?
|
||||
-----------------------
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
30308988.000000000000
|
||||
(1 row)
|
||||
|
||||
SELECT sum(l_suppkey)::int8 / 2 FROM lineitem;
|
||||
?column?
|
||||
----------
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
30308988
|
||||
(1 row)
|
||||
|
||||
|
@ -34,27 +34,28 @@ CREATE TABLE aggregate_type (
|
|||
double_value float(40) not null,
|
||||
interval_value interval not null);
|
||||
SELECT create_distributed_table('aggregate_type', 'float_value', 'append');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT master_create_empty_shard('aggregate_type') AS shardid \gset
|
||||
copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data' with (append_to_shard :shardid);
|
||||
\set agg_type_data_file :abs_srcdir '/data/agg_type.data'
|
||||
copy aggregate_type FROM :'agg_type_data_file' with (append_to_shard :shardid);
|
||||
-- Test conversions using aggregates on floats and division
|
||||
SELECT min(float_value), max(float_value),
|
||||
sum(float_value), count(float_value), avg(float_value)
|
||||
FROM aggregate_type;
|
||||
min | max | sum | count | avg
|
||||
-----+-----+------+-------+-------
|
||||
min | max | sum | count | avg
|
||||
---------------------------------------------------------------------
|
||||
1 | 4.5 | 10.5 | 4 | 2.625
|
||||
(1 row)
|
||||
|
||||
SELECT min(float_value) / 2, max(float_value) / 2,
|
||||
sum(float_value) / 2, count(float_value) / 2, avg(float_value) / 2
|
||||
FROM aggregate_type;
|
||||
?column? | ?column? | ?column? | ?column? | ?column?
|
||||
----------+----------+----------+----------+----------
|
||||
?column? | ?column? | ?column? | ?column? | ?column?
|
||||
---------------------------------------------------------------------
|
||||
0.5 | 2.25 | 5.25 | 2 | 1.3125
|
||||
(1 row)
|
||||
|
||||
|
@ -62,16 +63,16 @@ FROM aggregate_type;
|
|||
SELECT min(double_value), max(double_value),
|
||||
sum(double_value), count(double_value), avg(double_value)
|
||||
FROM aggregate_type;
|
||||
min | max | sum | count | avg
|
||||
-------+---------+----------+-------+-----------
|
||||
min | max | sum | count | avg
|
||||
---------------------------------------------------------------------
|
||||
2.343 | 6.34343 | 15.79703 | 4 | 3.9492575
|
||||
(1 row)
|
||||
|
||||
SELECT min(double_value) * 2, max(double_value) * 2,
|
||||
sum(double_value) * 2, count(double_value) * 2, avg(double_value) * 2
|
||||
FROM aggregate_type;
|
||||
?column? | ?column? | ?column? | ?column? | ?column?
|
||||
----------+----------+----------+----------+----------
|
||||
?column? | ?column? | ?column? | ?column? | ?column?
|
||||
---------------------------------------------------------------------
|
||||
4.686 | 12.68686 | 31.59406 | 8 | 7.898515
|
||||
(1 row)
|
||||
|
||||
|
@ -81,16 +82,16 @@ SET IntervalStyle TO 'postgres';
|
|||
SELECT min(interval_value), max(interval_value),
|
||||
sum(interval_value), count(interval_value), avg(interval_value)
|
||||
FROM aggregate_type;
|
||||
min | max | sum | count | avg
|
||||
-------------+------------+-------------+-------+-------------
|
||||
min | max | sum | count | avg
|
||||
---------------------------------------------------------------------
|
||||
00:00:23.44 | 00:38:52.9 | 01:23:33.64 | 4 | 00:20:53.41
|
||||
(1 row)
|
||||
|
||||
SELECT min(interval_value) / 2, max(interval_value) / 2,
|
||||
sum(interval_value) / 2, count(interval_value) / 2, avg(interval_value) / 2
|
||||
FROM aggregate_type;
|
||||
?column? | ?column? | ?column? | ?column? | ?column?
|
||||
-------------+-------------+-------------+----------+--------------
|
||||
?column? | ?column? | ?column? | ?column? | ?column?
|
||||
---------------------------------------------------------------------
|
||||
00:00:11.72 | 00:19:26.45 | 00:41:46.82 | 2 | 00:10:26.705
|
||||
(1 row)
|
||||
|
@ -9,99 +9,101 @@ CREATE SCHEMA with_basics;
|
|||
SET search_path TO 'with_basics';
|
||||
CREATE TABLE users_table (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint);
|
||||
SELECT create_distributed_table('users_table', 'user_id');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE events_table (user_id int, time timestamp, event_type int, value_2 int, value_3 float, value_4 bigint);
|
||||
SELECT create_distributed_table('events_table', 'user_id');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
\COPY users_table FROM '@abs_srcdir@/data/users_table.data' WITH CSV;
|
||||
\COPY events_table FROM '@abs_srcdir@/data/events_table.data' WITH CSV;
|
||||
\set users_table_data_file :abs_srcdir '/data/users_table.data'
|
||||
\set events_table_data_file :abs_srcdir '/data/events_table.data'
|
||||
COPY users_table FROM :'users_table_data_file' WITH CSV;
|
||||
COPY events_table FROM :'events_table_data_file' WITH CSV;
|
||||
SET citus.shard_count = 96;
|
||||
CREATE SCHEMA subquery_and_ctes;
|
||||
SET search_path TO subquery_and_ctes;
|
||||
CREATE TABLE users_table (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint);
|
||||
SELECT create_distributed_table('users_table', 'user_id');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE events_table (user_id int, time timestamp, event_type int, value_2 int, value_3 float, value_4 bigint);
|
||||
SELECT create_distributed_table('events_table', 'user_id');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
\COPY users_table FROM '@abs_srcdir@/data/users_table.data' WITH CSV;
|
||||
\COPY events_table FROM '@abs_srcdir@/data/events_table.data' WITH CSV;
|
||||
COPY users_table FROM :'users_table_data_file' WITH CSV;
|
||||
COPY events_table FROM :'events_table_data_file' WITH CSV;
|
||||
SET citus.shard_count TO DEFAULT;
|
||||
SET search_path TO DEFAULT;
|
||||
CREATE TABLE users_table (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint);
|
||||
SELECT create_distributed_table('users_table', 'user_id');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE events_table (user_id int, time timestamp, event_type int, value_2 int, value_3 float, value_4 bigint);
|
||||
SELECT create_distributed_table('events_table', 'user_id');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE agg_results (user_id int, value_1_agg int, value_2_agg int, value_3_agg float, value_4_agg bigint, agg_time timestamp);
|
||||
SELECT create_distributed_table('agg_results', 'user_id');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- we need this to improve the concurrency on the regression tests
|
||||
CREATE TABLE agg_results_second (user_id int, value_1_agg int, value_2_agg int, value_3_agg float, value_4_agg bigint, agg_time timestamp);
|
||||
SELECT create_distributed_table('agg_results_second', 'user_id');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- same as agg_results_second
|
||||
CREATE TABLE agg_results_third (user_id int, value_1_agg int, value_2_agg int, value_3_agg float, value_4_agg bigint, agg_time timestamp);
|
||||
SELECT create_distributed_table('agg_results_third', 'user_id');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- same as agg_results_second
|
||||
CREATE TABLE agg_results_fourth (user_id int, value_1_agg int, value_2_agg int, value_3_agg float, value_4_agg bigint, agg_time timestamp);
|
||||
SELECT create_distributed_table('agg_results_fourth', 'user_id');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- same as agg_results_second
|
||||
CREATE TABLE agg_results_window (user_id int, value_1_agg int, value_2_agg int, value_3_agg float, value_4_agg bigint, agg_time timestamp);
|
||||
SELECT create_distributed_table('agg_results_window', 'user_id');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE users_ref_test_table(id int, it_name varchar(25), k_no int);
|
||||
SELECT create_reference_table('users_ref_test_table');
|
||||
create_reference_table
|
||||
------------------------
|
||||
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO users_ref_test_table VALUES(1,'User_1',45);
|
||||
|
@ -110,8 +112,8 @@ INSERT INTO users_ref_test_table VALUES(3,'User_3',47);
|
|||
INSERT INTO users_ref_test_table VALUES(4,'User_4',48);
|
||||
INSERT INTO users_ref_test_table VALUES(5,'User_5',49);
|
||||
INSERT INTO users_ref_test_table VALUES(6,'User_6',50);
|
||||
\COPY users_table FROM '@abs_srcdir@/data/users_table.data' WITH CSV;
|
||||
\COPY events_table FROM '@abs_srcdir@/data/events_table.data' WITH CSV;
|
||||
COPY users_table FROM :'users_table_data_file' WITH CSV;
|
||||
COPY events_table FROM :'events_table_data_file' WITH CSV;
|
||||
-- create indexes for
|
||||
CREATE INDEX is_index1 ON users_table(user_id);
|
||||
CREATE INDEX is_index2 ON events_table(user_id);
|
||||
|
@ -135,26 +137,26 @@ SELECT run_command_on_master_and_workers($f$
|
|||
IMMUTABLE
|
||||
RETURNS NULL ON NULL INPUT;
|
||||
$f$);
|
||||
run_command_on_master_and_workers
|
||||
-----------------------------------
|
||||
|
||||
run_command_on_master_and_workers
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SET citus.next_shard_id TO 1400297;
|
||||
CREATE TABLE events_reference_table (like events_table including all);
|
||||
SELECT create_reference_table('events_reference_table');
|
||||
create_reference_table
|
||||
------------------------
|
||||
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE INDEX events_ref_val2 on events_reference_table(value_2);
|
||||
INSERT INTO events_reference_table SELECT * FROM events_table;
|
||||
CREATE TABLE users_reference_table (like users_table including all);
|
||||
SELECT create_reference_table('users_reference_table');
|
||||
create_reference_table
|
||||
------------------------
|
||||
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO users_reference_table SELECT * FROM users_table;
|
|
@@ -217,7 +217,7 @@ SELECT master_create_empty_shard('events') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(2,2000000001)', shardmaxvalue = '(2,4300000000)'
WHERE shardid = :new_shard_id;
\COPY events FROM STDIN WITH CSV
COPY events FROM STDIN WITH CSV;
CREATE TABLE users (
composite_id user_composite_type,
lastseen bigint
@@ -247,7 +247,7 @@ SELECT master_create_empty_shard('users') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(2,2000000001)', shardmaxvalue = '(2,4300000000)'
WHERE shardid = :new_shard_id;
\COPY users FROM STDIN WITH CSV
COPY users FROM STDIN WITH CSV;
-- Create tables for subquery tests
CREATE TABLE lineitem_subquery (
l_orderkey bigint not null,
@@ -330,7 +330,11 @@ SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
WHERE shardid = :new_shard_id;
\copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\copy orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
\set lineitem_1_data_file :abs_srcdir '/data/lineitem.1.data'
COPY lineitem_subquery FROM :'lineitem_1_data_file' with delimiter '|';
\set lineitem_2_data_file :abs_srcdir '/data/lineitem.2.data'
COPY lineitem_subquery FROM :'lineitem_2_data_file' with delimiter '|';
\set orders_1_data_file :abs_srcdir '/data/orders.1.data'
COPY orders_subquery FROM :'orders_1_data_file' with delimiter '|';
\set orders_2_data_file :abs_srcdir '/data/orders.2.data'
COPY orders_subquery FROM :'orders_2_data_file' with delimiter '|';
@@ -29,8 +29,10 @@ SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash');

(1 row)

\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\set lineitem_1_data_file :abs_srcdir '/data/lineitem.1.data'
\set lineitem_2_data_file :abs_srcdir '/data/lineitem.2.data'
COPY lineitem_hash FROM :'lineitem_1_data_file' with delimiter '|';
COPY lineitem_hash FROM :'lineitem_2_data_file' with delimiter '|';
ANALYZE lineitem_hash;
-- count(distinct) is supported on top level query if there
-- is a grouping on the partition key
@@ -1,6 +1,11 @@
--
-- MULTI_COPY
--
-- set file paths
\set customer1datafile :abs_srcdir '/data/customer.1.data'
\set customer2datafile :abs_srcdir '/data/customer.2.data'
\set customer3datafile :abs_srcdir '/data/customer.3.data'
\set lineitem1datafile :abs_srcdir '/data/lineitem.1.data'
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 560000;
-- Create a new hash-partitioned table into which to COPY
CREATE TABLE customer_copy_hash (
@@ -101,7 +106,7 @@ SELECT count(*) FROM customer_copy_hash WHERE c_custkey = 9;
(1 row)

-- Test server-side copy from file
COPY customer_copy_hash FROM '@abs_srcdir@/data/customer.2.data' WITH (DELIMITER '|');
COPY customer_copy_hash FROM :'customer2datafile' WITH (DELIMITER '|');
-- Confirm that data was copied
SELECT count(*) FROM customer_copy_hash;
count
@@ -110,7 +115,11 @@ SELECT count(*) FROM customer_copy_hash;
(1 row)

-- Test client-side copy from file
\copy customer_copy_hash FROM '@abs_srcdir@/data/customer.3.data' WITH (DELIMITER '|');
-- \copy does not support variable interpolation. Hence we store and execute
-- the query in two steps for interpolation to kick in.
-- See https://stackoverflow.com/a/67642094/4576416 for details.
\set client_side_copy_command '\\copy customer_copy_hash FROM ' :'customer3datafile' ' WITH (DELIMITER '''|''');'
:client_side_copy_command
-- Confirm that data was copied
SELECT count(*) FROM customer_copy_hash;
count
@@ -199,7 +208,7 @@ SELECT master_create_distributed_table('customer_copy_range', 'c_custkey', 'rang
(1 row)

-- Test COPY into empty range-partitioned table
COPY customer_copy_range FROM '@abs_srcdir@/data/customer.1.data' WITH (DELIMITER '|');
COPY customer_copy_range FROM :'customer1datafile' WITH (DELIMITER '|');
ERROR: could not find any shards into which to copy
DETAIL: No shards exist for distributed table "customer_copy_range".
SELECT master_create_empty_shard('customer_copy_range') AS new_shard_id
@@ -211,7 +220,7 @@ SELECT master_create_empty_shard('customer_copy_range') AS new_shard_id
UPDATE pg_dist_shard SET shardminvalue = 501, shardmaxvalue = 1000
WHERE shardid = :new_shard_id;
-- Test copy into range-partitioned table
COPY customer_copy_range FROM '@abs_srcdir@/data/customer.1.data' WITH (DELIMITER '|');
COPY customer_copy_range FROM :'customer1datafile' WITH (DELIMITER '|');
-- Check whether data went into the right shard (maybe)
SELECT min(c_custkey), max(c_custkey), avg(c_custkey), count(*)
FROM customer_copy_range WHERE c_custkey <= 500;
@@ -351,7 +360,7 @@ SELECT create_distributed_table('lineitem_copy_append', 'l_orderkey', 'append');

BEGIN;
SELECT master_create_empty_shard('lineitem_copy_append') AS shardid \gset
COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid);
COPY lineitem_copy_append FROM :'lineitem1datafile' with (delimiter '|', append_to_shard :shardid);
END;
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'lineitem_copy_append'::regclass;
count
@@ -360,9 +369,9 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'lineitem_copy_append'::
(1 row)

-- trigger some errors on the append_to_shard option
COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard xxxxx);
COPY lineitem_copy_append FROM :'lineitem1datafile' with (delimiter '|', append_to_shard xxxxx);
ERROR: could not find valid entry for shard xxxxx
COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard xxxxx);
COPY lineitem_copy_append FROM :'lineitem1datafile' with (delimiter '|', append_to_shard xxxxx);
ERROR: shard xxxxx does not belong to table lineitem_copy_append
-- Test schema support on append partitioned tables
CREATE SCHEMA append;
@@ -384,8 +393,8 @@ SELECT create_distributed_table('append.customer_copy', 'c_custkey', 'append');
SELECT master_create_empty_shard('append.customer_copy') AS shardid1 \gset
SELECT master_create_empty_shard('append.customer_copy') AS shardid2 \gset
-- Test copy from the master node
COPY append.customer_copy FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|', append_to_shard :shardid1);
COPY append.customer_copy FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|', append_to_shard :shardid2);
COPY append.customer_copy FROM :'customer1datafile' with (delimiter '|', append_to_shard :shardid1);
COPY append.customer_copy FROM :'customer2datafile' with (delimiter '|', append_to_shard :shardid2);
-- Test the content of the table
SELECT min(c_custkey), max(c_custkey), avg(c_acctbal), count(*) FROM append.customer_copy;
min | max | avg | count
@@ -0,0 +1,24 @@
--
-- MULTI_LOAD_DATA
--
\set lineitem_1_data_file :abs_srcdir '/data/lineitem.1.data'
\set lineitem_2_data_file :abs_srcdir '/data/lineitem.2.data'
COPY lineitem FROM :'lineitem_1_data_file' with delimiter '|';
COPY lineitem FROM :'lineitem_2_data_file' with delimiter '|';
\set orders_1_data_file :abs_srcdir '/data/orders.1.data'
\set orders_2_data_file :abs_srcdir '/data/orders.2.data'
COPY orders FROM :'orders_1_data_file' with delimiter '|';
COPY orders FROM :'orders_2_data_file' with delimiter '|';
COPY orders_reference FROM :'orders_1_data_file' with delimiter '|';
COPY orders_reference FROM :'orders_2_data_file' with delimiter '|';
\set customer_1_data_file :abs_srcdir '/data/customer.1.data'
\set nation_data_file :abs_srcdir '/data/nation.data'
\set part_data_file :abs_srcdir '/data/part.data'
\set supplier_data_file :abs_srcdir '/data/supplier.data'
COPY customer FROM :'customer_1_data_file' with delimiter '|';
COPY customer_append FROM :'customer_1_data_file' with (delimiter '|', append_to_shard xxxxx);
COPY nation FROM :'nation_data_file' with delimiter '|';
COPY part FROM :'part_data_file' with delimiter '|';
COPY part_append FROM :'part_data_file' with (delimiter '|', append_to_shard xxxxx);
COPY supplier FROM :'supplier_data_file' with delimiter '|';
COPY supplier_single_shard FROM :'supplier_data_file' with delimiter '|';
@@ -0,0 +1,8 @@
\set lineitem_1_data_file :abs_srcdir '/data/lineitem.1.data'
\set lineitem_2_data_file :abs_srcdir '/data/lineitem.2.data'
\set orders_1_data_file :abs_srcdir '/data/orders.1.data'
\set orders_2_data_file :abs_srcdir '/data/orders.2.data'
COPY lineitem_hash_part FROM :'lineitem_1_data_file' with delimiter '|';
COPY lineitem_hash_part FROM :'lineitem_2_data_file' with delimiter '|';
COPY orders_hash_part FROM :'orders_1_data_file' with delimiter '|';
COPY orders_hash_part FROM :'orders_2_data_file' with delimiter '|';
@@ -0,0 +1,19 @@
--
-- MULTI_STAGE_MORE_DATA
--
SET citus.next_shard_id TO 280000;
-- We load more data to customer and part tables to test distributed joins. The
-- loading causes the planner to consider customer and part tables as large, and
-- evaluate plans where some of the underlying tables need to be repartitioned.
\set customer_2_data_file :abs_srcdir '/data/customer.2.data'
\set customer_3_data_file :abs_srcdir '/data/customer.3.data'
\set part_more_data_file :abs_srcdir '/data/part.more.data'
COPY customer FROM :'customer_2_data_file' with delimiter '|';
COPY customer FROM :'customer_3_data_file' with delimiter '|';
COPY part FROM :'part_more_data_file' with delimiter '|';
SELECT master_create_empty_shard('customer_append') AS shardid1 \gset
SELECT master_create_empty_shard('customer_append') AS shardid2 \gset
copy customer_append FROM :'customer_2_data_file' with (delimiter '|', append_to_shard :shardid1);
copy customer_append FROM :'customer_3_data_file' with (delimiter '|', append_to_shard :shardid2);
SELECT master_create_empty_shard('part_append') AS shardid \gset
copy part_append FROM :'part_more_data_file' with (delimiter '|', append_to_shard :shardid);
@@ -0,0 +1,50 @@
--
-- MULTI_MULTIUSER_LOAD_DATA
--
-- Tests for loading data in a distributed cluster. Please note that the number
-- of shards uploaded depends on two config values: citusdb.shard_replication_factor and
-- citusdb.shard_max_size. These values are manually set in pg_regress.c. We also set
-- the shard placement policy to the local-node-first policy as other regression
-- tests expect the placements to be in that order.
SET citusdb.shard_placement_policy TO 'local-node-first';
-- load as superuser
\set lineitem_1_data_file :abs_srcdir '/data/lineitem.1.data'
\set copy_command '\\COPY lineitem FROM ' :'lineitem_1_data_file' ' with delimiter '''|''';'
:copy_command
-- as user with ALL access
SET ROLE full_access;
\set lineitem_2_data_file :abs_srcdir '/data/lineitem.2.data'
\set copy_command '\\COPY lineitem FROM ' :'lineitem_2_data_file' ' with delimiter '''|''';'
:copy_command
RESET ROLE;
-- as user with SELECT access, should fail
SET ROLE read_access;
\set copy_command '\\COPY lineitem FROM ' :'lineitem_2_data_file' ' with delimiter '''|''';'
:copy_command
ERROR: permission denied for table lineitem
RESET ROLE;
-- as user with no access, should fail
SET ROLE no_access;
\set copy_command '\\COPY lineitem FROM ' :'lineitem_2_data_file' ' with delimiter '''|''';'
:copy_command
ERROR: permission denied for table lineitem
RESET ROLE;
SET ROLE full_access;
\set orders_1_data_file :abs_srcdir '/data/orders.1.data'
\set orders_2_data_file :abs_srcdir '/data/orders.2.data'
\set copy_command '\\COPY orders FROM ' :'orders_1_data_file' ' with delimiter '''|''';'
:copy_command
\set copy_command '\\COPY orders FROM ' :'orders_2_data_file' ' with delimiter '''|''';'
:copy_command
\set customer_1_data_file :abs_srcdir '/data/customer.1.data'
\set nation_data_file :abs_srcdir '/data/nation.data'
\set part_data_file :abs_srcdir '/data/part.data'
\set supplier_data_file :abs_srcdir '/data/supplier.data'
\set copy_command '\\COPY customer FROM ' :'customer_1_data_file' ' with delimiter '''|''';'
:copy_command
\set copy_command '\\COPY nation FROM ' :'nation_data_file' ' with delimiter '''|''';'
:copy_command
\set copy_command '\\COPY part FROM ' :'part_data_file' ' with delimiter '''|''';'
:copy_command
\set copy_command '\\COPY supplier FROM ' :'supplier_data_file' ' with delimiter '''|''';'
:copy_command
@ -1,12 +1,13 @@
--
-- MULTI_MX_COPY_DATA
--
\COPY nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
\set nation_data_file :abs_srcdir '/data/nation.data'
COPY nation_hash FROM :'nation_data_file' with delimiter '|';
SET search_path TO citus_mx_test_schema;
\COPY nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
\COPY citus_mx_test_schema_join_1.nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
\COPY citus_mx_test_schema_join_1.nation_hash_2 FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
\COPY citus_mx_test_schema_join_2.nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
COPY nation_hash FROM :'nation_data_file' with delimiter '|';
COPY citus_mx_test_schema_join_1.nation_hash FROM :'nation_data_file' with delimiter '|';
COPY citus_mx_test_schema_join_1.nation_hash_2 FROM :'nation_data_file' with delimiter '|';
COPY citus_mx_test_schema_join_2.nation_hash FROM :'nation_data_file' with delimiter '|';
SET citus.shard_replication_factor TO 2;
CREATE TABLE citus_mx_test_schema.nation_hash_replicated AS SELECT * FROM citus_mx_test_schema.nation_hash;
SELECT create_distributed_table('citus_mx_test_schema.nation_hash_replicated', 'n_nationkey');
@ -19,18 +20,24 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
|
|||
|
||||
(1 row)
|
||||
|
||||
\COPY nation_hash_replicated FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
|
||||
COPY nation_hash_replicated FROM :'nation_data_file' with delimiter '|';
|
||||
-- now try loading data from worker node
|
||||
\c - - - :worker_1_port
|
||||
SET search_path TO public;
|
||||
\COPY lineitem_mx FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
|
||||
\COPY lineitem_mx FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
|
||||
\COPY citus_mx_test_schema.nation_hash_replicated FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
|
||||
\set lineitem_1_data_file :abs_srcdir '/data/lineitem.1.data'
|
||||
\set lineitem_2_data_file :abs_srcdir '/data/lineitem.2.data'
|
||||
COPY lineitem_mx FROM :'lineitem_1_data_file' with delimiter '|';
|
||||
COPY lineitem_mx FROM :'lineitem_2_data_file' with delimiter '|';
|
||||
\set nation_data_file :abs_srcdir '/data/nation.data'
|
||||
COPY citus_mx_test_schema.nation_hash_replicated FROM :'nation_data_file' with delimiter '|';
|
||||
\c - - - :worker_2_port
|
||||
-- and use second worker as well
|
||||
\COPY orders_mx FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
|
||||
\COPY orders_mx FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
|
||||
\COPY citus_mx_test_schema.nation_hash_replicated FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
|
||||
\set orders_1_data_file :abs_srcdir '/data/orders.1.data'
|
||||
\set orders_2_data_file :abs_srcdir '/data/orders.2.data'
|
||||
\set nation_data_file :abs_srcdir '/data/nation.data'
|
||||
COPY orders_mx FROM :'orders_1_data_file' with delimiter '|';
|
||||
COPY orders_mx FROM :'orders_2_data_file' with delimiter '|';
|
||||
COPY citus_mx_test_schema.nation_hash_replicated FROM :'nation_data_file' with delimiter '|';
|
||||
-- get ready for the next test
|
||||
TRUNCATE orders_mx;
|
||||
\c - - - :worker_2_port
|
||||
|
@ -55,7 +62,9 @@ show citus.local_shared_pool_size;
|
|||
-1
|
||||
(1 row)
|
||||
|
||||
\COPY orders_mx FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
|
||||
\set orders_1_data_file :abs_srcdir '/data/orders.1.data'
|
||||
\set orders_2_data_file :abs_srcdir '/data/orders.2.data'
|
||||
COPY orders_mx FROM :'orders_1_data_file' with delimiter '|';
|
||||
NOTICE: executing the copy locally for shard xxxxx
|
||||
CONTEXT: COPY orders_mx, line 3: "3|1234|F|205654.30|1993-10-14|5-LOW|Clerk#000000955|0|sly final accounts boost. carefully regular id..."
|
||||
NOTICE: executing the copy locally for shard xxxxx
|
||||
|
@ -72,7 +81,7 @@ NOTICE: executing the copy locally for shard xxxxx
|
|||
CONTEXT: COPY orders_mx, line 25: "97|211|F|100572.55|1993-01-29|3-MEDIUM|Clerk#000000547|0|hang blithely along the regular accounts. f..."
|
||||
NOTICE: executing the copy locally for shard xxxxx
|
||||
CONTEXT: COPY orders_mx, line 38: "134|62|F|208201.46|1992-05-01|4-NOT SPECIFIED|Clerk#000000711|0|lar theodolites boos"
|
||||
\COPY orders_mx FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
|
||||
COPY orders_mx FROM :'orders_2_data_file' with delimiter '|';
|
||||
NOTICE: executing the copy locally for shard xxxxx
|
||||
CONTEXT: COPY orders_mx, line 2: "8998|80|F|147264.16|1993-01-04|5-LOW|Clerk#000000733|0| fluffily pending sauternes cajo"
|
||||
NOTICE: executing the copy locally for shard xxxxx
|
||||
|
@ -89,7 +98,8 @@ NOTICE: executing the copy locally for shard xxxxx
|
|||
CONTEXT: COPY orders_mx, line 43: "9159|1135|O|99594.61|1995-07-26|1-URGENT|Clerk#000000892|0|xcuses. quickly ironic deposits wake alon..."
|
||||
NOTICE: executing the copy locally for shard xxxxx
|
||||
CONTEXT: COPY orders_mx, line 69: "9281|904|F|173278.28|1992-02-24|1-URGENT|Clerk#000000530|0|eep furiously according to the requests; ..."
|
||||
\COPY citus_mx_test_schema.nation_hash_replicated FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
|
||||
\set nation_data_file :abs_srcdir '/data/nation.data'
|
||||
COPY citus_mx_test_schema.nation_hash_replicated FROM :'nation_data_file' with delimiter '|';
|
||||
NOTICE: executing the copy locally for shard xxxxx
|
||||
CONTEXT: COPY nation_hash_replicated, line 1: "0|ALGERIA|0| haggle. carefully final deposits detect slyly agai"
|
||||
NOTICE: executing the copy locally for shard xxxxx
|
||||
|
@ -124,7 +134,11 @@ show citus.local_shared_pool_size;
|
|||
-- when worker nodes gain capability to run dml commands on reference tables.
|
||||
\c - - - :master_port
|
||||
SET search_path TO public;
|
||||
\COPY customer_mx FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
|
||||
\COPY nation_mx FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
|
||||
\COPY part_mx FROM '@abs_srcdir@/data/part.data' with delimiter '|'
|
||||
\COPY supplier_mx FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
|
||||
\set customer_1_data_file :abs_srcdir '/data/customer.1.data'
|
||||
\set nation_data_file :abs_srcdir '/data/nation.data'
|
||||
\set part_data_file :abs_srcdir '/data/part.data'
|
||||
\set supplier_data_file :abs_srcdir '/data/supplier.data'
|
||||
COPY customer_mx FROM :'customer_1_data_file' with delimiter '|';
|
||||
COPY nation_mx FROM :'nation_data_file' with delimiter '|';
|
||||
COPY part_mx FROM :'part_data_file' with delimiter '|';
|
||||
COPY supplier_mx FROM :'supplier_data_file' with delimiter '|';
|
|
@ -86,10 +86,13 @@ SELECT create_reference_table('multi_outer_join_third_reference');
|
|||
|
||||
(1 row)
|
||||
|
||||
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|'
|
||||
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
|
||||
\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
|
||||
\copy multi_outer_join_right_reference FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
|
||||
\set customer_1_10_data :abs_srcdir '/data/customer-1-10.data'
|
||||
\set customer_11_20_data :abs_srcdir '/data/customer-11-20.data'
|
||||
\set customer_1_15_data :abs_srcdir '/data/customer-1-15.data'
|
||||
COPY multi_outer_join_left FROM :'customer_1_10_data' with delimiter '|';
|
||||
COPY multi_outer_join_left FROM :'customer_11_20_data' with delimiter '|';
|
||||
COPY multi_outer_join_right FROM :'customer_1_15_data' with delimiter '|';
|
||||
COPY multi_outer_join_right_reference FROM :'customer_1_15_data' with delimiter '|';
|
||||
-- Make sure we do not crash if one table has no shards
|
||||
SELECT
|
||||
min(l_custkey), max(l_custkey)
|
||||
|
@ -110,8 +113,9 @@ FROM
|
|||
(1 row)
|
||||
|
||||
-- Third table is a single shard table with all data
|
||||
\copy multi_outer_join_third FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|'
|
||||
\copy multi_outer_join_third_reference FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|'
|
||||
\set customer_1_30_data :abs_srcdir '/data/customer-1-30.data'
|
||||
COPY multi_outer_join_third FROM :'customer_1_30_data' with delimiter '|';
|
||||
COPY multi_outer_join_third_reference FROM :'customer_1_30_data' with delimiter '|';
|
||||
-- Regular outer join should return results for all rows
|
||||
SELECT
|
||||
min(l_custkey), max(l_custkey)
|
||||
|
@ -223,7 +227,8 @@ FROM
|
|||
(1 row)
|
||||
|
||||
-- Turn the right table into a large table
|
||||
\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
|
||||
\set customer_21_30_data :abs_srcdir '/data/customer-21-30.data'
|
||||
COPY multi_outer_join_right FROM :'customer_21_30_data' with delimiter '|';
|
||||
-- Shards do not have 1-1 matching. We should error here.
|
||||
SELECT
|
||||
min(l_custkey), max(l_custkey)
|
||||
|
@ -238,10 +243,12 @@ FROM
|
|||
TRUNCATE multi_outer_join_left;
|
||||
TRUNCATE multi_outer_join_right;
|
||||
-- reload shards with 1-1 matching
|
||||
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-subset-11-20.data' with delimiter '|'
|
||||
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
|
||||
\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
|
||||
\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-subset-21-30.data' with delimiter '|'
|
||||
\set customer_subset_11_20_data :abs_srcdir '/data/customer-subset-11-20.data'
|
||||
COPY multi_outer_join_left FROM :'customer_subset_11_20_data' with delimiter '|';
|
||||
COPY multi_outer_join_left FROM :'customer_21_30_data' with delimiter '|';
|
||||
\set customer_subset_21_30_data :abs_srcdir '/data/customer-subset-21-30.data'
|
||||
COPY multi_outer_join_right FROM :'customer_11_20_data' with delimiter '|';
|
||||
COPY multi_outer_join_right FROM :'customer_subset_21_30_data' with delimiter '|';
|
||||
-- multi_outer_join_third is a single shard table
|
||||
-- Regular left join should work as expected
|
||||
SELECT
|
||||
|
@ -795,7 +802,8 @@ LIMIT 20;
|
|||
(16 rows)
|
||||
|
||||
-- Add a shard to the left table that overlaps with multiple shards in the right
|
||||
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
|
||||
\set customer_1_data_file :abs_srcdir '/data/customer.1.data'
|
||||
COPY multi_outer_join_left FROM :'customer_1_data_file' with delimiter '|';
|
||||
-- All outer joins should error out
|
||||
SELECT
|
||||
min(l_custkey), max(l_custkey)
|
||||
|
@ -877,7 +885,7 @@ SELECT create_distributed_table('left_values', 'val');
|
|||
|
||||
(1 row)
|
||||
|
||||
\copy left_values from stdin
|
||||
COPY left_values from stdin;
|
||||
CREATE TABLE right_values(val int);
|
||||
SELECT create_distributed_table('right_values', 'val');
|
||||
create_distributed_table
|
||||
|
@ -885,7 +893,7 @@ SELECT create_distributed_table('right_values', 'val');
|
|||
|
||||
(1 row)
|
||||
|
||||
\copy right_values from stdin
|
||||
COPY right_values from stdin;
|
||||
SELECT
|
||||
*
|
||||
FROM
|
|
@ -14,9 +14,9 @@ CREATE TABLE multi_outer_join_left_hash
|
|||
l_comment varchar(117) not null
|
||||
);
|
||||
SELECT create_distributed_table('multi_outer_join_left_hash', 'l_custkey');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE multi_outer_join_right_reference
|
||||
|
@ -31,9 +31,9 @@ CREATE TABLE multi_outer_join_right_reference
|
|||
r_comment varchar(117) not null
|
||||
);
|
||||
SELECT create_reference_table('multi_outer_join_right_reference');
|
||||
create_reference_table
|
||||
------------------------
|
||||
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE multi_outer_join_third_reference
|
||||
|
@ -48,9 +48,9 @@ CREATE TABLE multi_outer_join_third_reference
|
|||
t_comment varchar(117) not null
|
||||
);
|
||||
SELECT create_reference_table('multi_outer_join_third_reference');
|
||||
create_reference_table
|
||||
------------------------
|
||||
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE multi_outer_join_right_hash
|
||||
|
@ -65,9 +65,9 @@ CREATE TABLE multi_outer_join_right_hash
|
|||
r_comment varchar(117) not null
|
||||
);
|
||||
SELECT create_distributed_table('multi_outer_join_right_hash', 'r_custkey');
|
||||
create_distributed_table
|
||||
--------------------------
|
||||
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Make sure we do not crash if both tables are empty
|
||||
|
@ -75,24 +75,27 @@ SELECT
|
|||
min(l_custkey), max(l_custkey)
|
||||
FROM
|
||||
multi_outer_join_left_hash a LEFT JOIN multi_outer_join_third_reference b ON (l_custkey = t_custkey);
|
||||
min | max
|
||||
-----+-----
|
||||
|
|
||||
min | max
|
||||
---------------------------------------------------------------------
|
||||
|
|
||||
(1 row)
|
||||
|
||||
-- Left table is a large table
|
||||
\copy multi_outer_join_left_hash FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|'
|
||||
\copy multi_outer_join_left_hash FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
|
||||
\set customer_1_10_data :abs_srcdir '/data/customer-1-10.data'
|
||||
\set customer_11_20_data :abs_srcdir '/data/customer-11-20.data'
|
||||
COPY multi_outer_join_left_hash FROM :'customer_1_10_data' with delimiter '|';
|
||||
COPY multi_outer_join_left_hash FROM :'customer_11_20_data' with delimiter '|';
|
||||
-- Right table is a small table
|
||||
\copy multi_outer_join_right_reference FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
|
||||
\copy multi_outer_join_right_hash FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
|
||||
\set customer_1_15_data :abs_srcdir '/data/customer-1-15.data'
|
||||
COPY multi_outer_join_right_reference FROM :'customer_1_15_data' with delimiter '|';
|
||||
COPY multi_outer_join_right_hash FROM :'customer_1_15_data' with delimiter '|';
|
||||
-- Make sure we do not crash if one table has data
|
||||
SELECT
|
||||
min(l_custkey), max(l_custkey)
|
||||
FROM
|
||||
multi_outer_join_left_hash a LEFT JOIN multi_outer_join_third_reference b ON (l_custkey = t_custkey);
|
||||
min | max
|
||||
-----+-----
|
||||
min | max
|
||||
---------------------------------------------------------------------
|
||||
1 | 20
|
||||
(1 row)
|
||||
|
||||
|
@ -100,21 +103,22 @@ SELECT
|
|||
min(t_custkey), max(t_custkey)
|
||||
FROM
|
||||
multi_outer_join_third_reference a LEFT JOIN multi_outer_join_right_reference b ON (r_custkey = t_custkey);
|
||||
min | max
|
||||
-----+-----
|
||||
|
|
||||
min | max
|
||||
---------------------------------------------------------------------
|
||||
|
|
||||
(1 row)
|
||||
|
||||
-- Third table is a single shard table with all data
|
||||
\copy multi_outer_join_third_reference FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|'
|
||||
\copy multi_outer_join_right_hash FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|'
|
||||
\set customer_1_30_data :abs_srcdir '/data/customer-1-30.data'
|
||||
COPY multi_outer_join_third_reference FROM :'customer_1_30_data' with delimiter '|';
|
||||
COPY multi_outer_join_right_hash FROM :'customer_1_30_data' with delimiter '|';
|
||||
-- Regular outer join should return results for all rows
|
||||
SELECT
|
||||
min(l_custkey), max(l_custkey)
|
||||
FROM
|
||||
multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey);
|
||||
min | max
|
||||
-----+-----
|
||||
min | max
|
||||
---------------------------------------------------------------------
|
||||
1 | 20
|
||||
(1 row)
|
||||
|
||||
|
@ -123,8 +127,8 @@ SELECT
|
|||
count(*)
|
||||
FROM
|
||||
multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_nationkey = r_nationkey);
|
||||
count
|
||||
-------
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
28
|
||||
(1 row)
|
||||
|
||||
|
@ -135,8 +139,8 @@ FROM
|
|||
multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey)
|
||||
WHERE
|
||||
r_custkey IS NULL;
|
||||
min | max
|
||||
-----+-----
|
||||
min | max
|
||||
---------------------------------------------------------------------
|
||||
16 | 20
|
||||
(1 row)
|
||||
|
||||
|
@ -147,8 +151,8 @@ FROM
|
|||
multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey)
|
||||
WHERE
|
||||
r_custkey IS NULL OR r_custkey = 5;
|
||||
min | max
|
||||
-----+-----
|
||||
min | max
|
||||
---------------------------------------------------------------------
|
||||
5 | 20
|
||||
(1 row)
|
||||
|
||||
|
@ -160,8 +164,8 @@ FROM
|
|||
multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey)
|
||||
WHERE
|
||||
r_custkey = 5 or r_custkey > 15;
|
||||
min | max
|
||||
-----+-----
|
||||
min | max
|
||||
---------------------------------------------------------------------
|
||||
5 | 5
|
||||
(1 row)
|
||||
|
||||
|
@ -171,8 +175,8 @@ SELECT
|
|||
FROM
|
||||
multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b
|
||||
ON (l_custkey = r_custkey AND r_custkey = 5);
|
||||
count | count
|
||||
-------+-------
|
||||
count | count
|
||||
---------------------------------------------------------------------
|
||||
20 | 1
|
||||
(1 row)
|
||||
|
||||
|
@ -182,8 +186,8 @@ SELECT
|
|||
FROM
|
||||
multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b
|
||||
ON (l_custkey = r_custkey AND r_custkey = -1 /* nonexistent */);
|
||||
count | count
|
||||
-------+-------
|
||||
count | count
|
||||
---------------------------------------------------------------------
|
||||
20 | 0
|
||||
(1 row)
|
||||
|
||||
|
@ -193,8 +197,8 @@ SELECT
|
|||
FROM
|
||||
multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b
|
||||
ON (l_custkey = r_custkey AND l_custkey = -1 /* nonexistent */);
|
||||
count | count
|
||||
-------+-------
|
||||
count | count
|
||||
---------------------------------------------------------------------
|
||||
20 | 0
|
||||
(1 row)
|
||||
|
||||
|
@ -210,13 +214,14 @@ SELECT
|
|||
min(l_custkey), max(l_custkey)
|
||||
FROM
|
||||
multi_outer_join_right_reference a RIGHT JOIN multi_outer_join_left_hash b ON (l_custkey = r_custkey);
|
||||
min | max
|
||||
-----+-----
|
||||
min | max
|
||||
---------------------------------------------------------------------
|
||||
1 | 20
|
||||
(1 row)
|
||||
|
||||
-- load some more data
|
||||
\copy multi_outer_join_right_reference FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
|
||||
\set customer_21_30_data :abs_srcdir '/data/customer-21-30.data'
|
||||
COPY multi_outer_join_right_reference FROM :'customer_21_30_data' with delimiter '|';
|
||||
-- Update shards so that they do not have 1-1 matching, triggering an error.
|
||||
UPDATE pg_dist_shard SET shardminvalue = '2147483646' WHERE shardid = 1260006;
|
||||
UPDATE pg_dist_shard SET shardmaxvalue = '2147483647' WHERE shardid = 1260006;
|
||||
|
@ -230,20 +235,20 @@ UPDATE pg_dist_shard SET shardmaxvalue = '-1073741825' WHERE shardid = 1260006;
|
|||
-- empty tables
|
||||
TRUNCATE multi_outer_join_left_hash, multi_outer_join_right_hash, multi_outer_join_right_reference;
|
||||
-- reload shards with 1-1 matching
|
||||
\copy multi_outer_join_left_hash FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
|
||||
\copy multi_outer_join_left_hash FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
|
||||
\copy multi_outer_join_right_reference FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
|
||||
\copy multi_outer_join_right_reference FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
|
||||
\copy multi_outer_join_right_hash FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
|
||||
\copy multi_outer_join_right_hash FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
|
||||
COPY multi_outer_join_left_hash FROM :'customer_1_15_data' with delimiter '|';
|
||||
COPY multi_outer_join_left_hash FROM :'customer_21_30_data' with delimiter '|';
|
||||
COPY multi_outer_join_right_reference FROM :'customer_11_20_data' with delimiter '|';
|
||||
COPY multi_outer_join_right_reference FROM :'customer_21_30_data' with delimiter '|';
|
||||
COPY multi_outer_join_right_hash FROM :'customer_11_20_data' with delimiter '|';
|
||||
COPY multi_outer_join_right_hash FROM :'customer_21_30_data' with delimiter '|';
|
||||
-- multi_outer_join_third_reference is a single shard table
|
||||
-- Regular left join should work as expected
|
||||
SELECT
|
||||
min(l_custkey), max(l_custkey)
|
||||
FROM
|
||||
multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_hash b ON (l_custkey = r_custkey);
|
||||
min | max
|
||||
-----+-----
|
||||
min | max
|
||||
---------------------------------------------------------------------
|
||||
1 | 30
|
||||
(1 row)
|
||||
|
||||
|
@ -260,8 +265,8 @@ FROM
|
|||
multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey)
|
||||
WHERE
|
||||
r_custkey IS NULL;
|
||||
min | max
|
||||
-----+-----
|
||||
min | max
|
||||
---------------------------------------------------------------------
|
||||
1 | 10
|
||||
(1 row)
|
||||
|
||||
|
@ -272,8 +277,8 @@ FROM
|
|||
multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey)
|
||||
WHERE
|
||||
r_custkey IS NULL OR r_custkey = 15;
|
||||
min | max
|
||||
-----+-----
|
||||
min | max
|
||||
---------------------------------------------------------------------
|
||||
1 | 15
|
||||
(1 row)
|
||||
|
||||
|
@ -285,8 +290,8 @@ FROM
|
|||
multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey)
|
||||
WHERE
|
||||
r_custkey = 21 or r_custkey < 10;
|
||||
min | max
|
||||
-----+-----
|
||||
min | max
|
||||
---------------------------------------------------------------------
|
||||
21 | 21
|
||||
(1 row)
|
||||
|
||||
|
@ -296,8 +301,8 @@ SELECT
|
|||
FROM
|
||||
multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b
|
||||
ON (l_custkey = r_custkey AND r_custkey = 21);
|
||||
count | count
|
||||
-------+-------
|
||||
count | count
|
||||
---------------------------------------------------------------------
|
||||
25 | 1
|
||||
(1 row)
|
||||
|
||||
|
@ -313,8 +318,8 @@ SELECT
|
|||
min(l_custkey), max(l_custkey)
|
||||
FROM
|
||||
multi_outer_join_right_reference a RIGHT JOIN multi_outer_join_left_hash b ON (l_custkey = r_custkey);
|
||||
min | max
|
||||
-----+-----
|
||||
min | max
|
||||
---------------------------------------------------------------------
|
||||
1 | 30
|
||||
(1 row)
|
||||
|
||||
|
@ -346,18 +351,18 @@ FROM
|
|||
LEFT JOIN multi_outer_join_right_reference r1 ON (l1.l_custkey = r1.r_custkey)
|
||||
LEFT JOIN multi_outer_join_third_reference t1 ON (r1.r_custkey = t1.t_custkey)
|
||||
ORDER BY 1;
|
||||
l_custkey | r_custkey | t_custkey
|
||||
-----------+-----------+-----------
|
||||
1 | |
|
||||
2 | |
|
||||
3 | |
|
||||
4 | |
|
||||
5 | |
|
||||
6 | |
|
||||
7 | |
|
||||
8 | |
|
||||
9 | |
|
||||
10 | |
|
||||
l_custkey | r_custkey | t_custkey
|
||||
---------------------------------------------------------------------
|
||||
1 | |
|
||||
2 | |
|
||||
3 | |
|
||||
4 | |
|
||||
5 | |
|
||||
6 | |
|
||||
7 | |
|
||||
8 | |
|
||||
9 | |
|
||||
10 | |
|
||||
11 | 11 | 11
|
||||
12 | 12 | 12
|
||||
13 | 13 | 13
|
||||
|
@ -392,18 +397,18 @@ FROM
|
|||
RIGHT JOIN multi_outer_join_right_hash r1 ON (t1.t_custkey = r1.r_custkey)
|
||||
LEFT JOIN multi_outer_join_left_hash l1 ON (r1.r_custkey = l1.l_custkey)
|
||||
ORDER BY 1,2,3;
|
||||
t_custkey | r_custkey | l_custkey
|
||||
-----------+-----------+-----------
|
||||
t_custkey | r_custkey | l_custkey
|
||||
---------------------------------------------------------------------
|
||||
11 | 11 | 11
|
||||
12 | 12 | 12
|
||||
13 | 13 | 13
|
||||
14 | 14 | 14
|
||||
15 | 15 | 15
|
||||
16 | 16 |
|
||||
17 | 17 |
|
||||
18 | 18 |
|
||||
19 | 19 |
|
||||
20 | 20 |
|
||||
16 | 16 |
|
||||
17 | 17 |
|
||||
18 | 18 |
|
||||
19 | 19 |
|
||||
20 | 20 |
|
||||
21 | 21 | 21
|
||||
22 | 22 | 22
|
||||
23 | 23 | 23
|
||||
|
@ -426,13 +431,13 @@ FROM
|
|||
WHERE
|
||||
l_custkey is NULL
|
||||
ORDER BY 1;
|
||||
t_custkey | r_custkey | l_custkey
|
||||
-----------+-----------+-----------
|
||||
16 | 16 |
|
||||
17 | 17 |
|
||||
18 | 18 |
|
||||
19 | 19 |
|
||||
20 | 20 |
|
||||
t_custkey | r_custkey | l_custkey
|
||||
---------------------------------------------------------------------
|
||||
16 | 16 |
|
||||
17 | 17 |
|
||||
18 | 18 |
|
||||
19 | 19 |
|
||||
20 | 20 |
|
||||
(5 rows)
|
||||
|
||||
-- Cascading right join with single shard left most table should work
|
||||
|
@ -443,8 +448,8 @@ FROM
|
|||
RIGHT JOIN multi_outer_join_right_hash r1 ON (t1.t_custkey = r1.r_custkey)
|
||||
RIGHT JOIN multi_outer_join_left_hash l1 ON (r1.r_custkey = l1.l_custkey)
|
||||
ORDER BY l_custkey;
|
||||
t_custkey | r_custkey | l_custkey
|
||||
-----------+-----------+-----------
|
||||
t_custkey | r_custkey | l_custkey
|
||||
---------------------------------------------------------------------
|
||||
| | 1
|
||||
| | 2
|
||||
| | 3
|
||||
|
@ -479,18 +484,18 @@ FROM
|
|||
multi_outer_join_left_hash l1
|
||||
FULL JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey)
|
||||
ORDER BY 1,2;
|
||||
l_custkey | r_custkey
|
||||
-----------+-----------
|
||||
1 |
|
||||
2 |
|
||||
3 |
|
||||
4 |
|
||||
5 |
|
||||
6 |
|
||||
7 |
|
||||
8 |
|
||||
9 |
|
||||
10 |
|
||||
l_custkey | r_custkey
|
||||
---------------------------------------------------------------------
|
||||
1 |
|
||||
2 |
|
||||
3 |
|
||||
4 |
|
||||
5 |
|
||||
6 |
|
||||
7 |
|
||||
8 |
|
||||
9 |
|
||||
10 |
|
||||
11 | 11
|
||||
12 | 12
|
||||
13 | 13
|
||||
|
@ -519,21 +524,21 @@ SELECT
|
|||
FROM
|
||||
multi_outer_join_left_hash l1
|
||||
FULL JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey)
|
||||
WHERE
|
||||
WHERE
|
||||
r_custkey is NULL
|
||||
ORDER BY 1;
|
||||
l_custkey | r_custkey
|
||||
-----------+-----------
|
||||
1 |
|
||||
2 |
|
||||
3 |
|
||||
4 |
|
||||
5 |
|
||||
6 |
|
||||
7 |
|
||||
8 |
|
||||
9 |
|
||||
10 |
|
||||
l_custkey | r_custkey
|
||||
---------------------------------------------------------------------
|
||||
1 |
|
||||
2 |
|
||||
3 |
|
||||
4 |
|
||||
5 |
|
||||
6 |
|
||||
7 |
|
||||
8 |
|
||||
9 |
|
||||
10 |
|
||||
(10 rows)
|
||||
|
||||
-- full outer join + anti (left) should work with 1-1 matched shards
|
||||
|
@ -542,11 +547,11 @@ SELECT
|
|||
FROM
|
||||
multi_outer_join_left_hash l1
|
||||
FULL JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey)
|
||||
WHERE
|
||||
WHERE
|
||||
l_custkey is NULL
|
||||
ORDER BY 2;
|
||||
l_custkey | r_custkey
|
||||
-----------+-----------
|
||||
l_custkey | r_custkey
|
||||
---------------------------------------------------------------------
|
||||
| 16
|
||||
| 17
|
||||
| 18
|
||||
|
@ -560,21 +565,21 @@ SELECT
|
|||
FROM
|
||||
multi_outer_join_left_hash l1
|
||||
FULL JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey)
|
||||
WHERE
|
||||
WHERE
|
||||
l_custkey is NULL or r_custkey is NULL
|
||||
ORDER BY 1,2 DESC;
|
||||
l_custkey | r_custkey
|
||||
-----------+-----------
|
||||
1 |
|
||||
2 |
|
||||
3 |
|
||||
4 |
|
||||
5 |
|
||||
6 |
|
||||
7 |
|
||||
8 |
|
||||
9 |
|
||||
10 |
|
||||
l_custkey | r_custkey
|
||||
---------------------------------------------------------------------
|
||||
1 |
|
||||
2 |
|
||||
3 |
|
||||
4 |
|
||||
5 |
|
||||
6 |
|
||||
7 |
|
||||
8 |
|
||||
9 |
|
||||
10 |
|
||||
| 20
|
||||
| 19
|
||||
| 18
|
||||
|
@ -598,8 +603,8 @@ FROM
|
|||
INNER JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey)
|
||||
LEFT JOIN multi_outer_join_third_reference t1 ON (r1.r_custkey = t1.t_custkey)
|
||||
ORDER BY 1;
|
||||
l_custkey | r_custkey | t_custkey
|
||||
-----------+-----------+-----------
|
||||
l_custkey | r_custkey | t_custkey
|
||||
---------------------------------------------------------------------
|
||||
11 | 11 | 11
|
||||
12 | 12 | 12
|
||||
13 | 13 | 13
|
||||
|
@ -625,18 +630,18 @@ FROM
|
|||
INNER JOIN multi_outer_join_third_reference t1 ON (l1.l_custkey = t1.t_custkey)
|
||||
LEFT JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey)
|
||||
ORDER BY 1,2,3;
|
||||
l_custkey | t_custkey | r_custkey
|
||||
-----------+-----------+-----------
|
||||
1 | 1 |
|
||||
2 | 2 |
|
||||
3 | 3 |
|
||||
4 | 4 |
|
||||
5 | 5 |
|
||||
6 | 6 |
|
||||
7 | 7 |
|
||||
8 | 8 |
|
||||
9 | 9 |
|
||||
10 | 10 |
|
||||
l_custkey | t_custkey | r_custkey
|
||||
---------------------------------------------------------------------
|
||||
1 | 1 |
|
||||
2 | 2 |
|
||||
3 | 3 |
|
||||
4 | 4 |
|
||||
5 | 5 |
|
||||
6 | 6 |
|
||||
7 | 7 |
|
||||
8 | 8 |
|
||||
9 | 9 |
|
||||
10 | 10 |
|
||||
11 | 11 | 11
|
||||
12 | 12 | 12
|
||||
13 | 13 | 13
|
||||
|
@ -662,18 +667,18 @@ FROM
|
|||
INNER JOIN multi_outer_join_left_hash l1 ON (l1.l_custkey = t1.t_custkey)
|
||||
LEFT JOIN multi_outer_join_right_reference r1 ON (l1.l_custkey = r1.r_custkey)
|
||||
ORDER BY 1,2,3;
|
||||
t_custkey | l_custkey | r_custkey
|
||||
-----------+-----------+-----------
|
||||
1 | 1 |
|
||||
2 | 2 |
|
||||
3 | 3 |
|
||||
4 | 4 |
|
||||
5 | 5 |
|
||||
6 | 6 |
|
||||
7 | 7 |
|
||||
8 | 8 |
|
||||
9 | 9 |
|
||||
10 | 10 |
|
||||
t_custkey | l_custkey | r_custkey
|
||||
---------------------------------------------------------------------
|
||||
1 | 1 |
|
||||
2 | 2 |
|
||||
3 | 3 |
|
||||
4 | 4 |
|
||||
5 | 5 |
|
||||
6 | 6 |
|
||||
7 | 7 |
|
||||
8 | 8 |
|
||||
9 | 9 |
|
||||
10 | 10 |
|
||||
11 | 11 | 11
|
||||
12 | 12 | 12
|
||||
13 | 13 | 13
|
||||
|
@ -699,18 +704,18 @@ FROM
|
|||
INNER JOIN multi_outer_join_third_reference t1 ON (l1.l_custkey = t1.t_custkey)
|
||||
LEFT JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey)
|
||||
ORDER BY 1,2,3;
|
||||
l_custkey | t_custkey | r_custkey
|
||||
-----------+-----------+-----------
|
||||
1 | 1 |
|
||||
2 | 2 |
|
||||
3 | 3 |
|
||||
4 | 4 |
|
||||
5 | 5 |
|
||||
6 | 6 |
|
||||
7 | 7 |
|
||||
8 | 8 |
|
||||
9 | 9 |
|
||||
10 | 10 |
|
||||
l_custkey | t_custkey | r_custkey
|
||||
---------------------------------------------------------------------
|
||||
1 | 1 |
|
||||
2 | 2 |
|
||||
3 | 3 |
|
||||
4 | 4 |
|
||||
5 | 5 |
|
||||
6 | 6 |
|
||||
7 | 7 |
|
||||
8 | 8 |
|
||||
9 | 9 |
|
||||
10 | 10 |
|
||||
11 | 11 | 11
|
||||
12 | 12 | 12
|
||||
13 | 13 | 13
|
||||
|
@ -738,31 +743,31 @@ FROM
|
|||
WHERE
|
||||
r_custkey is NULL
|
||||
ORDER BY 1;
|
||||
l_custkey | t_custkey | r_custkey
|
||||
-----------+-----------+-----------
|
||||
1 | 1 |
|
||||
2 | 2 |
|
||||
3 | 3 |
|
||||
4 | 4 |
|
||||
5 | 5 |
|
||||
6 | 6 |
|
||||
7 | 7 |
|
||||
8 | 8 |
|
||||
9 | 9 |
|
||||
10 | 10 |
|
||||
l_custkey | t_custkey | r_custkey
|
||||
---------------------------------------------------------------------
|
||||
1 | 1 |
|
||||
2 | 2 |
|
||||
3 | 3 |
|
||||
4 | 4 |
|
||||
5 | 5 |
|
||||
6 | 6 |
|
||||
7 | 7 |
|
||||
8 | 8 |
|
||||
9 | 9 |
|
||||
10 | 10 |
|
||||
(10 rows)
|
||||
|
||||
-- Test joinExpr aliases by performing an outer-join.
|
||||
SELECT
|
||||
SELECT
|
||||
t_custkey
|
||||
FROM
|
||||
(multi_outer_join_right_hash r1
|
||||
FROM
|
||||
(multi_outer_join_right_hash r1
|
||||
LEFT OUTER JOIN multi_outer_join_left_hash l1 ON (l1.l_custkey = r1.r_custkey)) AS
|
||||
test(c_custkey, c_nationkey)
|
||||
INNER JOIN multi_outer_join_third_reference t1 ON (test.c_custkey = t1.t_custkey)
|
||||
ORDER BY 1;
|
||||
t_custkey
|
||||
-----------
|
||||
t_custkey
|
||||
---------------------------------------------------------------------
|
||||
11
|
||||
12
|
||||
13
|
||||
|
@ -802,8 +807,8 @@ LEFT JOIN (
|
|||
GROUP BY l1.l_custkey
|
||||
ORDER BY cnt DESC, l1.l_custkey DESC
|
||||
LIMIT 20;
|
||||
l_custkey | cnt
|
||||
-----------+-----
|
||||
l_custkey | cnt
|
||||
---------------------------------------------------------------------
|
||||
30 | 1
|
||||
29 | 1
|
||||
28 | 1
|
||||
|
@ -827,24 +832,24 @@ LIMIT 20;
|
|||
(20 rows)
|
||||
|
||||
-- full join among reference tables should go through router planner
|
||||
SELECT
|
||||
t_custkey, r_custkey
|
||||
FROM
|
||||
multi_outer_join_right_reference FULL JOIN
|
||||
SELECT
|
||||
t_custkey, r_custkey
|
||||
FROM
|
||||
multi_outer_join_right_reference FULL JOIN
|
||||
multi_outer_join_third_reference ON (t_custkey = r_custkey)
|
||||
ORDER BY 1;
|
||||
t_custkey | r_custkey
|
||||
-----------+-----------
|
||||
1 |
|
||||
2 |
|
||||
3 |
|
||||
4 |
|
||||
5 |
|
||||
6 |
|
||||
7 |
|
||||
8 |
|
||||
9 |
|
||||
10 |
|
||||
t_custkey | r_custkey
|
||||
---------------------------------------------------------------------
|
||||
1 |
|
||||
2 |
|
||||
3 |
|
||||
4 |
|
||||
5 |
|
||||
6 |
|
||||
7 |
|
||||
8 |
|
||||
9 |
|
||||
10 |
|
||||
11 | 11
|
||||
12 | 12
|
||||
13 | 13
|
||||
|
@ -879,8 +884,8 @@ LEFT JOIN (
|
|||
ON (r_name = t_name)
|
||||
) AS bar
|
||||
ON (l_name = r_name);
|
||||
count
|
||||
-------
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
25
|
||||
(1 row)
|
||||
|
|
@ -0,0 +1,8 @@
\set test_tablespace :abs_srcdir '/tmp_check/ts0'
CREATE TABLESPACE test_tablespace LOCATION XXXX
\c - - - :worker_1_port
\set test_tablespace :abs_srcdir '/tmp_check/ts1'
CREATE TABLESPACE test_tablespace LOCATION XXXX
\c - - - :worker_2_port
\set test_tablespace :abs_srcdir '/tmp_check/ts2'
CREATE TABLESPACE test_tablespace LOCATION XXXX
@ -1,20 +0,0 @@
--
-- MULTI_LOAD_DATA
--

\copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'

\copy orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\copy orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'

\copy orders_reference FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\copy orders_reference FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'

\copy customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\copy customer_append FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|', append_to_shard 360006)
\copy nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
\copy part FROM '@abs_srcdir@/data/part.data' with delimiter '|'
\copy part_append FROM '@abs_srcdir@/data/part.data' with (delimiter '|', append_to_shard 360009)
\copy supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
\copy supplier_single_shard FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
@ -1,4 +0,0 @@
\copy lineitem_hash_part FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_hash_part FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy orders_hash_part FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\copy orders_hash_part FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
@ -1,25 +0,0 @@
|
|||
--
|
||||
-- MULTI_STAGE_MORE_DATA
|
||||
--
|
||||
|
||||
|
||||
SET citus.next_shard_id TO 280000;
|
||||
|
||||
|
||||
-- We load more data to customer and part tables to test distributed joins. The
|
||||
-- loading causes the planner to consider customer and part tables as large, and
|
||||
-- evaluate plans where some of the underlying tables need to be repartitioned.
|
||||
|
||||
\copy customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
|
||||
\copy customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
|
||||
\copy part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|'
|
||||
|
||||
SELECT master_create_empty_shard('customer_append') AS shardid1 \gset
|
||||
SELECT master_create_empty_shard('customer_append') AS shardid2 \gset
|
||||
|
||||
copy customer_append FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|', append_to_shard :shardid1);
|
||||
copy customer_append FROM '@abs_srcdir@/data/customer.3.data' with (delimiter '|', append_to_shard :shardid2);
|
||||
|
||||
SELECT master_create_empty_shard('part_append') AS shardid \gset
|
||||
|
||||
copy part_append FROM '@abs_srcdir@/data/part.more.data' with (delimiter '|', append_to_shard :shardid);
|
|
@ -1,38 +0,0 @@
|
|||
--
|
||||
-- MULTI_MULTIUSER_LOAD_DATA
|
||||
--
|
||||
|
||||
-- Tests for loading data in a distributed cluster. Please note that the number
|
||||
-- of shards uploaded depends on two config values: citusdb.shard_replication_factor and
|
||||
-- citusdb.shard_max_size. These values are manually set in pg_regress.c. We also set
|
||||
-- the shard placement policy to the local-node-first policy as other regression
|
||||
-- tests expect the placements to be in that order.
|
||||
|
||||
SET citusdb.shard_placement_policy TO 'local-node-first';
|
||||
|
||||
-- load as superuser
|
||||
\copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
|
||||
|
||||
-- as user with ALL access
|
||||
SET ROLE full_access;
|
||||
\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
|
||||
RESET ROLE;
|
||||
|
||||
-- as user with SELECT access, should fail
|
||||
SET ROLE read_access;
|
||||
\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
|
||||
RESET ROLE;
|
||||
|
||||
-- as user with no access, should fail
|
||||
SET ROLE no_access;
|
||||
\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
|
||||
RESET ROLE;
|
||||
|
||||
SET ROLE full_access;
|
||||
\copy orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
|
||||
\copy orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
|
||||
|
||||
\copy customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
|
||||
\copy nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
|
||||
\copy part FROM '@abs_srcdir@/data/part.data' with delimiter '|'
|
||||
\copy supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
|
|
@ -1,64 +0,0 @@
|
|||
--
|
||||
-- MULTI_MX_COPY_DATA
|
||||
--
|
||||
|
||||
\COPY nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
|
||||
|
||||
SET search_path TO citus_mx_test_schema;
|
||||
\COPY nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
|
||||
\COPY citus_mx_test_schema_join_1.nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
|
||||
\COPY citus_mx_test_schema_join_1.nation_hash_2 FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
|
||||
\COPY citus_mx_test_schema_join_2.nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
|
||||
|
||||
SET citus.shard_replication_factor TO 2;
|
||||
CREATE TABLE citus_mx_test_schema.nation_hash_replicated AS SELECT * FROM citus_mx_test_schema.nation_hash;
|
||||
SELECT create_distributed_table('citus_mx_test_schema.nation_hash_replicated', 'n_nationkey');
|
||||
\COPY nation_hash_replicated FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
|
||||
|
||||
-- now try loading data from worker node
|
||||
\c - - - :worker_1_port
|
||||
SET search_path TO public;
|
||||
|
||||
\COPY lineitem_mx FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
|
||||
\COPY lineitem_mx FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
|
||||
|
||||
\COPY citus_mx_test_schema.nation_hash_replicated FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
|
||||
|
||||
\c - - - :worker_2_port
|
||||
-- and use second worker as well
|
||||
\COPY orders_mx FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
|
||||
\COPY orders_mx FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
|
||||
\COPY citus_mx_test_schema.nation_hash_replicated FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
|
||||
|
||||
-- get ready for the next test
|
||||
TRUNCATE orders_mx;
|
||||
|
||||
\c - - - :worker_2_port
|
||||
SET citus.log_local_commands TO ON;
|
||||
-- simulate the case where there is no connection slots available
|
||||
ALTER SYSTEM SET citus.local_shared_pool_size TO -1;
|
||||
SELECT pg_reload_conf();
|
||||
SELECT pg_sleep(0.1);
|
||||
show citus.local_shared_pool_size;
|
||||
\COPY orders_mx FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
|
||||
\COPY orders_mx FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
|
||||
|
||||
\COPY citus_mx_test_schema.nation_hash_replicated FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
|
||||
|
||||
-- set it back
|
||||
ALTER SYSTEM RESET citus.local_shared_pool_size;
|
||||
SELECT pg_reload_conf();
|
||||
SELECT pg_sleep(0.1);
|
||||
show citus.local_shared_pool_size;
|
||||
|
||||
-- These copies were intended to test copying data to single sharded table from
|
||||
-- worker nodes, yet in order to remove broadcast logic related codes we change
|
||||
-- the table to reference table and copy data from master. Should be updated
|
||||
-- when worker nodes gain capability to run dml commands on reference tables.
|
||||
\c - - - :master_port
|
||||
SET search_path TO public;
|
||||
|
||||
\COPY customer_mx FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
|
||||
\COPY nation_mx FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
|
||||
\COPY part_mx FROM '@abs_srcdir@/data/part.data' with delimiter '|'
|
||||
\COPY supplier_mx FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
|
|
@ -1,5 +0,0 @@
|
|||
CREATE TABLESPACE test_tablespace LOCATION '@abs_srcdir@/tmp_check/ts0';
|
||||
\c - - - :worker_1_port
|
||||
CREATE TABLESPACE test_tablespace LOCATION '@abs_srcdir@/tmp_check/ts1';
|
||||
\c - - - :worker_2_port
|
||||
CREATE TABLESPACE test_tablespace LOCATION '@abs_srcdir@/tmp_check/ts2';
|
|
@ -1,16 +0,0 @@
|
|||
--
|
||||
-- MULTI_LOAD_DATA
|
||||
--
|
||||
\copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
|
||||
\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
|
||||
\copy orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
|
||||
\copy orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
|
||||
\copy orders_reference FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
|
||||
\copy orders_reference FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
|
||||
\copy customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
|
||||
\copy customer_append FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|', append_to_shard 360006)
|
||||
\copy nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
|
||||
\copy part FROM '@abs_srcdir@/data/part.data' with delimiter '|'
|
||||
\copy part_append FROM '@abs_srcdir@/data/part.data' with (delimiter '|', append_to_shard 360009)
|
||||
\copy supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
|
||||
\copy supplier_single_shard FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
|
|
@ -1,4 +0,0 @@
|
|||
\copy lineitem_hash_part FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
|
||||
\copy lineitem_hash_part FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
|
||||
\copy orders_hash_part FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
|
||||
\copy orders_hash_part FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
|
|
@ -1,16 +0,0 @@
|
|||
--
|
||||
-- MULTI_STAGE_MORE_DATA
|
||||
--
|
||||
SET citus.next_shard_id TO 280000;
|
||||
-- We load more data to customer and part tables to test distributed joins. The
|
||||
-- loading causes the planner to consider customer and part tables as large, and
|
||||
-- evaluate plans where some of the underlying tables need to be repartitioned.
|
||||
\copy customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
|
||||
\copy customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
|
||||
\copy part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|'
|
||||
SELECT master_create_empty_shard('customer_append') AS shardid1 \gset
|
||||
SELECT master_create_empty_shard('customer_append') AS shardid2 \gset
|
||||
copy customer_append FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|', append_to_shard :shardid1);
|
||||
copy customer_append FROM '@abs_srcdir@/data/customer.3.data' with (delimiter '|', append_to_shard :shardid2);
|
||||
SELECT master_create_empty_shard('part_append') AS shardid \gset
|
||||
copy part_append FROM '@abs_srcdir@/data/part.more.data' with (delimiter '|', append_to_shard :shardid);
|
|
@ -1,32 +0,0 @@
|
|||
--
|
||||
-- MULTI_MULTIUSER_LOAD_DATA
|
||||
--
|
||||
-- Tests for loading data in a distributed cluster. Please note that the number
|
||||
-- of shards uploaded depends on two config values: citusdb.shard_replication_factor and
|
||||
-- citusdb.shard_max_size. These values are manually set in pg_regress.c. We also set
|
||||
-- the shard placement policy to the local-node-first policy as other regression
|
||||
-- tests expect the placements to be in that order.
|
||||
SET citusdb.shard_placement_policy TO 'local-node-first';
|
||||
-- load as superuser
|
||||
\copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
|
||||
-- as user with ALL access
|
||||
SET ROLE full_access;
|
||||
\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
|
||||
RESET ROLE;
|
||||
-- as user with SELECT access, should fail
|
||||
SET ROLE read_access;
|
||||
\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
|
||||
ERROR: permission denied for table lineitem
|
||||
RESET ROLE;
|
||||
-- as user with no access, should fail
|
||||
SET ROLE no_access;
|
||||
\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
|
||||
ERROR: permission denied for table lineitem
|
||||
RESET ROLE;
|
||||
SET ROLE full_access;
|
||||
\copy orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
|
||||
\copy orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
|
||||
\copy customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
|
||||
\copy nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
|
||||
\copy part FROM '@abs_srcdir@/data/part.data' with delimiter '|'
|
||||
\copy supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
|
|
@ -1,5 +0,0 @@
|
|||
CREATE TABLESPACE test_tablespace LOCATION '@abs_srcdir@/tmp_check/ts0';
|
||||
\c - - - :worker_1_port
|
||||
CREATE TABLESPACE test_tablespace LOCATION '@abs_srcdir@/tmp_check/ts1';
|
||||
\c - - - :worker_2_port
|
||||
CREATE TABLESPACE test_tablespace LOCATION '@abs_srcdir@/tmp_check/ts2';
|
|
@ -66,6 +66,7 @@ my $bindir = "";
my $libdir = undef;
my $pgxsdir = "";
my $postgresBuilddir = "";
my $citusAbsSrcdir = "";
my $postgresSrcdir = "";
my $majorversion = "";
my $synchronousReplication = "";
@ -104,6 +105,7 @@ GetOptions(
'pgxsdir=s' => \$pgxsdir,
'postgres-builddir=s' => \$postgresBuilddir,
'postgres-srcdir=s' => \$postgresSrcdir,
'citus_abs_srcdir=s' => \$citusAbsSrcdir,
'majorversion=s' => \$majorversion,
'load-extension=s' => \@extensions,
'server-option=s' => \@userPgOptions,
@ -640,6 +642,7 @@ print $fh "--variable=worker_2_proxy_port=$mitmPort ";
print $fh "--variable=follower_master_port=$followerCoordPort ";
print $fh "--variable=default_user=$user ";
print $fh "--variable=SHOW_CONTEXT=always ";
print $fh "--variable=abs_srcdir=$citusAbsSrcdir ";
for my $workeroff (0 .. $#workerPorts)
{
my $port = $workerPorts[$workeroff];
@ -1,19 +0,0 @@
/columnar_copyto.sql
/columnar_data_types.sql
/columnar_load.sql
/hyperscale_tutorial.sql
/multi_agg_distinct.sql
/multi_agg_type_conversion.sql
/multi_alter_table_statements.sql
/multi_behavioral_analytics_create_table.sql
/multi_behavioral_analytics_create_table_superuser.sql
/multi_complex_count_distinct.sql
/multi_copy.sql
/multi_load_data.sql
/multi_load_data_superuser.sql
/multi_load_more_data.sql
/multi_multiuser_load_data.sql
/multi_mx_copy_data.sql
/multi_outer_join.sql
/multi_outer_join_reference.sql
/tablespace.sql
@ -6,7 +6,8 @@ CREATE TABLE test_contestant(handle TEXT, birthdate DATE, rating INT,
USING columnar;

-- load table data from file
COPY test_contestant FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;
\set contestants_1_csv_file :abs_srcdir '/data/contestants.1.csv'
COPY test_contestant FROM :'contestants_1_csv_file' WITH CSV;

-- export using COPY table TO ...
COPY test_contestant TO STDOUT;
@ -13,7 +13,8 @@ SET intervalstyle TO 'POSTGRES_VERBOSE';
|
|||
CREATE TABLE test_array_types (int_array int[], bigint_array bigint[],
|
||||
text_array text[]) USING columnar;
|
||||
|
||||
COPY test_array_types FROM '@abs_srcdir@/data/array_types.csv' WITH CSV;
|
||||
\set array_types_csv_file :abs_srcdir '/data/array_types.csv'
|
||||
COPY test_array_types FROM :'array_types_csv_file' WITH CSV;
|
||||
|
||||
SELECT * FROM test_array_types;
|
||||
|
||||
|
@ -23,7 +24,8 @@ CREATE TABLE test_datetime_types (timestamp timestamp,
|
|||
timestamp_with_timezone timestamp with time zone, date date, time time,
|
||||
interval interval) USING columnar;
|
||||
|
||||
COPY test_datetime_types FROM '@abs_srcdir@/data/datetime_types.csv' WITH CSV;
|
||||
\set datetime_types_csv_file :abs_srcdir '/data/datetime_types.csv'
|
||||
COPY test_datetime_types FROM :'datetime_types_csv_file' WITH CSV;
|
||||
|
||||
SELECT * FROM test_datetime_types;
|
||||
|
||||
|
@ -35,8 +37,9 @@ CREATE TYPE composite_type AS (a int, b text);
|
|||
CREATE TABLE test_enum_and_composite_types (enum enum_type,
|
||||
composite composite_type) USING columnar;
|
||||
|
||||
\set enum_and_composite_types_csv_file :abs_srcdir '/data/enum_and_composite_types.csv'
|
||||
COPY test_enum_and_composite_types FROM
|
||||
'@abs_srcdir@/data/enum_and_composite_types.csv' WITH CSV;
|
||||
:'enum_and_composite_types_csv_file' WITH CSV;
|
||||
|
||||
SELECT * FROM test_enum_and_composite_types;
|
||||
|
||||
|
@ -45,7 +48,8 @@ SELECT * FROM test_enum_and_composite_types;
|
|||
CREATE TABLE test_range_types (int4range int4range, int8range int8range,
|
||||
numrange numrange, tsrange tsrange) USING columnar;
|
||||
|
||||
COPY test_range_types FROM '@abs_srcdir@/data/range_types.csv' WITH CSV;
|
||||
\set range_types_csv_file :abs_srcdir '/data/range_types.csv'
|
||||
COPY test_range_types FROM :'range_types_csv_file' WITH CSV;
|
||||
|
||||
SELECT * FROM test_range_types;
|
||||
|
||||
|
@ -54,7 +58,8 @@ SELECT * FROM test_range_types;
|
|||
CREATE TABLE test_other_types (bool boolean, bytea bytea, money money,
|
||||
inet inet, bitstring bit varying(5), uuid uuid, json json) USING columnar;
|
||||
|
||||
COPY test_other_types FROM '@abs_srcdir@/data/other_types.csv' WITH CSV;
|
||||
\set other_types_csv_file :abs_srcdir '/data/other_types.csv'
|
||||
COPY test_other_types FROM :'other_types_csv_file' WITH CSV;
|
||||
|
||||
SELECT * FROM test_other_types;
|
||||
|
||||
|
@ -63,7 +68,8 @@ SELECT * FROM test_other_types;
|
|||
CREATE TABLE test_null_values (a int, b int[], c composite_type)
|
||||
USING columnar;
|
||||
|
||||
COPY test_null_values FROM '@abs_srcdir@/data/null_values.csv' WITH CSV;
|
||||
\set null_values_csv_file :abs_srcdir '/data/null_values.csv'
|
||||
COPY test_null_values FROM :'null_values_csv_file' WITH CSV;
|
||||
|
||||
SELECT * FROM test_null_values;
|
||||
|
|
@ -3,27 +3,29 @@
|
|||
--
|
||||
|
||||
-- COPY with incorrect delimiter
|
||||
COPY contestant FROM '@abs_srcdir@/data/contestants.1.csv'
|
||||
\set contestants_1_csv_file :abs_srcdir '/data/contestants.1.csv'
|
||||
COPY contestant FROM :'contestants_1_csv_file'
|
||||
WITH DELIMITER '|'; -- ERROR
|
||||
|
||||
-- COPY with invalid program
|
||||
COPY contestant FROM PROGRAM 'invalid_program' WITH CSV; -- ERROR
|
||||
|
||||
-- COPY into uncompressed table from file
|
||||
COPY contestant FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;
|
||||
COPY contestant FROM :'contestants_1_csv_file' WITH CSV;
|
||||
|
||||
-- COPY into uncompressed table from program
|
||||
COPY contestant FROM PROGRAM 'cat @abs_srcdir@/data/contestants.2.csv' WITH CSV;
|
||||
\set cat_contestants_2_csv_file 'cat ' :abs_srcdir '/data/contestants.2.csv'
|
||||
COPY contestant FROM PROGRAM :'cat_contestants_2_csv_file' WITH CSV;
|
||||
|
||||
select
|
||||
version_major, version_minor, reserved_stripe_id, reserved_row_number
|
||||
from columnar_test_helpers.columnar_storage_info('contestant');
|
||||
|
||||
-- COPY into compressed table
|
||||
COPY contestant_compressed FROM '@abs_srcdir@/data/contestants.1.csv' WITH CSV;
|
||||
COPY contestant_compressed FROM :'contestants_1_csv_file' WITH CSV;
|
||||
|
||||
-- COPY into uncompressed table from program
|
||||
COPY contestant_compressed FROM PROGRAM 'cat @abs_srcdir@/data/contestants.2.csv'
|
||||
COPY contestant_compressed FROM PROGRAM :'cat_contestants_2_csv_file'
|
||||
WITH CSV;
|
||||
|
||||
select
|
|
@ -82,11 +82,16 @@ SELECT create_distributed_table('ads', 'company_id');
SELECT create_distributed_table('clicks', 'company_id');
SELECT create_distributed_table('impressions', 'company_id');

\copy companies from '@abs_srcdir@/data/companies.csv' with csv
\copy campaigns from '@abs_srcdir@/data/campaigns.csv' with csv
\copy ads from '@abs_srcdir@/data/ads.csv' with csv
\copy clicks from '@abs_srcdir@/data/clicks.csv' with csv
\copy impressions from '@abs_srcdir@/data/impressions.csv' with csv
\set companies_csv_file :abs_srcdir '/data/companies.csv'
\set campaigns_csv_file :abs_srcdir '/data/campaigns.csv'
\set ads_csv_file :abs_srcdir '/data/ads.csv'
\set clicks_csv_file :abs_srcdir '/data/clicks.csv'
\set impressions_csv_file :abs_srcdir '/data/impressions.csv'
COPY companies from :'companies_csv_file' with csv;
COPY campaigns from :'campaigns_csv_file' with csv;
COPY ads from :'ads_csv_file' with csv;
COPY clicks from :'clicks_csv_file' with csv;
COPY impressions from :'impressions_csv_file' with csv;

SELECT a.campaign_id,
RANK() OVER (

@ -174,11 +179,11 @@ CREATE TABLE impressions (
REFERENCES ads (company_id, id)
);

\copy companies from '@abs_srcdir@/data/companies.csv' with csv
\copy campaigns from '@abs_srcdir@/data/campaigns.csv' with csv
\copy ads from '@abs_srcdir@/data/ads.csv' with csv
\copy clicks from '@abs_srcdir@/data/clicks.csv' with csv
\copy impressions from '@abs_srcdir@/data/impressions.csv' with csv
COPY companies from :'companies_csv_file' with csv;
COPY campaigns from :'campaigns_csv_file' with csv;
COPY ads from :'ads_csv_file' with csv;
COPY clicks from :'clicks_csv_file' with csv;
COPY impressions from :'impressions_csv_file' with csv;

SELECT create_distributed_table('companies', 'id');
SELECT create_distributed_table('campaigns', 'company_id');
@ -34,8 +34,10 @@ SELECT master_create_empty_shard('lineitem_range') AS new_shard_id
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
WHERE shardid = :new_shard_id;

\copy lineitem_range FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\set lineitem_1_data_file :abs_srcdir '/data/lineitem.1.data'
\set lineitem_2_data_file :abs_srcdir '/data/lineitem.2.data'
COPY lineitem_range FROM :'lineitem_1_data_file' with delimiter '|';
COPY lineitem_range FROM :'lineitem_2_data_file' with delimiter '|';

-- Run aggregate(distinct) on partition column for range partitioned table

@ -93,8 +95,8 @@ CREATE TABLE lineitem_hash (
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash');

\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
COPY lineitem_hash FROM :'lineitem_1_data_file' with delimiter '|';
COPY lineitem_hash FROM :'lineitem_2_data_file' with delimiter '|';

-- aggregate(distinct) on partition column is allowed
SELECT count(distinct l_orderkey) FROM lineitem_hash;

@ -20,7 +20,8 @@ CREATE TABLE aggregate_type (
SELECT create_distributed_table('aggregate_type', 'float_value', 'append');
SELECT master_create_empty_shard('aggregate_type') AS shardid \gset

copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data' with (append_to_shard :shardid);
\set agg_type_data_file :abs_srcdir '/data/agg_type.data'
copy aggregate_type FROM :'agg_type_data_file' with (append_to_shard :shardid);

-- Test conversions using aggregates on floats and division
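
-- Aside: an illustrative sketch (not from the changed test files) of how \gset feeds a
-- freshly created shard id back into the copy options: \gset stores the columns of the
-- preceding SELECT into psql variables named after their aliases. Assumes an existing
-- append-distributed table named my_append_table and :abs_srcdir set by the harness.
SELECT master_create_empty_shard('my_append_table') AS shardid \gset
\set my_data_file :abs_srcdir '/data/my_rows.data'
-- :shardid interpolates unquoted, so it can be used directly as the append_to_shard value
COPY my_append_table FROM :'my_data_file' WITH (delimiter '|', append_to_shard :shardid);
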
@ -29,7 +29,8 @@ CREATE TABLE lineitem_alter (
WITH ( fillfactor = 80 );
SELECT create_distributed_table('lineitem_alter', 'l_orderkey', 'append');
SELECT master_create_empty_shard('lineitem_alter') AS shardid \gset
copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid);
\set lineitem_1_data_file :abs_srcdir '/data/lineitem.1.data'
copy lineitem_alter FROM :'lineitem_1_data_file' with (delimiter '|', append_to_shard :shardid);

-- verify that the storage options made it to the table definitions
SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter';

@ -64,9 +65,9 @@ SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1;
ALTER TABLE lineitem_alter ALTER COLUMN float_column SET DEFAULT 1;
ALTER TABLE lineitem_alter ALTER COLUMN int_column1 DROP DEFAULT;

-- \copy to verify that default values take effect
-- COPY to verify that default values take effect
SELECT master_create_empty_shard('lineitem_alter') as shardid \gset
copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid);
copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM :'lineitem_1_data_file' with (delimiter '|', append_to_shard :shardid);

SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column;
SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1;

@ -79,11 +80,11 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineite
-- Drop default so that NULLs will be inserted for this column
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT;

-- \copy should fail because it will try to insert NULLs for a NOT NULL column
-- COPY should fail because it will try to insert NULLs for a NOT NULL column
-- Note, this operation will create a table on the workers but it won't be in the metadata
BEGIN;
SELECT master_create_empty_shard('lineitem_alter') as shardid \gset
copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid);
copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM :'lineitem_1_data_file' with (delimiter '|', append_to_shard :shardid);
END;

-- Verify that DROP NOT NULL works

@ -91,9 +92,9 @@ END;
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL;
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;

-- \copy should succeed now
-- COPY should succeed now
SELECT master_create_empty_shard('lineitem_alter') as shardid \gset
copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid);
copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM :'lineitem_1_data_file' with (delimiter '|', append_to_shard :shardid);
SELECT count(*) from lineitem_alter;

-- Verify that SET DATA TYPE works
@ -516,7 +517,8 @@ DROP TABLE trigger_table;

-- test ALTER TABLE ALL IN TABLESPACE
-- we expect that it will warn out
CREATE TABLESPACE super_fast_ssd LOCATION '@abs_srcdir@/data';
\set tablespace_location :abs_srcdir '/data'
CREATE TABLESPACE super_fast_ssd LOCATION :'tablespace_location';
ALTER TABLE ALL IN TABLESPACE pg_default SET TABLESPACE super_fast_ssd;
ALTER TABLE ALL IN TABLESPACE super_fast_ssd SET TABLESPACE pg_default;
DROP TABLESPACE super_fast_ssd;
@ -15,8 +15,10 @@ SELECT create_distributed_table('users_table', 'user_id');
CREATE TABLE events_table (user_id int, time timestamp, event_type int, value_2 int, value_3 float, value_4 bigint);
SELECT create_distributed_table('events_table', 'user_id');

\COPY users_table FROM '@abs_srcdir@/data/users_table.data' WITH CSV;
\COPY events_table FROM '@abs_srcdir@/data/events_table.data' WITH CSV;
\set users_table_data_file :abs_srcdir '/data/users_table.data'
\set events_table_data_file :abs_srcdir '/data/events_table.data'
COPY users_table FROM :'users_table_data_file' WITH CSV;
COPY events_table FROM :'events_table_data_file' WITH CSV;

SET citus.shard_count = 96;
CREATE SCHEMA subquery_and_ctes;

@ -28,8 +30,8 @@ SELECT create_distributed_table('users_table', 'user_id');
CREATE TABLE events_table (user_id int, time timestamp, event_type int, value_2 int, value_3 float, value_4 bigint);
SELECT create_distributed_table('events_table', 'user_id');

\COPY users_table FROM '@abs_srcdir@/data/users_table.data' WITH CSV;
\COPY events_table FROM '@abs_srcdir@/data/events_table.data' WITH CSV;
COPY users_table FROM :'users_table_data_file' WITH CSV;
COPY events_table FROM :'events_table_data_file' WITH CSV;

SET citus.shard_count TO DEFAULT;
SET search_path TO DEFAULT;

@ -68,8 +70,8 @@ INSERT INTO users_ref_test_table VALUES(4,'User_4',48);
INSERT INTO users_ref_test_table VALUES(5,'User_5',49);
INSERT INTO users_ref_test_table VALUES(6,'User_6',50);

\COPY users_table FROM '@abs_srcdir@/data/users_table.data' WITH CSV;
\COPY events_table FROM '@abs_srcdir@/data/events_table.data' WITH CSV;
COPY users_table FROM :'users_table_data_file' WITH CSV;
COPY events_table FROM :'events_table_data_file' WITH CSV;

-- create indexes for
CREATE INDEX is_index1 ON users_table(user_id);
@ -163,7 +163,7 @@ SELECT master_create_empty_shard('events') AS new_shard_id
UPDATE pg_dist_shard SET shardminvalue = '(2,2000000001)', shardmaxvalue = '(2,4300000000)'
WHERE shardid = :new_shard_id;

\COPY events FROM STDIN WITH CSV
COPY events FROM STDIN WITH CSV;
"(1,1001)",20001,click,1472807012
"(1,1001)",20002,submit,1472807015
"(1,1001)",20003,pay,1472807020
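
-- Aside: an illustrative sketch (not from the changed test files) of why the switch from
-- \COPY to server-side COPY keeps the inline rows working: in a psql script both forms of
-- COPY ... FROM STDIN read the lines that follow the command until a line containing only
-- \. is reached. Uses a hypothetical temporary table.
CREATE TEMP TABLE stdin_demo (id int, label text);
COPY stdin_demo FROM STDIN WITH CSV;
1,first
2,second
\.
SELECT count(*) FROM stdin_demo;
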
@ -207,7 +207,7 @@ SELECT master_create_empty_shard('users') AS new_shard_id
UPDATE pg_dist_shard SET shardminvalue = '(2,2000000001)', shardmaxvalue = '(2,4300000000)'
WHERE shardid = :new_shard_id;

\COPY users FROM STDIN WITH CSV
COPY users FROM STDIN WITH CSV;
"(1,1001)",1472807115
"(1,1002)",1472807215
"(1,1003)",1472807315

@ -290,8 +290,12 @@ SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
WHERE shardid = :new_shard_id;

\copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\set lineitem_1_data_file :abs_srcdir '/data/lineitem.1.data'
COPY lineitem_subquery FROM :'lineitem_1_data_file' with delimiter '|';
\set lineitem_2_data_file :abs_srcdir '/data/lineitem.2.data'
COPY lineitem_subquery FROM :'lineitem_2_data_file' with delimiter '|';

\copy orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\copy orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
\set orders_1_data_file :abs_srcdir '/data/orders.1.data'
COPY orders_subquery FROM :'orders_1_data_file' with delimiter '|';
\set orders_2_data_file :abs_srcdir '/data/orders.2.data'
COPY orders_subquery FROM :'orders_2_data_file' with delimiter '|';
@ -29,8 +29,10 @@ CREATE TABLE lineitem_hash (

SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash');

\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\set lineitem_1_data_file :abs_srcdir '/data/lineitem.1.data'
\set lineitem_2_data_file :abs_srcdir '/data/lineitem.2.data'
COPY lineitem_hash FROM :'lineitem_1_data_file' with delimiter '|';
COPY lineitem_hash FROM :'lineitem_2_data_file' with delimiter '|';

ANALYZE lineitem_hash;

@ -2,6 +2,11 @@
-- MULTI_COPY
--

-- set file paths
\set customer1datafile :abs_srcdir '/data/customer.1.data'
\set customer2datafile :abs_srcdir '/data/customer.2.data'
\set customer3datafile :abs_srcdir '/data/customer.3.data'
\set lineitem1datafile :abs_srcdir '/data/lineitem.1.data'

ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 560000;
@ -97,13 +102,17 @@ WITH (DELIMITER ' ');
SELECT count(*) FROM customer_copy_hash WHERE c_custkey = 9;

-- Test server-side copy from file
COPY customer_copy_hash FROM '@abs_srcdir@/data/customer.2.data' WITH (DELIMITER '|');
COPY customer_copy_hash FROM :'customer2datafile' WITH (DELIMITER '|');

-- Confirm that data was copied
SELECT count(*) FROM customer_copy_hash;

-- Test client-side copy from file
\copy customer_copy_hash FROM '@abs_srcdir@/data/customer.3.data' WITH (DELIMITER '|');
-- \copy does not support variable interpolation. Hence we store and execute
-- the query in two steps for interpolation to kick in.
-- See https://stackoverflow.com/a/67642094/4576416 for details.
\set client_side_copy_command '\\copy customer_copy_hash FROM ' :'customer3datafile' ' WITH (DELIMITER '''|''');'
:client_side_copy_command

-- Confirm that data was copied
SELECT count(*) FROM customer_copy_hash;
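
-- Aside: an illustrative sketch (not from the changed test files) of the two-step \copy
-- workaround used above. Because \copy arguments are never interpolated, the full \copy
-- invocation is first assembled as a string (doubled quotes escape a literal quote inside
-- \set) and the variable is then run on a line of its own. my_table and my_rows.csv are
-- hypothetical; :abs_srcdir is assumed to be set by the harness.
\set my_csv_file :abs_srcdir '/data/my_rows.csv'
\set client_copy_cmd '\\copy my_table FROM ' :'my_csv_file' ' WITH (DELIMITER '','')'
-- psql replaces :client_copy_cmd with the stored text and executes it as a meta-command
:client_copy_cmd
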
@ -175,7 +184,7 @@ CREATE TABLE customer_copy_range (
SELECT master_create_distributed_table('customer_copy_range', 'c_custkey', 'range');

-- Test COPY into empty range-partitioned table
COPY customer_copy_range FROM '@abs_srcdir@/data/customer.1.data' WITH (DELIMITER '|');
COPY customer_copy_range FROM :'customer1datafile' WITH (DELIMITER '|');

SELECT master_create_empty_shard('customer_copy_range') AS new_shard_id
\gset

@ -188,7 +197,7 @@ UPDATE pg_dist_shard SET shardminvalue = 501, shardmaxvalue = 1000
WHERE shardid = :new_shard_id;

-- Test copy into range-partitioned table
COPY customer_copy_range FROM '@abs_srcdir@/data/customer.1.data' WITH (DELIMITER '|');
COPY customer_copy_range FROM :'customer1datafile' WITH (DELIMITER '|');

-- Check whether data went into the right shard (maybe)
SELECT min(c_custkey), max(c_custkey), avg(c_custkey), count(*)

@ -284,14 +293,14 @@ SELECT create_distributed_table('lineitem_copy_append', 'l_orderkey', 'append');

BEGIN;
SELECT master_create_empty_shard('lineitem_copy_append') AS shardid \gset
COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid);
COPY lineitem_copy_append FROM :'lineitem1datafile' with (delimiter '|', append_to_shard :shardid);
END;

SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'lineitem_copy_append'::regclass;

-- trigger some errors on the append_to_shard option
COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard 1);
COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard 560000);
COPY lineitem_copy_append FROM :'lineitem1datafile' with (delimiter '|', append_to_shard 1);
COPY lineitem_copy_append FROM :'lineitem1datafile' with (delimiter '|', append_to_shard 560000);

-- Test schema support on append partitioned tables
CREATE SCHEMA append;

@ -310,8 +319,8 @@ SELECT master_create_empty_shard('append.customer_copy') AS shardid1 \gset
SELECT master_create_empty_shard('append.customer_copy') AS shardid2 \gset

-- Test copy from the master node
COPY append.customer_copy FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|', append_to_shard :shardid1);
COPY append.customer_copy FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|', append_to_shard :shardid2);
COPY append.customer_copy FROM :'customer1datafile' with (delimiter '|', append_to_shard :shardid1);
COPY append.customer_copy FROM :'customer2datafile' with (delimiter '|', append_to_shard :shardid2);

-- Test the content of the table
SELECT min(c_custkey), max(c_custkey), avg(c_acctbal), count(*) FROM append.customer_copy;
@ -0,0 +1,28 @@
--
-- MULTI_LOAD_DATA
--

\set lineitem_1_data_file :abs_srcdir '/data/lineitem.1.data'
\set lineitem_2_data_file :abs_srcdir '/data/lineitem.2.data'
COPY lineitem FROM :'lineitem_1_data_file' with delimiter '|';
COPY lineitem FROM :'lineitem_2_data_file' with delimiter '|';

\set orders_1_data_file :abs_srcdir '/data/orders.1.data'
\set orders_2_data_file :abs_srcdir '/data/orders.2.data'
COPY orders FROM :'orders_1_data_file' with delimiter '|';
COPY orders FROM :'orders_2_data_file' with delimiter '|';

COPY orders_reference FROM :'orders_1_data_file' with delimiter '|';
COPY orders_reference FROM :'orders_2_data_file' with delimiter '|';

\set customer_1_data_file :abs_srcdir '/data/customer.1.data'
\set nation_data_file :abs_srcdir '/data/nation.data'
\set part_data_file :abs_srcdir '/data/part.data'
\set supplier_data_file :abs_srcdir '/data/supplier.data'
COPY customer FROM :'customer_1_data_file' with delimiter '|';
COPY customer_append FROM :'customer_1_data_file' with (delimiter '|', append_to_shard 360006);
COPY nation FROM :'nation_data_file' with delimiter '|';
COPY part FROM :'part_data_file' with delimiter '|';
COPY part_append FROM :'part_data_file' with (delimiter '|', append_to_shard 360009);
COPY supplier FROM :'supplier_data_file' with delimiter '|';
COPY supplier_single_shard FROM :'supplier_data_file' with delimiter '|';

@ -0,0 +1,8 @@
\set lineitem_1_data_file :abs_srcdir '/data/lineitem.1.data'
\set lineitem_2_data_file :abs_srcdir '/data/lineitem.2.data'
\set orders_1_data_file :abs_srcdir '/data/orders.1.data'
\set orders_2_data_file :abs_srcdir '/data/orders.2.data'
COPY lineitem_hash_part FROM :'lineitem_1_data_file' with delimiter '|';
COPY lineitem_hash_part FROM :'lineitem_2_data_file' with delimiter '|';
COPY orders_hash_part FROM :'orders_1_data_file' with delimiter '|';
COPY orders_hash_part FROM :'orders_2_data_file' with delimiter '|';
@ -0,0 +1,28 @@
--
-- MULTI_STAGE_MORE_DATA
--

SET citus.next_shard_id TO 280000;

-- We load more data to customer and part tables to test distributed joins. The
-- loading causes the planner to consider customer and part tables as large, and
-- evaluate plans where some of the underlying tables need to be repartitioned.

\set customer_2_data_file :abs_srcdir '/data/customer.2.data'
\set customer_3_data_file :abs_srcdir '/data/customer.3.data'
\set part_more_data_file :abs_srcdir '/data/part.more.data'
COPY customer FROM :'customer_2_data_file' with delimiter '|';
COPY customer FROM :'customer_3_data_file' with delimiter '|';
COPY part FROM :'part_more_data_file' with delimiter '|';

SELECT master_create_empty_shard('customer_append') AS shardid1 \gset
SELECT master_create_empty_shard('customer_append') AS shardid2 \gset

copy customer_append FROM :'customer_2_data_file' with (delimiter '|', append_to_shard :shardid1);
copy customer_append FROM :'customer_3_data_file' with (delimiter '|', append_to_shard :shardid2);

SELECT master_create_empty_shard('part_append') AS shardid \gset

copy part_append FROM :'part_more_data_file' with (delimiter '|', append_to_shard :shardid);
@ -0,0 +1,56 @@
--
-- MULTI_MULTIUSER_LOAD_DATA
--

-- Tests for loading data in a distributed cluster. Please note that the number
-- of shards uploaded depends on two config values: citusdb.shard_replication_factor and
-- citusdb.shard_max_size. These values are manually set in pg_regress.c. We also set
-- the shard placement policy to the local-node-first policy as other regression
-- tests expect the placements to be in that order.

SET citusdb.shard_placement_policy TO 'local-node-first';

-- load as superuser
\set lineitem_1_data_file :abs_srcdir '/data/lineitem.1.data'
\set copy_command '\\COPY lineitem FROM ' :'lineitem_1_data_file' ' with delimiter '''|''';'
:copy_command

-- as user with ALL access
SET ROLE full_access;
\set lineitem_2_data_file :abs_srcdir '/data/lineitem.2.data'
\set copy_command '\\COPY lineitem FROM ' :'lineitem_2_data_file' ' with delimiter '''|''';'
:copy_command
RESET ROLE;

-- as user with SELECT access, should fail
SET ROLE read_access;
\set copy_command '\\COPY lineitem FROM ' :'lineitem_2_data_file' ' with delimiter '''|''';'
:copy_command
RESET ROLE;

-- as user with no access, should fail
SET ROLE no_access;
\set copy_command '\\COPY lineitem FROM ' :'lineitem_2_data_file' ' with delimiter '''|''';'
:copy_command
RESET ROLE;

SET ROLE full_access;
\set orders_1_data_file :abs_srcdir '/data/orders.1.data'
\set orders_2_data_file :abs_srcdir '/data/orders.2.data'
\set copy_command '\\COPY orders FROM ' :'orders_1_data_file' ' with delimiter '''|''';'
:copy_command
\set copy_command '\\COPY orders FROM ' :'orders_2_data_file' ' with delimiter '''|''';'
:copy_command

\set customer_1_data_file :abs_srcdir '/data/customer.1.data'
\set nation_data_file :abs_srcdir '/data/nation.data'
\set part_data_file :abs_srcdir '/data/part.data'
\set supplier_data_file :abs_srcdir '/data/supplier.data'
\set copy_command '\\COPY customer FROM ' :'customer_1_data_file' ' with delimiter '''|''';'
:copy_command
\set copy_command '\\COPY nation FROM ' :'nation_data_file' ' with delimiter '''|''';'
:copy_command
\set copy_command '\\COPY part FROM ' :'part_data_file' ' with delimiter '''|''';'
:copy_command
\set copy_command '\\COPY supplier FROM ' :'supplier_data_file' ' with delimiter '''|''';'
:copy_command
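
-- Aside: an illustrative sketch (not from the changed test files) of the stored
-- :copy_command pattern used above, which makes it easy to replay the same load under
-- different roles and only vary the permission being exercised. Role and table names are
-- hypothetical; :abs_srcdir is assumed to be set by the harness.
\set my_data_file :abs_srcdir '/data/my_rows.data'
\set copy_command '\\COPY my_table FROM ' :'my_data_file' ' with delimiter ''|'';'
SET ROLE read_only_user;   -- expected to fail with a permission error
:copy_command
RESET ROLE;
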
@ -0,0 +1,79 @@
--
-- MULTI_MX_COPY_DATA
--

\set nation_data_file :abs_srcdir '/data/nation.data'
COPY nation_hash FROM :'nation_data_file' with delimiter '|';

SET search_path TO citus_mx_test_schema;
COPY nation_hash FROM :'nation_data_file' with delimiter '|';
COPY citus_mx_test_schema_join_1.nation_hash FROM :'nation_data_file' with delimiter '|';
COPY citus_mx_test_schema_join_1.nation_hash_2 FROM :'nation_data_file' with delimiter '|';
COPY citus_mx_test_schema_join_2.nation_hash FROM :'nation_data_file' with delimiter '|';

SET citus.shard_replication_factor TO 2;
CREATE TABLE citus_mx_test_schema.nation_hash_replicated AS SELECT * FROM citus_mx_test_schema.nation_hash;
SELECT create_distributed_table('citus_mx_test_schema.nation_hash_replicated', 'n_nationkey');
COPY nation_hash_replicated FROM :'nation_data_file' with delimiter '|';

-- now try loading data from worker node
\c - - - :worker_1_port
SET search_path TO public;

\set lineitem_1_data_file :abs_srcdir '/data/lineitem.1.data'
\set lineitem_2_data_file :abs_srcdir '/data/lineitem.2.data'
COPY lineitem_mx FROM :'lineitem_1_data_file' with delimiter '|';
COPY lineitem_mx FROM :'lineitem_2_data_file' with delimiter '|';

\set nation_data_file :abs_srcdir '/data/nation.data'
COPY citus_mx_test_schema.nation_hash_replicated FROM :'nation_data_file' with delimiter '|';

\c - - - :worker_2_port
-- and use second worker as well
\set orders_1_data_file :abs_srcdir '/data/orders.1.data'
\set orders_2_data_file :abs_srcdir '/data/orders.2.data'
\set nation_data_file :abs_srcdir '/data/nation.data'
COPY orders_mx FROM :'orders_1_data_file' with delimiter '|';
COPY orders_mx FROM :'orders_2_data_file' with delimiter '|';
COPY citus_mx_test_schema.nation_hash_replicated FROM :'nation_data_file' with delimiter '|';

-- get ready for the next test
TRUNCATE orders_mx;

\c - - - :worker_2_port
SET citus.log_local_commands TO ON;
-- simulate the case where there is no connection slots available
ALTER SYSTEM SET citus.local_shared_pool_size TO -1;
SELECT pg_reload_conf();
SELECT pg_sleep(0.1);
show citus.local_shared_pool_size;

\set orders_1_data_file :abs_srcdir '/data/orders.1.data'
\set orders_2_data_file :abs_srcdir '/data/orders.2.data'
COPY orders_mx FROM :'orders_1_data_file' with delimiter '|';
COPY orders_mx FROM :'orders_2_data_file' with delimiter '|';

\set nation_data_file :abs_srcdir '/data/nation.data'
COPY citus_mx_test_schema.nation_hash_replicated FROM :'nation_data_file' with delimiter '|';

-- set it back
ALTER SYSTEM RESET citus.local_shared_pool_size;
SELECT pg_reload_conf();
SELECT pg_sleep(0.1);
show citus.local_shared_pool_size;

-- These copies were intended to test copying data to single sharded table from
-- worker nodes, yet in order to remove broadcast logic related codes we change
-- the table to reference table and copy data from master. Should be updated
-- when worker nodes gain capability to run dml commands on reference tables.
\c - - - :master_port
SET search_path TO public;

\set customer_1_data_file :abs_srcdir '/data/customer.1.data'
\set nation_data_file :abs_srcdir '/data/nation.data'
\set part_data_file :abs_srcdir '/data/part.data'
\set supplier_data_file :abs_srcdir '/data/supplier.data'
COPY customer_mx FROM :'customer_1_data_file' with delimiter '|';
COPY nation_mx FROM :'nation_data_file' with delimiter '|';
COPY part_mx FROM :'part_data_file' with delimiter '|';
COPY supplier_mx FROM :'supplier_data_file' with delimiter '|';
@ -69,11 +69,13 @@ CREATE TABLE multi_outer_join_third_reference
t_comment varchar(117) not null
);
SELECT create_reference_table('multi_outer_join_third_reference');

\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|'
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
\copy multi_outer_join_right_reference FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
\set customer_1_10_data :abs_srcdir '/data/customer-1-10.data'
\set customer_11_20_data :abs_srcdir '/data/customer-11-20.data'
\set customer_1_15_data :abs_srcdir '/data/customer-1-15.data'
COPY multi_outer_join_left FROM :'customer_1_10_data' with delimiter '|';
COPY multi_outer_join_left FROM :'customer_11_20_data' with delimiter '|';
COPY multi_outer_join_right FROM :'customer_1_15_data' with delimiter '|';
COPY multi_outer_join_right_reference FROM :'customer_1_15_data' with delimiter '|';

-- Make sure we do not crash if one table has no shards
SELECT

@ -87,8 +89,9 @@ FROM
multi_outer_join_third a LEFT JOIN multi_outer_join_right_reference b ON (r_custkey = t_custkey);

-- Third table is a single shard table with all data
\copy multi_outer_join_third FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|'
\copy multi_outer_join_third_reference FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|'
\set customer_1_30_data :abs_srcdir '/data/customer-1-30.data'
COPY multi_outer_join_third FROM :'customer_1_30_data' with delimiter '|';
COPY multi_outer_join_third_reference FROM :'customer_1_30_data' with delimiter '|';

-- Regular outer join should return results for all rows
SELECT

@ -167,7 +170,8 @@ FROM

-- Turn the right table into a large table
\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\set customer_21_30_data :abs_srcdir '/data/customer-21-30.data'
COPY multi_outer_join_right FROM :'customer_21_30_data' with delimiter '|';

-- Shards do not have 1-1 matching. We should error here.

@ -181,11 +185,13 @@ TRUNCATE multi_outer_join_left;
TRUNCATE multi_outer_join_right;

-- reload shards with 1-1 matching
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-subset-11-20.data' with delimiter '|'
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\set customer_subset_11_20_data :abs_srcdir '/data/customer-subset-11-20.data'
COPY multi_outer_join_left FROM :'customer_subset_11_20_data' with delimiter '|';
COPY multi_outer_join_left FROM :'customer_21_30_data' with delimiter '|';

\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-subset-21-30.data' with delimiter '|'
\set customer_subset_21_30_data :abs_srcdir '/data/customer-subset-21-30.data'
COPY multi_outer_join_right FROM :'customer_11_20_data' with delimiter '|';
COPY multi_outer_join_right FROM :'customer_subset_21_30_data' with delimiter '|';

-- multi_outer_join_third is a single shard table
-- Regular left join should work as expected
@ -447,7 +453,8 @@ ORDER BY cnt DESC, l1.l_custkey DESC
LIMIT 20;

-- Add a shard to the left table that overlaps with multiple shards in the right
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\set customer_1_data_file :abs_srcdir '/data/customer.1.data'
COPY multi_outer_join_left FROM :'customer_1_data_file' with delimiter '|';

-- All outer joins should error out

@ -483,7 +490,7 @@ SET citus.shard_replication_factor to 1;

SELECT create_distributed_table('left_values', 'val');

\copy left_values from stdin
COPY left_values from stdin;
1
2
3

@ -495,7 +502,7 @@ CREATE TABLE right_values(val int);

SELECT create_distributed_table('right_values', 'val');

\copy right_values from stdin
COPY right_values from stdin;
2
3
4
@ -64,12 +64,15 @@ FROM
multi_outer_join_left_hash a LEFT JOIN multi_outer_join_third_reference b ON (l_custkey = t_custkey);

-- Left table is a large table
\copy multi_outer_join_left_hash FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|'
\copy multi_outer_join_left_hash FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
\set customer_1_10_data :abs_srcdir '/data/customer-1-10.data'
\set customer_11_20_data :abs_srcdir '/data/customer-11-20.data'
COPY multi_outer_join_left_hash FROM :'customer_1_10_data' with delimiter '|';
COPY multi_outer_join_left_hash FROM :'customer_11_20_data' with delimiter '|';

-- Right table is a small table
\copy multi_outer_join_right_reference FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
\copy multi_outer_join_right_hash FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
\set customer_1_15_data :abs_srcdir '/data/customer-1-15.data'
COPY multi_outer_join_right_reference FROM :'customer_1_15_data' with delimiter '|';
COPY multi_outer_join_right_hash FROM :'customer_1_15_data' with delimiter '|';

-- Make sure we do not crash if one table has data
SELECT

@ -83,8 +86,9 @@ FROM
multi_outer_join_third_reference a LEFT JOIN multi_outer_join_right_reference b ON (r_custkey = t_custkey);

-- Third table is a single shard table with all data
\copy multi_outer_join_third_reference FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|'
\copy multi_outer_join_right_hash FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|'
\set customer_1_30_data :abs_srcdir '/data/customer-1-30.data'
COPY multi_outer_join_third_reference FROM :'customer_1_30_data' with delimiter '|';
COPY multi_outer_join_right_hash FROM :'customer_1_30_data' with delimiter '|';

-- Regular outer join should return results for all rows
@ -164,7 +168,8 @@ FROM

-- load some more data
\copy multi_outer_join_right_reference FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\set customer_21_30_data :abs_srcdir '/data/customer-21-30.data'
COPY multi_outer_join_right_reference FROM :'customer_21_30_data' with delimiter '|';

-- Update shards so that they do not have 1-1 matching, triggering an error.
UPDATE pg_dist_shard SET shardminvalue = '2147483646' WHERE shardid = 1260006;

@ -180,14 +185,14 @@ UPDATE pg_dist_shard SET shardmaxvalue = '-1073741825' WHERE shardid = 1260006;
TRUNCATE multi_outer_join_left_hash, multi_outer_join_right_hash, multi_outer_join_right_reference;

-- reload shards with 1-1 matching
\copy multi_outer_join_left_hash FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
\copy multi_outer_join_left_hash FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
COPY multi_outer_join_left_hash FROM :'customer_1_15_data' with delimiter '|';
COPY multi_outer_join_left_hash FROM :'customer_21_30_data' with delimiter '|';

\copy multi_outer_join_right_reference FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
\copy multi_outer_join_right_reference FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
COPY multi_outer_join_right_reference FROM :'customer_11_20_data' with delimiter '|';
COPY multi_outer_join_right_reference FROM :'customer_21_30_data' with delimiter '|';

\copy multi_outer_join_right_hash FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
\copy multi_outer_join_right_hash FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
COPY multi_outer_join_right_hash FROM :'customer_11_20_data' with delimiter '|';
COPY multi_outer_join_right_hash FROM :'customer_21_30_data' with delimiter '|';

-- multi_outer_join_third_reference is a single shard table

@ -0,0 +1,8 @@
\set test_tablespace :abs_srcdir '/tmp_check/ts0'
CREATE TABLESPACE test_tablespace LOCATION :'test_tablespace';
\c - - - :worker_1_port
\set test_tablespace :abs_srcdir '/tmp_check/ts1'
CREATE TABLESPACE test_tablespace LOCATION :'test_tablespace';
\c - - - :worker_2_port
\set test_tablespace :abs_srcdir '/tmp_check/ts2'
CREATE TABLESPACE test_tablespace LOCATION :'test_tablespace';
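
-- Aside: an illustrative sketch (not from the changed test files): when one of these
-- converted paths misbehaves, the assembled value can be inspected before it is used,
-- since \echo prints the current contents of a psql variable. Assumes :abs_srcdir is set
-- by the test harness.
\set test_tablespace :abs_srcdir '/tmp_check/ts0'
\echo :test_tablespace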