Strip trailing whitespace and add final newline (#3186)

This brings files in line with our editorconfig file
pull/3207/head
Philip Dubé 2019-11-21 13:25:37 +00:00 committed by Jelte Fennema
parent 1d8dde232f
commit c563e0825c
274 changed files with 6482 additions and 6638 deletions

View File

@ -22,6 +22,12 @@ jobs:
- run:
name: 'Check Style'
command: citus_indent --check
- run:
name: 'Fix whitespace'
command: ci/editorconfig.sh
- run:
name: 'Check if whitespace fixing changed anything, install editorconfig if it did'
command: git diff --exit-code
- run:
name: 'Remove useless declarations'
command: ci/remove_useless_declarations.sh

View File

@ -12,12 +12,18 @@ insert_final_newline = true
charset = utf-8
trim_trailing_whitespace = true
# Don't change test output files
[*.out]
# Don't change test output files, pngs or test data files
[*.{out,png,data}]
insert_final_newline = unset
trim_trailing_whitespace = unset
[*.sql]
# Don't change test/regress/output directory, this needs to be a separate rule
# for some reason
[/src/test/regress/output/**]
insert_final_newline = unset
trim_trailing_whitespace = unset
[*.sql,*.sh]
indent_style = space
indent_size = 4
tab_width = 4

16
ci/editorconfig.sh Executable file
View File

@ -0,0 +1,16 @@
#!/bin/sh
# Strip trailing whitespace and ensure a final newline in every git-tracked
# file, except files whose exact bytes matter: regression-test expected
# output (*.out), binary/test fixtures (*.data, *.png), and anything under
# src/test/regress/output.
set -eu
# Pipe into `while IFS= read -r` instead of `for f in $(...)`: the latter
# word-splits and glob-expands, silently mangling any tracked path that
# contains whitespace or shell metacharacters.
git ls-tree -r HEAD --name-only | while IFS= read -r f; do
    # "${f%.ext}" equals "$f" exactly when the suffix is absent,
    # so each test below means "does NOT end in .ext".
    if [ "$f" = "${f%.out}" ] &&
       [ "$f" = "${f%.data}" ] &&
       [ "$f" = "${f%.png}" ] &&
       [ "$(dirname "$f")" != "src/test/regress/output" ]
    then
        # Trim trailing whitespace in place (GNU sed -i).
        sed -e 's/[[:space:]]*$//' -i "./$f"
        # Append a newline if the file's last byte is not one.
        # (`tail -c1` prints nothing for an empty file, so empty
        # files are left untouched.)
        if [ -n "$(tail -c1 "$f")" ]; then
            echo >> "$f"
        fi
    fi
done

View File

@ -9,4 +9,3 @@ COMMENT ON FUNCTION pg_catalog.alter_role_if_exists(
role_name text,
utility_query text)
IS 'runs the utility query, if the role exists';

View File

@ -9,4 +9,3 @@ COMMENT ON FUNCTION pg_catalog.alter_role_if_exists(
role_name text,
utility_query text)
IS 'runs the utility query, if the role exists';

View File

@ -30,7 +30,6 @@ WITH cte_1 AS (
FROM tt1
WHERE value_1 >= 2
)
DELETE FROM tt2
USING cte_2
WHERE tt2.id = cte_2.cte2_id
@ -57,7 +56,6 @@ WITH cte_1 AS (
FROM tt1
WHERE value_1 >= 2
)
DELETE FROM tt2
USING cte_2
WHERE tt2.id = cte_2.cte2_id
@ -82,7 +80,6 @@ WITH cte_1(id) AS (
FROM tt1
WHERE value_1 >= 2
)
DELETE FROM tt2
USING cte_2
WHERE tt2.id = cte_2.cte2_id
@ -107,7 +104,6 @@ WITH cte_1 AS (
FROM tt1
WHERE value_1 >= 2
)
UPDATE tt2
SET value_1 = 10
FROM cte_2
@ -130,7 +126,6 @@ WITH cte_1 AS (
WITH cte_2 AS (
SELECT * FROM tt3
)
UPDATE tt2
SET value_1 = (SELECT max((json_val->>'qty')::int) FROM cte_2)
RETURNING id, value_1

View File

@ -269,7 +269,6 @@ WITH cte_1 AS (
FROM second_distributed_table
WHERE dept >= 2
)
UPDATE distributed_table
SET dept = 10
RETURNING *
@ -286,7 +285,6 @@ WITH cte_1 AS (
FROM second_distributed_table
WHERE dept >= 2
)
UPDATE distributed_table
SET dept = 10
RETURNING *

View File

@ -4,7 +4,6 @@
-- master_disable_node and master_add_inactive_node can not be
-- tested as they don't create network activity
--
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
-----------

View File

@ -87,7 +87,6 @@ ERROR: connection error: localhost:9060
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
-- kill at the third copy (pull)
SELECT citus.mitmproxy('conn.onQuery(query="SELECT DISTINCT users_table.user").kill()');
mitmproxy

View File

@ -517,7 +517,6 @@ LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_e
0
(1 row)
INSERT INTO distributed_table (key) SELECT i FROM generate_series(1,10)i;
ERROR: cannot execute command because a local execution has already been done in the transaction
DETAIL: Some parallel commands cannot be executed if a previous command has already been executed locally
@ -532,7 +531,6 @@ LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_e
0
(1 row)
INSERT INTO distributed_table (key) SELECT key+1 FROM distributed_table;
ERROR: cannot execute command because a local execution has already been done in the transaction
DETAIL: Some parallel commands cannot be executed if a previous command has already been executed locally
@ -948,7 +946,6 @@ LOG: executing the command locally: DELETE FROM local_shard_execution.distribut
LOG: executing the command locally: DELETE FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (value OPERATOR(pg_catalog.<>) '123123213123213'::text)
ROLLBACK;
BEGIN;
DELETE FROM reference_table WHERE key = 500 RETURNING *;
LOG: executing the command locally: DELETE FROM local_shard_execution.reference_table_1470000 reference_table WHERE (key OPERATOR(pg_catalog.=) 500) RETURNING key
key
@ -1016,19 +1013,15 @@ BEGIN;
(1 row)
DELETE FROM distributed_table WHERE key = 500;
ROLLBACK TO SAVEPOINT my_savepoint;
DELETE FROM distributed_table WHERE key = 500;
COMMIT;
-- even if we switch from local execution -> remote execution,
-- we are able to use local execution after rollback
BEGIN;
SAVEPOINT my_savepoint;
DELETE FROM distributed_table WHERE key = 500;
LOG: executing the command locally: DELETE FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.=) 500)
SELECT count(*) FROM distributed_table;
LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE true
LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE true
@ -1038,7 +1031,6 @@ LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_e
(1 row)
ROLLBACK TO SAVEPOINT my_savepoint;
DELETE FROM distributed_table WHERE key = 500;
LOG: executing the command locally: DELETE FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.=) 500)
COMMIT;

View File

@ -170,7 +170,6 @@ FROM (
max(u.time) as user_lastseen,
array_agg(event_type ORDER BY u.time) AS event_array
FROM (
SELECT user_id, time
FROM users_table
WHERE
@ -205,7 +204,6 @@ FROM users_table
WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 1 AND value_1 <= 2)
AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 3 AND value_1 <= 4)
AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 5 AND value_1 <= 6);
-- get some statistics from the aggregated results to ensure the results are correct
SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results;
count | count | avg
@ -300,7 +298,6 @@ SELECT user_id, value_2 FROM users_table WHERE
value_2 >= 3
AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 1 AND event_type <= 3 AND value_3 > 1 AND user_id=users_table.user_id)
AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 3 AND event_type <= 4 AND value_3 > 1 AND user_id=users_table.user_id);
-- get some statistics from the aggregated results to ensure the results are correct
SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results;
count | count | avg
@ -329,7 +326,6 @@ INSERT INTO agg_results(user_id, value_2_agg)
AND user_id = users_table.user_id
GROUP BY user_id
HAVING Count(*) > 2);
-- get some statistics from the aggregated results to ensure the results are correct
SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results;
count | count | avg
@ -337,7 +333,6 @@ SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results;
4 | 2 | 3.5000000000000000
(1 row)
------------------------------------
------------------------------------
-- Find me all users_table who logged in more than once
@ -373,7 +368,6 @@ And user_id in
From users_table
Where value_1 = 2
And value_2 > 1);
-- get some statistics from the aggregated results to ensure the results are correct
SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results;
count | count | avg
@ -398,7 +392,6 @@ SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results;
34 | 6 | 3.4411764705882353
(1 row)
------------------------------------
------------------------------------
-- Find me all the users_table who has done some event more than three times
@ -414,7 +407,6 @@ select user_id from
events_table
where event_type = 4 group by user_id having count(*) > 3
) as a;
-- get some statistics from the aggregated results to ensure the results are correct
SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results;
count | count | avg
@ -422,7 +414,6 @@ SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results;
4 | 4 | 2.5000000000000000
(1 row)
------------------------------------
------------------------------------
-- Find my assets that have the highest probability and fetch their metadata

View File

@ -93,7 +93,6 @@ FROM (
SELECT DISTINCT user_id,
'Has done event'::TEXT AS hasdone_event
FROM events_table AS e
WHERE e.user_id >= 1
AND e.user_id <= 2
AND e.event_type IN (5, 6)
@ -136,7 +135,6 @@ FROM (
SELECT DISTINCT user_id,
'Has done event'::TEXT AS hasdone_event
FROM events_table AS e
WHERE
(e.user_id = 2 OR e.user_id = 3)
AND e.event_type IN (4, 5)
@ -170,7 +168,6 @@ FROM (
max(u.time) as user_lastseen,
array_agg(event_type ORDER BY u.time) AS event_array
FROM (
SELECT user_id, time
FROM users_table
WHERE
@ -213,7 +210,6 @@ FROM (
max(u.time) as user_lastseen,
array_agg(event_type ORDER BY u.time) AS event_array
FROM (
SELECT user_id, time
FROM users_table
WHERE
@ -251,7 +247,6 @@ WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 1 AND value_1
AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 3 AND value_1 <= 4)
AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 5 AND value_1 <= 6)
AND user_id = 1;
-- get some statistics from the aggregated results to ensure the results are correct
SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second;
count | count | avg
@ -272,7 +267,6 @@ WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 1 AND value_1
AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 3 AND value_1 <= 4 AND (user_id = 1 OR user_id = 2))
AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 5 AND value_1 <= 6 AND (user_id = 1 OR user_id = 2))
AND (user_id = 1 OR user_id = 2);
-- get some statistics from the aggregated results to ensure the results are correct
SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second;
count | count | avg
@ -330,7 +324,6 @@ SELECT user_id, value_2 FROM users_table WHERE
AND user_id = 1
AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 1 AND event_type <= 3 AND value_3 > 1 AND user_id=users_table.user_id)
AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 4 AND event_type <= 5 AND value_3 > 4 AND user_id=users_table.user_id);
-- get some statistics from the aggregated results to ensure the results are correct
SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second;
count | count | avg
@ -350,7 +343,6 @@ SELECT user_id, value_2 FROM users_table WHERE
AND (user_id = 1 OR user_id = 2)
AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 1 AND event_type <= 3 AND value_3 > 1 AND user_id=users_table.user_id AND (user_id = 1 OR user_id = 2))
AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 4 AND event_type <= 5 AND value_3 > 4 AND user_id=users_table.user_id AND (user_id = 1 OR user_id = 2));
-- get some statistics from the aggregated results to ensure the results are correct
SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second;
count | count | avg
@ -381,7 +373,6 @@ INSERT INTO agg_results_second(user_id, value_2_agg)
AND user_id = 3
GROUP BY user_id
HAVING Count(*) > 2);
-- get some statistics from the aggregated results to ensure the results are correct
SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second;
count | count | avg
@ -411,7 +402,6 @@ INSERT INTO agg_results_second(user_id, value_2_agg)
AND (user_id = 3 or user_id = 4)
GROUP BY user_id
HAVING Count(*) > 2);
-- get some statistics from the aggregated results to ensure the results are correct
SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second;
count | count | avg
@ -419,4 +409,3 @@ SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second;
4 | 2 | 3.5000000000000000
(1 row)

View File

@ -61,7 +61,6 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
localhost | 57637 | f | expected a single row in query result
(1 row)
-- send multiple queries
SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[],
ARRAY[:node_port, :node_port]::int[],
@ -185,7 +184,6 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
localhost | 57637 | t | DROP TABLE
(1 row)
-- verify table is dropped
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['select count(*) from second_table']::text[],
@ -226,7 +224,6 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
localhost | 57637 | f | expected a single row in query result
(1 row)
-- send multiple queries
SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[],
ARRAY[:node_port, :node_port]::int[],

View File

@ -223,9 +223,7 @@ SELECT shardmaxvalue::integer - shardminvalue::integer AS shard_size
DELETE FROM pg_dist_shard_placement
WHERE shardid IN (SELECT shardid FROM pg_dist_shard
WHERE logicalrelid = 'foreign_table_to_distribute'::regclass);
DELETE FROM pg_dist_shard
WHERE logicalrelid = 'foreign_table_to_distribute'::regclass;
DELETE FROM pg_dist_partition
WHERE logicalrelid = 'foreign_table_to_distribute'::regclass;

View File

@ -17,7 +17,6 @@ SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get
(1 row)
BEGIN;
-- we should still see the uninitialized values
SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id();
initiator_node_identifier | transaction_number | transaction_stamp | ?column?
@ -52,7 +51,6 @@ SELECT initiator_node_identifier, transaction_number, transaction_stamp, (proces
-- also see that ROLLBACK (i.e., failures in the transaction) clears the shared memory
BEGIN;
-- we should still see the uninitialized values
SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id();
initiator_node_identifier | transaction_number | transaction_stamp | ?column?
@ -79,7 +77,6 @@ COMMIT;
-- we should also see that a new connection means an uninitialized transaction id
BEGIN;
SELECT assign_distributed_transaction_id(52, 52, '2015-01-01 00:00:00+0');
assign_distributed_transaction_id
-----------------------------------

View File

@ -601,7 +601,6 @@ drop cascades to table test_8
---------------------------------
(0 rows)
ROLLBACK;
SET search_path TO public;
DROP SCHEMA fkey_graph CASCADE;

View File

@ -770,7 +770,6 @@ DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_t
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300002 raw_events_first LEFT JOIN public.raw_events_second_13300006 raw_events_second ON ((raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id))) WHERE ((worker_hash(raw_events_first.user_id) OPERATOR(pg_catalog.>=) 0) AND (worker_hash(raw_events_first.user_id) OPERATOR(pg_catalog.<=) 1073741823))
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300003 raw_events_first LEFT JOIN public.raw_events_second_13300007 raw_events_second ON ((raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id))) WHERE ((worker_hash(raw_events_first.user_id) OPERATOR(pg_catalog.>=) 1073741824) AND (worker_hash(raw_events_first.user_id) OPERATOR(pg_catalog.<=) 2147483647))
DEBUG: Plan is router executable
INSERT INTO agg_events (user_id)
SELECT
raw_events_second.user_id
@ -781,7 +780,6 @@ DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_t
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id) SELECT raw_events_second.user_id FROM (public.reference_table_13300012 reference_table LEFT JOIN public.raw_events_second_13300006 raw_events_second ON ((reference_table.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id))) WHERE ((worker_hash(raw_events_second.user_id) OPERATOR(pg_catalog.>=) 0) AND (worker_hash(raw_events_second.user_id) OPERATOR(pg_catalog.<=) 1073741823))
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id) SELECT raw_events_second.user_id FROM (public.reference_table_13300012 reference_table LEFT JOIN public.raw_events_second_13300007 raw_events_second ON ((reference_table.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id))) WHERE ((worker_hash(raw_events_second.user_id) OPERATOR(pg_catalog.>=) 1073741824) AND (worker_hash(raw_events_second.user_id) OPERATOR(pg_catalog.<=) 2147483647))
DEBUG: Plan is router executable
INSERT INTO agg_events (user_id)
SELECT
raw_events_first.user_id
@ -793,7 +791,6 @@ DEBUG: Skipping target shard interval 13300009 since SELECT query for it pruned
DEBUG: Skipping target shard interval 13300010 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300011 since SELECT query for it pruned away
DEBUG: Plan is router executable
INSERT INTO agg_events (user_id)
SELECT
raw_events_first.user_id
@ -805,7 +802,6 @@ DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_t
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300002 raw_events_first LEFT JOIN (SELECT NULL::integer AS user_id, NULL::timestamp without time zone AS "time", NULL::integer AS value_1, NULL::integer AS value_2, NULL::double precision AS value_3, NULL::bigint AS value_4 WHERE false) raw_events_second(user_id, "time", value_1, value_2, value_3, value_4) ON ((raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id))) WHERE (((raw_events_second.user_id OPERATOR(pg_catalog.=) 10) OR (raw_events_second.user_id OPERATOR(pg_catalog.=) 11)) AND ((worker_hash(raw_events_first.user_id) OPERATOR(pg_catalog.>=) 0) AND (worker_hash(raw_events_first.user_id) OPERATOR(pg_catalog.<=) 1073741823)))
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300003 raw_events_first LEFT JOIN public.raw_events_second_13300007 raw_events_second ON ((raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id))) WHERE (((raw_events_second.user_id OPERATOR(pg_catalog.=) 10) OR (raw_events_second.user_id OPERATOR(pg_catalog.=) 11)) AND ((worker_hash(raw_events_first.user_id) OPERATOR(pg_catalog.>=) 1073741824) AND (worker_hash(raw_events_first.user_id) OPERATOR(pg_catalog.<=) 2147483647)))
DEBUG: Plan is router executable
INSERT INTO agg_events (user_id)
SELECT
raw_events_first.user_id
@ -817,7 +813,6 @@ DEBUG: Skipping target shard interval 13300009 since SELECT query for it pruned
DEBUG: Skipping target shard interval 13300010 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300011 since SELECT query for it pruned away
DEBUG: Plan is router executable
INSERT INTO agg_events (user_id)
SELECT
raw_events_first.user_id
@ -829,7 +824,6 @@ DEBUG: Skipping target shard interval 13300009 since SELECT query for it pruned
DEBUG: Skipping target shard interval 13300010 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300011 since SELECT query for it pruned away
DEBUG: Plan is router executable
INSERT INTO agg_events (user_id)
SELECT
raw_events_first.user_id
@ -841,7 +835,6 @@ DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_t
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300002 raw_events_first LEFT JOIN public.raw_events_second_13300006 raw_events_second ON ((raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id))) WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[19, 20, 21])) AND ((worker_hash(raw_events_first.user_id) OPERATOR(pg_catalog.>=) 0) AND (worker_hash(raw_events_first.user_id) OPERATOR(pg_catalog.<=) 1073741823)))
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM ((SELECT NULL::integer AS user_id, NULL::timestamp without time zone AS "time", NULL::integer AS value_1, NULL::integer AS value_2, NULL::double precision AS value_3, NULL::bigint AS value_4 WHERE false) raw_events_first(user_id, "time", value_1, value_2, value_3, value_4) LEFT JOIN public.raw_events_second_13300007 raw_events_second ON ((raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id))) WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[19, 20, 21])) AND ((worker_hash(raw_events_first.user_id) OPERATOR(pg_catalog.>=) 1073741824) AND (worker_hash(raw_events_first.user_id) OPERATOR(pg_catalog.<=) 2147483647)))
DEBUG: Plan is router executable
INSERT INTO agg_events (user_id)
SELECT
raw_events_first.user_id
@ -853,7 +846,6 @@ DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_t
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300002 raw_events_first JOIN public.raw_events_second_13300006 raw_events_second ON ((raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id))) WHERE ((raw_events_second.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[19, 20, 21])) AND ((worker_hash(raw_events_first.user_id) OPERATOR(pg_catalog.>=) 0) AND (worker_hash(raw_events_first.user_id) OPERATOR(pg_catalog.<=) 1073741823)))
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300003 raw_events_first JOIN (SELECT NULL::integer AS user_id, NULL::timestamp without time zone AS "time", NULL::integer AS value_1, NULL::integer AS value_2, NULL::double precision AS value_3, NULL::bigint AS value_4 WHERE false) raw_events_second(user_id, "time", value_1, value_2, value_3, value_4) ON ((raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id))) WHERE ((raw_events_second.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[19, 20, 21])) AND ((worker_hash(raw_events_first.user_id) OPERATOR(pg_catalog.>=) 1073741824) AND (worker_hash(raw_events_first.user_id) OPERATOR(pg_catalog.<=) 2147483647)))
DEBUG: Plan is router executable
-- the following is a very tricky query for Citus
-- although we do not support pushing down JOINs on non-partition
-- columns here it is safe to push it down given that we're looking for
@ -870,7 +862,6 @@ DEBUG: Plan is router executable
AND raw_events_first.value_1 = 12;
ERROR: cannot perform distributed planning for the given modification
DETAIL: Select query cannot be pushed down to the worker.
-- some unsupported LEFT/INNER JOINs
-- JOIN on one table with partition column other is not
INSERT INTO agg_events (user_id)
@ -880,7 +871,6 @@ DETAIL: Select query cannot be pushed down to the worker.
raw_events_first LEFT JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.value_1;
ERROR: cannot perform distributed planning for the given modification
DETAIL: Select query cannot be pushed down to the worker.
-- same as the above with INNER JOIN
INSERT INTO agg_events (user_id)
SELECT
@ -889,7 +879,6 @@ DETAIL: Select query cannot be pushed down to the worker.
raw_events_first INNER JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.value_1;
ERROR: cannot perform distributed planning for the given modification
DETAIL: Select query cannot be pushed down to the worker.
-- a not meaningful query
INSERT INTO agg_events
(user_id)
@ -899,7 +888,6 @@ DETAIL: Select query cannot be pushed down to the worker.
WHERE raw_events_first.user_id = raw_events_first.value_1;
ERROR: cannot perform distributed planning for the given modification
DETAIL: Select query cannot be pushed down to the worker.
-- both tables joined on non-partition columns
INSERT INTO agg_events (user_id)
SELECT
@ -908,7 +896,6 @@ DETAIL: Select query cannot be pushed down to the worker.
raw_events_first LEFT JOIN raw_events_second ON raw_events_first.value_1 = raw_events_second.value_1;
ERROR: cannot perform distributed planning for the given modification
DETAIL: Select query cannot be pushed down to the worker.
-- same as the above with INNER JOIN
INSERT INTO agg_events (user_id)
SELECT
@ -917,7 +904,6 @@ DETAIL: Select query cannot be pushed down to the worker.
raw_events_first INNER JOIN raw_events_second ON raw_events_first.value_1 = raw_events_second.value_1;
ERROR: cannot perform distributed planning for the given modification
DETAIL: Select query cannot be pushed down to the worker.
-- even if there is a filter on the partition key, since the join is not on the partition key we reject
-- this query
INSERT INTO agg_events (user_id)
@ -929,7 +915,6 @@ WHERE
raw_events_first.user_id = 10;
ERROR: cannot perform distributed planning for the given modification
DETAIL: Select query cannot be pushed down to the worker.
-- same as the above with INNER JOIN
INSERT INTO agg_events (user_id)
SELECT
@ -939,7 +924,6 @@ DETAIL: Select query cannot be pushed down to the worker.
WHERE raw_events_first.user_id = 10;
ERROR: cannot perform distributed planning for the given modification
DETAIL: Select query cannot be pushed down to the worker.
-- make things a bit more complicate with IN clauses
INSERT INTO agg_events (user_id)
SELECT
@ -949,7 +933,6 @@ DETAIL: Select query cannot be pushed down to the worker.
WHERE raw_events_first.value_1 IN (10, 11,12) OR raw_events_second.user_id IN (1,2,3,4);
ERROR: cannot perform distributed planning for the given modification
DETAIL: Select query cannot be pushed down to the worker.
-- implicit join on non partition column should also not be pushed down
INSERT INTO agg_events
(user_id)
@ -959,7 +942,6 @@ DETAIL: Select query cannot be pushed down to the worker.
WHERE raw_events_second.user_id = raw_events_first.value_1;
ERROR: cannot perform distributed planning for the given modification
DETAIL: Select query cannot be pushed down to the worker.
-- the following is again a tricky query for Citus
-- if the given filter was on value_1 as shown in the above, Citus could
-- push it down. But here the query is refused
@ -972,7 +954,6 @@ DETAIL: Select query cannot be pushed down to the worker.
AND raw_events_first.value_2 = 12;
ERROR: cannot perform distributed planning for the given modification
DETAIL: Select query cannot be pushed down to the worker.
-- lets do some unsupported query tests with subqueries
-- foo is not joined on the partition key so the query is not
-- pushed down

View File

@ -70,7 +70,6 @@ FROM (
SELECT DISTINCT user_id,
'Has done event'::TEXT AS hasdone_event
FROM events_table AS e
WHERE e.user_id >= 10
AND e.user_id <= 25
AND e.event_type IN (106, 107, 108)
@ -111,7 +110,6 @@ FROM (
SELECT DISTINCT user_id,
'Has done event'::TEXT AS hasdone_event
FROM events_table AS e
WHERE e.user_id >= 10
AND e.user_id <= 25
AND e.event_type IN (106, 107, 108)
@ -154,7 +152,6 @@ FROM (
SELECT DISTINCT user_id,
'Has done event'::TEXT AS hasdone_event
FROM events_table AS e
WHERE e.user_id >= 10
AND e.user_id <= 25
AND e.event_type IN (106, 107, 108)
@ -340,7 +337,6 @@ FROM (
max(u.time) as user_lastseen,
array_agg(event_type ORDER BY u.time) AS event_array
FROM (
SELECT user_id, time
FROM users_table
WHERE
@ -370,7 +366,6 @@ FROM (
max(u.time) as user_lastseen,
array_agg(event_type ORDER BY u.time) AS event_array
FROM (
SELECT user_id, time
FROM users_table
WHERE
@ -400,7 +395,6 @@ FROM (
max(u.time) as user_lastseen,
array_agg(event_type ORDER BY u.time) AS event_array
FROM (
SELECT user_id, time, value_3 as val_3
FROM users_table
WHERE
@ -519,7 +513,6 @@ SELECT user_id, value_2 FROM users_table WHERE
AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 300 AND event_type <= 350 AND value_3 > 100 AND user_id=users_table.user_id);
ERROR: cannot perform distributed planning for the given modification
DETAIL: Select query cannot be pushed down to the worker.
------------------------------------
------------------------------------
-- Customers who have done X more than 2 times, and satisfy other customer specific criteria
@ -610,7 +603,6 @@ And user_id in
And value_2 > 25);
ERROR: cannot perform distributed planning for the given modification
DETAIL: Select query cannot be pushed down to the worker.
-- not pushable since we're not selecting the partition key
-- from the events table
INSERT INTO agg_results_third(user_id)

View File

@ -141,7 +141,6 @@ SELECT json_agg(case when l_quantity > 20 then l_quantity else NULL end)
[null, 36.00, null, 28.00, 24.00, 32.00, 38.00, 45.00, 49.00, 27.00, null, 28.00, 26.00, 30.00]
(1 row)
-- Check that we can execute json_agg() with an expression containing different types
SELECT json_agg(case when l_quantity > 20 then to_json(l_quantity) else '"f"'::json end)
FROM lineitem WHERE l_orderkey < 5;
@ -150,7 +149,6 @@ SELECT json_agg(case when l_quantity > 20 then to_json(l_quantity) else '"f"'::j
["f", 36.00, "f", 28.00, 24.00, 32.00, 38.00, 45.00, 49.00, 27.00, "f", 28.00, 26.00, 30.00]
(1 row)
-- Check that we can execute json_agg() with an expression containing json arrays
SELECT json_agg(json_build_array(l_quantity, l_shipdate))
FROM lineitem WHERE l_orderkey < 3;
@ -159,7 +157,6 @@ SELECT json_agg(json_build_array(l_quantity, l_shipdate))
[[17.00, "1996-03-13"], [36.00, "1996-04-12"], [8.00, "1996-01-29"], [28.00, "1996-04-21"], [24.00, "1996-03-30"], [32.00, "1996-01-30"], [38.00, "1997-01-28"]]
(1 row)
-- Check that we can execute json_agg() with an expression containing arrays
SELECT json_agg(ARRAY[l_quantity, l_orderkey])
FROM lineitem WHERE l_orderkey < 3;

View File

@ -155,7 +155,6 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text,
{ "11" : null, "12" : 36.00, "13" : null, "14" : 28.00, "15" : 24.00, "16" : 32.00, "21" : 38.00, "31" : 45.00, "32" : 49.00, "33" : 27.00, "34" : null, "35" : 28.00, "36" : 26.00, "41" : 30.00 }
(1 row)
-- Check that we can execute json_object_agg() with an expression containing different types
SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text,
case when l_quantity > 20 then to_json(l_quantity) else '"f"'::json end))
@ -165,7 +164,6 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text,
{ "11" : "f", "12" : 36.00, "13" : "f", "14" : 28.00, "15" : 24.00, "16" : 32.00, "21" : 38.00, "31" : 45.00, "32" : 49.00, "33" : 27.00, "34" : "f", "35" : 28.00, "36" : 26.00, "41" : 30.00 }
(1 row)
-- Check that we can execute json_object_agg() with an expression containing json arrays
SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, json_build_array(l_quantity, l_shipdate)))
FROM lineitem WHERE l_orderkey < 3;
@ -174,7 +172,6 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, json_bu
{ "11" : [17.00, "1996-03-13"], "12" : [36.00, "1996-04-12"], "13" : [8.00, "1996-01-29"], "14" : [28.00, "1996-04-21"], "15" : [24.00, "1996-03-30"], "16" : [32.00, "1996-01-30"], "21" : [38.00, "1997-01-28"] }
(1 row)
-- Check that we can execute json_object_agg() with an expression containing arrays
SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, ARRAY[l_quantity, l_orderkey]))
FROM lineitem WHERE l_orderkey < 3;

View File

@ -141,7 +141,6 @@ SELECT jsonb_agg(case when l_quantity > 20 then l_quantity else NULL end)
[null, 36.00, null, 28.00, 24.00, 32.00, 38.00, 45.00, 49.00, 27.00, null, 28.00, 26.00, 30.00]
(1 row)
-- Check that we can execute jsonb_agg() with an expression containing different types
SELECT jsonb_agg(case when l_quantity > 20 then to_jsonb(l_quantity) else '"f"'::jsonb end)
FROM lineitem WHERE l_orderkey < 5;
@ -150,7 +149,6 @@ SELECT jsonb_agg(case when l_quantity > 20 then to_jsonb(l_quantity) else '"f"':
["f", 36.00, "f", 28.00, 24.00, 32.00, 38.00, 45.00, 49.00, 27.00, "f", 28.00, 26.00, 30.00]
(1 row)
-- Check that we can execute jsonb_agg() with an expression containing jsonb arrays
SELECT jsonb_agg(jsonb_build_array(l_quantity, l_shipdate))
FROM lineitem WHERE l_orderkey < 3;
@ -159,7 +157,6 @@ SELECT jsonb_agg(jsonb_build_array(l_quantity, l_shipdate))
[[17.00, "1996-03-13"], [36.00, "1996-04-12"], [8.00, "1996-01-29"], [28.00, "1996-04-21"], [24.00, "1996-03-30"], [32.00, "1996-01-30"], [38.00, "1997-01-28"]]
(1 row)
-- Check that we can execute jsonb_agg() with an expression containing arrays
SELECT jsonb_agg(ARRAY[l_quantity, l_orderkey])
FROM lineitem WHERE l_orderkey < 3;

View File

@ -148,7 +148,6 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text,
{"11": null, "12": 36.00, "13": null, "14": 28.00, "15": 24.00, "16": 32.00, "21": 38.00, "31": 45.00, "32": 49.00, "33": 27.00, "34": null, "35": 28.00, "36": 26.00, "41": 30.00}
(1 row)
-- Check that we can execute jsonb_object_agg() with an expression containing different types
SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text,
case when l_quantity > 20 then to_jsonb(l_quantity) else '"f"'::jsonb end)
@ -158,7 +157,6 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text,
{"11": "f", "12": 36.00, "13": "f", "14": 28.00, "15": 24.00, "16": 32.00, "21": 38.00, "31": 45.00, "32": 49.00, "33": 27.00, "34": "f", "35": 28.00, "36": 26.00, "41": 30.00}
(1 row)
-- Check that we can execute jsonb_object_agg() with an expression containing jsonb arrays
SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, jsonb_build_array(l_quantity, l_shipdate))
FROM lineitem WHERE l_orderkey < 3;
@ -167,7 +165,6 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, jsonb_build_arra
{"11": [17.00, "1996-03-13"], "12": [36.00, "1996-04-12"], "13": [8.00, "1996-01-29"], "14": [28.00, "1996-04-21"], "15": [24.00, "1996-03-30"], "16": [32.00, "1996-01-30"], "21": [38.00, "1997-01-28"]}
(1 row)
-- Check that we can execute jsonb_object_agg() with an expression containing arrays
SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, ARRAY[l_quantity, l_orderkey])
FROM lineitem WHERE l_orderkey < 3;

View File

@ -15,7 +15,6 @@ SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
-- 1-) Distributing partitioned table
-- create partitioned table
CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time);
-- create its partitions
CREATE TABLE partitioning_test_2009 PARTITION OF partitioning_test FOR VALUES FROM ('2009-01-01') TO ('2010-01-01');
CREATE TABLE partitioning_test_2010 PARTITION OF partitioning_test FOR VALUES FROM ('2010-01-01') TO ('2011-01-01');

View File

@ -194,6 +194,5 @@ LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_ot
6 | (2,3) | foo | 12 | (2,3) | foo
(5 rows)
\c - - - :worker_1_port
\c - - - :worker_2_port

View File

@ -607,7 +607,6 @@ DETAIL: distribution column value: 10
10 | 19519
(3 rows)
-- following join is not router plannable since there are no
-- workers containing both shards, but will work through recursive
-- planning
@ -675,7 +674,6 @@ DETAIL: distribution column value: 1
21 | 1 | arcading | 5890
(2 rows)
-- single shard select with group by on non-partition column is router plannable
SELECT id
FROM articles_hash_mx
@ -749,7 +747,6 @@ DETAIL: distribution column value: 1
11814
(1 row)
-- router plannable union queries are supported
SELECT * FROM (
SELECT * FROM articles_hash_mx WHERE author_id = 1
@ -913,7 +910,6 @@ DEBUG: Router planner cannot handle multi-shard select queries
41 | 1 | aznavour | 11814
(5 rows)
-- router plannable
SELECT *
FROM articles_hash_mx
@ -966,7 +962,6 @@ DEBUG: Router planner cannot handle multi-shard select queries
41 | 1 | aznavour | 11814
(5 rows)
-- router plannable due to abs(-1) getting converted to 1 by postgresql
SELECT *
FROM articles_hash_mx
@ -1326,7 +1321,6 @@ DETAIL: distribution column value: 1
21 1 arcading 5890
31 1 athwartships 7271
41 1 aznavour 11814
-- table creation queries inside can be router plannable
CREATE TEMP TABLE temp_articles_hash_mx as
SELECT *
@ -1474,7 +1468,6 @@ SET client_min_messages to 'DEBUG2';
CREATE MATERIALIZED VIEW mv_articles_hash_mx_error AS
SELECT * FROM articles_hash_mx WHERE author_id in (1,2);
DEBUG: Router planner cannot handle multi-shard select queries
-- router planner/executor is disabled for task-tracker executor
-- following query is router plannable, but router planner is disabled
-- TODO: Uncomment once we fix task-tracker issue

View File

@ -145,7 +145,6 @@ WHERE colocationid IN
10004 | 1 | -1 | 0
(1 row)
\c - - - :worker_1_port
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
count
@ -164,7 +163,6 @@ WHERE
1380000 | 1 | 0 | localhost | 57638
(1 row)
\c - - - :master_port
SELECT master_remove_node('localhost', :worker_2_port);
master_remove_node
@ -217,7 +215,6 @@ WHERE
---------+------------+-------------+----------+----------
(0 rows)
\c - - - :master_port
-- remove same node twice
SELECT master_remove_node('localhost', :worker_2_port);
@ -281,7 +278,6 @@ WHERE colocationid IN
10004 | 1 | -1 | 0
(1 row)
\c - - - :worker_1_port
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
count
@ -300,7 +296,6 @@ WHERE
1380000 | 1 | 0 | localhost | 57638
(1 row)
\c - - - :master_port
BEGIN;
SELECT master_remove_node('localhost', :worker_2_port);
@ -357,7 +352,6 @@ WHERE
1380000 | 1 | 0 | localhost | 57638
(1 row)
\c - - - :master_port
-- remove node in a transaction and COMMIT
-- status before master_remove_node
@ -389,7 +383,6 @@ WHERE colocationid IN
10004 | 1 | -1 | 0
(1 row)
\c - - - :worker_1_port
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
count
@ -408,7 +401,6 @@ WHERE
1380000 | 1 | 0 | localhost | 57638
(1 row)
\c - - - :master_port
BEGIN;
SELECT master_remove_node('localhost', :worker_2_port);
@ -446,7 +438,6 @@ WHERE colocationid IN
10004 | 1 | -1 | 0
(1 row)
\c - - - :worker_1_port
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
count
@ -464,7 +455,6 @@ WHERE
---------+------------+-------------+----------+----------
(0 rows)
\c - - - :master_port
-- re-add the node for next tests
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
@ -522,9 +512,7 @@ WHERE
1380000 | 1 | 0 | localhost | 57638
(1 row)
\c - - - :master_port
BEGIN;
INSERT INTO remove_node_reference_table VALUES(1);
SELECT master_remove_node('localhost', :worker_2_port);
@ -586,14 +574,12 @@ WHERE
---------+------------+-------------+----------+----------
(0 rows)
SELECT * FROM remove_node_reference_table;
column1
---------
1
(1 row)
\c - - - :master_port
-- re-add the node for next tests
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
@ -651,7 +637,6 @@ WHERE
1380000 | 1 | 0 | localhost | 57638
(1 row)
\c - - - :master_port
BEGIN;
ALTER TABLE remove_node_reference_table ADD column2 int;
@ -690,7 +675,6 @@ WHERE colocationid IN
10004 | 1 | -1 | 0
(1 row)
\c - - - :worker_1_port
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
count
@ -708,7 +692,6 @@ WHERE
---------+------------+-------------+----------+----------
(0 rows)
\c - - - :master_port
SET citus.next_shard_id TO 1380001;
-- verify table structure is changed
@ -844,7 +827,6 @@ WHERE colocationid IN
10004 | 1 | -1 | 0
(1 row)
\c - - - :worker_1_port
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
count
@ -918,9 +900,7 @@ WHERE
---------+------------+-------------+----------+----------
(0 rows)
\c - - - :master_port
-- re-add the node for next tests
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638
@ -983,9 +963,7 @@ ORDER BY shardid ASC;
1380002 | 1 | 0 | localhost | 57638
(2 rows)
\c - - - :master_port
SELECT master_disable_node('localhost', :worker_2_port);
master_disable_node
---------------------
@ -1037,7 +1015,6 @@ WHERE
---------+------------+-------------+----------+----------
(0 rows)
\c - - - :master_port
-- re-add the node for next tests
SELECT 1 FROM master_activate_node('localhost', :worker_2_port);

View File

@ -767,7 +767,6 @@ DETAIL: distribution column value: 10
10 | 19519
(3 rows)
-- following join is not router plannable since there are no
-- workers containing both shards, but will work through recursive
-- planning
@ -835,7 +834,6 @@ DETAIL: distribution column value: 1
21 | 1 | arcading | 5890
(2 rows)
-- single shard select with group by on non-partition column is router plannable
SELECT id
FROM articles_hash
@ -1094,7 +1092,6 @@ DEBUG: Router planner cannot handle multi-shard select queries
41 | 1 | aznavour | 11814
(5 rows)
-- router plannable
SELECT *
FROM articles_hash
@ -1147,7 +1144,6 @@ DEBUG: Router planner cannot handle multi-shard select queries
41 | 1 | aznavour | 11814
(5 rows)
-- router plannable due to abs(-1) getting converted to 1 by postgresql
SELECT *
FROM articles_hash
@ -1686,7 +1682,6 @@ DEBUG: Plan is router executable
-----------
(0 rows)
-- verify range partitioned tables can be used in router plannable queries
-- just 4 shards to be created for each table to make sure
-- they are 'co-located' pairwise
@ -1916,7 +1911,6 @@ DEBUG: Router planner cannot handle multi-shard select queries
----+-----------+-------+------------+------+----
(0 rows)
-- following is a bug, function should have been
-- evaluated at master before going to worker
-- need to use a append distributed table here
@ -2166,7 +2160,6 @@ DETAIL: distribution column value: 1
21 1 arcading 5890
31 1 athwartships 7271
41 1 aznavour 11814
-- table creation queries inside can be router plannable
CREATE TEMP TABLE temp_articles_hash as
SELECT *

View File

@ -568,7 +568,6 @@ DETAIL: distribution column value: 1
21 | 1 | arcading | 5890
(2 rows)
-- single shard select with group by on non-partition column goes through fast-path planning
SELECT id
FROM articles_hash
@ -765,7 +764,6 @@ DEBUG: Router planner cannot handle multi-shard select queries
41 | 1 | aznavour | 11814
(5 rows)
-- goes through fast-path planning because
-- the dist. key is ANDed with the rest of the
-- filters
@ -822,7 +820,6 @@ DEBUG: Router planner cannot handle multi-shard select queries
41 | 1 | aznavour | 11814
(5 rows)
-- Citus does not qualify this as a fast-path because
-- dist_key = func()
SELECT *
@ -1465,7 +1462,6 @@ DETAIL: distribution column value: 1
21 1 arcading 5890
31 1 athwartships 7271
41 1 aznavour 11814
-- table creation queries inside can be fast-path router plannable
CREATE TEMP TABLE temp_articles_hash as
SELECT *

View File

@ -199,7 +199,6 @@ SELECT DISTINCT l_orderkey, count(*)
197 | 6
(19 rows)
-- explain the query to see actual plan
EXPLAIN (COSTS FALSE)
SELECT DISTINCT l_orderkey, count(*)
@ -272,7 +271,6 @@ SELECT DISTINCT count(*)
4
(4 rows)
-- explain the query to see actual plan. We expect to see Aggregate node having
-- group by key on count(*) column, since columns in the Group By doesn't guarantee
-- the uniqueness of the result.
@ -350,7 +348,6 @@ SELECT DISTINCT l_suppkey, count(*)
14 | 1
(10 rows)
-- explain the query to see actual plan. Similar to the explain of the query above.
EXPLAIN (COSTS FALSE)
SELECT DISTINCT l_suppkey, count(*)
@ -430,7 +427,6 @@ SELECT DISTINCT l_suppkey, avg(l_partkey)
12 | 17510.0000000000000000
(10 rows)
-- explain the query to see actual plan. Similar to the explain of the query above.
-- Only aggregate functions will be changed.
EXPLAIN (COSTS FALSE)
@ -510,7 +506,6 @@ SELECT DISTINCT ON (l_suppkey) avg(l_partkey)
77506.000000000000
(10 rows)
-- explain the query to see actual plan. We expect to see sort+unique to handle
-- distinct on.
EXPLAIN (COSTS FALSE)
@ -587,7 +582,6 @@ SELECT DISTINCT avg(ceil(l_partkey / 2))
122
(10 rows)
-- explain the query to see actual plan
EXPLAIN (COSTS FALSE)
SELECT DISTINCT avg(ceil(l_partkey / 2))
@ -645,7 +639,6 @@ EXPLAIN (COSTS FALSE)
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
(18 rows)
SET enable_hashagg TO on;
-- expression among aggregations.
SELECT DISTINCT sum(l_suppkey) + count(l_partkey) AS dis
@ -667,7 +660,6 @@ SELECT DISTINCT sum(l_suppkey) + count(l_partkey) AS dis
15
(10 rows)
-- explain the query to see actual plan
EXPLAIN (COSTS FALSE)
SELECT DISTINCT sum(l_suppkey) + count(l_partkey) AS dis
@ -725,9 +717,7 @@ EXPLAIN (COSTS FALSE)
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
(18 rows)
SET enable_hashagg TO on;
-- distinct on all columns, note Group By columns guarantees uniqueness of the
-- result list.
SELECT DISTINCT *
@ -749,7 +739,6 @@ SELECT DISTINCT *
3 | 29380 | 1883 | 4 | 2.00 | 2618.76 | 0.01 | 0.06 | A | F | 12-04-1993 | 01-07-1994 | 01-01-1994 | NONE | TRUCK | y. fluffily pending d
(10 rows)
-- explain the query to see actual plan. We expect to see only one aggregation
-- node since group by columns guarantees the uniqueness.
EXPLAIN (COSTS FALSE)
@ -811,7 +800,6 @@ EXPLAIN (COSTS FALSE)
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
(19 rows)
SET enable_hashagg TO on;
-- distinct on count distinct
SELECT DISTINCT count(DISTINCT l_partkey), count(DISTINCT l_shipmode)
@ -847,7 +835,6 @@ SELECT DISTINCT count(DISTINCT l_partkey), count(DISTINCT l_shipmode)
7 | 7
(25 rows)
-- explain the query to see actual plan. We expect to see aggregation plan for
-- the outer distinct.
EXPLAIN (COSTS FALSE)
@ -906,7 +893,6 @@ EXPLAIN (COSTS FALSE)
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
(19 rows)
SET enable_hashagg TO on;
-- distinct on aggregation with filter and expression
SELECT DISTINCT ceil(count(case when l_partkey > 100000 THEN 1 ELSE 0 END) / 2) AS count
@ -922,7 +908,6 @@ SELECT DISTINCT ceil(count(case when l_partkey > 100000 THEN 1 ELSE 0 END) / 2)
4
(5 rows)
-- explain the query to see actual plan
EXPLAIN (COSTS FALSE)
SELECT DISTINCT ceil(count(case when l_partkey > 100000 THEN 1 ELSE 0 END) / 2) AS count
@ -975,9 +960,7 @@ EXPLAIN (COSTS FALSE)
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
(17 rows)
SET enable_hashagg TO on;
-- explain the query to see actual plan with array_agg aggregation.
EXPLAIN (COSTS FALSE)
SELECT DISTINCT array_agg(l_linenumber), array_length(array_agg(l_linenumber), 1)
@ -1038,7 +1021,6 @@ EXPLAIN (COSTS FALSE)
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
(20 rows)
SET enable_hashagg TO on;
-- distinct on non-partition column with aggregate
-- this is the same as non-distinct version due to group by
@ -1062,7 +1044,6 @@ SELECT DISTINCT l_partkey, count(*)
199146 | 3
(11 rows)
-- explain the query to see actual plan
EXPLAIN (COSTS FALSE)
SELECT DISTINCT l_partkey, count(*)
@ -1137,7 +1118,6 @@ SELECT DISTINCT l_partkey, l_suppkey
197921 | 441
(15 rows)
EXPLAIN (COSTS FALSE)
SELECT DISTINCT l_partkey, l_suppkey
FROM lineitem_hash_part
@ -1180,7 +1160,6 @@ SELECT DISTINCT ON (l_orderkey) l_orderkey, l_partkey, l_suppkey
34 | 88362 | 871
(10 rows)
EXPLAIN (COSTS FALSE)
SELECT DISTINCT ON (l_orderkey) l_orderkey, l_partkey, l_suppkey
FROM lineitem_hash_part

View File

@ -5,7 +5,6 @@
--
-- We don't need shard id sequence here, so commented out to prevent conflicts with concurrent tests
-- SET citus.next_shard_id TO 1400000;
--
-- UNIONs and JOINs mixed
--
@ -1833,7 +1832,6 @@ DEBUG: Plan 66 query after replacing subqueries and CTEs: SELECT count(*) AS va
1 | 0
(4 rows)
SET citus.enable_repartition_joins to OFF;
RESET client_min_messages;
-- single level inner joins

View File

@ -374,7 +374,6 @@ DEBUG: Plan is router executable
5
(1 row)
-- union involving reference table and distributed table subqueries
-- is supported with pulling data to coordinator
SELECT * FROM
@ -1644,7 +1643,6 @@ LIMIT 4;
-- test the read_intermediate_result() for GROUP BYs
BEGIN;
SELECT broadcast_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,200) s');
broadcast_intermediate_result
-------------------------------

View File

@ -228,7 +228,6 @@ ORDER BY shardid;
1360009 | t
(1 row)
DROP TABLE upgrade_reference_table_append;
-- test valid cases, shard exists at one worker
CREATE TABLE upgrade_reference_table_one_worker(column1 int);
@ -341,7 +340,6 @@ ORDER BY shardid;
1360010 | t
(1 row)
DROP TABLE upgrade_reference_table_one_worker;
-- test valid cases, shard exists at both workers but one is unhealthy
SET citus.shard_replication_factor TO 2;
@ -458,7 +456,6 @@ ORDER BY shardid;
1360011 | t
(1 row)
DROP TABLE upgrade_reference_table_one_unhealthy;
-- test valid cases, shard exists at both workers and both are healthy
CREATE TABLE upgrade_reference_table_both_healthy(column1 int);
@ -570,7 +567,6 @@ ORDER BY shardid;
1360012 | t
(1 row)
DROP TABLE upgrade_reference_table_both_healthy;
-- test valid cases, do it in transaction and ROLLBACK
SET citus.shard_replication_factor TO 1;
@ -686,7 +682,6 @@ ORDER BY shardid;
1360013 | f
(1 row)
DROP TABLE upgrade_reference_table_transaction_rollback;
-- test valid cases, do it in transaction and COMMIT
SET citus.shard_replication_factor TO 1;
@ -872,11 +867,9 @@ ORDER BY shardid;
1360015
(1 row)
SELECT upgrade_to_reference_table('upgrade_reference_table_mx');
ERROR: cannot upgrade to reference table
DETAIL: Upgrade is only supported for statement-based replicated tables but "upgrade_reference_table_mx" is streaming replicated
-- situation after upgrade_reference_table
SELECT
partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel
@ -940,7 +933,6 @@ SELECT create_distributed_table('upgrade_reference_table_mx', 'column1');
UPDATE pg_dist_shard_placement SET shardstate = 3
WHERE nodeport = :worker_2_port AND
shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='upgrade_reference_table_mx'::regclass);
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
start_metadata_sync_to_node
-----------------------------
@ -995,7 +987,6 @@ ORDER BY shardid;
1360016
(1 row)
SET client_min_messages TO WARNING;
SELECT upgrade_to_reference_table('upgrade_reference_table_mx');
upgrade_to_reference_table
@ -1003,7 +994,6 @@ SELECT upgrade_to_reference_table('upgrade_reference_table_mx');
(1 row)
-- situation after upgrade_reference_table
SELECT
partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel
@ -1052,7 +1042,6 @@ ORDER BY shardid;
1360016 | t
(1 row)
-- situation on metadata worker
\c - - - :worker_1_port
SELECT
@ -1091,7 +1080,6 @@ ORDER BY shardid;
1360016 | t
(1 row)
\c - - - :master_port
DROP TABLE upgrade_reference_table_mx;
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);

View File

@ -51,7 +51,6 @@ BEGIN;
(1 row)
-- we can even run queries (sequentially) over the distributed table
SELECT * FROM dist_table;
key | value
@ -355,7 +354,6 @@ BEGIN;
(1 row)
CREATE TYPE test_type_3 AS (a int, b test_type, c issn);
CREATE TABLE t3 (a int, b test_type_3);
SELECT create_reference_table('t3');

View File

@ -194,7 +194,6 @@ SELECT create_distributed_table('test_table_rep_2', 'a');
(1 row)
-- 1PC should never use 2PC with rep > 1
SET citus.multi_shard_commit_protocol TO '1pc';
SET citus.multi_shard_modify_mode TO 'sequential';

View File

@ -280,7 +280,6 @@ FROM
WHERE
users_table.user_id = events_table.user_id AND
event_type IN (1,2,3,4)
) as bar
WHERE foo.user_id = bar.user_id
ORDER BY 1 DESC;
@ -333,7 +332,6 @@ FROM
event_type IN (1,2,3,4)
ORDER BY 2,1
LIMIT 2
) as bar
WHERE foo.user_id = bar.user_id
ORDER BY 1 DESC LIMIT 5;
@ -522,7 +520,6 @@ FROM
WHERE
users_table.user_id = events_table.user_id AND
event_type IN (1,2,3,4)
) as bar
WHERE foo.user_id = bar.user_id
ORDER BY 1 DESC;

View File

@ -343,7 +343,6 @@ DEBUG: Plan 28 query after replacing subqueries and CTEs: SELECT foo.user_id, f
-- cursor test
BEGIN;
DECLARE recursive_subquery CURSOR FOR
SELECT
event_type, count(distinct value_2)
@ -384,7 +383,6 @@ DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT event_type, co
COMMIT;
-- cursor test with FETCH ALL
BEGIN;
DECLARE recursive_subquery CURSOR FOR
SELECT
event_type, count(distinct value_2)

View File

@ -628,7 +628,6 @@ DEBUG: Plan 65 query after replacing subqueries and CTEs: SELECT generate_serie
3
(3 rows)
-- Local tables also planned recursively, so using it as part of the FROM clause
-- make the clause recurring
CREATE TABLE local_table(id int, value_1 int);
@ -656,7 +655,6 @@ DEBUG: Plan 67 query after replacing subqueries and CTEs: SELECT id, value_1 FR
2 | 2
(2 rows)
-- Use local table in WHERE clause
SELECT
COUNT(*)

View File

@ -6,7 +6,6 @@ SET search_path TO subquery_and_partitioning, public;
CREATE TABLE users_table_local AS SELECT * FROM users_table;
CREATE TABLE events_table_local AS SELECT * FROM events_table;
CREATE TABLE partitioning_test(id int, value_1 int, time date) PARTITION BY RANGE (time);
-- create its partitions
CREATE TABLE partitioning_test_2017 PARTITION OF partitioning_test FOR VALUES FROM ('2017-01-01') TO ('2018-01-01');
CREATE TABLE partitioning_test_2010 PARTITION OF partitioning_test FOR VALUES FROM ('2010-01-01') TO ('2011-01-01');

View File

@ -151,7 +151,6 @@ DEBUG: Plan 24 query after replacing subqueries and CTEs: UPDATE with_dml.secon
UPDATE
second_distributed_table
SET dept =
(SELECT DISTINCT tenant_id::int FROM distributed_table WHERE tenant_id = '9')
WHERE dept = 8;
DEBUG: generating subplan 26_1 for subquery SELECT DISTINCT (tenant_id)::integer AS tenant_id FROM with_dml.distributed_table WHERE (tenant_id OPERATOR(pg_catalog.=) '9'::text)

View File

@ -81,7 +81,6 @@ ORDER BY
1 | 6272
(5 rows)
-- Subqueries in WHERE and FROM are mixed
-- In this query, only subquery in WHERE is not a colocated join
-- but we're able to recursively plan that as well

View File

@ -92,7 +92,6 @@ WITH users_events AS (
GROUP BY
users_table.user_id,
events_table.event_type
)
SELECT
uid, event_type, value_2, value_3

View File

@ -4,7 +4,6 @@ SET citus.shard_replication_factor TO 1;
CREATE TABLE with_partitioning.local_users_2 (user_id int, event_type int);
INSERT INTO local_users_2 VALUES (0, 0), (1, 4), (1, 7), (2, 1), (3, 3), (5, 4), (6, 2), (10, 7);
CREATE TABLE with_partitioning.partitioning_test(id int, time date) PARTITION BY RANGE (time);
-- create its partitions
CREATE TABLE with_partitioning.partitioning_test_2017 PARTITION OF partitioning_test FOR VALUES FROM ('2017-01-01') TO ('2018-01-01');
CREATE TABLE with_partitioning.partitioning_test_2010 PARTITION OF partitioning_test FOR VALUES FROM ('2010-01-01') TO ('2011-01-01');

View File

@ -22,7 +22,6 @@ CREATE TABLE lineitem_hash (
l_shipmode char(10) not null,
l_comment varchar(44) not null,
PRIMARY KEY(l_orderkey, l_linenumber) );
SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash');
create_distributed_table
--------------------------

View File

@ -101,4 +101,3 @@ def initialize_citus_cluster(old_bindir, old_datadir, settings):
start_databases(old_bindir, old_datadir)
create_citus_extension(old_bindir)
add_workers(old_bindir)