mirror of https://github.com/citusdata/citus.git
Test more reference/local cases, also ALTER ROLE
Test ALTER ROLE doesn't deadlock when coordinator added, or propagate from mx workers. Consolidate wait_until_metadata_sync & verify_metadata into multi_test_helpers. (pull/3220/head)
parent 3433fd0068
commit 5a17fd6d9d
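With the helpers consolidated into multi_test_helpers, the individual test files below stop defining their own copies of wait_until_metadata_sync and verify_metadata and call the shared ones instead. A rough usage sketch, taken from the calls that appear in the hunks below (the public. prefix only matters where a test sets its own search_path):

    -- block until the background metadata sync has reached the workers
    SELECT public.wait_until_metadata_sync();
    -- check that a worker's pg_dist_node / pg_dist_placement match the coordinator's
    SELECT verify_metadata('localhost', :worker_1_port);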
@@ -71,23 +71,6 @@ CitusExecutorStart(QueryDesc *queryDesc, int eflags)
 {
     PlannedStmt *plannedStmt = queryDesc->plannedstmt;
 
-    if (CitusHasBeenLoaded())
-    {
-        if (IsLocalReferenceTableJoinPlan(plannedStmt) &&
-            IsMultiStatementTransaction())
-        {
-            /*
-             * Currently we don't support this to avoid problems with tuple
-             * visibility, locking, etc. For example, change to the reference
-             * table can go through a MultiConnection, which won't be visible
-             * to the locally planned queries.
-             */
-            ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                            errmsg("cannot join local tables and reference tables in "
-                                   "a transaction block")));
-        }
-    }
-
     /*
      * We cannot modify XactReadOnly on Windows because it is not
      * declared with PGDLLIMPORT.
@@ -143,6 +126,23 @@ CitusExecutorRun(QueryDesc *queryDesc,
         FunctionCallLevel++;
     }
 
+    if (CitusHasBeenLoaded())
+    {
+        if (IsLocalReferenceTableJoinPlan(queryDesc->plannedstmt) &&
+            IsMultiStatementTransaction())
+        {
+            /*
+             * Currently we don't support this to avoid problems with tuple
+             * visibility, locking, etc. For example, change to the reference
+             * table can go through a MultiConnection, which won't be visible
+             * to the locally planned queries.
+             */
+            ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                            errmsg("cannot join local tables and reference tables in "
+                                   "a transaction block")));
+        }
+    }
+
     /*
      * Disable execution of ALTER TABLE constraint validation queries. These
      * constraints will be validated in worker nodes, so running these queries
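Note: the check above moves from CitusExecutorStart into CitusExecutorRun, so it fires when the plan is actually executed. The behaviour it enforces is exercised by the replicate_reference_tables_to_coordinator output further down; a condensed sketch of that test (local_table is a plain local table, numbers a reference table replicated to the coordinator):

    -- fine as a single statement
    SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers ORDER BY 1;
    -- rejected inside an explicit transaction block
    BEGIN;
    SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers ORDER BY 1;
    -- ERROR: cannot join local tables and reference tables in a transaction block
    ROLLBACK;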
@@ -3,5 +3,6 @@
 # ----------
 test: multi_cluster_management
 test: multi_test_helpers multi_create_fdw
+test: multi_test_catalog_views
 test: multi_create_table multi_behavioral_analytics_create_table
 test: multi_load_data
@@ -1,3 +1,5 @@
 # The basic tests runs analyze which depends on shard numbers
+test: multi_test_helpers
+test: multi_test_catalog_views
 test: upgrade_basic_before
 test: upgrade_type_before upgrade_ref2ref_before upgrade_distributed_function_before
@@ -13,19 +13,6 @@ CREATE SCHEMA function_tests AUTHORIZATION functionuser;
 CREATE SCHEMA function_tests2 AUTHORIZATION functionuser;
 SET search_path TO function_tests;
 SET citus.shard_count TO 4;
--- set sync intervals to less than 15s so wait_until_metadata_sync never times out
-ALTER SYSTEM SET citus.metadata_sync_interval TO 3000;
-ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500;
-SELECT pg_reload_conf();
- pg_reload_conf
-----------------
- t
-(1 row)
-
-CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
-    RETURNS void
-    LANGUAGE C STRICT
-    AS 'citus';
 -- Create and distribute a simple function
 CREATE FUNCTION add(integer, integer) RETURNS integer
 AS 'select $1 + $2;'
@@ -624,7 +611,7 @@ SELECT create_distributed_function('add_with_param_names(int, int)', '$1', coloc
 ERROR: cannot colocate function "add_with_param_names" and table "replicated_table_func_test"
 DETAIL: Citus currently only supports colocating function with distributed tables that are created using streaming replication model.
 HINT: When distributing tables make sure that citus.replication_model = 'streaming'
-SELECT wait_until_metadata_sync();
+SELECT public.wait_until_metadata_sync();
  wait_until_metadata_sync
 --------------------------
 
@@ -752,7 +739,7 @@ SELECT create_distributed_function('add_with_param_names(int, int)', 'val1');
 ERROR: cannot distribute the function "add_with_param_names" since there is no table to colocate with
 HINT: Provide a distributed table via "colocate_with" option to create_distributed_function()
 -- sync metadata to workers for consistent results when clearing objects
-SELECT wait_until_metadata_sync();
+SELECT public.wait_until_metadata_sync();
  wait_until_metadata_sync
 --------------------------
 
@@ -5,18 +5,6 @@
 --
 SET citus.next_shard_id TO 1420000;
 SET citus.shard_replication_factor TO 1;
-ALTER SYSTEM SET citus.metadata_sync_interval TO 3000;
-ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500;
-SELECT pg_reload_conf();
- pg_reload_conf
-----------------
- t
-(1 row)
-
-CREATE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
-    RETURNS void
-    LANGUAGE C STRICT
-    AS 'citus';
 CREATE TABLE test (id integer, val integer);
 SELECT create_distributed_table('test', 'id');
  create_distributed_table
@@ -433,7 +421,7 @@ INSERT INTO full_access_user_schema.t1 VALUES (1),(2),(3);
 -- not allowed to create a table
 SELECT create_distributed_table('full_access_user_schema.t1', 'id');
 ERROR: permission denied for schema full_access_user_schema
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:57638
 RESET ROLE;
 SET ROLE usage_access;
 CREATE TYPE usage_access_type AS ENUM ('a', 'b');
@@ -684,7 +672,7 @@ ERROR: could not receive file "base/pgsql_job_cache/job_0042/task_000001/p_0000
 -- different user should not be able to fetch partition file
 SET ROLE usage_access;
 SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port);
-WARNING: could not open file "base/pgsql_job_cache/job_0042/task_000001/p_00001.37457": No such file or directory
+WARNING: could not open file "base/pgsql_job_cache/job_0042/task_000001/p_00001.44518": No such file or directory
 CONTEXT: while executing command on localhost:57637
 ERROR: could not receive file "base/pgsql_job_cache/job_0042/task_000001/p_00001" from localhost:57637
 -- only the user whom created the files should be able to fetch
@@ -723,7 +711,7 @@ RESET ROLE;
 -- test that the super user is unable to read the contents of the intermediate file,
 -- although it does create the table
 SELECT worker_merge_files_into_table(42, 1, ARRAY['a'], ARRAY['integer']);
-WARNING: Task file "task_000001.36145" does not have expected suffix ".10"
+WARNING: Task file "task_000001.43115" does not have expected suffix ".10"
  worker_merge_files_into_table
 -------------------------------
 
@@ -765,7 +753,7 @@ SELECT worker_merge_files_and_run_query(42, 1,
     'CREATE TABLE task_000001_merge(merge_column_0 int)',
     'CREATE TABLE task_000001 (a) AS SELECT sum(merge_column_0) FROM task_000001_merge'
 );
-WARNING: Task file "task_000001.36145" does not have expected suffix ".10"
+WARNING: Task file "task_000001.43115" does not have expected suffix ".10"
  worker_merge_files_and_run_query
 ----------------------------------
 
@@ -6,6 +6,16 @@ SET citus.next_shard_id TO 7000000;
 SET citus.next_placement_id TO 7000000;
 SET citus.replication_model TO streaming;
 SET client_min_messages TO WARNING;
+CREATE USER reprefuser WITH LOGIN;
+SELECT run_command_on_workers('CREATE USER reprefuser WITH LOGIN');
+      run_command_on_workers
+-----------------------------------
+ (localhost,57637,t,"CREATE ROLE")
+ (localhost,57638,t,"CREATE ROLE")
+(2 rows)
+
+SET citus.enable_alter_role_propagation TO ON;
+ALTER ROLE reprefuser WITH CREATEDB;
 SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
  ?column?
 ----------
@@ -33,6 +43,32 @@ SELECT create_reference_table('ref');
 
 (1 row)
 
+-- alter role from mx worker isn't propagated
+\c - - - :worker_1_port
+SET citus.enable_alter_role_propagation TO ON;
+ALTER ROLE reprefuser WITH CREATEROLE;
+select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser';
+ rolcreatedb | rolcreaterole
+-------------+---------------
+ t           | t
+(1 row)
+
+\c - - - :worker_2_port
+select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser';
+ rolcreatedb | rolcreaterole
+-------------+---------------
+ t           | f
+(1 row)
+
+\c - - - :master_port
+SET search_path TO mx_add_coordinator,public;
+SET client_min_messages TO WARNING;
+select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser';
+ rolcreatedb | rolcreaterole
+-------------+---------------
+ t           | f
+(1 row)
+
 SET citus.log_local_commands TO ON;
 SET client_min_messages TO DEBUG;
 -- if the placement policy is not round-robin, SELECTs on the reference
@@ -99,6 +135,11 @@ SET search_path TO mx_add_coordinator,public;
 INSERT INTO ref VALUES (1), (2), (3);
 UPDATE ref SET a = a + 1;
 DELETE FROM ref WHERE a > 3;
+-- Test we don't allow reference/local joins on mx workers
+CREATE TABLE local_table (a int);
+INSERT INTO local_table VALUES (2), (4);
+SELECT r.a FROM ref r JOIN local_table lt on r.a = lt.a;
+ERROR: relation local_table is not distributed
 \c - - - :master_port
 SET search_path TO mx_add_coordinator,public;
 SELECT * FROM ref ORDER BY a;
@@ -8,45 +8,6 @@ SELECT nextval('pg_catalog.pg_dist_shardid_seq') AS last_shard_id \gset
 SET citus.replication_model TO streaming;
 SET citus.shard_count TO 8;
 SET citus.shard_replication_factor TO 1;
--- set sync intervals to less than 15s so wait_until_metadata_sync never times out
-ALTER SYSTEM SET citus.metadata_sync_interval TO 3000;
-ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500;
-SELECT pg_reload_conf();
- pg_reload_conf
-----------------
- t
-(1 row)
-
-CREATE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
-    RETURNS void
-    LANGUAGE C STRICT
-    AS 'citus';
--- Verifies pg_dist_node and pg_dist_palcement in the given worker matches the ones in coordinator
-CREATE FUNCTION verify_metadata(hostname TEXT, port INTEGER, master_port INTEGER DEFAULT 57636)
-    RETURNS BOOLEAN
-    LANGUAGE sql
-    AS $$
-SELECT wait_until_metadata_sync();
-WITH dist_node_summary AS (
-    SELECT 'SELECT jsonb_agg(ROW(nodeid, groupid, nodename, nodeport, isactive) ORDER BY nodeid) FROM pg_dist_node' as query
-), dist_node_check AS (
-    SELECT count(distinct result) = 1 AS matches
-    FROM dist_node_summary CROSS JOIN LATERAL
-        master_run_on_worker(ARRAY[hostname, 'localhost'], ARRAY[port, master_port],
-                             ARRAY[dist_node_summary.query, dist_node_summary.query],
-                             false)
-), dist_placement_summary AS (
-    SELECT 'SELECT jsonb_agg(pg_dist_placement ORDER BY shardid) FROM pg_dist_placement)' AS query
-), dist_placement_check AS (
-    SELECT count(distinct result) = 1 AS matches
-    FROM dist_placement_summary CROSS JOIN LATERAL
-        master_run_on_worker(ARRAY[hostname, 'localhost'], ARRAY[port, master_port],
-                             ARRAY[dist_placement_summary.query, dist_placement_summary.query],
-                             false)
-)
-SELECT dist_node_check.matches AND dist_placement_check.matches
-FROM dist_node_check CROSS JOIN dist_placement_check
-$$;
 -- Simulates a readonly node by setting default_transaction_read_only.
 CREATE FUNCTION mark_node_readonly(hostname TEXT, port INTEGER, isreadonly BOOLEAN)
     RETURNS TEXT
@@ -0,0 +1,104 @@
+-- The following views are intended as alternatives to \d commands, whose
+-- output changed in PostgreSQL 10. In particular, they must be used any time
+-- a test wishes to print out the structure of a relation, which previously
+-- was safely accomplished by a \d invocation.
+SELECT run_command_on_master_and_workers(
+$desc_views$
+CREATE VIEW table_fkey_cols AS
+SELECT rc.constraint_name AS "name",
+       kcu.column_name AS "column_name",
+       uc_kcu.column_name AS "refd_column_name",
+       format('%I.%I', kcu.table_schema, kcu.table_name)::regclass::oid AS relid,
+       format('%I.%I', uc_kcu.table_schema, uc_kcu.table_name)::regclass::oid AS refd_relid,
+       rc.constraint_schema AS "schema"
+FROM information_schema.referential_constraints rc,
+     information_schema.key_column_usage kcu,
+     information_schema.key_column_usage uc_kcu
+WHERE rc.constraint_schema = kcu.constraint_schema AND
+      rc.constraint_name = kcu.constraint_name AND
+      rc.unique_constraint_schema = uc_kcu.constraint_schema AND
+      rc.unique_constraint_name = uc_kcu.constraint_name;
+
+CREATE VIEW table_fkeys AS
+SELECT name AS "Constraint",
+       format('FOREIGN KEY (%s) REFERENCES %s(%s)',
+              string_agg(DISTINCT quote_ident(column_name), ', '),
+              string_agg(DISTINCT refd_relid::regclass::text, ', '),
+              string_agg(DISTINCT quote_ident(refd_column_name), ', ')) AS "Definition",
+       "relid"
+FROM table_fkey_cols
+GROUP BY (name, relid);
+
+CREATE VIEW table_attrs AS
+SELECT c.column_name AS "name",
+       c.data_type AS "type",
+       CASE
+           WHEN character_maximum_length IS NOT NULL THEN
+               format('(%s)', character_maximum_length)
+           WHEN data_type = 'numeric' AND numeric_precision IS NOT NULL THEN
+               format('(%s,%s)', numeric_precision, numeric_scale)
+           ELSE ''
+       END AS "modifier",
+       c.column_default AS "default",
+       (NOT c.is_nullable::boolean) AS "notnull",
+       format('%I.%I', c.table_schema, c.table_name)::regclass::oid AS "relid"
+FROM information_schema.columns AS c
+ORDER BY ordinal_position;
+
+CREATE VIEW table_desc AS
+SELECT "name" AS "Column",
+       "type" || "modifier" AS "Type",
+       rtrim((
+           CASE "notnull"
+               WHEN true THEN 'not null '
+               ELSE ''
+           END
+       ) || (
+           CASE WHEN "default" IS NULL THEN ''
+               ELSE 'default ' || "default"
+           END
+       )) AS "Modifiers",
+       "relid"
+FROM table_attrs;
+
+CREATE VIEW table_checks AS
+SELECT cc.constraint_name AS "Constraint",
+       ('CHECK ' || regexp_replace(check_clause, '^\((.*)\)$', '\1')) AS "Definition",
+       format('%I.%I', ccu.table_schema, ccu.table_name)::regclass::oid AS relid
+FROM information_schema.check_constraints cc,
+     information_schema.constraint_column_usage ccu
+WHERE cc.constraint_schema = ccu.constraint_schema AND
+      cc.constraint_name = ccu.constraint_name
+ORDER BY cc.constraint_name ASC;
+
+CREATE VIEW index_attrs AS
+WITH indexoid AS (
+    SELECT c.oid,
+           n.nspname,
+           c.relname
+    FROM pg_catalog.pg_class c
+        LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+    WHERE pg_catalog.pg_table_is_visible(c.oid)
+    ORDER BY 2, 3
+)
+SELECT
+    indexoid.nspname AS "nspname",
+    indexoid.relname AS "relname",
+    a.attrelid AS "relid",
+    a.attname AS "Column",
+    pg_catalog.format_type(a.atttypid, a.atttypmod) AS "Type",
+    pg_catalog.pg_get_indexdef(a.attrelid, a.attnum, TRUE) AS "Definition"
+FROM pg_catalog.pg_attribute a
+    LEFT JOIN indexoid ON (a.attrelid = indexoid.oid)
+WHERE true
+    AND a.attnum > 0
+    AND NOT a.attisdropped
+ORDER BY a.attrelid, a.attnum;
+
+$desc_views$
+);
+ run_command_on_master_and_workers
+-----------------------------------
+
+(1 row)
+
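These views are the \d replacements that the schedules above now load via multi_test_catalog_views; tests query them by relid instead of running \d, e.g. (as in the mx_ddl change further down):

    SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;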
@@ -6,110 +6,6 @@ BEGIN
 EXECUTE p_sql;
 PERFORM run_command_on_workers(p_sql);
 END;$$;
--- The following views are intended as alternatives to \d commands, whose
--- output changed in PostgreSQL 10. In particular, they must be used any time
--- a test wishes to print out the structure of a relation, which previously
--- was safely accomplished by a \d invocation.
-SELECT run_command_on_master_and_workers(
-$desc_views$
-CREATE VIEW table_fkey_cols AS
-SELECT rc.constraint_name AS "name",
-       kcu.column_name AS "column_name",
-       uc_kcu.column_name AS "refd_column_name",
-       format('%I.%I', kcu.table_schema, kcu.table_name)::regclass::oid AS relid,
-       format('%I.%I', uc_kcu.table_schema, uc_kcu.table_name)::regclass::oid AS refd_relid,
-       rc.constraint_schema AS "schema"
-FROM information_schema.referential_constraints rc,
-     information_schema.key_column_usage kcu,
-     information_schema.key_column_usage uc_kcu
-WHERE rc.constraint_schema = kcu.constraint_schema AND
-      rc.constraint_name = kcu.constraint_name AND
-      rc.unique_constraint_schema = uc_kcu.constraint_schema AND
-      rc.unique_constraint_name = uc_kcu.constraint_name;
-
-CREATE VIEW table_fkeys AS
-SELECT name AS "Constraint",
-       format('FOREIGN KEY (%s) REFERENCES %s(%s)',
-              string_agg(DISTINCT quote_ident(column_name), ', '),
-              string_agg(DISTINCT refd_relid::regclass::text, ', '),
-              string_agg(DISTINCT quote_ident(refd_column_name), ', ')) AS "Definition",
-       "relid"
-FROM table_fkey_cols
-GROUP BY (name, relid);
-
-CREATE VIEW table_attrs AS
-SELECT c.column_name AS "name",
-       c.data_type AS "type",
-       CASE
-           WHEN character_maximum_length IS NOT NULL THEN
-               format('(%s)', character_maximum_length)
-           WHEN data_type = 'numeric' AND numeric_precision IS NOT NULL THEN
-               format('(%s,%s)', numeric_precision, numeric_scale)
-           ELSE ''
-       END AS "modifier",
-       c.column_default AS "default",
-       (NOT c.is_nullable::boolean) AS "notnull",
-       format('%I.%I', c.table_schema, c.table_name)::regclass::oid AS "relid"
-FROM information_schema.columns AS c
-ORDER BY ordinal_position;
-
-CREATE VIEW table_desc AS
-SELECT "name" AS "Column",
-       "type" || "modifier" AS "Type",
-       rtrim((
-           CASE "notnull"
-               WHEN true THEN 'not null '
-               ELSE ''
-           END
-       ) || (
-           CASE WHEN "default" IS NULL THEN ''
-               ELSE 'default ' || "default"
-           END
-       )) AS "Modifiers",
-       "relid"
-FROM table_attrs;
-
-CREATE VIEW table_checks AS
-SELECT cc.constraint_name AS "Constraint",
-       ('CHECK ' || regexp_replace(check_clause, '^\((.*)\)$', '\1')) AS "Definition",
-       format('%I.%I', ccu.table_schema, ccu.table_name)::regclass::oid AS relid
-FROM information_schema.check_constraints cc,
-     information_schema.constraint_column_usage ccu
-WHERE cc.constraint_schema = ccu.constraint_schema AND
-      cc.constraint_name = ccu.constraint_name
-ORDER BY cc.constraint_name ASC;
-
-CREATE VIEW index_attrs AS
-WITH indexoid AS (
-    SELECT c.oid,
-           n.nspname,
-           c.relname
-    FROM pg_catalog.pg_class c
-        LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
-    WHERE pg_catalog.pg_table_is_visible(c.oid)
-    ORDER BY 2, 3
-)
-SELECT
-    indexoid.nspname AS "nspname",
-    indexoid.relname AS "relname",
-    a.attrelid AS "relid",
-    a.attname AS "Column",
-    pg_catalog.format_type(a.atttypid, a.atttypmod) AS "Type",
-    pg_catalog.pg_get_indexdef(a.attrelid, a.attnum, TRUE) AS "Definition"
-FROM pg_catalog.pg_attribute a
-    LEFT JOIN indexoid ON (a.attrelid = indexoid.oid)
-WHERE true
-    AND a.attnum > 0
-    AND NOT a.attisdropped
-ORDER BY a.attrelid, a.attnum;
-
-$desc_views$
-);
- run_command_on_master_and_workers
------------------------------------
-
-(1 row)
-
 -- Create a function to make sure that queries returning the same result
 CREATE FUNCTION raise_failed_execution(query text) RETURNS void AS $$
 BEGIN
@@ -170,3 +66,42 @@ BEGIN
 RETURN true;
 END;
 $func$;
+CREATE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
+    RETURNS void
+    LANGUAGE C STRICT
+    AS 'citus';
+-- set sync intervals to less than 15s so wait_until_metadata_sync never times out
+ALTER SYSTEM SET citus.metadata_sync_interval TO 3000;
+ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500;
+SELECT pg_reload_conf();
+ pg_reload_conf
+----------------
+ t
+(1 row)
+
+-- Verifies pg_dist_node and pg_dist_palcement in the given worker matches the ones in coordinator
+CREATE FUNCTION verify_metadata(hostname TEXT, port INTEGER, master_port INTEGER DEFAULT 57636)
+    RETURNS BOOLEAN
+    LANGUAGE sql
+    AS $$
+SELECT wait_until_metadata_sync();
+WITH dist_node_summary AS (
+    SELECT 'SELECT jsonb_agg(ROW(nodeid, groupid, nodename, nodeport, isactive) ORDER BY nodeid) FROM pg_dist_node' as query
+), dist_node_check AS (
+    SELECT count(distinct result) = 1 AS matches
+    FROM dist_node_summary CROSS JOIN LATERAL
+        master_run_on_worker(ARRAY[hostname, 'localhost'], ARRAY[port, master_port],
+                             ARRAY[dist_node_summary.query, dist_node_summary.query],
+                             false)
+), dist_placement_summary AS (
+    SELECT 'SELECT jsonb_agg(pg_dist_placement ORDER BY shardid) FROM pg_dist_placement)' AS query
+), dist_placement_check AS (
+    SELECT count(distinct result) = 1 AS matches
+    FROM dist_placement_summary CROSS JOIN LATERAL
+        master_run_on_worker(ARRAY[hostname, 'localhost'], ARRAY[port, master_port],
+                             ARRAY[dist_placement_summary.query, dist_placement_summary.query],
+                             false)
+)
+SELECT dist_node_check.matches AND dist_placement_check.matches
+FROM dist_node_check CROSS JOIN dist_placement_check
+$$;
@@ -113,11 +113,70 @@ SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers ORDER BY 1
  20 | 20
 (1 row)
 
+-- test non equijoin
+SELECT lt.a, sq.a, sq.b
+FROM local_table lt
+JOIN squares sq ON sq.a > lt.a and sq.b > 90
+ORDER BY 1,2,3;
+ a | a  |  b
+---+----+-----
+ 2 | 10 | 100
+ 4 | 10 | 100
+ 7 | 10 | 100
+(3 rows)
+
 -- error if in transaction block
 BEGIN;
 SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers ORDER BY 1;
 ERROR: cannot join local tables and reference tables in a transaction block
 ROLLBACK;
+-- error if in a DO block
+DO $$
+BEGIN
+	PERFORM local_table.a, numbers.a FROM local_table NATURAL JOIN numbers;
+END;
+$$;
+ERROR: cannot join local tables and reference tables in a transaction block
+CONTEXT: SQL statement "SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers"
+PL/pgSQL function inline_code_block line 3 at PERFORM
+-- test plpgsql function
+CREATE FUNCTION test_reference_local_join_plpgsql_func()
+RETURNS void AS $$
+BEGIN
+	INSERT INTO local_table VALUES (21);
+	INSERT INTO numbers VALUES (4);
+	PERFORM local_table.a, numbers.a FROM local_table NATURAL JOIN numbers ORDER BY 1;
+	RAISE EXCEPTION '';
+	PERFORM local_table.a, numbers.a FROM local_table NATURAL JOIN numbers ORDER BY 1;
+END;
+$$ LANGUAGE plpgsql;
+SELECT test_reference_local_join_plpgsql_func();
+LOG: executing the command locally: INSERT INTO replicate_ref_to_coordinator.numbers_8000001 (a) VALUES (4)
+CONTEXT: SQL statement "INSERT INTO numbers VALUES (4)"
+PL/pgSQL function test_reference_local_join_plpgsql_func() line 4 at SQL statement
+ERROR: cannot join local tables and reference tables in a transaction block
+CONTEXT: SQL statement "SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers ORDER BY 1"
+PL/pgSQL function test_reference_local_join_plpgsql_func() line 5 at PERFORM
+SELECT sum(a) FROM local_table;
+ sum
+-----
+  33
+(1 row)
+
+SELECT sum(a) FROM numbers;
+LOG: executing the command locally: SELECT sum(a) AS sum FROM replicate_ref_to_coordinator.numbers_8000001 numbers
+ sum
+-----
+  41
+(1 row)
+
+-- error if in procedure's subtransaction
+CREATE PROCEDURE test_reference_local_join_proc() AS $$
+SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers ORDER BY 1;
+$$ LANGUAGE sql;
+CALL test_reference_local_join_proc();
+ERROR: cannot join local tables and reference tables in a transaction block
+CONTEXT: SQL function "test_reference_local_join_proc" statement 1
 -- error if in a transaction block even if reference table is not in search path
 CREATE SCHEMA s1;
 CREATE TABLE s1.ref(a int);
@@ -136,14 +195,17 @@ NOTICE: drop cascades to 2 other objects
 DETAIL: drop cascades to table s1.ref
 drop cascades to table s1.ref_8000002
 -- shouldn't plan locally if modifications happen in CTEs, ...
-WITH ins AS (INSERT INTO numbers VALUES (1) RETURNING *) SELECT * FROM numbers, local_table;
+WITH ins AS (INSERT INTO numbers VALUES (1) RETURNING *)
+SELECT * FROM numbers, local_table;
 ERROR: relation local_table is not distributed
-WITH t AS (SELECT *, random() x FROM numbers FOR UPDATE) SELECT * FROM numbers, local_table
-WHERE EXISTS (SELECT * FROM t WHERE t.x = numbers.a);
+WITH t AS (SELECT *, random() x FROM numbers FOR UPDATE)
+SELECT * FROM numbers, local_table
+WHERE EXISTS (SELECT * FROM t WHERE t.x = numbers.a);
 ERROR: relation local_table is not distributed
 -- but this should be fine
-WITH t AS (SELECT *, random() x FROM numbers) SELECT * FROM numbers, local_table
-WHERE EXISTS (SELECT * FROM t WHERE t.x = numbers.a);
+WITH t AS (SELECT *, random() x FROM numbers)
+SELECT * FROM numbers, local_table
+WHERE EXISTS (SELECT * FROM t WHERE t.x = numbers.a);
  a | a
 ---+---
 (0 rows)
@@ -156,21 +218,31 @@ SELECT create_distributed_table('dist', 'a');
 
 (1 row)
 
-WITH t AS (SELECT *, random() x FROM dist) SELECT * FROM numbers, local_table
-WHERE EXISTS (SELECT * FROM t WHERE t.x = numbers.a);
+INSERT INTO dist VALUES (20),(30);
+WITH t AS (SELECT *, random() x FROM dist)
+SELECT * FROM numbers, local_table
+WHERE EXISTS (SELECT * FROM t WHERE t.x = numbers.a);
 ERROR: relation local_table is not distributed
+-- test CTE being reference/local join for distributed query
+WITH t as (SELECT n.a, random() x FROM numbers n NATURAL JOIN local_table l)
+SELECT a FROM t NATURAL JOIN dist;
+ a
+----
+ 20
+(1 row)
+
 -- error if FOR UPDATE/FOR SHARE
 SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers FOR SHARE;
 ERROR: could not run distributed query with FOR UPDATE/SHARE commands
 HINT: Consider using an equality filter on the distributed table's partition column.
 SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers FOR UPDATE;
 ERROR: could not run distributed query with FOR UPDATE/SHARE commands
 HINT: Consider using an equality filter on the distributed table's partition column.
 -- clean-up
 SET client_min_messages TO ERROR;
 DROP SCHEMA replicate_ref_to_coordinator CASCADE;
 -- Make sure the shard was dropped
 SELECT 'numbers_8000001'::regclass::oid;
 ERROR: relation "numbers_8000001" does not exist
 LINE 1: SELECT 'numbers_8000001'::regclass::oid;
                ^
@@ -2,19 +2,6 @@ CREATE SCHEMA upgrade_distributed_function_before;
 SET search_path TO upgrade_distributed_function_before, public;
 SET citus.replication_model TO streaming;
 SET citus.shard_replication_factor TO 1;
--- set sync intervals to less than 15s so wait_until_metadata_sync never times out
-ALTER SYSTEM SET citus.metadata_sync_interval TO 3000;
-ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500;
-SELECT pg_reload_conf();
- pg_reload_conf
-----------------
- t
-(1 row)
-
-CREATE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
-    RETURNS void
-    LANGUAGE C STRICT
-    AS 'citus';
 CREATE TABLE t1 (a int PRIMARY KEY, b int);
 SELECT create_distributed_table('t1','a');
  create_distributed_table
@@ -4,3 +4,4 @@ test: failure_test_helpers
 # this should only be run by pg_regress_multi, you don't need it
 test: failure_setup
 test: multi_test_helpers
+test: multi_test_catalog_views
@@ -4,6 +4,7 @@ test: failure_test_helpers
 # this should only be run by pg_regress_multi, you don't need it
 test: failure_setup
 test: multi_test_helpers
+test: multi_test_catalog_views
 test: failure_ddl
 test: failure_truncate
 test: failure_create_index_concurrently
@@ -1,2 +1,3 @@
 test: multi_cluster_management
 test: multi_test_helpers
+test: multi_test_catalog_views
@@ -14,9 +14,10 @@
 # Tests around schema changes, these are run first, so there's no preexisting objects.
 # ---
 test: multi_extension
+test: multi_test_helpers
 test: multi_mx_node_metadata
 test: multi_cluster_management
-test: multi_test_helpers
+test: multi_test_catalog_views
 
 # the following test has to be run sequentially
 test: multi_mx_create_table
@@ -25,6 +25,7 @@ test: multi_cluster_management
 test: alter_role_propagation
 test: propagate_extension_commands
 test: multi_test_helpers
+test: multi_test_catalog_views
 test: multi_table_ddl
 test: multi_name_lengths
 test: multi_name_resolution
@@ -267,6 +268,7 @@ test: multi_replicate_reference_table
 test: multi_reference_table
 test: foreign_key_to_reference_table
 test: replicate_reference_tables_to_coordinator
+
 test: remove_coordinator
 
 # ----------
@@ -17,6 +17,7 @@ test: multi_extension
 test: multi_cluster_management
 test: multi_table_ddl
 test: multi_test_helpers
+test: multi_test_catalog_views
 
 # ----------
 # The following distributed tests depend on creating a partitioned table and
@@ -3,6 +3,7 @@
 # ----------
 test: multi_cluster_management
 test: multi_test_helpers
+test: multi_test_catalog_views
 
 # the following test has to be run sequentially
 test: multi_mx_create_table
@@ -9,16 +9,6 @@ CREATE SCHEMA function_tests2 AUTHORIZATION functionuser;
 SET search_path TO function_tests;
 SET citus.shard_count TO 4;
 
--- set sync intervals to less than 15s so wait_until_metadata_sync never times out
-ALTER SYSTEM SET citus.metadata_sync_interval TO 3000;
-ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500;
-SELECT pg_reload_conf();
-
-CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
-    RETURNS void
-    LANGUAGE C STRICT
-    AS 'citus';
-
 -- Create and distribute a simple function
 CREATE FUNCTION add(integer, integer) RETURNS integer
 AS 'select $1 + $2;'
@@ -361,7 +351,7 @@ SET citus.replication_model TO "statement";
 SELECT create_distributed_table('replicated_table_func_test', 'a');
 SELECT create_distributed_function('add_with_param_names(int, int)', '$1', colocate_with:='replicated_table_func_test');
 
-SELECT wait_until_metadata_sync();
+SELECT public.wait_until_metadata_sync();
 
 -- a function can be colocated with a different distribution argument type
 -- as long as there is a coercion path
@@ -429,7 +419,7 @@ SET citus.shard_count TO 55;
 SELECT create_distributed_function('add_with_param_names(int, int)', 'val1');
 
 -- sync metadata to workers for consistent results when clearing objects
-SELECT wait_until_metadata_sync();
+SELECT public.wait_until_metadata_sync();
 
 
 SET client_min_messages TO error; -- suppress cascading objects dropping
@@ -8,15 +8,6 @@ SET citus.next_shard_id TO 1420000;
 
 SET citus.shard_replication_factor TO 1;
 
-ALTER SYSTEM SET citus.metadata_sync_interval TO 3000;
-ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500;
-SELECT pg_reload_conf();
-CREATE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
-    RETURNS void
-    LANGUAGE C STRICT
-    AS 'citus';
-
-
 CREATE TABLE test (id integer, val integer);
 SELECT create_distributed_table('test', 'id');
 
@@ -7,6 +7,11 @@ SET citus.next_placement_id TO 7000000;
 SET citus.replication_model TO streaming;
 SET client_min_messages TO WARNING;
 
+CREATE USER reprefuser WITH LOGIN;
+SELECT run_command_on_workers('CREATE USER reprefuser WITH LOGIN');
+SET citus.enable_alter_role_propagation TO ON;
+ALTER ROLE reprefuser WITH CREATEDB;
+
 SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
 
 -- test that coordinator pg_dist_node entry is synced to the workers
@@ -18,6 +23,18 @@ SELECT verify_metadata('localhost', :worker_1_port),
 CREATE TABLE ref(a int);
 SELECT create_reference_table('ref');
 
+-- alter role from mx worker isn't propagated
+\c - - - :worker_1_port
+SET citus.enable_alter_role_propagation TO ON;
+ALTER ROLE reprefuser WITH CREATEROLE;
+select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser';
+\c - - - :worker_2_port
+select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser';
+\c - - - :master_port
+SET search_path TO mx_add_coordinator,public;
+SET client_min_messages TO WARNING;
+select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser';
+
 SET citus.log_local_commands TO ON;
 SET client_min_messages TO DEBUG;
 
@@ -45,6 +62,13 @@ INSERT INTO ref VALUES (1), (2), (3);
 UPDATE ref SET a = a + 1;
 DELETE FROM ref WHERE a > 3;
 
+-- Test we don't allow reference/local joins on mx workers
+CREATE TABLE local_table (a int);
+INSERT INTO local_table VALUES (2), (4);
+
+SELECT r.a FROM ref r JOIN local_table lt on r.a = lt.a;
+
+
 \c - - - :master_port
 SET search_path TO mx_add_coordinator,public;
 SELECT * FROM ref ORDER BY a;
@@ -1,5 +1,4 @@
 -- Tests related to distributed DDL commands on mx cluster
-
 SELECT * FROM mx_ddl_table ORDER BY key;
 
 -- CREATE INDEX
@@ -18,7 +17,6 @@ UPDATE mx_ddl_table SET version=0.1 WHERE version IS NULL;
 -- SET NOT NULL
 ALTER TABLE mx_ddl_table ALTER COLUMN version SET NOT NULL;
 
-
 -- See that the changes are applied on coordinator, worker tables and shards
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
 SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE
@@ -12,43 +12,6 @@ SET citus.replication_model TO streaming;
 SET citus.shard_count TO 8;
 SET citus.shard_replication_factor TO 1;
 
--- set sync intervals to less than 15s so wait_until_metadata_sync never times out
-ALTER SYSTEM SET citus.metadata_sync_interval TO 3000;
-ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500;
-SELECT pg_reload_conf();
-
-CREATE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
-    RETURNS void
-    LANGUAGE C STRICT
-    AS 'citus';
-
--- Verifies pg_dist_node and pg_dist_palcement in the given worker matches the ones in coordinator
-CREATE FUNCTION verify_metadata(hostname TEXT, port INTEGER, master_port INTEGER DEFAULT 57636)
-    RETURNS BOOLEAN
-    LANGUAGE sql
-    AS $$
-SELECT wait_until_metadata_sync();
-WITH dist_node_summary AS (
-    SELECT 'SELECT jsonb_agg(ROW(nodeid, groupid, nodename, nodeport, isactive) ORDER BY nodeid) FROM pg_dist_node' as query
-), dist_node_check AS (
-    SELECT count(distinct result) = 1 AS matches
-    FROM dist_node_summary CROSS JOIN LATERAL
-        master_run_on_worker(ARRAY[hostname, 'localhost'], ARRAY[port, master_port],
-                             ARRAY[dist_node_summary.query, dist_node_summary.query],
-                             false)
-), dist_placement_summary AS (
-    SELECT 'SELECT jsonb_agg(pg_dist_placement ORDER BY shardid) FROM pg_dist_placement)' AS query
-), dist_placement_check AS (
-    SELECT count(distinct result) = 1 AS matches
-    FROM dist_placement_summary CROSS JOIN LATERAL
-        master_run_on_worker(ARRAY[hostname, 'localhost'], ARRAY[port, master_port],
-                             ARRAY[dist_placement_summary.query, dist_placement_summary.query],
-                             false)
-)
-SELECT dist_node_check.matches AND dist_placement_check.matches
-FROM dist_node_check CROSS JOIN dist_placement_check
-$$;
-
 -- Simulates a readonly node by setting default_transaction_read_only.
 CREATE FUNCTION mark_node_readonly(hostname TEXT, port INTEGER, isreadonly BOOLEAN)
     RETURNS TEXT
@@ -0,0 +1,100 @@
+-- The following views are intended as alternatives to \d commands, whose
+-- output changed in PostgreSQL 10. In particular, they must be used any time
+-- a test wishes to print out the structure of a relation, which previously
+-- was safely accomplished by a \d invocation.
+SELECT run_command_on_master_and_workers(
+$desc_views$
+CREATE VIEW table_fkey_cols AS
+SELECT rc.constraint_name AS "name",
+       kcu.column_name AS "column_name",
+       uc_kcu.column_name AS "refd_column_name",
+       format('%I.%I', kcu.table_schema, kcu.table_name)::regclass::oid AS relid,
+       format('%I.%I', uc_kcu.table_schema, uc_kcu.table_name)::regclass::oid AS refd_relid,
+       rc.constraint_schema AS "schema"
+FROM information_schema.referential_constraints rc,
+     information_schema.key_column_usage kcu,
+     information_schema.key_column_usage uc_kcu
+WHERE rc.constraint_schema = kcu.constraint_schema AND
+      rc.constraint_name = kcu.constraint_name AND
+      rc.unique_constraint_schema = uc_kcu.constraint_schema AND
+      rc.unique_constraint_name = uc_kcu.constraint_name;
+
+CREATE VIEW table_fkeys AS
+SELECT name AS "Constraint",
+       format('FOREIGN KEY (%s) REFERENCES %s(%s)',
+              string_agg(DISTINCT quote_ident(column_name), ', '),
+              string_agg(DISTINCT refd_relid::regclass::text, ', '),
+              string_agg(DISTINCT quote_ident(refd_column_name), ', ')) AS "Definition",
+       "relid"
+FROM table_fkey_cols
+GROUP BY (name, relid);
+
+CREATE VIEW table_attrs AS
+SELECT c.column_name AS "name",
+       c.data_type AS "type",
+       CASE
+           WHEN character_maximum_length IS NOT NULL THEN
+               format('(%s)', character_maximum_length)
+           WHEN data_type = 'numeric' AND numeric_precision IS NOT NULL THEN
+               format('(%s,%s)', numeric_precision, numeric_scale)
+           ELSE ''
+       END AS "modifier",
+       c.column_default AS "default",
+       (NOT c.is_nullable::boolean) AS "notnull",
+       format('%I.%I', c.table_schema, c.table_name)::regclass::oid AS "relid"
+FROM information_schema.columns AS c
+ORDER BY ordinal_position;
+
+CREATE VIEW table_desc AS
+SELECT "name" AS "Column",
+       "type" || "modifier" AS "Type",
+       rtrim((
+           CASE "notnull"
+               WHEN true THEN 'not null '
+               ELSE ''
+           END
+       ) || (
+           CASE WHEN "default" IS NULL THEN ''
+               ELSE 'default ' || "default"
+           END
+       )) AS "Modifiers",
+       "relid"
+FROM table_attrs;
+
+CREATE VIEW table_checks AS
+SELECT cc.constraint_name AS "Constraint",
+       ('CHECK ' || regexp_replace(check_clause, '^\((.*)\)$', '\1')) AS "Definition",
+       format('%I.%I', ccu.table_schema, ccu.table_name)::regclass::oid AS relid
+FROM information_schema.check_constraints cc,
+     information_schema.constraint_column_usage ccu
+WHERE cc.constraint_schema = ccu.constraint_schema AND
+      cc.constraint_name = ccu.constraint_name
+ORDER BY cc.constraint_name ASC;
+
+CREATE VIEW index_attrs AS
+WITH indexoid AS (
+    SELECT c.oid,
+           n.nspname,
+           c.relname
+    FROM pg_catalog.pg_class c
+        LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+    WHERE pg_catalog.pg_table_is_visible(c.oid)
+    ORDER BY 2, 3
+)
+SELECT
+    indexoid.nspname AS "nspname",
+    indexoid.relname AS "relname",
+    a.attrelid AS "relid",
+    a.attname AS "Column",
+    pg_catalog.format_type(a.atttypid, a.atttypmod) AS "Type",
+    pg_catalog.pg_get_indexdef(a.attrelid, a.attnum, TRUE) AS "Definition"
+FROM pg_catalog.pg_attribute a
+    LEFT JOIN indexoid ON (a.attrelid = indexoid.oid)
+WHERE true
+    AND a.attnum > 0
+    AND NOT a.attisdropped
+ORDER BY a.attrelid, a.attnum;
+
+$desc_views$
+);
+
@@ -8,106 +8,6 @@ BEGIN
 PERFORM run_command_on_workers(p_sql);
 END;$$;
 
--- The following views are intended as alternatives to \d commands, whose
--- output changed in PostgreSQL 10. In particular, they must be used any time
--- a test wishes to print out the structure of a relation, which previously
--- was safely accomplished by a \d invocation.
-SELECT run_command_on_master_and_workers(
-$desc_views$
-CREATE VIEW table_fkey_cols AS
-SELECT rc.constraint_name AS "name",
-       kcu.column_name AS "column_name",
-       uc_kcu.column_name AS "refd_column_name",
-       format('%I.%I', kcu.table_schema, kcu.table_name)::regclass::oid AS relid,
-       format('%I.%I', uc_kcu.table_schema, uc_kcu.table_name)::regclass::oid AS refd_relid,
-       rc.constraint_schema AS "schema"
-FROM information_schema.referential_constraints rc,
-     information_schema.key_column_usage kcu,
-     information_schema.key_column_usage uc_kcu
-WHERE rc.constraint_schema = kcu.constraint_schema AND
-      rc.constraint_name = kcu.constraint_name AND
-      rc.unique_constraint_schema = uc_kcu.constraint_schema AND
-      rc.unique_constraint_name = uc_kcu.constraint_name;
-
-CREATE VIEW table_fkeys AS
-SELECT name AS "Constraint",
-       format('FOREIGN KEY (%s) REFERENCES %s(%s)',
-              string_agg(DISTINCT quote_ident(column_name), ', '),
-              string_agg(DISTINCT refd_relid::regclass::text, ', '),
-              string_agg(DISTINCT quote_ident(refd_column_name), ', ')) AS "Definition",
-       "relid"
-FROM table_fkey_cols
-GROUP BY (name, relid);
-
-CREATE VIEW table_attrs AS
-SELECT c.column_name AS "name",
-       c.data_type AS "type",
-       CASE
-           WHEN character_maximum_length IS NOT NULL THEN
-               format('(%s)', character_maximum_length)
-           WHEN data_type = 'numeric' AND numeric_precision IS NOT NULL THEN
-               format('(%s,%s)', numeric_precision, numeric_scale)
-           ELSE ''
-       END AS "modifier",
-       c.column_default AS "default",
-       (NOT c.is_nullable::boolean) AS "notnull",
-       format('%I.%I', c.table_schema, c.table_name)::regclass::oid AS "relid"
-FROM information_schema.columns AS c
-ORDER BY ordinal_position;
-
-CREATE VIEW table_desc AS
-SELECT "name" AS "Column",
-       "type" || "modifier" AS "Type",
-       rtrim((
-           CASE "notnull"
-               WHEN true THEN 'not null '
-               ELSE ''
-           END
-       ) || (
-           CASE WHEN "default" IS NULL THEN ''
-               ELSE 'default ' || "default"
-           END
-       )) AS "Modifiers",
-       "relid"
-FROM table_attrs;
-
-CREATE VIEW table_checks AS
-SELECT cc.constraint_name AS "Constraint",
-       ('CHECK ' || regexp_replace(check_clause, '^\((.*)\)$', '\1')) AS "Definition",
-       format('%I.%I', ccu.table_schema, ccu.table_name)::regclass::oid AS relid
-FROM information_schema.check_constraints cc,
-     information_schema.constraint_column_usage ccu
-WHERE cc.constraint_schema = ccu.constraint_schema AND
-      cc.constraint_name = ccu.constraint_name
-ORDER BY cc.constraint_name ASC;
-
-CREATE VIEW index_attrs AS
-WITH indexoid AS (
-    SELECT c.oid,
-           n.nspname,
-           c.relname
-    FROM pg_catalog.pg_class c
-        LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
-    WHERE pg_catalog.pg_table_is_visible(c.oid)
-    ORDER BY 2, 3
-)
-SELECT
-    indexoid.nspname AS "nspname",
-    indexoid.relname AS "relname",
-    a.attrelid AS "relid",
-    a.attname AS "Column",
-    pg_catalog.format_type(a.atttypid, a.atttypmod) AS "Type",
-    pg_catalog.pg_get_indexdef(a.attrelid, a.attnum, TRUE) AS "Definition"
-FROM pg_catalog.pg_attribute a
-    LEFT JOIN indexoid ON (a.attrelid = indexoid.oid)
-WHERE true
-    AND a.attnum > 0
-    AND NOT a.attisdropped
-ORDER BY a.attrelid, a.attnum;
-
-$desc_views$
-);
-
 -- Create a function to make sure that queries returning the same result
 CREATE FUNCTION raise_failed_execution(query text) RETURNS void AS $$
 BEGIN
@ -172,3 +72,40 @@ BEGIN
RETURN true;
END;
$func$;
+
+CREATE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
+    RETURNS void
+    LANGUAGE C STRICT
+    AS 'citus';
+
+-- set sync intervals to less than 15s so wait_until_metadata_sync never times out
+ALTER SYSTEM SET citus.metadata_sync_interval TO 3000;
+ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500;
+SELECT pg_reload_conf();
+
+-- Verifies that pg_dist_node and pg_dist_placement in the given worker match the ones in the coordinator
+CREATE FUNCTION verify_metadata(hostname TEXT, port INTEGER, master_port INTEGER DEFAULT 57636)
+    RETURNS BOOLEAN
+    LANGUAGE sql
+    AS $$
+SELECT wait_until_metadata_sync();
+WITH dist_node_summary AS (
+    SELECT 'SELECT jsonb_agg(ROW(nodeid, groupid, nodename, nodeport, isactive) ORDER BY nodeid) FROM pg_dist_node' AS query
+), dist_node_check AS (
+    SELECT count(distinct result) = 1 AS matches
+    FROM dist_node_summary CROSS JOIN LATERAL
+        master_run_on_worker(ARRAY[hostname, 'localhost'], ARRAY[port, master_port],
+                             ARRAY[dist_node_summary.query, dist_node_summary.query],
+                             false)
+), dist_placement_summary AS (
+    SELECT 'SELECT jsonb_agg(pg_dist_placement ORDER BY shardid) FROM pg_dist_placement' AS query
+), dist_placement_check AS (
+    SELECT count(distinct result) = 1 AS matches
+    FROM dist_placement_summary CROSS JOIN LATERAL
+        master_run_on_worker(ARRAY[hostname, 'localhost'], ARRAY[port, master_port],
+                             ARRAY[dist_placement_summary.query, dist_placement_summary.query],
+                             false)
+)
+SELECT dist_node_check.matches AND dist_placement_check.matches
+FROM dist_node_check CROSS JOIN dist_placement_check
+$$;
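
A usage sketch, not part of this diff: a test would typically call wait_until_metadata_sync() so the background sync completes within the shortened intervals above, and then compare catalogs against a worker. The worker port 57637 below is only a hypothetical placeholder; master_port falls back to the 57636 default from the definition above.

    -- hypothetical example: wait for sync, then compare pg_dist_node/pg_dist_placement with a worker
    SELECT wait_until_metadata_sync(30000);
    SELECT verify_metadata('localhost', 57637);
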
@ -48,11 +48,45 @@ INSERT INTO local_table VALUES (2), (4), (7), (20);
EXPLAIN SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers;
SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers ORDER BY 1;

+-- test non equijoin
+SELECT lt.a, sq.a, sq.b
+FROM local_table lt
+JOIN squares sq ON sq.a > lt.a and sq.b > 90
+ORDER BY 1,2,3;
+
-- error if in transaction block
BEGIN;
SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers ORDER BY 1;
ROLLBACK;

+-- error if in a DO block
+DO $$
+BEGIN
+    PERFORM local_table.a, numbers.a FROM local_table NATURAL JOIN numbers;
+END;
+$$;
+
+-- test plpgsql function
+CREATE FUNCTION test_reference_local_join_plpgsql_func()
+RETURNS void AS $$
+BEGIN
+    INSERT INTO local_table VALUES (21);
+    INSERT INTO numbers VALUES (4);
+    PERFORM local_table.a, numbers.a FROM local_table NATURAL JOIN numbers ORDER BY 1;
+    RAISE EXCEPTION '';
+    PERFORM local_table.a, numbers.a FROM local_table NATURAL JOIN numbers ORDER BY 1;
+END;
+$$ LANGUAGE plpgsql;
+SELECT test_reference_local_join_plpgsql_func();
+SELECT sum(a) FROM local_table;
+SELECT sum(a) FROM numbers;
+
+-- error if in procedure's subtransaction
+CREATE PROCEDURE test_reference_local_join_proc() AS $$
+SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers ORDER BY 1;
+$$ LANGUAGE sql;
+CALL test_reference_local_join_proc();
+
-- error if in a transaction block even if reference table is not in search path
CREATE SCHEMA s1;
CREATE TABLE s1.ref(a int);
@ -65,30 +99,41 @@ ROLLBACK;
DROP SCHEMA s1 CASCADE;

-- shouldn't plan locally if modifications happen in CTEs, ...
-WITH ins AS (INSERT INTO numbers VALUES (1) RETURNING *) SELECT * FROM numbers, local_table;
-WITH t AS (SELECT *, random() x FROM numbers FOR UPDATE) SELECT * FROM numbers, local_table
-WHERE EXISTS (SELECT * FROM t WHERE t.x = numbers.a);
+WITH ins AS (INSERT INTO numbers VALUES (1) RETURNING *)
+SELECT * FROM numbers, local_table;
+
+WITH t AS (SELECT *, random() x FROM numbers FOR UPDATE)
+SELECT * FROM numbers, local_table
+WHERE EXISTS (SELECT * FROM t WHERE t.x = numbers.a);

-- but this should be fine
-WITH t AS (SELECT *, random() x FROM numbers) SELECT * FROM numbers, local_table
-WHERE EXISTS (SELECT * FROM t WHERE t.x = numbers.a);
+WITH t AS (SELECT *, random() x FROM numbers)
+SELECT * FROM numbers, local_table
+WHERE EXISTS (SELECT * FROM t WHERE t.x = numbers.a);

-- shouldn't plan locally even if distributed table is in CTE or subquery
CREATE TABLE dist(a int);
SELECT create_distributed_table('dist', 'a');
-WITH t AS (SELECT *, random() x FROM dist) SELECT * FROM numbers, local_table
-WHERE EXISTS (SELECT * FROM t WHERE t.x = numbers.a);
+INSERT INTO dist VALUES (20),(30);
+
+WITH t AS (SELECT *, random() x FROM dist)
+SELECT * FROM numbers, local_table
+WHERE EXISTS (SELECT * FROM t WHERE t.x = numbers.a);
+
+-- test CTE being reference/local join for distributed query
+WITH t as (SELECT n.a, random() x FROM numbers n NATURAL JOIN local_table l)
+SELECT a FROM t NATURAL JOIN dist;

-- error if FOR UPDATE/FOR SHARE
SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers FOR SHARE;
SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers FOR UPDATE;

-- clean-up
SET client_min_messages TO ERROR;
DROP SCHEMA replicate_ref_to_coordinator CASCADE;

-- Make sure the shard was dropped
SELECT 'numbers_8000001'::regclass::oid;

SET search_path TO DEFAULT;
RESET client_min_messages;
@ -3,17 +3,6 @@ SET search_path TO upgrade_distributed_function_before, public;
SET citus.replication_model TO streaming;
SET citus.shard_replication_factor TO 1;
-
--- set sync intervals to less than 15s so wait_until_metadata_sync never times out
-ALTER SYSTEM SET citus.metadata_sync_interval TO 3000;
-ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500;
-SELECT pg_reload_conf();
-
-CREATE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
-    RETURNS void
-    LANGUAGE C STRICT
-    AS 'citus';
-

CREATE TABLE t1 (a int PRIMARY KEY, b int);
SELECT create_distributed_table('t1','a');
INSERT INTO t1 VALUES (11), (12);