Make sure that all dependencies of citus tables can be distributed

pull/5728/head
Burak Velioglu 2022-03-03 19:13:15 +03:00
parent 90974fdc8f
commit cb6d67a9a9
No known key found for this signature in database
GPG Key ID: F6827E620F6549C6
28 changed files with 368 additions and 127 deletions

View File

@ -31,6 +31,7 @@
#include "distributed/commands/sequence.h"
#include "distributed/commands/utility_hook.h"
#include "distributed/metadata/distobject.h"
#include "distributed/metadata/dependency.h"
#include "distributed/foreign_key_relationship.h"
#include "distributed/listutils.h"
#include "distributed/local_executor.h"
@ -317,6 +318,7 @@ CreateCitusLocalTable(Oid relationId, bool cascadeViaForeignKeys, bool autoConve
* Ensure dependencies exist as we will create shell table on the other nodes
* in the MX case.
*/
EnsureRelationDependenciesCanBeDistributed(&tableAddress);
EnsureDependenciesExistOnAllNodes(&tableAddress);
/*

View File

@ -443,6 +443,8 @@ CreateDistributedTable(Oid relationId, char *distributionColumnName,
*/
ObjectAddress tableAddress = { 0 };
ObjectAddressSet(tableAddress, RelationRelationId, relationId);
EnsureRelationDependenciesCanBeDistributed(&tableAddress);
EnsureDependenciesExistOnAllNodes(&tableAddress);
char replicationModel = DecideReplicationModel(distributionMethod,

View File

@ -1347,51 +1347,6 @@ PostprocessCreateFunctionStmt(Node *node, const char *queryString)
}
/*
 * GetUndistributableDependency checks whether object has any non-distributable
 * dependency. If any one found, it will be returned.
 *
 * Returns NULL when every dependency is either already distributed or is one
 * Citus knows how to auto-distribute (sequences and composite types).
 */
ObjectAddress *
GetUndistributableDependency(ObjectAddress *objectAddress)
{
List *dependencies = GetAllDependenciesForObject(objectAddress);
ObjectAddress *dependency = NULL;
foreach_ptr(dependency, dependencies)
{
/* a dependency that is already distributed can never block us */
if (IsObjectDistributed(dependency))
{
continue;
}
if (!SupportedDependencyByCitus(dependency))
{
/*
 * Since roles should be handled manually with Citus community, skip them.
 * Any other unsupported object is reported back to the caller.
 */
if (getObjectClass(dependency) != OCLASS_ROLE)
{
return dependency;
}
}
if (getObjectClass(dependency) == OCLASS_CLASS)
{
/*
 * Citus can only distribute dependent non-distributed sequence
 * and composite types; any other relkind (e.g. a plain table)
 * cannot be auto-distributed as a dependency.
 */
char relKind = get_rel_relkind(dependency->objectId);
if (relKind != RELKIND_SEQUENCE && relKind != RELKIND_COMPOSITE_TYPE)
{
return dependency;
}
}
}
return NULL;
}
/*
* CreateFunctionStmtObjectAddress returns the ObjectAddress for the subject of the
* CREATE [OR REPLACE] FUNCTION statement. If missing_ok is false it will error with the

View File

@ -1955,6 +1955,7 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement)
/* changing a relation could introduce new dependencies */
ObjectAddress tableAddress = { 0 };
ObjectAddressSet(tableAddress, RelationRelationId, relationId);
EnsureRelationDependenciesCanBeDistributed(&tableAddress);
EnsureDependenciesExistOnAllNodes(&tableAddress);
}

View File

@ -741,6 +741,142 @@ SupportedDependencyByCitus(const ObjectAddress *address)
}
/*
 * EnsureRelationDependenciesCanBeDistributed ensures all dependencies of the relation
 * can be distributed. When an undistributable dependency is found an ERROR is
 * raised that names the offending object; otherwise the function returns silently.
 */
void
EnsureRelationDependenciesCanBeDistributed(ObjectAddress *relationAddress)
{
	ObjectAddress *problemDependency =
		GetUndistributableDependency(relationAddress);

	if (problemDependency == NULL)
	{
		/* every dependency is distributable, nothing to report */
		return;
	}

	char *relationName = get_rel_name(relationAddress->objectId);

	/*
	 * Some relations are supported by Citus in general but still cannot be
	 * distributed as a dependency, so they can be named via get_rel_name.
	 *
	 * For now relations are the only such object type, but we keep the
	 * explicit OCLASS_CLASS check so this stays correct even if
	 * GetUndistributableDependency's logic changes.
	 */
	if (SupportedDependencyByCitus(problemDependency) &&
		getObjectClass(problemDependency) == OCLASS_CLASS)
	{
		char *dependencyRelationName =
			get_rel_name(problemDependency->objectId);

		ereport(ERROR, (errmsg("Relation \"%s\" has dependency to a table"
							   " \"%s\" that is not in Citus' metadata",
							   relationName, dependencyRelationName),
						errhint("Distribute dependent relation first.")));
	}

	/* fall back to a generic description of the unsupported object */
	char *dependencyDescription = NULL;
#if PG_VERSION_NUM >= PG_VERSION_14
	dependencyDescription = getObjectDescription(problemDependency, false);
#else
	dependencyDescription = getObjectDescription(problemDependency);
#endif
	ereport(ERROR, (errmsg("Relation \"%s\" has dependency on unsupported "
						   "object \"%s\"", relationName,
						   dependencyDescription)));
}
/*
 * GetUndistributableDependency checks whether object has any non-distributable
 * dependency. If any one found, it will be returned.
 */
ObjectAddress *
GetUndistributableDependency(ObjectAddress *objectAddress)
{
	List *dependencyList = GetAllDependenciesForObject(objectAddress);
	ObjectAddress *candidate = NULL;

	/*
	 * Users can disable metadata sync at their own risk. When it is disabled
	 * Citus does not propagate dependencies at all, so nothing counts as an
	 * undistributable dependency.
	 */
	if (!EnableMetadataSync)
	{
		return NULL;
	}

	foreach_ptr(candidate, dependencyList)
	{
		/*
		 * Objects with an id below FirstNormalObjectId were created by
		 * initdb. Citus needs to treat such objects as distributed, so the
		 * dependency resolution logic cannot filter them; instead of erroring
		 * out on them here, just skip them.
		 */
		if (candidate->objectId < FirstNormalObjectId)
		{
			continue;
		}

		/* an already-distributed object can be ignored */
		if (IsObjectDistributed(candidate))
		{
			continue;
		}

		ObjectClass candidateClass = getObjectClass(candidate);

		if (!SupportedDependencyByCitus(candidate))
		{
			/*
			 * Roles should be handled manually with Citus community, so they
			 * are skipped; any other unsupported object is reported.
			 */
			if (candidateClass == OCLASS_ROLE)
			{
				continue;
			}
			return candidate;
		}

		if (candidateClass == OCLASS_CLASS)
		{
			switch (get_rel_relkind(candidate->objectId))
			{
				case RELKIND_SEQUENCE:
				case RELKIND_COMPOSITE_TYPE:
				{
					/* citus knows how to auto-distribute these dependencies */
					break;
				}

				case RELKIND_INDEX:
				case RELKIND_PARTITIONED_INDEX:
				{
					/*
					 * Indexes are only qualified for distributed objects for
					 * dependency tracking purposes, so we can ignore those.
					 */
					break;
				}

				default:
				{
					/*
					 * Citus doesn't know how to auto-distribute the rest of
					 * the RELKINDs via dependency resolution.
					 */
					return candidate;
				}
			}
		}
	}

	return NULL;
}
/*
* IsTableOwnedByExtension returns whether the table with the given relation ID is
* owned by an extension.

View File

@ -21,6 +21,8 @@ extern List * GetUniqueDependenciesList(List *objectAddressesList);
extern List * GetDependenciesForObject(const ObjectAddress *target);
extern List * GetAllSupportedDependenciesForObject(const ObjectAddress *target);
extern List * GetAllDependenciesForObject(const ObjectAddress *target);
extern void EnsureRelationDependenciesCanBeDistributed(ObjectAddress *relationAddress);
extern ObjectAddress * GetUndistributableDependency(ObjectAddress *target);
extern List * OrderObjectAddressListInDependencyOrder(List *objectAddressList);
extern bool SupportedDependencyByCitus(const ObjectAddress *address);
extern List * GetPgDependTuplesForDependingObjects(Oid targetObjectClassId,

View File

@ -474,11 +474,8 @@ SELECT c.relname, a.amname FROM pg_class c, pg_am a where c.relname SIMILAR TO '
table_type_ref | heap
(4 rows)
SELECT alter_table_set_access_method('table_type_dist', 'fake_am');
SELECT alter_table_set_access_method('table_type_dist', 'columnar');
NOTICE: creating a new table for alter_table_set_access_method.table_type_dist
WARNING: fake_scan_getnextslot
CONTEXT: SQL statement "SELECT TRUE FROM alter_table_set_access_method.table_type_dist_1533505599 LIMIT 1"
WARNING: fake_scan_getnextslot
NOTICE: moving the data of alter_table_set_access_method.table_type_dist
NOTICE: dropping the old alter_table_set_access_method.table_type_dist
NOTICE: renaming the new table to alter_table_set_access_method.table_type_dist
@ -487,11 +484,8 @@ NOTICE: renaming the new table to alter_table_set_access_method.table_type_dist
(1 row)
SELECT alter_table_set_access_method('table_type_ref', 'fake_am');
SELECT alter_table_set_access_method('table_type_ref', 'columnar');
NOTICE: creating a new table for alter_table_set_access_method.table_type_ref
WARNING: fake_scan_getnextslot
CONTEXT: SQL statement "SELECT TRUE FROM alter_table_set_access_method.table_type_ref_1037855087 LIMIT 1"
WARNING: fake_scan_getnextslot
NOTICE: moving the data of alter_table_set_access_method.table_type_ref
NOTICE: dropping the old alter_table_set_access_method.table_type_ref
NOTICE: renaming the new table to alter_table_set_access_method.table_type_ref
@ -500,7 +494,7 @@ NOTICE: renaming the new table to alter_table_set_access_method.table_type_ref
(1 row)
SELECT alter_table_set_access_method('table_type_pg_local', 'fake_am');
SELECT alter_table_set_access_method('table_type_pg_local', 'columnar');
NOTICE: creating a new table for alter_table_set_access_method.table_type_pg_local
NOTICE: moving the data of alter_table_set_access_method.table_type_pg_local
NOTICE: dropping the old alter_table_set_access_method.table_type_pg_local
@ -510,7 +504,7 @@ NOTICE: renaming the new table to alter_table_set_access_method.table_type_pg_l
(1 row)
SELECT alter_table_set_access_method('table_type_citus_local', 'fake_am');
SELECT alter_table_set_access_method('table_type_citus_local', 'columnar');
NOTICE: creating a new table for alter_table_set_access_method.table_type_citus_local
NOTICE: moving the data of alter_table_set_access_method.table_type_citus_local
NOTICE: dropping the old alter_table_set_access_method.table_type_citus_local
@ -523,17 +517,17 @@ NOTICE: renaming the new table to alter_table_set_access_method.table_type_citu
SELECT table_name, citus_table_type, distribution_column, shard_count, access_method FROM public.citus_tables WHERE table_name::text LIKE 'table\_type%' ORDER BY 1;
table_name | citus_table_type | distribution_column | shard_count | access_method
---------------------------------------------------------------------
table_type_dist | distributed | a | 4 | fake_am
table_type_ref | reference | <none> | 1 | fake_am
table_type_dist | distributed | a | 4 | columnar
table_type_ref | reference | <none> | 1 | columnar
(2 rows)
SELECT c.relname, a.amname FROM pg_class c, pg_am a where c.relname SIMILAR TO 'table_type\D*' AND c.relnamespace = 'alter_table_set_access_method'::regnamespace AND c.relam = a.oid;
relname | amname
---------------------------------------------------------------------
table_type_citus_local | fake_am
table_type_dist | fake_am
table_type_pg_local | fake_am
table_type_ref | fake_am
table_type_citus_local | columnar
table_type_dist | columnar
table_type_pg_local | columnar
table_type_ref | columnar
(4 rows)
-- test when the parent of a partition has foreign key to a reference table

View File

@ -224,12 +224,16 @@ CREATE FOREIGN TABLE foreign_table (
) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true');
-- observe that we do not create fdw server for shell table, both shard relation
-- & shell relation points to the same server object
-- Disable metadata sync since citus doesn't support distributing
-- foreign data wrappers for now.
SET citus.enable_metadata_sync TO OFF;
SELECT citus_add_local_table_to_metadata('foreign_table');
citus_add_local_table_to_metadata
---------------------------------------------------------------------
(1 row)
RESET citus.enable_metadata_sync;
DROP FOREIGN TABLE foreign_table;
NOTICE: executing the command locally: DROP FOREIGN TABLE IF EXISTS citus_local_tables_test_schema.foreign_table_xxxxx CASCADE
-- drop them for next tests

View File

@ -586,6 +586,8 @@ SELECT count(*) FROM coordinator_evaluation_table_2 WHERE key = 101;
CREATE TYPE comptype_int as (int_a int);
CREATE DOMAIN domain_comptype_int AS comptype_int CHECK ((VALUE).int_a > 0);
-- citus does not propagate domain types
-- TODO: Once domains are supported, remove enable_metadata_sync off/on change
-- on dependent table distribution below.
SELECT run_command_on_workers(
$$
CREATE DOMAIN coordinator_evaluation.domain_comptype_int AS coordinator_evaluation.comptype_int CHECK ((VALUE).int_a > 0)
@ -597,12 +599,16 @@ $$);
(2 rows)
CREATE TABLE reference_table(column_a coordinator_evaluation.domain_comptype_int);
-- Disable metadata sync since citus doesn't support distributing
-- domains for now.
SET citus.enable_metadata_sync TO OFF;
SELECT create_reference_table('reference_table');
create_reference_table
---------------------------------------------------------------------
(1 row)
RESET citus.enable_metadata_sync;
INSERT INTO reference_table (column_a) VALUES ('(1)');
INSERT INTO reference_table (column_a) VALUES ('(2)'), ('(3)');
INSERT INTO reference_table VALUES ('(4)'), ('(5)');

View File

@ -415,6 +415,8 @@ HINT: Use the column name to insert or update the composite type as a single va
CREATE TYPE two_ints as (if1 int, if2 int);
CREATE DOMAIN domain AS two_ints CHECK ((VALUE).if1 > 0);
-- citus does not propagate domain objects
-- TODO: Once domains are supported, remove enable_metadata_sync off/on change
-- on dependent table distribution below.
SELECT run_command_on_workers(
$$
CREATE DOMAIN type_tests.domain AS type_tests.two_ints CHECK ((VALUE).if1 > 0);
@ -426,12 +428,16 @@ $$);
(2 rows)
CREATE TABLE domain_indirection_test (f1 int, f3 domain, domain_array domain[]);
-- Disable metadata sync since citus doesn't support distributing
-- domains for now.
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_table('domain_indirection_test', 'f1');
create_distributed_table
---------------------------------------------------------------------
(1 row)
RESET citus.enable_metadata_sync;
-- not supported (field indirection to underlying composite type)
INSERT INTO domain_indirection_test (f1,f3.if1, f3.if2) VALUES (0, 1, 2);
ERROR: inserting or modifying composite type fields is not supported

View File

@ -435,8 +435,8 @@ $$;
CREATE TABLE table_to_prop_func_3(id int, col_1 int default func_in_transaction_3(NULL::non_dist_table));
-- It should error out as there is a non-distributed table dependency
SELECT create_distributed_table('table_to_prop_func_3','id');
ERROR: type function_propagation_schema.non_dist_table does not exist
CONTEXT: while executing command on localhost:xxxxx
ERROR: Relation "table_to_prop_func_3" has dependency to a table "non_dist_table" that is not in Citus' metadata
HINT: Distribute dependent relation first.
COMMIT;
-- Adding a column with default value should propagate the function
BEGIN;
@ -478,6 +478,28 @@ SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(clas
localhost | 57638 | t | (function,"{function_propagation_schema,func_in_transaction_4}",{})
(2 rows)
-- Adding a column with default function depending on non-distributable table should fail
BEGIN;
CREATE TABLE non_dist_table_for_function(id int);
CREATE OR REPLACE FUNCTION non_dist_func(col_1 non_dist_table_for_function)
RETURNS int
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
CREATE TABLE table_to_dist(id int);
SELECT create_distributed_table('table_to_dist', 'id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
ALTER TABLE table_to_dist ADD COLUMN col_1 int default function_propagation_schema.non_dist_func(NULL::non_dist_table_for_function);
ERROR: Relation "table_to_dist" has dependency to a table "non_dist_table_for_function" that is not in Citus' metadata
HINT: Distribute dependent relation first.
ROLLBACK;
-- Adding multiple columns with default values should propagate the function
BEGIN;
CREATE OR REPLACE FUNCTION func_in_transaction_5()
@ -701,8 +723,8 @@ $$;
CREATE TABLE table_to_prop_func_9(id int, col_1 int check (func_in_transaction_11(col_1, NULL::local_table_for_const)));
-- It should error out since there is non-distributed table dependency exists
SELECT create_distributed_table('table_to_prop_func_9', 'id');
ERROR: type function_propagation_schema.local_table_for_const does not exist
CONTEXT: while executing command on localhost:xxxxx
ERROR: Relation "table_to_prop_func_9" has dependency to a table "local_table_for_const" that is not in Citus' metadata
HINT: Distribute dependent relation first.
COMMIT;
-- Show that function as a part of generated always is supported
BEGIN;
@ -1020,6 +1042,34 @@ SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(clas
localhost | 57638 | t | (function,"{function_propagation_schema,func_in_transaction_def_with_seq}",{bigint})
(2 rows)
-- Show that having a dependency on another dist table works out within a tx
CREATE TABLE loc_for_func_dist (
product_no integer,
name text,
price numeric CONSTRAINT positive_price CHECK (price > 0));
SELECT create_distributed_table('loc_for_func_dist', 'product_no');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE OR REPLACE FUNCTION non_sense_func_for_default_val(loc_for_func_dist)
RETURNS int
LANGUAGE plpgsql IMMUTABLE AS
$$
BEGIN
return 1;
END;
$$;
CREATE TABLE table_non_for_func_dist (
a int,
b int DEFAULT non_sense_func_for_default_val(NULL::loc_for_func_dist));
SELECT create_distributed_table('table_non_for_func_dist', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
RESET search_path;
SET client_min_messages TO WARNING;
DROP SCHEMA function_propagation_schema CASCADE;

View File

@ -77,8 +77,6 @@ END
$func$ LANGUAGE plpgsql;
CREATE SCHEMA test;
:create_function_test_maintenance_worker
WARNING: Citus can't distribute functions having dependency on unsupported object of type "view"
DETAIL: Function will be created only locally
-- check maintenance daemon is started
SELECT datname, current_database(),
usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus')
@ -1222,8 +1220,6 @@ HINT: You can manually create a database and its extensions on workers.
CREATE EXTENSION citus;
CREATE SCHEMA test;
:create_function_test_maintenance_worker
WARNING: Citus can't distribute functions having dependency on unsupported object of type "view"
DETAIL: Function will be created only locally
-- see that the daemon started
SELECT datname, current_database(),
usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus')

View File

@ -1049,6 +1049,8 @@ SELECT key, value FROM text_partition_column_table ORDER BY key;
DROP TABLE text_partition_column_table;
-- Domain type columns can give issues
CREATE DOMAIN test_key AS text CHECK(VALUE ~ '^test-\d$');
-- TODO: Once domains are supported, remove enable_metadata_sync off/on change
-- on dependent table distribution below.
SELECT run_command_on_workers($$
CREATE DOMAIN test_key AS text CHECK(VALUE ~ '^test-\d$')
$$);
@ -1062,12 +1064,16 @@ CREATE TABLE domain_partition_column_table (
key test_key NOT NULL,
value int
);
-- Disable metadata sync since citus doesn't support distributing
-- domains for now.
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_table('domain_partition_column_table', 'key');
create_distributed_table
---------------------------------------------------------------------
(1 row)
RESET citus.enable_metadata_sync;
PREPARE prepared_coercion_to_domain_insert(text) AS
INSERT INTO domain_partition_column_table VALUES ($1, 1);
EXECUTE prepared_coercion_to_domain_insert('test-1');

View File

@ -164,8 +164,7 @@ CREATE TABLE my_table (a int, b myvarchar);
-- """Add ALTER TYPE options useful for extensions,
-- like TOAST and I/O functions control (Tomas Vondra, Tom Lane)"""
SELECT create_distributed_table('my_table', 'a');
ERROR: type "test_pg13.myvarchar" does not exist
CONTEXT: while executing command on localhost:xxxxx
ERROR: Relation "my_table" has dependency on unsupported object "type myvarchar"
CREATE TABLE test_table(a int, b tsvector);
SELECT create_distributed_table('test_table', 'a');
create_distributed_table
@ -209,7 +208,7 @@ INSERT INTO test_wal VALUES(2,22);
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Insert on test_wal_65013 (actual rows=0 loops=1)
-> Insert on test_wal_65011 (actual rows=0 loops=1)
WAL: records=1 bytes=63
-> Result (actual rows=1 loops=1)
(8 rows)
@ -227,7 +226,7 @@ INSERT INTO test_wal VALUES(3,33),(4,44),(5,55) RETURNING *;
-> Task
Tuple data received from node: 24 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Insert on test_wal_65012 citus_table_alias (actual rows=3 loops=1)
-> Insert on test_wal_65010 citus_table_alias (actual rows=3 loops=1)
WAL: records=3 bytes=189
-> Values Scan on "*VALUES*" (actual rows=3 loops=1)
(10 rows)

View File

@ -20,15 +20,16 @@ SELECT key, value FROM text_partition_column_table ORDER BY key;
test | 1
(7 rows)
PREPARE prepared_coercion_to_domain_insert(text) AS
INSERT INTO domain_partition_column_table VALUES ($1, 1);
EXECUTE prepared_coercion_to_domain_insert('test-1');
EXECUTE prepared_coercion_to_domain_insert('test-2');
EXECUTE prepared_coercion_to_domain_insert('test-3');
EXECUTE prepared_coercion_to_domain_insert('test-4');
EXECUTE prepared_coercion_to_domain_insert('test-5');
EXECUTE prepared_coercion_to_domain_insert('test-6');
EXECUTE prepared_coercion_to_domain_insert('test-7');
-- TODO: Uncomment tests below once domains are supported
-- PREPARE prepared_coercion_to_domain_insert(text) AS
-- INSERT INTO domain_partition_column_table VALUES ($1, 1);
-- EXECUTE prepared_coercion_to_domain_insert('test-1');
-- EXECUTE prepared_coercion_to_domain_insert('test-2');
-- EXECUTE prepared_coercion_to_domain_insert('test-3');
-- EXECUTE prepared_coercion_to_domain_insert('test-4');
-- EXECUTE prepared_coercion_to_domain_insert('test-5');
-- EXECUTE prepared_coercion_to_domain_insert('test-6');
-- EXECUTE prepared_coercion_to_domain_insert('test-7');
PREPARE FOO AS INSERT INTO http_request (
site_id, ingest_time, url, request_country,
ip_address, status_code, response_time_msec

View File

@ -65,6 +65,9 @@ SELECT create_distributed_table('text_partition_column_table', 'key');
-- Domain type columns can give issues
-- and we use offset to prevent output diverging
CREATE DOMAIN test_key AS text CHECK(VALUE ~ '^test-\d$');
-- TODO: Once domains are supported, remove enable_metadata_sync off/on change
-- on dependent table distribution below. Also uncomment related tests on
-- prepared_statements_4 test file.
SELECT run_command_on_workers($$
CREATE DOMAIN "prepared statements".test_key AS text CHECK(VALUE ~ '^test-\d$')
$$) OFFSET 10000;
@ -72,6 +75,9 @@ $$) OFFSET 10000;
---------------------------------------------------------------------
(0 rows)
-- Disable metadata sync since citus doesn't support distributing
-- domains for now.
SET citus.enable_metadata_sync TO OFF;
CREATE TABLE domain_partition_column_table (
key test_key NOT NULL,
value int
@ -82,6 +88,7 @@ SELECT create_distributed_table('domain_partition_column_table', 'key');
(1 row)
RESET citus.enable_metadata_sync;
-- verify we re-evaluate volatile functions every time
CREATE TABLE http_request (
site_id INT,

View File

@ -899,6 +899,9 @@ CREATE TABLE hpart0 PARTITION OF hash_parted FOR VALUES WITH (modulus 4, remaind
CREATE TABLE hpart1 PARTITION OF hash_parted FOR VALUES WITH (modulus 4, remainder 1);
CREATE TABLE hpart2 PARTITION OF hash_parted FOR VALUES WITH (modulus 4, remainder 2);
CREATE TABLE hpart3 PARTITION OF hash_parted FOR VALUES WITH (modulus 4, remainder 3);
-- Disable metadata sync since citus doesn't support distributing
-- operator class for now.
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_table('hash_parted ', 'a');
create_distributed_table
---------------------------------------------------------------------
@ -925,6 +928,7 @@ ALTER TABLE hash_parted DETACH PARTITION hpart0;
ALTER TABLE hash_parted DETACH PARTITION hpart1;
ALTER TABLE hash_parted DETACH PARTITION hpart2;
ALTER TABLE hash_parted DETACH PARTITION hpart3;
RESET citus.enable_metadata_sync;
-- test range partition without creating partitions and inserting with generate_series()
-- should error out even in plain PG since no partition of relation "parent_tab" is found for row
-- in Citus it errors out because it fails to evaluate partition key in insert

View File

@ -23,6 +23,11 @@ $Q$);
(1 row)
-- Since Citus assumes access methods are part of the extension, make fake_am
-- owned manually to be able to pass checks on Citus while distributing tables.
ALTER EXTENSION citus ADD ACCESS METHOD fake_am;
NOTICE: Citus does not propagate adding/dropping member objects
HINT: You can add/drop the member objects on the workers as well.
--
-- Hash distributed table using a non-default table access method
--
@ -129,27 +134,6 @@ SELECT * FROM master_get_table_ddl_events('test_ref');
ALTER TABLE test_tableam.test_ref OWNER TO postgres
(2 rows)
-- replicate to coordinator
SET client_min_messages TO WARNING;
\set VERBOSIY terse
SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
?column?
---------------------------------------------------------------------
1
(1 row)
RESET client_min_messages;
delete from test_ref;
WARNING: fake_scan_getnextslot
DETAIL: from localhost:xxxxx
ERROR: fake_tuple_delete not implemented
CONTEXT: while executing command on localhost:xxxxx
SELECT master_remove_node('localhost', :master_port);
master_remove_node
---------------------------------------------------------------------
(1 row)
--
-- Range partitioned table using a non-default table access method
--
@ -323,5 +307,7 @@ CREATE TABLE test_partitioned(id int, p int, val int)
PARTITION BY RANGE (p) USING fake_am;
ERROR: specifying a table access method is not supported on a partitioned table
\set VERBOSITY terse
ALTER EXTENSION citus DROP ACCESS METHOD fake_am;
NOTICE: Citus does not propagate adding/dropping member objects
drop schema test_tableam cascade;
NOTICE: drop cascades to 6 other objects
NOTICE: drop cascades to 5 other objects

View File

@ -173,10 +173,10 @@ CREATE TABLE table_type_pg_local (a INT);
SELECT table_name, citus_table_type, distribution_column, shard_count, access_method FROM public.citus_tables WHERE table_name::text LIKE 'table\_type%' ORDER BY 1;
SELECT c.relname, a.amname FROM pg_class c, pg_am a where c.relname SIMILAR TO 'table_type\D*' AND c.relnamespace = 'alter_table_set_access_method'::regnamespace AND c.relam = a.oid;
SELECT alter_table_set_access_method('table_type_dist', 'fake_am');
SELECT alter_table_set_access_method('table_type_ref', 'fake_am');
SELECT alter_table_set_access_method('table_type_pg_local', 'fake_am');
SELECT alter_table_set_access_method('table_type_citus_local', 'fake_am');
SELECT alter_table_set_access_method('table_type_dist', 'columnar');
SELECT alter_table_set_access_method('table_type_ref', 'columnar');
SELECT alter_table_set_access_method('table_type_pg_local', 'columnar');
SELECT alter_table_set_access_method('table_type_citus_local', 'columnar');
SELECT table_name, citus_table_type, distribution_column, shard_count, access_method FROM public.citus_tables WHERE table_name::text LIKE 'table\_type%' ORDER BY 1;
SELECT c.relname, a.amname FROM pg_class c, pg_am a where c.relname SIMILAR TO 'table_type\D*' AND c.relnamespace = 'alter_table_set_access_method'::regnamespace AND c.relam = a.oid;

View File

@ -181,7 +181,11 @@ CREATE FOREIGN TABLE foreign_table (
-- observe that we do not create fdw server for shell table, both shard relation
-- & shell relation points to the same server object
-- Disable metadata sync since citus doesn't support distributing
-- foreign data wrappers for now.
SET citus.enable_metadata_sync TO OFF;
SELECT citus_add_local_table_to_metadata('foreign_table');
RESET citus.enable_metadata_sync;
DROP FOREIGN TABLE foreign_table;

View File

@ -215,13 +215,20 @@ SELECT count(*) FROM coordinator_evaluation_table_2 WHERE key = 101;
CREATE TYPE comptype_int as (int_a int);
CREATE DOMAIN domain_comptype_int AS comptype_int CHECK ((VALUE).int_a > 0);
-- citus does not propagate domain types
-- TODO: Once domains are supported, remove enable_metadata_sync off/on change
-- on dependent table distribution below.
SELECT run_command_on_workers(
$$
CREATE DOMAIN coordinator_evaluation.domain_comptype_int AS coordinator_evaluation.comptype_int CHECK ((VALUE).int_a > 0)
$$);
CREATE TABLE reference_table(column_a coordinator_evaluation.domain_comptype_int);
-- Disable metadata sync since citus doesn't support distributing
-- domains for now.
SET citus.enable_metadata_sync TO OFF;
SELECT create_reference_table('reference_table');
RESET citus.enable_metadata_sync;
INSERT INTO reference_table (column_a) VALUES ('(1)');
INSERT INTO reference_table (column_a) VALUES ('(2)'), ('(3)');

View File

@ -262,12 +262,19 @@ UPDATE field_indirection_test_2 SET (ct2_col.text_1, ct1_col.int_2) = ('text2',
CREATE TYPE two_ints as (if1 int, if2 int);
CREATE DOMAIN domain AS two_ints CHECK ((VALUE).if1 > 0);
-- citus does not propagate domain objects
-- TODO: Once domains are supported, remove enable_metadata_sync off/on change
-- on dependent table distribution below.
SELECT run_command_on_workers(
$$
CREATE DOMAIN type_tests.domain AS type_tests.two_ints CHECK ((VALUE).if1 > 0);
$$);
CREATE TABLE domain_indirection_test (f1 int, f3 domain, domain_array domain[]);
-- Disable metadata sync since citus doesn't support distributing
-- domains for now.
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_table('domain_indirection_test', 'f1');
RESET citus.enable_metadata_sync;
-- not supported (field indirection to underlying composite type)
INSERT INTO domain_indirection_test (f1,f3.if1, f3.if2) VALUES (0, 1, 2);

View File

@ -299,6 +299,27 @@ COMMIT;
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction_4'::regproc::oid;$$) ORDER BY 1,2;
-- Adding a column with default function depending on non-distributable table should fail
BEGIN;
CREATE TABLE non_dist_table_for_function(id int);
CREATE OR REPLACE FUNCTION non_dist_func(col_1 non_dist_table_for_function)
RETURNS int
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
CREATE TABLE table_to_dist(id int);
SELECT create_distributed_table('table_to_dist', 'id');
ALTER TABLE table_to_dist ADD COLUMN col_1 int default function_propagation_schema.non_dist_func(NULL::non_dist_table_for_function);
ROLLBACK;
-- Adding multiple columns with default values should propagate the function
BEGIN;
CREATE OR REPLACE FUNCTION func_in_transaction_5()
@ -665,6 +686,30 @@ COMMIT;
-- Function should be marked as distributed on the worker after committing changes
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction_def_with_seq'::regproc::oid;$$) ORDER BY 1,2;
-- Show that having a dependency on another dist table works out within a tx
CREATE TABLE loc_for_func_dist (
product_no integer,
name text,
price numeric CONSTRAINT positive_price CHECK (price > 0));
SELECT create_distributed_table('loc_for_func_dist', 'product_no');
CREATE OR REPLACE FUNCTION non_sense_func_for_default_val(loc_for_func_dist)
RETURNS int
LANGUAGE plpgsql IMMUTABLE AS
$$
BEGIN
return 1;
END;
$$;
CREATE TABLE table_non_for_func_dist (
a int,
b int DEFAULT non_sense_func_for_default_val(NULL::loc_for_func_dist));
SELECT create_distributed_table('table_non_for_func_dist', 'a');
RESET search_path;
SET client_min_messages TO WARNING;
DROP SCHEMA function_propagation_schema CASCADE;

View File

@ -546,6 +546,9 @@ DROP TABLE text_partition_column_table;
-- Domain type columns can give issues
CREATE DOMAIN test_key AS text CHECK(VALUE ~ '^test-\d$');
-- TODO: Once domains are supported, remove enable_metadata_sync off/on change
-- on dependent table distribution below.
SELECT run_command_on_workers($$
CREATE DOMAIN test_key AS text CHECK(VALUE ~ '^test-\d$')
$$);
@ -554,7 +557,12 @@ CREATE TABLE domain_partition_column_table (
key test_key NOT NULL,
value int
);
-- Disable metadata sync since citus doesn't support distributing
-- domains for now.
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_table('domain_partition_column_table', 'key');
RESET citus.enable_metadata_sync;
-- Exercise parameter coercion from text to the domain-typed partition column
PREPARE prepared_coercion_to_domain_insert(text) AS
INSERT INTO domain_partition_column_table VALUES ($1, 1);

View File

@ -16,17 +16,17 @@ SELECT key, value FROM text_partition_column_table ORDER BY key;
-- TODO: Uncomment tests below once domains are supported
-- PREPARE prepared_coercion_to_domain_insert(text) AS
-- INSERT INTO domain_partition_column_table VALUES ($1, 1);
PREPARE prepared_coercion_to_domain_insert(text) AS
INSERT INTO domain_partition_column_table VALUES ($1, 1);
EXECUTE prepared_coercion_to_domain_insert('test-1');
EXECUTE prepared_coercion_to_domain_insert('test-2');
EXECUTE prepared_coercion_to_domain_insert('test-3');
EXECUTE prepared_coercion_to_domain_insert('test-4');
EXECUTE prepared_coercion_to_domain_insert('test-5');
EXECUTE prepared_coercion_to_domain_insert('test-6');
EXECUTE prepared_coercion_to_domain_insert('test-7');
-- EXECUTE prepared_coercion_to_domain_insert('test-1');
-- EXECUTE prepared_coercion_to_domain_insert('test-2');
-- EXECUTE prepared_coercion_to_domain_insert('test-3');
-- EXECUTE prepared_coercion_to_domain_insert('test-4');
-- EXECUTE prepared_coercion_to_domain_insert('test-5');
-- EXECUTE prepared_coercion_to_domain_insert('test-6');
-- EXECUTE prepared_coercion_to_domain_insert('test-7');

View File

@ -54,16 +54,24 @@ SELECT create_distributed_table('text_partition_column_table', 'key');
-- and we use offset to prevent output diverging
CREATE DOMAIN test_key AS text CHECK(VALUE ~ '^test-\d$');
-- TODO: Once domains are supported, remove enable_metadata_sync off/on change
-- on dependent table distribution below. Also uncomment related tests on
-- prepared_statements_4 test file.
-- Create the domain on workers manually; OFFSET 10000 hides the per-node
-- result rows so node count changes don't diverge the expected output
SELECT run_command_on_workers($$
CREATE DOMAIN "prepared statements".test_key AS text CHECK(VALUE ~ '^test-\d$')
$$) OFFSET 10000;
-- Disable metadata sync since citus doesn't support distributing
-- domains for now.
SET citus.enable_metadata_sync TO OFF;
CREATE TABLE domain_partition_column_table (
key test_key NOT NULL,
value int
);
SELECT create_distributed_table('domain_partition_column_table', 'key');
SELECT create_distributed_table('domain_partition_column_table', 'key');
RESET citus.enable_metadata_sync;
-- verify we re-evaluate volatile functions every time
CREATE TABLE http_request (

View File

@ -538,6 +538,9 @@ CREATE TABLE hpart1 PARTITION OF hash_parted FOR VALUES WITH (modulus 4, remaind
CREATE TABLE hpart2 PARTITION OF hash_parted FOR VALUES WITH (modulus 4, remainder 2);
CREATE TABLE hpart3 PARTITION OF hash_parted FOR VALUES WITH (modulus 4, remainder 3);
-- Disable metadata sync since citus doesn't support distributing
-- operator class for now.
SET citus.enable_metadata_sync TO OFF;
-- NOTE(review): trailing space inside 'hash_parted ' — presumably exercises
-- relation-name trimming in create_distributed_table; confirm intended
SELECT create_distributed_table('hash_parted ', 'a');
INSERT INTO hash_parted VALUES (1, generate_series(1, 10));
@ -548,6 +551,7 @@ ALTER TABLE hash_parted DETACH PARTITION hpart0;
ALTER TABLE hash_parted DETACH PARTITION hpart1;
ALTER TABLE hash_parted DETACH PARTITION hpart2;
ALTER TABLE hash_parted DETACH PARTITION hpart3;
-- Re-enable metadata sync now that the operator-class-dependent table is done
RESET citus.enable_metadata_sync;
-- test range partition without creating partitions and inserting with generate_series()
-- should error out even in plain PG since no partition of relation "parent_tab" is found for row

View File

@ -22,12 +22,17 @@ SELECT public.run_command_on_coordinator_and_workers($Q$
CREATE ACCESS METHOD fake_am TYPE TABLE HANDLER fake_am_handler;
$Q$);
-- Since Citus assumes access methods are part of the extension, make fake_am
-- owned manually to be able to pass checks on Citus while distributing tables.
ALTER EXTENSION citus ADD ACCESS METHOD fake_am;
--
-- Hash distributed table using a non-default table access method
--
create table test_hash_dist(id int, val int) using fake_am;
insert into test_hash_dist values (1, 1);
select create_distributed_table('test_hash_dist','id');
-- Read back through the fake access method after distribution
select * from test_hash_dist;
@ -48,6 +53,7 @@ SELECT * FROM master_get_table_ddl_events('test_hash_dist');
-- Reference table using the non-default (fake) table access method
create table test_ref(a int) using fake_am;
insert into test_ref values (1);
select create_reference_table('test_ref');
select * from test_ref;
@ -62,20 +68,14 @@ RESET client_min_messages;
-- ddl events should include "USING fake_am"
SELECT * FROM master_get_table_ddl_events('test_ref');
-- replicate to coordinator
SET client_min_messages TO WARNING;
-- Fixed typo: was "\set VERBOSIY terse", which merely defined an unused psql
-- variable and left error verbosity at the default (cf. the correct
-- "\set VERBOSITY terse" used later in this file)
\set VERBOSITY terse
SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
RESET client_min_messages;
delete from test_ref;
SELECT master_remove_node('localhost', :master_port);
--
-- Range partitioned table using a non-default table access method
--
CREATE TABLE test_range_dist(id int, val int) using fake_am;
SELECT create_distributed_table('test_range_dist', 'id', 'range');
-- Create two range shards covering ids 0-24 and 25-49
CALL public.create_range_partitioned_shards('test_range_dist', '{"0","25"}','{"24","49"}');
select * from test_range_dist;
@ -148,4 +148,5 @@ CREATE TABLE test_partitioned(id int, p int, val int)
PARTITION BY RANGE (p) USING fake_am;
\set VERBOSITY terse
-- Undo the earlier ALTER EXTENSION citus ADD ACCESS METHOD before cleanup
ALTER EXTENSION citus DROP ACCESS METHOD fake_am;
drop schema test_tableam cascade;