Skip dropping shards when we know it's a partition (#5176)

Naisila Puka 2021-08-31 17:41:37 +03:00 committed by GitHub
parent 5ae01303d4
commit acb5ae6ab6
13 changed files with 825 additions and 116 deletions


@@ -89,11 +89,13 @@ citus_truncate_trigger(PG_FUNCTION_ARGS)
        Oid schemaId = get_rel_namespace(relationId);
        char *schemaName = get_namespace_name(schemaId);
        char *relationName = get_rel_name(relationId);
+       bool dropShardsMetadataOnly = false;
-       DirectFunctionCall3(citus_drop_all_shards,
+       DirectFunctionCall4(citus_drop_all_shards,
                            ObjectIdGetDatum(relationId),
                            CStringGetTextDatum(relationName),
-                           CStringGetTextDatum(schemaName));
+                           CStringGetTextDatum(schemaName),
+                           BoolGetDatum(dropShardsMetadataOnly));
        }
        else
        {
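
For context, a hedged SQL-level sketch of the path above (the table name is hypothetical): citus_truncate_trigger implements TRUNCATE on append-distributed tables by dropping all of their shards, and the hard-coded false keeps that real drop behavior rather than a metadata-only one.

-- A sketch, assuming an append-distributed table; the name is hypothetical.
CREATE TABLE append_events (x int, t timestamptz);
SELECT create_distributed_table('append_events', 'x', 'append');
TRUNCATE append_events;  -- reaches the branch above: citus_drop_all_shards(..., false)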


@@ -75,7 +75,7 @@ static void CheckPartitionColumn(Oid relationId, Node *whereClause);
static List * ShardsMatchingDeleteCriteria(Oid relationId, List *shardList,
                                           Node *deleteCriteria);
static int DropShards(Oid relationId, char *schemaName, char *relationName,
-                     List *deletableShardIntervalList);
+                     List *deletableShardIntervalList, bool dropShardsMetadataOnly);
static List * DropTaskList(Oid relationId, char *schemaName, char *relationName,
                           List *deletableShardIntervalList);
static void ExecuteDropShardPlacementCommandRemotely(ShardPlacement *shardPlacement,
@@ -193,8 +193,10 @@ master_apply_delete_command(PG_FUNCTION_ARGS)
                                                  deleteCriteria);
        }
+       bool dropShardsMetadataOnly = false;
        int droppedShardCount = DropShards(relationId, schemaName, relationName,
-                                          deletableShardIntervalList);
+                                          deletableShardIntervalList,
+                                          dropShardsMetadataOnly);
        PG_RETURN_INT32(droppedShardCount);
}
@@ -213,6 +215,7 @@ citus_drop_all_shards(PG_FUNCTION_ARGS)
        Oid relationId = PG_GETARG_OID(0);
        text *schemaNameText = PG_GETARG_TEXT_P(1);
        text *relationNameText = PG_GETARG_TEXT_P(2);
+       bool dropShardsMetadataOnly = PG_GETARG_BOOL(3);
        char *schemaName = text_to_cstring(schemaNameText);
        char *relationName = text_to_cstring(relationNameText);
@@ -239,7 +242,7 @@ citus_drop_all_shards(PG_FUNCTION_ARGS)
        List *shardIntervalList = LoadShardIntervalList(relationId);
        int droppedShardCount = DropShards(relationId, schemaName, relationName,
-                                          shardIntervalList);
+                                          shardIntervalList, dropShardsMetadataOnly);
        PG_RETURN_INT32(droppedShardCount);
}
@@ -251,7 +254,25 @@ citus_drop_all_shards(PG_FUNCTION_ARGS)
Datum
master_drop_all_shards(PG_FUNCTION_ARGS)
{
-       return citus_drop_all_shards(fcinfo);
+       Oid relationId = PG_GETARG_OID(0);
+       text *schemaNameText = PG_GETARG_TEXT_P(1);
+       text *relationNameText = PG_GETARG_TEXT_P(2);
+       bool dropShardsMetadataOnly = false;
+
+       LOCAL_FCINFO(local_fcinfo, 4);
+       InitFunctionCallInfoData(*local_fcinfo, NULL, 4, InvalidOid, NULL, NULL);
+
+       local_fcinfo->args[0].value = ObjectIdGetDatum(relationId);
+       local_fcinfo->args[0].isnull = false;
+       local_fcinfo->args[1].value = PointerGetDatum(schemaNameText);
+       local_fcinfo->args[1].isnull = false;
+       local_fcinfo->args[2].value = PointerGetDatum(relationNameText);
+       local_fcinfo->args[2].isnull = false;
+       local_fcinfo->args[3].value = BoolGetDatum(dropShardsMetadataOnly);
+       local_fcinfo->args[3].isnull = false;
+
+       return citus_drop_all_shards(local_fcinfo);
}
@@ -299,10 +320,13 @@ CheckTableSchemaNameForDrop(Oid relationId, char **schemaName, char **tableName)
 *
 * We mark shard placements that we couldn't drop as to be deleted later, but
 * we do delete the shard metadata.
+ *
+ * If dropShardsMetadataOnly is true, then we don't send remote commands to drop the shards:
+ * we only remove pg_dist_placement and pg_dist_shard rows.
 */
static int
DropShards(Oid relationId, char *schemaName, char *relationName,
-          List *deletableShardIntervalList)
+          List *deletableShardIntervalList, bool dropShardsMetadataOnly)
{
        Assert(OidIsValid(relationId));
        Assert(schemaName != NULL);
@@ -345,51 +369,58 @@ DropShards(Oid relationId, char *schemaName, char *relationName,
        bool isLocalShardPlacement = (shardPlacementGroupId == localGroupId);

-       if (isLocalShardPlacement && DropSchemaOrDBInProgress() &&
-           localGroupId == COORDINATOR_GROUP_ID)
-       {
-           /*
-            * The active DROP SCHEMA/DATABASE ... CASCADE will drop the
-            * shard, if we try to drop it over another connection, we will
-            * get into a distributed deadlock. Hence, just delete the shard
-            * placement metadata and skip it for now.
-            */
-           DeleteShardPlacementRow(shardPlacementId);
-           continue;
-       }
+       /*
+        * If this variable is true, that means the active DROP SCHEMA/DATABASE ... CASCADE
+        * will drop the shard. If we try to drop it over another connection, we will
+        * get into a distributed deadlock. Hence, if this variable is true we should just
+        * delete the shard placement metadata and skip dropping the shard for now.
+        */
+       bool skipIfDropSchemaOrDBInProgress = isLocalShardPlacement &&
+                                             DropSchemaOrDBInProgress() &&
+                                             localGroupId == COORDINATOR_GROUP_ID;

        /*
-        * If it is a local placement of a distributed table or a reference table,
-        * then execute the DROP command locally.
+        * We want to send commands to drop shards when both
+        * skipIfDropSchemaOrDBInProgress and dropShardsMetadataOnly are false.
         */
-       if (isLocalShardPlacement && shouldExecuteTasksLocally)
-       {
-           List *singleTaskList = list_make1(task);
-           ExecuteLocalUtilityTaskList(singleTaskList);
-       }
-       else
+       bool applyRemoteShardsDrop =
+           !skipIfDropSchemaOrDBInProgress && !dropShardsMetadataOnly;

+       if (applyRemoteShardsDrop)
        {
            /*
-            * Either it was not a local placement or we could not use
-            * local execution even if it was a local placement.
-            * If it is the second case, then it is possibly because, in the
-            * current transaction, some commands or queries connected
-            * to the local group as well.
-            *
-            * Regardless of whether the node is a remote node or the current node,
-            * try to open a new connection (or use an existing one) to
-            * connect to that node to drop the shard placement over that
-            * remote connection.
+            * If it is a local placement of a distributed table or a reference table,
+            * then execute the DROP command locally.
             */
-           const char *dropShardPlacementCommand = TaskQueryString(task);
-           ExecuteDropShardPlacementCommandRemotely(shardPlacement,
-                                                    relationName,
-                                                    dropShardPlacementCommand);
-           if (isLocalShardPlacement)
+           if (isLocalShardPlacement && shouldExecuteTasksLocally)
            {
-               SetLocalExecutionStatus(LOCAL_EXECUTION_DISABLED);
+               List *singleTaskList = list_make1(task);
+               ExecuteLocalUtilityTaskList(singleTaskList);
+           }
+           else
+           {
+               /*
+                * Either it was not a local placement or we could not use
+                * local execution even if it was a local placement.
+                * If it is the second case, then it is possibly because, in the
+                * current transaction, some commands or queries connected
+                * to the local group as well.
+                *
+                * Regardless of whether the node is a remote node or the current node,
+                * try to open a new connection (or use an existing one) to
+                * connect to that node to drop the shard placement over that
+                * remote connection.
+                */
+               const char *dropShardPlacementCommand = TaskQueryString(task);
+               ExecuteDropShardPlacementCommandRemotely(shardPlacement,
+                                                        relationName,
+                                                        dropShardPlacementCommand);
+
+               if (isLocalShardPlacement)
+               {
+                   SetLocalExecutionStatus(LOCAL_EXECUTION_DISABLED);
+               }
            }
        }


@@ -16,3 +16,15 @@ ALTER TABLE pg_catalog.pg_dist_placement ADD CONSTRAINT placement_shardid_groupi
#include "udfs/citus_internal_update_placement_metadata/10.2-1.sql";
#include "udfs/citus_internal_delete_shard_metadata/10.2-1.sql";
#include "udfs/citus_internal_update_relation_colocation/10.2-1.sql";
+
+DROP FUNCTION pg_catalog.citus_drop_all_shards(regclass, text, text);
+CREATE FUNCTION pg_catalog.citus_drop_all_shards(logicalrelid regclass,
+                                                 schema_name text,
+                                                 table_name text,
+                                                 drop_shards_metadata_only boolean default false)
+    RETURNS integer
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$citus_drop_all_shards$$;
+COMMENT ON FUNCTION pg_catalog.citus_drop_all_shards(regclass, text, text, boolean)
+    IS 'drop all shards in a relation and update metadata';
+
+#include "udfs/citus_drop_trigger/10.2-1.sql";


@@ -20,3 +20,14 @@ DROP FUNCTION pg_catalog.citus_internal_update_relation_colocation(oid, integer)
REVOKE ALL ON FUNCTION pg_catalog.worker_record_sequence_dependency(regclass,regclass,name) FROM PUBLIC;
ALTER TABLE pg_catalog.pg_dist_placement DROP CONSTRAINT placement_shardid_groupid_unique_index;
+
+DROP FUNCTION pg_catalog.citus_drop_all_shards(regclass, text, text, boolean);
+CREATE FUNCTION pg_catalog.citus_drop_all_shards(logicalrelid regclass,
+                                                 schema_name text,
+                                                 table_name text)
+    RETURNS integer
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$master_drop_all_shards$$;
+COMMENT ON FUNCTION pg_catalog.citus_drop_all_shards(regclass, text, text)
+    IS 'drop all shards in a relation and update metadata';
+
+#include "../udfs/citus_drop_trigger/10.0-1.sql"


@@ -0,0 +1,57 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_drop_trigger()
    RETURNS event_trigger
    LANGUAGE plpgsql
    SET search_path = pg_catalog
    AS $cdbdt$
DECLARE
    constraint_event_count INTEGER;
    v_obj record;
    dropped_table_is_a_partition boolean := false;
BEGIN
    FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects()
                 WHERE object_type IN ('table', 'foreign table')
    LOOP
        -- first drop the table and metadata on the workers
        -- then drop all the shards on the workers
        -- finally remove the pg_dist_partition entry on the coordinator
        PERFORM master_remove_distributed_table_metadata_from_workers(v_obj.objid, v_obj.schema_name, v_obj.object_name);

        -- If both original and normal values are false, the dropped table was a partition
        -- that was dropped as a result of its parent being dropped.
        -- NOTE: the other way around is not true:
        -- the table being a partition doesn't imply both original and normal values are false
        SELECT (v_obj.original = false AND v_obj.normal = false) INTO dropped_table_is_a_partition;

        -- The partition's shards will be dropped when dropping the parent's shards, so we can skip them:
        -- i.e. we call citus_drop_all_shards with the drop_shards_metadata_only parameter set to true
        IF dropped_table_is_a_partition
        THEN
            PERFORM citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := true);
        ELSE
            PERFORM citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false);
        END IF;

        PERFORM master_remove_partition_metadata(v_obj.objid, v_obj.schema_name, v_obj.object_name);
    END LOOP;

    -- remove entries from citus.pg_dist_object for all dropped root (objsubid = 0) objects
    FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects()
    LOOP
        PERFORM master_unmark_object_distributed(v_obj.classid, v_obj.objid, v_obj.objsubid);
    END LOOP;

    SELECT COUNT(*) INTO constraint_event_count
    FROM pg_event_trigger_dropped_objects()
    WHERE object_type IN ('table constraint');

    IF constraint_event_count > 0
    THEN
        -- Tell the utility hook that a table constraint was dropped, since we might
        -- need to undistribute some of the citus local tables that are not
        -- connected to any reference tables.
        PERFORM notify_constraint_dropped();
    END IF;
END;
$cdbdt$;
COMMENT ON FUNCTION pg_catalog.citus_drop_trigger()
    IS 'perform checks and actions at the end of DROP actions';
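
To make the original/normal check above concrete, a small sketch with hypothetical table names: pg_event_trigger_dropped_objects() sets original for objects named directly in the DROP statement and normal for objects reached through a regular dependency, so a partition dropped via its parent arrives with both flags false, which is exactly the combination the trigger keys on:

CREATE TABLE events (x text, t timestamptz) PARTITION BY RANGE (t);
CREATE TABLE events_2021 PARTITION OF events
    FOR VALUES FROM ('2021-01-01') TO ('2022-01-01');
-- In a sql_drop event trigger, pg_event_trigger_dropped_objects() reports:
--   events       original = t, normal = f   (named directly in the DROP)
--   events_2021  original = f, normal = f   (dropped because its parent was)
DROP TABLE events;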


@@ -1,8 +1,3 @@
-CREATE OR REPLACE FUNCTION pg_catalog.notify_constraint_dropped()
-    RETURNS void
-    LANGUAGE C STRICT
-    AS 'MODULE_PATHNAME', $$notify_constraint_dropped$$;
CREATE OR REPLACE FUNCTION pg_catalog.citus_drop_trigger()
    RETURNS event_trigger
    LANGUAGE plpgsql
@@ -11,9 +6,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_drop_trigger()
DECLARE
    constraint_event_count INTEGER;
    v_obj record;
-   sequence_names text[] := '{}';
-   table_colocation_id integer;
-   propagate_drop boolean := false;
+   dropped_table_is_a_partition boolean := false;
BEGIN
    FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects()
                 WHERE object_type IN ('table', 'foreign table')
@@ -22,7 +15,22 @@ BEGIN
        -- then drop all the shards on the workers
        -- finally remove the pg_dist_partition entry on the coordinator
        PERFORM master_remove_distributed_table_metadata_from_workers(v_obj.objid, v_obj.schema_name, v_obj.object_name);
-       PERFORM citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name);
+
+       -- If both original and normal values are false, the dropped table was a partition
+       -- that was dropped as a result of its parent being dropped.
+       -- NOTE: the other way around is not true:
+       -- the table being a partition doesn't imply both original and normal values are false
+       SELECT (v_obj.original = false AND v_obj.normal = false) INTO dropped_table_is_a_partition;
+
+       -- The partition's shards will be dropped when dropping the parent's shards, so we can skip them:
+       -- i.e. we call citus_drop_all_shards with the drop_shards_metadata_only parameter set to true
+       IF dropped_table_is_a_partition
+       THEN
+           PERFORM citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := true);
+       ELSE
+           PERFORM citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false);
+       END IF;
+
        PERFORM master_remove_partition_metadata(v_obj.objid, v_obj.schema_name, v_obj.object_name);
    END LOOP;


@@ -0,0 +1,383 @@
--
-- DROP_PARTITIONED_TABLE
--
-- Tests to make sure that we properly drop distributed partitioned tables
--
SET citus.next_shard_id TO 720000;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
CREATE SCHEMA drop_partitioned_table;
SET search_path = drop_partitioned_table;
-- create a function that lets us see the original and normal values
-- reported for each dropped table by pg_event_trigger_dropped_objects().
-- For now, the only case where we can tell that a partition was dropped
-- as a result of its parent being dropped is when both values are false:
-- see citus_drop_trigger
CREATE FUNCTION check_original_normal_values()
RETURNS event_trigger LANGUAGE plpgsql AS $$
DECLARE
v_obj record;
BEGIN
FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects()
WHERE object_type IN ('table', 'foreign table')
LOOP
RAISE NOTICE 'dropped object: %.% original: % normal: %',
v_obj.schema_name,
v_obj.object_name,
v_obj.original,
v_obj.normal;
END LOOP;
END;
$$;
CREATE EVENT TRIGGER new_trigger_for_drops
ON sql_drop
EXECUTE FUNCTION check_original_normal_values();
-- create a view printing the same output as \d for this test's schemas
-- since \d output is not guaranteed to be consistent between different PG versions etc
CREATE VIEW tables_info AS
SELECT n.nspname as "Schema",
c.relname as "Name",
CASE c.relkind WHEN 'r' THEN 'table' WHEN 'p' THEN 'partitioned table' END as "Type",
pg_catalog.pg_get_userbyid(c.relowner) as "Owner"
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
LEFT JOIN pg_user u ON u.usesysid = c.relowner
WHERE n.nspname IN ('drop_partitioned_table', 'schema1')
AND c.relkind IN ('r','p')
ORDER BY 1, 2;
\c - - - :worker_1_port
CREATE SCHEMA drop_partitioned_table;
SET search_path = drop_partitioned_table;
CREATE VIEW tables_info AS
SELECT n.nspname as "Schema",
c.relname as "Name",
CASE c.relkind WHEN 'r' THEN 'table' WHEN 'p' THEN 'partitioned table' END as "Type",
pg_catalog.pg_get_userbyid(c.relowner) as "Owner"
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
LEFT JOIN pg_user u ON u.usesysid = c.relowner
WHERE n.nspname IN ('drop_partitioned_table', 'schema1')
AND c.relkind IN ('r','p')
ORDER BY 1, 2;
\c - - - :master_port
SET search_path = drop_partitioned_table;
SET citus.next_shard_id TO 720000;
-- CASE 1
-- Dropping the parent table
CREATE TABLE parent (x text, t timestamptz DEFAULT now()) PARTITION BY RANGE (t);
CREATE TABLE child1 (x text, t timestamptz DEFAULT now());
ALTER TABLE parent ATTACH PARTITION child1 FOR VALUES FROM ('2021-05-31') TO ('2021-06-01');
CREATE TABLE child2 (x text, t timestamptz DEFAULT now());
ALTER TABLE parent ATTACH PARTITION child2 FOR VALUES FROM ('2021-06-30') TO ('2021-07-01');
SELECT create_distributed_table('parent','x');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT * FROM drop_partitioned_table.tables_info;
Schema | Name | Type | Owner
---------------------------------------------------------------------
drop_partitioned_table | child1 | table | postgres
drop_partitioned_table | child2 | table | postgres
drop_partitioned_table | parent | partitioned table | postgres
(3 rows)
\set VERBOSITY terse
DROP TABLE parent;
NOTICE: dropped object: drop_partitioned_table.parent original: t normal: f
NOTICE: dropped object: drop_partitioned_table.child1 original: f normal: f
NOTICE: dropped object: drop_partitioned_table.child2 original: f normal: f
SELECT * FROM drop_partitioned_table.tables_info;
Schema | Name | Type | Owner
---------------------------------------------------------------------
(0 rows)
\c - - - :worker_1_port
SET search_path = drop_partitioned_table;
SELECT * FROM drop_partitioned_table.tables_info;
Schema | Name | Type | Owner
---------------------------------------------------------------------
(0 rows)
\c - - - :master_port
SET search_path = drop_partitioned_table;
SET citus.next_shard_id TO 720000;
-- CASE 2
-- Dropping the parent table, but including children in the DROP command
CREATE TABLE parent (x text, t timestamptz DEFAULT now()) PARTITION BY RANGE (t);
CREATE TABLE child1 (x text, t timestamptz DEFAULT now());
ALTER TABLE parent ATTACH PARTITION child1 FOR VALUES FROM ('2021-05-31') TO ('2021-06-01');
CREATE TABLE child2 (x text, t timestamptz DEFAULT now());
ALTER TABLE parent ATTACH PARTITION child2 FOR VALUES FROM ('2021-06-30') TO ('2021-07-01');
SELECT create_distributed_table('parent','x');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT * FROM drop_partitioned_table.tables_info;
Schema | Name | Type | Owner
---------------------------------------------------------------------
drop_partitioned_table | child1 | table | postgres
drop_partitioned_table | child2 | table | postgres
drop_partitioned_table | parent | partitioned table | postgres
(3 rows)
\set VERBOSITY terse
DROP TABLE child1, parent, child2;
NOTICE: dropped object: drop_partitioned_table.parent original: t normal: f
NOTICE: dropped object: drop_partitioned_table.child2 original: t normal: f
NOTICE: dropped object: drop_partitioned_table.child1 original: t normal: f
SELECT * FROM drop_partitioned_table.tables_info;
Schema | Name | Type | Owner
---------------------------------------------------------------------
(0 rows)
\c - - - :worker_1_port
SET search_path = drop_partitioned_table;
SELECT * FROM drop_partitioned_table.tables_info;
Schema | Name | Type | Owner
---------------------------------------------------------------------
(0 rows)
\c - - - :master_port
SET search_path = drop_partitioned_table;
SET citus.next_shard_id TO 720000;
-- CASE 3
-- DROP OWNED BY role1; Only parent is owned by role1, children are owned by another owner
SET client_min_messages TO warning;
SET citus.enable_ddl_propagation TO off;
CREATE ROLE role1;
RESET client_min_messages;
RESET citus.enable_ddl_propagation;
SELECT run_command_on_workers('CREATE ROLE role1');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
(2 rows)
GRANT ALL ON SCHEMA drop_partitioned_table TO role1;
SET ROLE role1;
CREATE TABLE drop_partitioned_table.parent (x text, t timestamptz DEFAULT now()) PARTITION BY RANGE (t);
RESET ROLE;
CREATE TABLE child1 (x text, t timestamptz DEFAULT now());
ALTER TABLE parent ATTACH PARTITION child1 FOR VALUES FROM ('2021-05-31') TO ('2021-06-01');
CREATE TABLE child2 (x text, t timestamptz DEFAULT now());
ALTER TABLE parent ATTACH PARTITION child2 FOR VALUES FROM ('2021-06-30') TO ('2021-07-01');
SELECT create_distributed_table('parent','x');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT * FROM drop_partitioned_table.tables_info;
Schema | Name | Type | Owner
---------------------------------------------------------------------
drop_partitioned_table | child1 | table | postgres
drop_partitioned_table | child2 | table | postgres
drop_partitioned_table | parent | partitioned table | role1
(3 rows)
\set VERBOSITY terse
DROP OWNED BY role1;
NOTICE: dropped object: drop_partitioned_table.parent original: t normal: f
NOTICE: dropped object: drop_partitioned_table.child1 original: f normal: f
NOTICE: dropped object: drop_partitioned_table.child2 original: f normal: f
SELECT * FROM drop_partitioned_table.tables_info;
Schema | Name | Type | Owner
---------------------------------------------------------------------
(0 rows)
\c - - - :worker_1_port
SET search_path = drop_partitioned_table;
SELECT * FROM drop_partitioned_table.tables_info;
Schema | Name | Type | Owner
---------------------------------------------------------------------
(0 rows)
\c - - - :master_port
SET search_path = drop_partitioned_table;
SET citus.next_shard_id TO 720000;
-- CASE 4
-- DROP OWNED BY role1; Parent and children are owned by role1
GRANT ALL ON SCHEMA drop_partitioned_table TO role1;
SET ROLE role1;
CREATE TABLE drop_partitioned_table.parent (x text, t timestamptz DEFAULT now()) PARTITION BY RANGE (t);
CREATE TABLE drop_partitioned_table.child1 (x text, t timestamptz DEFAULT now());
ALTER TABLE drop_partitioned_table.parent ATTACH PARTITION drop_partitioned_table.child1 FOR VALUES FROM ('2021-05-31') TO ('2021-06-01');
CREATE TABLE drop_partitioned_table.child2 (x text, t timestamptz DEFAULT now());
ALTER TABLE drop_partitioned_table.parent ATTACH PARTITION drop_partitioned_table.child2 FOR VALUES FROM ('2021-06-30') TO ('2021-07-01');
RESET ROLE;
SELECT create_distributed_table('parent','x');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT * FROM drop_partitioned_table.tables_info;
Schema | Name | Type | Owner
---------------------------------------------------------------------
drop_partitioned_table | child1 | table | role1
drop_partitioned_table | child2 | table | role1
drop_partitioned_table | parent | partitioned table | role1
(3 rows)
\set VERBOSITY terse
DROP OWNED BY role1;
NOTICE: dropped object: drop_partitioned_table.parent original: t normal: f
NOTICE: dropped object: drop_partitioned_table.child1 original: t normal: f
NOTICE: dropped object: drop_partitioned_table.child2 original: t normal: f
SELECT * FROM drop_partitioned_table.tables_info;
Schema | Name | Type | Owner
---------------------------------------------------------------------
(0 rows)
\c - - - :worker_1_port
SET search_path = drop_partitioned_table;
SELECT * FROM drop_partitioned_table.tables_info;
Schema | Name | Type | Owner
---------------------------------------------------------------------
(0 rows)
\c - - - :master_port
SET search_path = drop_partitioned_table;
SET citus.next_shard_id TO 720000;
REVOKE ALL ON SCHEMA drop_partitioned_table FROM role1;
DROP ROLE role1;
SELECT run_command_on_workers('DROP ROLE IF EXISTS role1');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP ROLE")
(localhost,57638,t,"DROP ROLE")
(2 rows)
-- CASE 5
-- DROP SCHEMA schema1 CASCADE; Parent is in schema1, children are in another schema
CREATE SCHEMA schema1;
CREATE TABLE schema1.parent (x text, t timestamptz DEFAULT now()) PARTITION BY RANGE (t);
CREATE TABLE child1 (x text, t timestamptz DEFAULT now());
ALTER TABLE schema1.parent ATTACH PARTITION child1 FOR VALUES FROM ('2021-05-31') TO ('2021-06-01');
CREATE TABLE child2 (x text, t timestamptz DEFAULT now());
ALTER TABLE schema1.parent ATTACH PARTITION child2 FOR VALUES FROM ('2021-06-30') TO ('2021-07-01');
SELECT create_distributed_table('schema1.parent','x');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SET search_path = drop_partitioned_table, schema1;
SELECT * FROM drop_partitioned_table.tables_info;
Schema | Name | Type | Owner
---------------------------------------------------------------------
drop_partitioned_table | child1 | table | postgres
drop_partitioned_table | child2 | table | postgres
schema1 | parent | partitioned table | postgres
(3 rows)
\set VERBOSITY terse
DROP SCHEMA schema1 CASCADE;
NOTICE: drop cascades to table parent
NOTICE: dropped object: schema1.parent original: f normal: t
NOTICE: dropped object: drop_partitioned_table.child1 original: f normal: f
NOTICE: dropped object: drop_partitioned_table.child2 original: f normal: f
SELECT * FROM drop_partitioned_table.tables_info;
Schema | Name | Type | Owner
---------------------------------------------------------------------
(0 rows)
\c - - - :worker_1_port
SET search_path = drop_partitioned_table, schema1;
SELECT * FROM drop_partitioned_table.tables_info;
Schema | Name | Type | Owner
---------------------------------------------------------------------
(0 rows)
\c - - - :master_port
SET citus.next_shard_id TO 720000;
-- CASE 6
-- DROP SCHEMA schema1 CASCADE; Parent and children are in schema1
CREATE SCHEMA schema1;
CREATE TABLE schema1.parent (x text, t timestamptz DEFAULT now()) PARTITION BY RANGE (t);
CREATE TABLE schema1.child1 (x text, t timestamptz DEFAULT now());
ALTER TABLE schema1.parent ATTACH PARTITION schema1.child1 FOR VALUES FROM ('2021-05-31') TO ('2021-06-01');
CREATE TABLE schema1.child2 (x text, t timestamptz DEFAULT now());
ALTER TABLE schema1.parent ATTACH PARTITION schema1.child2 FOR VALUES FROM ('2021-06-30') TO ('2021-07-01');
SELECT create_distributed_table('schema1.parent','x');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SET search_path = drop_partitioned_table, schema1;
SELECT * FROM drop_partitioned_table.tables_info;
Schema | Name | Type | Owner
---------------------------------------------------------------------
schema1 | child1 | table | postgres
schema1 | child2 | table | postgres
schema1 | parent | partitioned table | postgres
(3 rows)
\set VERBOSITY terse
DROP SCHEMA schema1 CASCADE;
NOTICE: drop cascades to table parent
NOTICE: dropped object: schema1.parent original: f normal: t
NOTICE: dropped object: schema1.child1 original: f normal: t
NOTICE: dropped object: schema1.child2 original: f normal: t
SELECT * FROM drop_partitioned_table.tables_info;
Schema | Name | Type | Owner
---------------------------------------------------------------------
(0 rows)
\c - - - :worker_1_port
SET search_path = drop_partitioned_table, schema1;
SELECT * FROM drop_partitioned_table.tables_info;
Schema | Name | Type | Owner
---------------------------------------------------------------------
(0 rows)
\c - - - :master_port
SET search_path = drop_partitioned_table;
-- Check that we actually skip sending remote commands to drop shards
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 720000;
DROP EVENT TRIGGER new_trigger_for_drops;
-- Case 1 - we should skip
CREATE TABLE parent (x text, t timestamptz DEFAULT now()) PARTITION BY RANGE (t);
CREATE TABLE child1 (x text, t timestamptz DEFAULT now());
ALTER TABLE parent ATTACH PARTITION child1 FOR VALUES FROM ('2021-05-31') TO ('2021-06-01');
SELECT create_distributed_table('parent','x');
create_distributed_table
---------------------------------------------------------------------
(1 row)
BEGIN;
SET citus.log_remote_commands TO on;
DROP TABLE parent;
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.parent_xxxxx CASCADE
ROLLBACK;
NOTICE: issuing ROLLBACK
-- Case 2 - we shouldn't skip
BEGIN;
SET citus.log_remote_commands TO on;
DROP TABLE parent, child1;
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.parent_xxxxx CASCADE
NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.child1_xxxxx CASCADE
ROLLBACK;
NOTICE: issuing ROLLBACK
DROP SCHEMA drop_partitioned_table CASCADE;
NOTICE: drop cascades to 3 other objects
SELECT run_command_on_workers('DROP SCHEMA IF EXISTS drop_partitioned_table CASCADE');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
SET search_path TO public;


@@ -814,50 +814,6 @@ NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test
NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_xxxxx CASCADE
NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_xxxxx CASCADE
NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_xxxxx CASCADE
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2012_xxxxx CASCADE
-NOTICE: table "partitioning_test_2012_1500197" does not exist, skipping
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2012_xxxxx CASCADE
-NOTICE: table "partitioning_test_2012_1500200" does not exist, skipping
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2012_xxxxx CASCADE
-NOTICE: table "partitioning_test_2012_1500203" does not exist, skipping
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2012_xxxxx CASCADE
-NOTICE: table "partitioning_test_2012_1500206" does not exist, skipping
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2012_xxxxx CASCADE
-NOTICE: table "partitioning_test_2012_1500209" does not exist, skipping
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2012_xxxxx CASCADE
-NOTICE: table "partitioning_test_2012_1500212" does not exist, skipping
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2012_xxxxx CASCADE
-NOTICE: table "partitioning_test_2012_1500215" does not exist, skipping
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2012_xxxxx CASCADE
-NOTICE: table "partitioning_test_2012_1500218" does not exist, skipping
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2012_xxxxx CASCADE
-NOTICE: table "partitioning_test_2012_1500221" does not exist, skipping
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2012_xxxxx CASCADE
-NOTICE: table "partitioning_test_2012_1500224" does not exist, skipping
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2012_xxxxx CASCADE
-NOTICE: table "partitioning_test_2012_1500227" does not exist, skipping
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2013_xxxxx CASCADE
-NOTICE: table "partitioning_test_2013_1500229" does not exist, skipping
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2013_xxxxx CASCADE
-NOTICE: table "partitioning_test_2013_1500232" does not exist, skipping
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2013_xxxxx CASCADE
-NOTICE: table "partitioning_test_2013_1500235" does not exist, skipping
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2013_xxxxx CASCADE
-NOTICE: table "partitioning_test_2013_1500238" does not exist, skipping
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2013_xxxxx CASCADE
-NOTICE: table "partitioning_test_2013_1500241" does not exist, skipping
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2013_xxxxx CASCADE
-NOTICE: table "partitioning_test_2013_1500244" does not exist, skipping
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2013_xxxxx CASCADE
-NOTICE: table "partitioning_test_2013_1500247" does not exist, skipping
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2013_xxxxx CASCADE
-NOTICE: table "partitioning_test_2013_1500250" does not exist, skipping
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2013_xxxxx CASCADE
-NOTICE: table "partitioning_test_2013_1500253" does not exist, skipping
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2013_xxxxx CASCADE
-NOTICE: table "partitioning_test_2013_1500256" does not exist, skipping
-NOTICE: executing the command locally: DROP TABLE IF EXISTS local_commands_test_schema.partitioning_test_2013_xxxxx CASCADE
-NOTICE: table "partitioning_test_2013_1500259" does not exist, skipping
ROLLBACK;
-- below should be executed via remote connections
TRUNCATE partitioning_test;


@@ -793,19 +793,21 @@ SELECT * FROM multi_extension.print_extension_changes();
-- Snapshot of state at 10.2-1
ALTER EXTENSION citus UPDATE TO '10.2-1';
SELECT * FROM multi_extension.print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
+ function citus_drop_all_shards(regclass,text,text) integer |
 function stop_metadata_sync_to_node(text,integer) void |
+ | function citus_drop_all_shards(regclass,text,text,boolean) integer
 | function citus_internal.downgrade_columnar_storage(regclass) void
 | function citus_internal.upgrade_columnar_storage(regclass) void
 | function citus_internal_add_partition_metadata(regclass,"char",text,integer,"char") void
 | function citus_internal_add_placement_metadata(bigint,integer,bigint,integer,bigint) void
 | function citus_internal_add_shard_metadata(regclass,bigint,"char",text,text) void
 | function citus_internal_delete_shard_metadata(bigint) void
 | function citus_internal_update_placement_metadata(bigint,integer,integer) void
 | function citus_internal_update_relation_colocation(oid,integer) void
 | function stop_metadata_sync_to_node(text,integer,boolean) void
-(10 rows)
+(12 rows)
DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
-- show running version


@@ -2091,20 +2091,20 @@ SET client_min_messages TO DEBUG1;
DROP TABLE partitioning_test, reference_table;
DEBUG: switching to sequential query execution mode
DETAIL: Table "<dropped>" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed tables due to foreign keys. Any parallel modification to those hash distributed tables in the same transaction can only be executed in sequential query execution mode
-CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name)"
-PL/pgSQL function citus_drop_trigger() line 16 at PERFORM
+CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
+PL/pgSQL function citus_drop_trigger() line 25 at PERFORM
DEBUG: drop cascades to 2 other objects
DETAIL: drop cascades to constraint partitioning_reference_fkey_1660302 on table partitioning_schema.partitioning_test_1660302
drop cascades to constraint partitioning_reference_fkey_1660304 on table partitioning_schema.partitioning_test_1660304
DETAIL: from localhost:xxxxx
-CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name)"
-PL/pgSQL function citus_drop_trigger() line 16 at PERFORM
+CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
+PL/pgSQL function citus_drop_trigger() line 25 at PERFORM
DEBUG: drop cascades to 2 other objects
DETAIL: drop cascades to constraint partitioning_reference_fkey_1660303 on table partitioning_schema.partitioning_test_1660303
drop cascades to constraint partitioning_reference_fkey_1660305 on table partitioning_schema.partitioning_test_1660305
DETAIL: from localhost:xxxxx
-CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name)"
-PL/pgSQL function citus_drop_trigger() line 16 at PERFORM
+CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
+PL/pgSQL function citus_drop_trigger() line 25 at PERFORM
RESET client_min_messages;
RESET SEARCH_PATH;
-- not timestamp partitioned


@@ -51,7 +51,7 @@ ORDER BY 1;
function citus_dist_shard_cache_invalidate()
function citus_dist_stat_activity()
function citus_drain_node(text,integer,citus.shard_transfer_mode,name)
-function citus_drop_all_shards(regclass,text,text)
+function citus_drop_all_shards(regclass,text,text,boolean)
function citus_drop_trigger()
function citus_executor_name(integer)
function citus_extradata_container(internal)


@@ -66,6 +66,7 @@ test: ensure_no_intermediate_data_leak
# ----------
# Tests for partitioning support
# ----------
test: multi_partitioning_utils multi_partitioning partitioning_issue_3970 replicated_partitioned_table
+test: drop_partitioned_table
# ----------
# Tests for foreign data wrapper support


@@ -0,0 +1,246 @@
--
-- DROP_PARTITIONED_TABLE
--
-- Tests to make sure that we properly drop distributed partitioned tables
--
SET citus.next_shard_id TO 720000;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
CREATE SCHEMA drop_partitioned_table;
SET search_path = drop_partitioned_table;
-- create a function that lets us see the original and normal values
-- reported for each dropped table by pg_event_trigger_dropped_objects().
-- For now, the only case where we can tell that a partition was dropped
-- as a result of its parent being dropped is when both values are false:
-- see citus_drop_trigger
CREATE FUNCTION check_original_normal_values()
RETURNS event_trigger LANGUAGE plpgsql AS $$
DECLARE
v_obj record;
BEGIN
FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects()
WHERE object_type IN ('table', 'foreign table')
LOOP
RAISE NOTICE 'dropped object: %.% original: % normal: %',
v_obj.schema_name,
v_obj.object_name,
v_obj.original,
v_obj.normal;
END LOOP;
END;
$$;
CREATE EVENT TRIGGER new_trigger_for_drops
ON sql_drop
EXECUTE FUNCTION check_original_normal_values();
-- create a view printing the same output as \d for this test's schemas
-- since \d output is not guaranteed to be consistent between different PG versions etc
CREATE VIEW tables_info AS
SELECT n.nspname as "Schema",
c.relname as "Name",
CASE c.relkind WHEN 'r' THEN 'table' WHEN 'p' THEN 'partitioned table' END as "Type",
pg_catalog.pg_get_userbyid(c.relowner) as "Owner"
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
LEFT JOIN pg_user u ON u.usesysid = c.relowner
WHERE n.nspname IN ('drop_partitioned_table', 'schema1')
AND c.relkind IN ('r','p')
ORDER BY 1, 2;
\c - - - :worker_1_port
CREATE SCHEMA drop_partitioned_table;
SET search_path = drop_partitioned_table;
CREATE VIEW tables_info AS
SELECT n.nspname as "Schema",
c.relname as "Name",
CASE c.relkind WHEN 'r' THEN 'table' WHEN 'p' THEN 'partitioned table' END as "Type",
pg_catalog.pg_get_userbyid(c.relowner) as "Owner"
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
LEFT JOIN pg_user u ON u.usesysid = c.relowner
WHERE n.nspname IN ('drop_partitioned_table', 'schema1')
AND c.relkind IN ('r','p')
ORDER BY 1, 2;
\c - - - :master_port
SET search_path = drop_partitioned_table;
SET citus.next_shard_id TO 720000;
-- CASE 1
-- Dropping the parent table
CREATE TABLE parent (x text, t timestamptz DEFAULT now()) PARTITION BY RANGE (t);
CREATE TABLE child1 (x text, t timestamptz DEFAULT now());
ALTER TABLE parent ATTACH PARTITION child1 FOR VALUES FROM ('2021-05-31') TO ('2021-06-01');
CREATE TABLE child2 (x text, t timestamptz DEFAULT now());
ALTER TABLE parent ATTACH PARTITION child2 FOR VALUES FROM ('2021-06-30') TO ('2021-07-01');
SELECT create_distributed_table('parent','x');
SELECT * FROM drop_partitioned_table.tables_info;
\set VERBOSITY terse
DROP TABLE parent;
SELECT * FROM drop_partitioned_table.tables_info;
\c - - - :worker_1_port
SET search_path = drop_partitioned_table;
SELECT * FROM drop_partitioned_table.tables_info;
\c - - - :master_port
SET search_path = drop_partitioned_table;
SET citus.next_shard_id TO 720000;
-- CASE 2
-- Dropping the parent table, but including children in the DROP command
CREATE TABLE parent (x text, t timestamptz DEFAULT now()) PARTITION BY RANGE (t);
CREATE TABLE child1 (x text, t timestamptz DEFAULT now());
ALTER TABLE parent ATTACH PARTITION child1 FOR VALUES FROM ('2021-05-31') TO ('2021-06-01');
CREATE TABLE child2 (x text, t timestamptz DEFAULT now());
ALTER TABLE parent ATTACH PARTITION child2 FOR VALUES FROM ('2021-06-30') TO ('2021-07-01');
SELECT create_distributed_table('parent','x');
SELECT * FROM drop_partitioned_table.tables_info;
\set VERBOSITY terse
DROP TABLE child1, parent, child2;
SELECT * FROM drop_partitioned_table.tables_info;
\c - - - :worker_1_port
SET search_path = drop_partitioned_table;
SELECT * FROM drop_partitioned_table.tables_info;
\c - - - :master_port
SET search_path = drop_partitioned_table;
SET citus.next_shard_id TO 720000;
-- CASE 3
-- DROP OWNED BY role1; Only parent is owned by role1, children are owned by another owner
SET client_min_messages TO warning;
SET citus.enable_ddl_propagation TO off;
CREATE ROLE role1;
RESET client_min_messages;
RESET citus.enable_ddl_propagation;
SELECT run_command_on_workers('CREATE ROLE role1');
GRANT ALL ON SCHEMA drop_partitioned_table TO role1;
SET ROLE role1;
CREATE TABLE drop_partitioned_table.parent (x text, t timestamptz DEFAULT now()) PARTITION BY RANGE (t);
RESET ROLE;
CREATE TABLE child1 (x text, t timestamptz DEFAULT now());
ALTER TABLE parent ATTACH PARTITION child1 FOR VALUES FROM ('2021-05-31') TO ('2021-06-01');
CREATE TABLE child2 (x text, t timestamptz DEFAULT now());
ALTER TABLE parent ATTACH PARTITION child2 FOR VALUES FROM ('2021-06-30') TO ('2021-07-01');
SELECT create_distributed_table('parent','x');
SELECT * FROM drop_partitioned_table.tables_info;
\set VERBOSITY terse
DROP OWNED BY role1;
SELECT * FROM drop_partitioned_table.tables_info;
\c - - - :worker_1_port
SET search_path = drop_partitioned_table;
SELECT * FROM drop_partitioned_table.tables_info;
\c - - - :master_port
SET search_path = drop_partitioned_table;
SET citus.next_shard_id TO 720000;
-- CASE 4
-- DROP OWNED BY role1; Parent and children are owned by role1
GRANT ALL ON SCHEMA drop_partitioned_table TO role1;
SET ROLE role1;
CREATE TABLE drop_partitioned_table.parent (x text, t timestamptz DEFAULT now()) PARTITION BY RANGE (t);
CREATE TABLE drop_partitioned_table.child1 (x text, t timestamptz DEFAULT now());
ALTER TABLE drop_partitioned_table.parent ATTACH PARTITION drop_partitioned_table.child1 FOR VALUES FROM ('2021-05-31') TO ('2021-06-01');
CREATE TABLE drop_partitioned_table.child2 (x text, t timestamptz DEFAULT now());
ALTER TABLE drop_partitioned_table.parent ATTACH PARTITION drop_partitioned_table.child2 FOR VALUES FROM ('2021-06-30') TO ('2021-07-01');
RESET ROLE;
SELECT create_distributed_table('parent','x');
SELECT * FROM drop_partitioned_table.tables_info;
\set VERBOSITY terse
DROP OWNED BY role1;
SELECT * FROM drop_partitioned_table.tables_info;
\c - - - :worker_1_port
SET search_path = drop_partitioned_table;
SELECT * FROM drop_partitioned_table.tables_info;
\c - - - :master_port
SET search_path = drop_partitioned_table;
SET citus.next_shard_id TO 720000;
REVOKE ALL ON SCHEMA drop_partitioned_table FROM role1;
DROP ROLE role1;
SELECT run_command_on_workers('DROP ROLE IF EXISTS role1');
-- CASE 5
-- DROP SCHEMA schema1 CASCADE; Parent is in schema1, children are in another schema
CREATE SCHEMA schema1;
CREATE TABLE schema1.parent (x text, t timestamptz DEFAULT now()) PARTITION BY RANGE (t);
CREATE TABLE child1 (x text, t timestamptz DEFAULT now());
ALTER TABLE schema1.parent ATTACH PARTITION child1 FOR VALUES FROM ('2021-05-31') TO ('2021-06-01');
CREATE TABLE child2 (x text, t timestamptz DEFAULT now());
ALTER TABLE schema1.parent ATTACH PARTITION child2 FOR VALUES FROM ('2021-06-30') TO ('2021-07-01');
SELECT create_distributed_table('schema1.parent','x');
SET search_path = drop_partitioned_table, schema1;
SELECT * FROM drop_partitioned_table.tables_info;
\set VERBOSITY terse
DROP SCHEMA schema1 CASCADE;
SELECT * FROM drop_partitioned_table.tables_info;
\c - - - :worker_1_port
SET search_path = drop_partitioned_table, schema1;
SELECT * FROM drop_partitioned_table.tables_info;
\c - - - :master_port
SET citus.next_shard_id TO 720000;
-- CASE 6
-- DROP SCHEMA schema1 CASCADE; Parent and children are in schema1
CREATE SCHEMA schema1;
CREATE TABLE schema1.parent (x text, t timestamptz DEFAULT now()) PARTITION BY RANGE (t);
CREATE TABLE schema1.child1 (x text, t timestamptz DEFAULT now());
ALTER TABLE schema1.parent ATTACH PARTITION schema1.child1 FOR VALUES FROM ('2021-05-31') TO ('2021-06-01');
CREATE TABLE schema1.child2 (x text, t timestamptz DEFAULT now());
ALTER TABLE schema1.parent ATTACH PARTITION schema1.child2 FOR VALUES FROM ('2021-06-30') TO ('2021-07-01');
SELECT create_distributed_table('schema1.parent','x');
SET search_path = drop_partitioned_table, schema1;
SELECT * FROM drop_partitioned_table.tables_info;
\set VERBOSITY terse
DROP SCHEMA schema1 CASCADE;
SELECT * FROM drop_partitioned_table.tables_info;
\c - - - :worker_1_port
SET search_path = drop_partitioned_table, schema1;
SELECT * FROM drop_partitioned_table.tables_info;
\c - - - :master_port
SET search_path = drop_partitioned_table;
-- Check that we actually skip sending remote commands to drop shards
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 720000;
DROP EVENT TRIGGER new_trigger_for_drops;
-- Case 1 - we should skip
CREATE TABLE parent (x text, t timestamptz DEFAULT now()) PARTITION BY RANGE (t);
CREATE TABLE child1 (x text, t timestamptz DEFAULT now());
ALTER TABLE parent ATTACH PARTITION child1 FOR VALUES FROM ('2021-05-31') TO ('2021-06-01');
SELECT create_distributed_table('parent','x');
BEGIN;
SET citus.log_remote_commands TO on;
DROP TABLE parent;
ROLLBACK;
-- Case 2 - we shouldn't skip
BEGIN;
SET citus.log_remote_commands TO on;
DROP TABLE parent, child1;
ROLLBACK;
DROP SCHEMA drop_partitioned_table CASCADE;
SELECT run_command_on_workers('DROP SCHEMA IF EXISTS drop_partitioned_table CASCADE');
SET search_path TO public;