Propagate dependent views upon distribution (#5950)

pull/5967/head^2
Ahmet Gedemenli 2022-05-26 14:23:45 +03:00 committed by GitHub
parent 74ce210f8b
commit 26d927178c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
16 changed files with 774 additions and 53 deletions

View File

@ -194,7 +194,6 @@ static void EnsureTableNotPartition(Oid relationId);
static TableConversionState * CreateTableConversion(TableConversionParameters *params);
static void CreateDistributedTableLike(TableConversionState *con);
static void CreateCitusTableLike(TableConversionState *con);
static List * GetViewCreationCommandsOfTable(Oid relationId);
static void ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
bool suppressNoticeMessages);
static bool HasAnyGeneratedStoredColumns(Oid relationId);
@ -574,8 +573,9 @@ ConvertTable(TableConversionState *con)
List *justBeforeDropCommands = NIL;
List *attachPartitionCommands = NIL;
postLoadCommands = list_concat(postLoadCommands,
GetViewCreationCommandsOfTable(con->relationId));
postLoadCommands =
list_concat(postLoadCommands,
GetViewCreationTableDDLCommandsOfTable(con->relationId));
List *foreignKeyCommands = NIL;
if (con->conversionType == ALTER_DISTRIBUTED_TABLE)
@ -1281,13 +1281,33 @@ GetViewCreationCommandsOfTable(Oid relationId)
char *alterViewCommmand = AlterViewOwnerCommand(viewOid);
appendStringInfoString(query, alterViewCommmand);
commands = lappend(commands, makeTableDDLCommandString(query->data));
commands = lappend(commands, query->data);
}
return commands;
}
/*
 * GetViewCreationTableDDLCommandsOfTable returns the same commands as
 * GetViewCreationCommandsOfTable, but wrapped as TableDDLCommand objects
 * instead of plain command strings.
 */
List *
GetViewCreationTableDDLCommandsOfTable(Oid relationId)
{
	List *creationCommandStrings = GetViewCreationCommandsOfTable(relationId);
	List *ddlCommandList = NIL;

	char *creationCommand = NULL;
	foreach_ptr(creationCommand, creationCommandStrings)
	{
		TableDDLCommand *wrappedCommand = makeTableDDLCommandString(creationCommand);
		ddlCommandList = lappend(ddlCommandList, wrappedCommand);
	}

	return ddlCommandList;
}
/*
* CreateMaterializedViewDDLCommand creates the command to create materialized view.
* Note that this function doesn't support

View File

@ -512,6 +512,43 @@ ExecuteCascadeOperationForRelationIdList(List *relationIdList,
}
/*
 * ExecuteAndLogUtilityCommandListInTableTypeConversion is a wrapper function
 * around ExecuteAndLogUtilityCommandList, that makes it execute with the flag
 * InTableTypeConversionFunctionCall is set to true.
 */
void
ExecuteAndLogUtilityCommandListInTableTypeConversion(List *utilityCommandList)
{
	/* remember the caller's flag value so nested calls restore it correctly */
	bool oldValue = InTableTypeConversionFunctionCall;
	InTableTypeConversionFunctionCall = true;

	/*
	 * Save the current memory context: if the commands error out, the catch
	 * block must switch back to it before copying the error data.
	 */
	MemoryContext savedMemoryContext = CurrentMemoryContext;
	PG_TRY();
	{
		ExecuteAndLogUtilityCommandList(utilityCommandList);
	}
	PG_CATCH();
	{
		/* reset the flag before the error propagates out of this function */
		InTableTypeConversionFunctionCall = oldValue;

		MemoryContextSwitchTo(savedMemoryContext);
		ErrorData *errorData = CopyErrorData();
		FlushErrorState();

		if (errorData->elevel != ERROR)
		{
			/* only plain ERRORs are re-thrown via the copied data below */
			PG_RE_THROW();
		}

		/* re-throw the copied error now that the flag has been restored */
		ThrowErrorData(errorData);
	}
	PG_END_TRY();

	/* non-error path: restore the caller's flag value */
	InTableTypeConversionFunctionCall = oldValue;
}
/*
* ExecuteAndLogUtilityCommandList takes a list of utility commands and calls
* ExecuteAndLogUtilityCommand function for each of them.

View File

@ -81,7 +81,9 @@ static char * GetRenameShardTriggerCommand(Oid shardRelationId, char *triggerNam
uint64 shardId);
static void DropRelationTruncateTriggers(Oid relationId);
static char * GetDropTriggerCommand(Oid relationId, char *triggerName);
static void DropViewsOnTable(Oid relationId);
static List * GetRenameStatsCommandList(List *statsOidList, uint64 shardId);
static List * ReversedOidList(List *oidList);
static void AppendExplicitIndexIdsToList(Form_pg_index indexForm,
List **explicitIndexIdList,
int flags);
@ -328,6 +330,7 @@ CreateCitusLocalTable(Oid relationId, bool cascadeViaForeignKeys, bool autoConve
EnsureReferenceTablesExistOnAllNodes();
List *shellTableDDLEvents = GetShellTableDDLEventsForCitusLocalTable(relationId);
List *tableViewCreationCommands = GetViewCreationCommandsOfTable(relationId);
char *relationName = get_rel_name(relationId);
Oid relationSchemaId = get_rel_namespace(relationId);
@ -342,6 +345,12 @@ CreateCitusLocalTable(Oid relationId, bool cascadeViaForeignKeys, bool autoConve
*/
ExecuteAndLogUtilityCommandList(shellTableDDLEvents);
/*
* Execute the view creation commands with the shell table.
* Views will be distributed via FinalizeCitusLocalTableCreation below.
*/
ExecuteAndLogUtilityCommandListInTableTypeConversion(tableViewCreationCommands);
/*
* Set shellRelationId as the relation with relationId now points
* to the shard relation.
@ -699,6 +708,9 @@ ConvertLocalTableToShard(Oid relationId)
*/
DropRelationTruncateTriggers(relationId);
/* drop views that depend on the shard table */
DropViewsOnTable(relationId);
/*
* We create INSERT|DELETE|UPDATE triggers on shard relation too.
* This is because citus prevents postgres executor to fire those
@ -1019,6 +1031,53 @@ GetDropTriggerCommand(Oid relationId, char *triggerName)
}
/*
 * DropViewsOnTable drops the views that depend on the given relation.
 */
static void
DropViewsOnTable(Oid relationId)
{
	/*
	 * GetDependingViews returns views in dependency order; walk them in
	 * reverse so each DROP only removes a view nothing else depends on.
	 */
	List *reverseOrderedViews = ReversedOidList(GetDependingViews(relationId));

	Oid dependingViewId = InvalidOid;
	foreach_oid(dependingViewId, reverseOrderedViews)
	{
		char *schemaName = get_namespace_name(get_rel_namespace(dependingViewId));
		char *qualifiedViewName =
			quote_qualified_identifier(schemaName, get_rel_name(dependingViewId));

		StringInfo dropViewCommand = makeStringInfo();
		appendStringInfo(dropViewCommand, "DROP VIEW IF EXISTS %s",
						 qualifiedViewName);

		ExecuteAndLogUtilityCommand(dropViewCommand->data);
	}
}
/*
 * ReversedOidList takes a list of oids and returns the reverse ordered version of it.
 */
static List *
ReversedOidList(List *oidList)
{
	List *reversedList = NIL;

	/* walk the input back-to-front, appending to build the reversed list */
	for (int nodeIndex = list_length(oidList) - 1; nodeIndex >= 0; nodeIndex--)
	{
		reversedList = lappend_oid(reversedList, list_nth_oid(oidList, nodeIndex));
	}

	return reversedList;
}
/*
* GetExplicitIndexOidList returns a list of index oids defined "explicitly"
* on the relation with relationId by the "CREATE INDEX" commands. That means,

View File

@ -39,7 +39,6 @@ static List * FilterDistributedExtensions(List *extensionObjectList);
static List * ExtensionNameListToObjectAddressList(List *extensionObjectList);
static void MarkExistingObjectDependenciesDistributedIfSupported(void);
static List * GetAllViews(void);
static bool ShouldMarkRelationDistributedOnUpgrade(Oid relationId);
static bool ShouldPropagateExtensionCommand(Node *parseTree);
static bool IsAlterExtensionSetSchemaCitus(Node *parseTree);
static Node * RecreateExtensionStmt(Oid extensionOid);
@ -513,7 +512,7 @@ MarkExistingObjectDependenciesDistributedIfSupported()
Oid citusTableId = InvalidOid;
foreach_oid(citusTableId, citusTableIdList)
{
if (!ShouldMarkRelationDistributedOnUpgrade(citusTableId))
if (!ShouldMarkRelationDistributed(citusTableId))
{
continue;
}
@ -557,7 +556,7 @@ MarkExistingObjectDependenciesDistributedIfSupported()
Oid viewOid = InvalidOid;
foreach_oid(viewOid, viewList)
{
if (!ShouldMarkRelationDistributedOnUpgrade(viewOid))
if (!ShouldMarkRelationDistributed(viewOid))
{
continue;
}
@ -654,51 +653,6 @@ GetAllViews(void)
}
/*
 * ShouldMarkRelationDistributedOnUpgrade is a helper function that
 * decides whether the input relation should be marked as distributed
 * during the upgrade.
 */
static bool
ShouldMarkRelationDistributedOnUpgrade(Oid relationId)
{
	if (!EnableMetadataSync)
	{
		/*
		 * Just in case anything goes wrong, we should still be able
		 * to continue to the version upgrade.
		 */
		return false;
	}

	ObjectAddress relationAddress = { 0 };
	ObjectAddressSet(relationAddress, RelationRelationId, relationId);

	/* oids below FirstNormalObjectId belong to initdb-created objects */
	bool pgObject = (relationId < FirstNormalObjectId);
	bool ownedByExtension = IsTableOwnedByExtension(relationId);
	bool alreadyDistributed = IsObjectDistributed(&relationAddress);
	/* a non-NULL deferred error means the check failed */
	bool hasUnsupportedDependency =
		DeferErrorIfHasUnsupportedDependency(&relationAddress) != NULL;
	bool hasCircularDependency =
		DeferErrorIfCircularDependencyExists(&relationAddress) != NULL;

	/*
	 * pgObject: Citus never marks pg objects as distributed
	 * ownedByExtension: let extensions manage its own objects
	 * alreadyDistributed: most likely via earlier versions
	 * hasUnsupportedDependency: Citus doesn't know how to distribute its dependencies
	 * hasCircularDependency: Citus cannot handle circular dependencies
	 */
	if (pgObject || ownedByExtension || alreadyDistributed ||
		hasUnsupportedDependency || hasCircularDependency)
	{
		return false;
	}

	return true;
}
/*
* PreprocessAlterExtensionContentsStmt issues a notice. It does not propagate.
*/

View File

@ -29,7 +29,9 @@
#include "citus_version.h"
#include "commands/extension.h"
#include "distributed/colocation_utils.h"
#include "distributed/commands.h"
#include "distributed/commands/utility_hook.h"
#include "distributed/metadata/dependency.h"
#include "distributed/metadata/distobject.h"
#include "distributed/metadata/pg_dist_object.h"
#include "distributed/metadata_cache.h"
@ -220,6 +222,52 @@ MarkObjectDistributedLocally(const ObjectAddress *distAddress)
}
/*
 * ShouldMarkRelationDistributed is a helper function that
 * decides whether the input relation should be marked as distributed.
 */
bool
ShouldMarkRelationDistributed(Oid relationId)
{
	if (!EnableMetadataSync)
	{
		/*
		 * Metadata sync is disabled; just in case anything goes wrong, we
		 * should still be able to continue (e.g. to a version upgrade).
		 */
		return false;
	}

	ObjectAddress relationAddress = { 0 };
	ObjectAddressSet(relationAddress, RelationRelationId, relationId);

	/* oids below FirstNormalObjectId belong to initdb-created objects */
	bool pgObject = (relationId < FirstNormalObjectId);
	bool isObjectSupported = SupportedDependencyByCitus(&relationAddress);
	bool ownedByExtension = IsTableOwnedByExtension(relationId);
	bool alreadyDistributed = IsObjectDistributed(&relationAddress);
	/* a non-NULL deferred error means the check failed */
	bool hasUnsupportedDependency =
		DeferErrorIfHasUnsupportedDependency(&relationAddress) != NULL;
	bool hasCircularDependency =
		DeferErrorIfCircularDependencyExists(&relationAddress) != NULL;

	/*
	 * pgObject: Citus never marks pg objects as distributed
	 * isObjectSupported: Citus does not support propagation of some objects
	 * ownedByExtension: let extensions manage its own objects
	 * alreadyDistributed: most likely via earlier versions
	 * hasUnsupportedDependency: Citus doesn't know how to distribute its dependencies
	 * hasCircularDependency: Citus cannot handle circular dependencies
	 */
	if (pgObject || !isObjectSupported || ownedByExtension || alreadyDistributed ||
		hasUnsupportedDependency || hasCircularDependency)
	{
		return false;
	}

	return true;
}
/*
* CreatePgDistObjectEntryCommand creates command to insert pg_dist_object tuple
* for the given object address.

View File

@ -50,6 +50,7 @@
#include "distributed/metadata_cache.h"
#include "distributed/metadata_sync.h"
#include "distributed/metadata_utility.h"
#include "distributed/metadata/dependency.h"
#include "distributed/metadata/distobject.h"
#include "distributed/metadata/pg_dist_object.h"
#include "distributed/multi_executor.h"
@ -97,6 +98,7 @@ static char * SchemaOwnerName(Oid objectId);
static bool HasMetadataWorkers(void);
static void CreateShellTableOnWorkers(Oid relationId);
static void CreateTableMetadataOnWorkers(Oid relationId);
static void CreateDependingViewsOnWorkers(Oid relationId);
static NodeMetadataSyncResult SyncNodeMetadataToNodesOptional(void);
static bool ShouldSyncTableMetadataInternal(bool hashDistributed,
bool citusTableWithNoDistKey);
@ -302,7 +304,8 @@ SyncNodeMetadataToNode(const char *nodeNameString, int32 nodePort)
* SyncCitusTableMetadata syncs citus table metadata to worker nodes with metadata.
* Our definition of metadata includes the shell table and its inter relations with
* other shell tables, corresponding pg_dist_object, pg_dist_partiton, pg_dist_shard
* and pg_dist_shard placement entries.
* and pg_dist_shard placement entries. This function also propagates the views that
* depend on the given relation, to the metadata workers.
*/
void
SyncCitusTableMetadata(Oid relationId)
@ -317,6 +320,51 @@ SyncCitusTableMetadata(Oid relationId)
ObjectAddressSet(relationAddress, RelationRelationId, relationId);
MarkObjectDistributed(&relationAddress);
}
CreateDependingViewsOnWorkers(relationId);
}
/*
 * CreateDependingViewsOnWorkers takes a relationId and creates the views that depend on
 * that relation on workers with metadata. Propagated views are marked as distributed.
 */
static void
CreateDependingViewsOnWorkers(Oid relationId)
{
	List *views = GetDependingViews(relationId);
	if (list_length(views) < 1)
	{
		/* no view to propagate */
		return;
	}

	/*
	 * Disable DDL propagation on the workers while we send the view
	 * commands — presumably to prevent re-propagation; confirm against
	 * other SendCommandToWorkersWithMetadata call sites.
	 */
	SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION);

	Oid viewOid = InvalidOid;
	foreach_oid(viewOid, views)
	{
		/* skip views Citus cannot or should not distribute */
		if (!ShouldMarkRelationDistributed(viewOid))
		{
			continue;
		}

		ObjectAddress viewAddress = { 0 };
		ObjectAddressSet(viewAddress, RelationRelationId, viewOid);

		/* make sure the view's own dependencies exist on all nodes first */
		EnsureDependenciesExistOnAllNodes(&viewAddress);

		char *createViewCommand = CreateViewDDLCommand(viewOid);
		char *alterViewOwnerCommand = AlterViewOwnerCommand(viewOid);

		SendCommandToWorkersWithMetadata(createViewCommand);
		SendCommandToWorkersWithMetadata(alterViewOwnerCommand);

		/* record the view in pg_dist_object */
		MarkObjectDistributed(&viewAddress);
	}

	SendCommandToWorkersWithMetadata(ENABLE_DDL_PROPAGATION);
}

View File

@ -181,6 +181,7 @@ extern Oid get_constraint_typid(Oid conoid);
/* extension.c - forward declarations */
extern bool IsDropCitusExtensionStmt(Node *parsetree);
extern bool IsCreateAlterExtensionUpdateCitusStmt(Node *parsetree);
extern bool ShouldMarkRelationDistributed(Oid relationId);
extern void ErrorIfUnstableCreateOrAlterExtensionStmt(Node *parsetree);
extern List * PostprocessCreateExtensionStmt(Node *stmt, const char *queryString);
extern List * PreprocessDropExtensionStmt(Node *stmt, const char *queryString,
@ -533,6 +534,8 @@ extern ObjectAddress AlterViewStmtObjectAddress(Node *node, bool missing_ok);
extern List * PreprocessDropViewStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext);
extern char * CreateViewDDLCommand(Oid viewOid);
extern List * GetViewCreationCommandsOfTable(Oid relationId);
extern List * GetViewCreationTableDDLCommandsOfTable(Oid relationId);
extern char * AlterViewOwnerCommand(Oid viewOid);
extern char * DeparseViewStmt(Node *node);
extern char * DeparseDropViewStmt(Node *node);
@ -609,6 +612,8 @@ extern bool RelationIdListHasReferenceTable(List *relationIdList);
extern List * GetFKeyCreationCommandsForRelationIdList(List *relationIdList);
extern void DropRelationForeignKeys(Oid relationId, int flags);
extern void SetLocalEnableLocalReferenceForeignKeys(bool state);
extern void ExecuteAndLogUtilityCommandListInTableTypeConversion(
List *utilityCommandList);
extern void ExecuteAndLogUtilityCommandList(List *ddlCommandList);
extern void ExecuteAndLogUtilityCommand(const char *commandString);
extern void ExecuteForeignKeyCreateCommandList(List *ddlCommandList,

View File

@ -773,6 +773,141 @@ SELECT logicalrelid, partmethod, partkey FROM pg_dist_partition
parent_dropped_col_2 | h | {VAR :varno 1 :varattno 5 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 5 :location -1}
(2 rows)
-- some tests for view propagation on citus local tables
CREATE TABLE view_tbl_1 (a int);
CREATE TABLE view_tbl_2 (a int);
CREATE SCHEMA viewsc;
-- create dependent views, in a different schema
-- the first one depends on a citus metadata table
CREATE VIEW viewsc.prop_view AS SELECT COUNT (*) FROM view_tbl_1 JOIN pg_dist_node ON view_tbl_1.a=pg_dist_node.nodeid;
CREATE VIEW viewsc.prop_view2 AS SELECT COUNT (*) FROM view_tbl_1;
SELECT citus_add_local_table_to_metadata('view_tbl_1');
citus_add_local_table_to_metadata
---------------------------------------------------------------------
(1 row)
-- verify the shard view is dropped, and created&propagated the correct view
SELECT viewname, definition FROM pg_views WHERE viewname LIKE 'prop_view%' ORDER BY viewname;
viewname | definition
---------------------------------------------------------------------
prop_view | SELECT count(*) AS count +
| FROM (view_tbl_1 +
| JOIN pg_dist_node ON ((view_tbl_1.a = pg_dist_node.nodeid)));
prop_view2 | SELECT count(*) AS count +
| FROM view_tbl_1;
(2 rows)
SELECT run_command_on_workers($$SELECT COUNT(*) FROM pg_views WHERE viewname LIKE 'prop_view%';$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,2)
(localhost,57638,t,2)
(2 rows)
SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object where objid IN('viewsc.prop_view'::regclass::oid, 'viewsc.prop_view2'::regclass::oid);
pg_identify_object_as_address
---------------------------------------------------------------------
(view,"{viewsc,prop_view2}",{})
(view,"{viewsc,prop_view}",{})
(2 rows)
-- drop views
DROP VIEW viewsc.prop_view;
DROP VIEW viewsc.prop_view2;
-- verify dropped on workers
SELECT run_command_on_workers($$SELECT COUNT(*) FROM pg_views WHERE viewname LIKE 'prop_view%';$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(2 rows)
-- create a view that depends on a pg_ table
CREATE VIEW viewsc.prop_view3 AS SELECT COUNT (*) FROM view_tbl_1 JOIN pg_namespace ON view_tbl_1.a=pg_namespace.nspowner;
-- create a view that depends on two different tables, one of them is local for now
CREATE VIEW viewsc.prop_view4 AS SELECT COUNT (*) FROM view_tbl_1 JOIN view_tbl_2 ON view_tbl_1.a=view_tbl_2.a;
-- distribute the first table
SELECT create_distributed_table('view_tbl_1','a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- verify the last view is not distributed
SELECT run_command_on_workers($$SELECT COUNT(*) FROM pg_views WHERE viewname LIKE 'prop_view%';$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(localhost,57638,t,1)
(2 rows)
-- add the other table to metadata, so the local view gets distributed
SELECT citus_add_local_table_to_metadata('view_tbl_2');
citus_add_local_table_to_metadata
---------------------------------------------------------------------
(1 row)
-- verify both views are distributed
SELECT run_command_on_workers($$SELECT COUNT(*) FROM pg_views WHERE viewname LIKE 'prop_view%';$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,2)
(localhost,57638,t,2)
(2 rows)
SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object where objid IN('viewsc.prop_view3'::regclass::oid, 'viewsc.prop_view4'::regclass::oid);
pg_identify_object_as_address
---------------------------------------------------------------------
(view,"{viewsc,prop_view3}",{})
(view,"{viewsc,prop_view4}",{})
(2 rows)
-- test with fkey cascading
create table ref_tb(a int primary key);
SELECT create_reference_table('ref_tb');
create_reference_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE loc_tb (a int );
CREATE VIEW v100 AS SELECT * FROM loc_tb;
CREATE VIEW v101 AS SELECT * FROM loc_tb JOIN ref_tb USING (a);
CREATE VIEW v102 AS SELECT * FROM v101;
ALTER TABLE loc_tb ADD CONSTRAINT fkey FOREIGN KEY (a) references ref_tb(a);
-- works fine
select run_command_on_workers($$SELECT count(*) from citus_local_tables_mx.v100, citus_local_tables_mx.v101, citus_local_tables_mx.v102$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(2 rows)
ALTER TABLE loc_tb DROP CONSTRAINT fkey;
-- fails because fkey is dropped and table is converted to local table
select run_command_on_workers($$SELECT count(*) from citus_local_tables_mx.v100$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,f,"ERROR: relation ""citus_local_tables_mx.v100"" does not exist")
(localhost,57638,f,"ERROR: relation ""citus_local_tables_mx.v100"" does not exist")
(2 rows)
select run_command_on_workers($$SELECT count(*) from citus_local_tables_mx.v101$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,f,"ERROR: relation ""citus_local_tables_mx.v101"" does not exist")
(localhost,57638,f,"ERROR: relation ""citus_local_tables_mx.v101"" does not exist")
(2 rows)
select run_command_on_workers($$SELECT count(*) from citus_local_tables_mx.v102$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,f,"ERROR: relation ""citus_local_tables_mx.v102"" does not exist")
(localhost,57638,f,"ERROR: relation ""citus_local_tables_mx.v102"" does not exist")
(2 rows)
-- cleanup at exit
set client_min_messages to error;
DROP SCHEMA citus_local_tables_mx CASCADE;

View File

@ -593,6 +593,205 @@ ERROR: cannot run view command because there was a parallel operation on a dist
DETAIL: When running command on/for a distributed view, Citus needs to perform all operations over a single connection per node to ensure consistency.
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
ROLLBACK;
-- verify that the views get distributed after the table is distributed
create table table_to_depend_on_1 (a int);
create table table_to_depend_on_2 (a int);
-- the first view depends on a table
create view dependent_view_1 as select count(*) from table_to_depend_on_1;
WARNING: "view dependent_view_1" has dependency to "table table_to_depend_on_1" that is not in Citus' metadata
DETAIL: "view dependent_view_1" will be created only locally
HINT: Distribute "table table_to_depend_on_1" first to distribute "view dependent_view_1"
-- the second view depends on two tables
create view dependent_view_2 as select count(*) from table_to_depend_on_1 join table_to_depend_on_2 on table_to_depend_on_1.a=table_to_depend_on_2.a;
WARNING: "view dependent_view_2" has dependency to "table table_to_depend_on_2" that is not in Citus' metadata
DETAIL: "view dependent_view_2" will be created only locally
HINT: Distribute "table table_to_depend_on_2" first to distribute "view dependent_view_2"
-- the third view depends on the first view
create view dependent_view_3 as select count(*) from table_to_depend_on_1;
WARNING: "view dependent_view_3" has dependency to "table table_to_depend_on_1" that is not in Citus' metadata
DETAIL: "view dependent_view_3" will be created only locally
HINT: Distribute "table table_to_depend_on_1" first to distribute "view dependent_view_3"
-- the fourth view depends on the second view
create view dependent_view_4 as select count(*) from table_to_depend_on_2;
WARNING: "view dependent_view_4" has dependency to "table table_to_depend_on_2" that is not in Citus' metadata
DETAIL: "view dependent_view_4" will be created only locally
HINT: Distribute "table table_to_depend_on_2" first to distribute "view dependent_view_4"
-- distribute only one table
select create_distributed_table('table_to_depend_on_1','a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- see all four views on the coordinator
select viewname from pg_views where viewname like 'dependent_view__';
viewname
---------------------------------------------------------------------
dependent_view_1
dependent_view_2
dependent_view_3
dependent_view_4
(4 rows)
\c - - - :worker_1_port
-- see 1st and 3rd view on the worker
select viewname from pg_views where viewname like 'dependent_view__';
viewname
---------------------------------------------------------------------
dependent_view_1
dependent_view_3
(2 rows)
\c - - - :master_port
CREATE TABLE parent_1 (a INT UNIQUE) PARTITION BY RANGE(a);
SELECT create_distributed_table('parent_1','a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE parent_1_child_1 (a int);
CREATE TABLE parent_1_child_2 (a int);
CREATE VIEW v1 AS SELECT * FROM parent_1_child_1;
WARNING: "view v1" has dependency to "table parent_1_child_1" that is not in Citus' metadata
DETAIL: "view v1" will be created only locally
HINT: Distribute "table parent_1_child_1" first to distribute "view v1"
CREATE VIEW v2 AS SELECT * FROM parent_1_child_2;
WARNING: "view v2" has dependency to "table parent_1_child_2" that is not in Citus' metadata
DETAIL: "view v2" will be created only locally
HINT: Distribute "table parent_1_child_2" first to distribute "view v2"
CREATE VIEW v3 AS SELECT parent_1_child_2.* FROM parent_1_child_2 JOIN parent_1_child_1 USING(a);
WARNING: "view v3" has dependency to "table parent_1_child_2" that is not in Citus' metadata
DETAIL: "view v3" will be created only locally
HINT: Distribute "table parent_1_child_2" first to distribute "view v3"
CREATE VIEW v4 AS SELECT * FROM v3;
WARNING: "view v4" has dependency to "table parent_1_child_2" that is not in Citus' metadata
DETAIL: "view v4" will be created only locally
HINT: Distribute "table parent_1_child_2" first to distribute "view v4"
alter table parent_1 attach partition parent_1_child_1 FOR VALUES FROM (0) TO (10) ;
-- only v1 distributed
SELECT run_command_on_workers($$SELECT count(*) FROM v1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(2 rows)
SELECT run_command_on_workers($$SELECT count(*) FROM v2$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,f,"ERROR: relation ""v2"" does not exist")
(localhost,57638,f,"ERROR: relation ""v2"" does not exist")
(2 rows)
SELECT run_command_on_workers($$SELECT count(*) FROM v3$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,f,"ERROR: relation ""v3"" does not exist")
(localhost,57638,f,"ERROR: relation ""v3"" does not exist")
(2 rows)
SELECT run_command_on_workers($$SELECT count(*) FROM v4$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,f,"ERROR: relation ""v4"" does not exist")
(localhost,57638,f,"ERROR: relation ""v4"" does not exist")
(2 rows)
-- all views becomes distributed
alter table parent_1 attach partition parent_1_child_2 FOR VALUES FROM (10) TO (20);
SELECT run_command_on_workers($$SELECT count(*) FROM v1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(2 rows)
SELECT run_command_on_workers($$SELECT count(*) FROM v2$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(2 rows)
SELECT run_command_on_workers($$SELECT count(*) FROM v3$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(2 rows)
SELECT run_command_on_workers($$SELECT count(*) FROM v4$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(2 rows)
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%v1%';
obj_identifier
---------------------------------------------------------------------
(view,"{public,v1}",{})
(1 row)
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%v2%';
obj_identifier
---------------------------------------------------------------------
(view,"{public,v2}",{})
(1 row)
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%v3%';
obj_identifier
---------------------------------------------------------------------
(view,"{public,v3}",{})
(1 row)
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%v4%';
obj_identifier
---------------------------------------------------------------------
(view,"{public,v4}",{})
(1 row)
CREATE TABLE employees (employee_id int, manager_id int, full_name text);
-- v_test_1 and v_test_2 becomes circularly dependend views
-- so we should not try to distribute any of the views
CREATE VIEW v_test_1 AS SELECT * FROM employees;
WARNING: "view v_test_1" has dependency to "table employees" that is not in Citus' metadata
DETAIL: "view v_test_1" will be created only locally
HINT: Distribute "table employees" first to distribute "view v_test_1"
CREATE VIEW v_test_2 AS SELECT * FROM employees;
WARNING: "view v_test_2" has dependency to "table employees" that is not in Citus' metadata
DETAIL: "view v_test_2" will be created only locally
HINT: Distribute "table employees" first to distribute "view v_test_2"
CREATE OR REPLACE VIEW v_test_1 AS SELECT employees.* FROM employees JOIN v_test_2 USING (employee_id);
WARNING: "view v_test_1" has dependency to "table employees" that is not in Citus' metadata
DETAIL: "view v_test_1" will be created only locally
HINT: Distribute "table employees" first to distribute "view v_test_1"
CREATE OR REPLACE VIEW v_test_2 AS SELECT employees.* FROM employees JOIN v_test_1 USING (employee_id);
WARNING: "view v_test_2" has dependency to "table employees" that is not in Citus' metadata
DETAIL: "view v_test_2" will be created only locally
HINT: Distribute "table employees" first to distribute "view v_test_2"
SELECT create_distributed_table('employees','employee_id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- verify not distributed
SELECT run_command_on_workers($$SELECT count(*) FROM v_test_1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,f,"ERROR: relation ""v_test_1"" does not exist")
(localhost,57638,f,"ERROR: relation ""v_test_1"" does not exist")
(2 rows)
SELECT run_command_on_workers($$SELECT count(*) FROM v_test_2$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,f,"ERROR: relation ""v_test_2"" does not exist")
(localhost,57638,f,"ERROR: relation ""v_test_2"" does not exist")
(2 rows)
SET client_min_messages TO ERROR;
DROP SCHEMA view_prop_schema_inner CASCADE;
DROP SCHEMA view_prop_schema CASCADE;

View File

@ -0,0 +1,30 @@
SET search_path TO views_create;
SELECT * FROM "local regular view";
id | title
---------------------------------------------------------------------
(0 rows)
SELECT * FROM dist_regular_view;
id | name | created_at
---------------------------------------------------------------------
(0 rows)
SELECT * FROM local_regular_view2;
count
---------------------------------------------------------------------
0
(1 row)
SELECT * FROM local_regular_view3;
count
---------------------------------------------------------------------
0
(1 row)
SELECT * FROM "local regular view4";
my cny
---------------------------------------------------------------------
0
(1 row)
RESET search_path;

View File

@ -79,3 +79,36 @@ SELECT COUNT(*) FROM select_all_view a JOIN view_test_table b ON a.c=b.c;
2
(1 row)
CREATE TABLE distributed (id bigserial PRIMARY KEY,
name text,
created_at timestamptz DEFAULT now());
CREATE TABLE reference (id bigserial PRIMARY KEY,
title text);
CREATE TABLE local (id bigserial PRIMARY KEY,
title text);
SET client_min_messages TO ERROR;
CREATE VIEW "local regular view" AS SELECT * FROM local;
CREATE VIEW dist_regular_view AS SELECT * FROM distributed;
CREATE VIEW local_regular_view2 as SELECT count(*) FROM distributed JOIN "local regular view" USING (id);
CREATE VIEW local_regular_view3 as SELECT count(*) FROM local JOIN dist_regular_view USING (id);
CREATE VIEW "local regular view4" as SELECT count(*) as "my cny" FROM dist_regular_view JOIN "local regular view" USING (id);
RESET client_min_messages;
-- these above restrictions brought us to the following schema
SELECT create_reference_table('reference');
create_reference_table
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_table('distributed', 'id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT create_reference_table('local');
create_reference_table
---------------------------------------------------------------------
(1 row)

View File

@ -409,6 +409,61 @@ SELECT logicalrelid, partmethod, partkey FROM pg_dist_partition
WHERE logicalrelid IN ('parent_dropped_col'::regclass, 'parent_dropped_col_2'::regclass)
ORDER BY logicalrelid;
-- some tests for view propagation on citus local tables
CREATE TABLE view_tbl_1 (a int);
CREATE TABLE view_tbl_2 (a int);
CREATE SCHEMA viewsc;
-- create dependent views, in a different schema
-- the first one depends on a citus metadata table
CREATE VIEW viewsc.prop_view AS SELECT COUNT (*) FROM view_tbl_1 JOIN pg_dist_node ON view_tbl_1.a=pg_dist_node.nodeid;
CREATE VIEW viewsc.prop_view2 AS SELECT COUNT (*) FROM view_tbl_1;
SELECT citus_add_local_table_to_metadata('view_tbl_1');
-- verify the shard view is dropped, and created&propagated the correct view
SELECT viewname, definition FROM pg_views WHERE viewname LIKE 'prop_view%' ORDER BY viewname;
SELECT run_command_on_workers($$SELECT COUNT(*) FROM pg_views WHERE viewname LIKE 'prop_view%';$$);
SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object where objid IN('viewsc.prop_view'::regclass::oid, 'viewsc.prop_view2'::regclass::oid);
-- drop views
DROP VIEW viewsc.prop_view;
DROP VIEW viewsc.prop_view2;
-- verify dropped on workers
SELECT run_command_on_workers($$SELECT COUNT(*) FROM pg_views WHERE viewname LIKE 'prop_view%';$$);
-- create a view that depends on a pg_ table
CREATE VIEW viewsc.prop_view3 AS SELECT COUNT (*) FROM view_tbl_1 JOIN pg_namespace ON view_tbl_1.a=pg_namespace.nspowner;
-- create a view that depends on two different tables, one of them is local for now
CREATE VIEW viewsc.prop_view4 AS SELECT COUNT (*) FROM view_tbl_1 JOIN view_tbl_2 ON view_tbl_1.a=view_tbl_2.a;
-- distribute the first table
SELECT create_distributed_table('view_tbl_1','a');
-- verify the last view is not distributed
SELECT run_command_on_workers($$SELECT COUNT(*) FROM pg_views WHERE viewname LIKE 'prop_view%';$$);
-- add the other table to metadata, so the local view gets distributed
SELECT citus_add_local_table_to_metadata('view_tbl_2');
-- verify both views are distributed
SELECT run_command_on_workers($$SELECT COUNT(*) FROM pg_views WHERE viewname LIKE 'prop_view%';$$);
SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object where objid IN('viewsc.prop_view3'::regclass::oid, 'viewsc.prop_view4'::regclass::oid);
-- test with fkey cascading
create table ref_tb(a int primary key);
SELECT create_reference_table('ref_tb');
CREATE TABLE loc_tb (a int );
CREATE VIEW v100 AS SELECT * FROM loc_tb;
CREATE VIEW v101 AS SELECT * FROM loc_tb JOIN ref_tb USING (a);
CREATE VIEW v102 AS SELECT * FROM v101;
ALTER TABLE loc_tb ADD CONSTRAINT fkey FOREIGN KEY (a) references ref_tb(a);
-- works fine
select run_command_on_workers($$SELECT count(*) from citus_local_tables_mx.v100, citus_local_tables_mx.v101, citus_local_tables_mx.v102$$);
ALTER TABLE loc_tb DROP CONSTRAINT fkey;
-- fails because fkey is dropped and table is converted to local table
select run_command_on_workers($$SELECT count(*) from citus_local_tables_mx.v100$$);
select run_command_on_workers($$SELECT count(*) from citus_local_tables_mx.v101$$);
select run_command_on_workers($$SELECT count(*) from citus_local_tables_mx.v102$$);
-- cleanup at exit
set client_min_messages to error;
DROP SCHEMA citus_local_tables_mx CASCADE;

View File

@ -374,6 +374,73 @@ BEGIN;
ALTER TABLE view_prop_schema_inner.view_in_transaction RENAME COLUMN a TO b;
DROP VIEW view_prop_schema_inner.view_in_transaction;
ROLLBACK;
-- verify that the views get distributed after the table is distributed
create table table_to_depend_on_1 (a int);
create table table_to_depend_on_2 (a int);
-- the first view depends on a table
create view dependent_view_1 as select count(*) from table_to_depend_on_1;
-- the second view depends on two tables
create view dependent_view_2 as select count(*) from table_to_depend_on_1 join table_to_depend_on_2 on table_to_depend_on_1.a=table_to_depend_on_2.a;
-- the third view depends on the first table
create view dependent_view_3 as select count(*) from table_to_depend_on_1;
-- the fourth view depends on the second table
create view dependent_view_4 as select count(*) from table_to_depend_on_2;
-- distribute only one table
select create_distributed_table('table_to_depend_on_1','a');
-- see all four views on the coordinator
select viewname from pg_views where viewname like 'dependent_view__';
\c - - - :worker_1_port
-- see 1st and 3rd view on the worker
select viewname from pg_views where viewname like 'dependent_view__';
\c - - - :master_port
CREATE TABLE parent_1 (a INT UNIQUE) PARTITION BY RANGE(a);
SELECT create_distributed_table('parent_1','a');
CREATE TABLE parent_1_child_1 (a int);
CREATE TABLE parent_1_child_2 (a int);
CREATE VIEW v1 AS SELECT * FROM parent_1_child_1;
CREATE VIEW v2 AS SELECT * FROM parent_1_child_2;
CREATE VIEW v3 AS SELECT parent_1_child_2.* FROM parent_1_child_2 JOIN parent_1_child_1 USING(a);
CREATE VIEW v4 AS SELECT * FROM v3;
alter table parent_1 attach partition parent_1_child_1 FOR VALUES FROM (0) TO (10) ;
-- only v1 distributed
SELECT run_command_on_workers($$SELECT count(*) FROM v1$$);
SELECT run_command_on_workers($$SELECT count(*) FROM v2$$);
SELECT run_command_on_workers($$SELECT count(*) FROM v3$$);
SELECT run_command_on_workers($$SELECT count(*) FROM v4$$);
-- all views become distributed
alter table parent_1 attach partition parent_1_child_2 FOR VALUES FROM (10) TO (20);
SELECT run_command_on_workers($$SELECT count(*) FROM v1$$);
SELECT run_command_on_workers($$SELECT count(*) FROM v2$$);
SELECT run_command_on_workers($$SELECT count(*) FROM v3$$);
SELECT run_command_on_workers($$SELECT count(*) FROM v4$$);
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%v1%';
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%v2%';
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%v3%';
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%v4%';
CREATE TABLE employees (employee_id int, manager_id int, full_name text);
-- v_test_1 and v_test_2 become circularly dependent views
-- so we should not try to distribute any of the views
CREATE VIEW v_test_1 AS SELECT * FROM employees;
CREATE VIEW v_test_2 AS SELECT * FROM employees;
CREATE OR REPLACE VIEW v_test_1 AS SELECT employees.* FROM employees JOIN v_test_2 USING (employee_id);
CREATE OR REPLACE VIEW v_test_2 AS SELECT employees.* FROM employees JOIN v_test_1 USING (employee_id);
SELECT create_distributed_table('employees','employee_id');
-- verify not distributed
SELECT run_command_on_workers($$SELECT count(*) FROM v_test_1$$);
SELECT run_command_on_workers($$SELECT count(*) FROM v_test_2$$);
SET client_min_messages TO ERROR;
DROP SCHEMA view_prop_schema_inner CASCADE;

View File

@ -0,0 +1,9 @@
SET search_path TO views_create;
SELECT * FROM "local regular view";
SELECT * FROM dist_regular_view;
SELECT * FROM local_regular_view2;
SELECT * FROM local_regular_view3;
SELECT * FROM "local regular view4";
RESET search_path;

View File

@ -41,3 +41,24 @@ SELECT * FROM select_filtered_matview;
SELECT COUNT(*) FROM select_all_view a JOIN select_filtered_matview b ON a.c=b.c;
SELECT COUNT(*) FROM select_all_view a JOIN view_test_table b ON a.c=b.c;
CREATE TABLE distributed (id bigserial PRIMARY KEY,
name text,
created_at timestamptz DEFAULT now());
CREATE TABLE reference (id bigserial PRIMARY KEY,
title text);
CREATE TABLE local (id bigserial PRIMARY KEY,
title text);
SET client_min_messages TO ERROR;
CREATE VIEW "local regular view" AS SELECT * FROM local;
CREATE VIEW dist_regular_view AS SELECT * FROM distributed;
CREATE VIEW local_regular_view2 as SELECT count(*) FROM distributed JOIN "local regular view" USING (id);
CREATE VIEW local_regular_view3 as SELECT count(*) FROM local JOIN dist_regular_view USING (id);
CREATE VIEW "local regular view4" as SELECT count(*) as "my cny" FROM dist_regular_view JOIN "local regular view" USING (id);
RESET client_min_messages;
-- these above restrictions brought us to the following schema
SELECT create_reference_table('reference');
SELECT create_distributed_table('distributed', 'id');
SELECT create_reference_table('local');

View File

@ -7,6 +7,7 @@ test: dropped_columns_1 distributed_planning
test: local_dist_join nested_execution
test: connectivity_checks citus_run_command
test: schemas
test: views
test: sequences
test: functions
test: arbitrary_configs_truncate