mirror of https://github.com/citusdata/citus.git
Merge pull request #5966 from citusdata/velioglu/fix_depending_view_creation
Fix schema and ownership of view while altering depending distributed table
commit 86781ec85a
@@ -206,6 +206,7 @@ static char * CreateWorkerChangeSequenceDependencyCommand(char *sequenceSchemaNa
                                                           char *sourceName,
                                                           char *targetSchemaName,
                                                           char *targetName);
+static char * CreateMaterializedViewDDLCommand(Oid matViewOid);
 static char * GetAccessMethodForMatViewIfExists(Oid viewOid);
 static bool WillRecreateForeignKeyToReferenceTable(Oid relationId,
                                                    CascadeToColocatedOption cascadeOption);
@@ -1263,33 +1264,22 @@ GetViewCreationCommandsOfTable(Oid relationId)
     Oid viewOid = InvalidOid;
     foreach_oid(viewOid, views)
     {
-        Datum viewDefinitionDatum = DirectFunctionCall1(pg_get_viewdef,
-                                                        ObjectIdGetDatum(viewOid));
-        char *viewDefinition = TextDatumGetCString(viewDefinitionDatum);
         StringInfo query = makeStringInfo();
-        char *viewName = get_rel_name(viewOid);
-        char *schemaName = get_namespace_name(get_rel_namespace(viewOid));
-        char *qualifiedViewName = quote_qualified_identifier(schemaName, viewName);
-        bool isMatView = get_rel_relkind(viewOid) == RELKIND_MATVIEW;
-
-        /* here we need to get the access method of the view to recreate it */
-        char *accessMethodName = GetAccessMethodForMatViewIfExists(viewOid);
-
-        appendStringInfoString(query, "CREATE ");
-
-        if (isMatView)
+        /* See comments on CreateMaterializedViewDDLCommand for its limitations */
+        if (get_rel_relkind(viewOid) == RELKIND_MATVIEW)
         {
-            appendStringInfoString(query, "MATERIALIZED ");
+            char *matViewCreateCommands = CreateMaterializedViewDDLCommand(viewOid);
+            appendStringInfoString(query, matViewCreateCommands);
         }
+        else
+        {
+            char *viewCreateCommand = CreateViewDDLCommand(viewOid);
+            appendStringInfoString(query, viewCreateCommand);
+        }
 
-        appendStringInfo(query, "VIEW %s ", qualifiedViewName);
-
-        if (accessMethodName)
-        {
-            appendStringInfo(query, "USING %s ", accessMethodName);
-        }
-
-        appendStringInfo(query, "AS %s", viewDefinition);
-
+        char *alterViewCommmand = AlterViewOwnerCommand(viewOid);
+        appendStringInfoString(query, alterViewCommmand);
         commands = lappend(commands, makeTableDDLCommandString(query->data));
     }
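Taken together, the loop now builds one DDL string per dependent view: the CREATE command from the relkind-specific helper, immediately followed by the owner command from AlterViewOwnerCommand. For a regular view the combined string would look roughly like this (a sketch with hypothetical names, assuming CreateViewDDLCommand emits a CREATE OR REPLACE VIEW statement):

    CREATE OR REPLACE VIEW some_schema.dependent_view AS SELECT a, b FROM some_schema.base_table;
    ALTER VIEW some_schema.dependent_view OWNER TO view_owner_role;

This is the ownership half of the fix: previously only the CREATE part was emitted, so a recreated view ended up owned by the current user instead of its original owner.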
@@ -1298,6 +1288,64 @@ GetViewCreationCommandsOfTable(Oid relationId)
 }
 
 
+/*
+ * CreateMaterializedViewDDLCommand creates the command to create a materialized view.
+ * Note that this function doesn't support
+ * - Aliases
+ * - Storage parameters
+ * - Tablespace
+ * - WITH [NO] DATA
+ * options for the given materialized view. Parser functions for materialized views
+ * should be added to handle them.
+ *
+ * Related issue: https://github.com/citusdata/citus/issues/5968
+ */
+static char *
+CreateMaterializedViewDDLCommand(Oid matViewOid)
+{
+    StringInfo query = makeStringInfo();
+
+    char *viewName = get_rel_name(matViewOid);
+    char *schemaName = get_namespace_name(get_rel_namespace(matViewOid));
+    char *qualifiedViewName = quote_qualified_identifier(schemaName, viewName);
+
+    /* here we need to get the access method of the view to recreate it */
+    char *accessMethodName = GetAccessMethodForMatViewIfExists(matViewOid);
+
+    appendStringInfo(query, "CREATE MATERIALIZED VIEW %s ", qualifiedViewName);
+
+    if (accessMethodName)
+    {
+        appendStringInfo(query, "USING %s ", accessMethodName);
+    }
+
+    /*
+     * Set search_path to NIL so that all objects outside of pg_catalog will be
+     * schema-prefixed.
+     */
+    OverrideSearchPath *overridePath = GetOverrideSearchPath(CurrentMemoryContext);
+    overridePath->schemas = NIL;
+    overridePath->addCatalog = true;
+    PushOverrideSearchPath(overridePath);
+
+    /*
+     * Push the transaction snapshot to be able to get view definition with pg_get_viewdef
+     */
+    PushActiveSnapshot(GetTransactionSnapshot());
+
+    Datum viewDefinitionDatum = DirectFunctionCall1(pg_get_viewdef,
+                                                    ObjectIdGetDatum(matViewOid));
+    char *viewDefinition = TextDatumGetCString(viewDefinitionDatum);
+
+    PopActiveSnapshot();
+    PopOverrideSearchPath();
+
+    appendStringInfo(query, "AS %s", viewDefinition);
+
+    return query->data;
+}
+
+
 /*
  * ReplaceTable replaces the source table with the target table.
  * It moves all the rows of the source table to target table with INSERT SELECT.
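Given the format strings above, the command this helper returns for a columnar materialized view would look roughly like the following (hypothetical names; the USING clause is only added when GetAccessMethodForMatViewIfExists finds a non-default access method):

    CREATE MATERIALIZED VIEW some_schema.some_mat_view USING columnar AS SELECT a, b FROM some_schema.base_table;

Per the header comment, aliases, storage parameters, tablespace, and WITH [NO] DATA options of the original definition are not reproduced.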
@@ -401,7 +401,7 @@ AppendViewDefinitionToCreateViewCommand(StringInfo buf, Oid viewOid)
 
 /*
  * AlterViewOwnerCommand returns the command that alters the owner of the
- * given view oid.
+ * given view or materialized view oid.
  */
 char *
 AlterViewOwnerCommand(Oid viewOid)
@@ -416,9 +416,18 @@ AlterViewOwnerCommand(Oid viewOid)
     char *viewOwnerName = TableOwner(viewOid);
     char *qualifiedViewName = NameListToQuotedString(list_make2(makeString(schemaName),
                                                                 makeString(viewName)));
-    appendStringInfo(alterOwnerCommand,
-                     "ALTER VIEW %s OWNER TO %s", qualifiedViewName,
-                     quote_identifier(viewOwnerName));
+    if (get_rel_relkind(viewOid) == RELKIND_MATVIEW)
+    {
+        appendStringInfo(alterOwnerCommand, "ALTER MATERIALIZED VIEW %s ",
+                         qualifiedViewName);
+    }
+    else
+    {
+        appendStringInfo(alterOwnerCommand, "ALTER VIEW %s ", qualifiedViewName);
+    }
+
+    appendStringInfo(alterOwnerCommand, "OWNER TO %s", quote_identifier(viewOwnerName));
 
     return alterOwnerCommand->data;
 }
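With this branch in place, the returned command takes one of two shapes (hypothetical names):

    ALTER VIEW some_schema.some_view OWNER TO some_role;
    ALTER MATERIALIZED VIEW some_schema.some_mat_view OWNER TO some_role;

Before this change, ALTER VIEW was emitted unconditionally, which PostgreSQL rejects when the target is a materialized view.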
@@ -457,16 +466,20 @@ PreprocessAlterViewStmt(Node *node, const char *queryString, ProcessUtilityConte
     QualifyTreeNode((Node *) stmt);
 
     EnsureCoordinator();
-    EnsureSequentialMode(OBJECT_VIEW);
 
     /* reconstruct alter statement in a portable fashion */
     const char *alterViewStmtSql = DeparseTreeNode((Node *) stmt);
 
-    List *commands = list_make3(DISABLE_DDL_PROPAGATION,
-                                (void *) alterViewStmtSql,
-                                ENABLE_DDL_PROPAGATION);
+    /*
+     * To avoid sequential mode, we are using the metadata connection. For the
+     * detailed explanation, please check the comment on PostprocessViewStmt.
+     */
+    DDLJob *ddlJob = palloc0(sizeof(DDLJob));
+    ddlJob->targetObjectAddress = viewAddress;
+    ddlJob->metadataSyncCommand = alterViewStmtSql;
+    ddlJob->taskList = NIL;
 
-    return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
+    return list_make1(ddlJob);
 }
@@ -527,8 +540,8 @@ List *
 PreprocessRenameViewStmt(Node *node, const char *queryString,
                          ProcessUtilityContext processUtilityContext)
 {
-    ObjectAddress typeAddress = GetObjectAddressFromParseTree(node, true);
-    if (!ShouldPropagateObject(&typeAddress))
+    ObjectAddress viewAddress = GetObjectAddressFromParseTree(node, true);
+    if (!ShouldPropagateObject(&viewAddress))
     {
         return NIL;
     }
@@ -541,14 +554,16 @@ PreprocessRenameViewStmt(Node *node, const char *queryString,
     /* deparse sql */
     const char *renameStmtSql = DeparseTreeNode(node);
 
-    EnsureSequentialMode(OBJECT_VIEW);
-
-    /* to prevent recursion with mx we disable ddl propagation */
-    List *commands = list_make3(DISABLE_DDL_PROPAGATION,
-                                (void *) renameStmtSql,
-                                ENABLE_DDL_PROPAGATION);
-
-    return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
+    /*
+     * To avoid sequential mode, we are using the metadata connection. For the
+     * detailed explanation, please check the comment on PostprocessViewStmt.
+     */
+    DDLJob *ddlJob = palloc0(sizeof(DDLJob));
+    ddlJob->targetObjectAddress = viewAddress;
+    ddlJob->metadataSyncCommand = renameStmtSql;
+    ddlJob->taskList = NIL;
+
+    return list_make1(ddlJob);
 }
@@ -580,24 +595,28 @@ PreprocessAlterViewSchemaStmt(Node *node, const char *queryString,
 {
     AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
 
-    ObjectAddress typeAddress = GetObjectAddressFromParseTree((Node *) stmt, true);
-    if (!ShouldPropagateObject(&typeAddress))
+    ObjectAddress viewAddress = GetObjectAddressFromParseTree((Node *) stmt, true);
+    if (!ShouldPropagateObject(&viewAddress))
     {
         return NIL;
     }
 
     EnsureCoordinator();
-    EnsureSequentialMode(OBJECT_VIEW);
 
     QualifyTreeNode((Node *) stmt);
 
     const char *sql = DeparseTreeNode((Node *) stmt);
 
-    List *commands = list_make3(DISABLE_DDL_PROPAGATION,
-                                (void *) sql,
-                                ENABLE_DDL_PROPAGATION);
+    /*
+     * To avoid sequential mode, we are using the metadata connection. For the
+     * detailed explanation, please check the comment on PostprocessViewStmt.
+     */
+    DDLJob *ddlJob = palloc0(sizeof(DDLJob));
+    ddlJob->targetObjectAddress = viewAddress;
+    ddlJob->metadataSyncCommand = sql;
+    ddlJob->taskList = NIL;
 
-    return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
+    return list_make1(ddlJob);
 }
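All three preprocess hooks above follow the same pattern: instead of fanning the statement out through NodeDDLTaskList, they return a DDLJob whose metadataSyncCommand is the bare deparsed statement, shipped over the metadata connection. Previously each worker received the statement wrapped in propagation guards, roughly like this (a sketch, assuming DISABLE_DDL_PROPAGATION and ENABLE_DDL_PROPAGATION expand to the usual citus.enable_ddl_propagation toggles; the ALTER statement is hypothetical):

    SET citus.enable_ddl_propagation TO 'off';
    ALTER VIEW some_schema.some_view SET SCHEMA other_schema;
    SET citus.enable_ddl_propagation TO 'on';

Routing the single command through the metadata connection is what makes the removed EnsureSequentialMode(OBJECT_VIEW) calls unnecessary, as exercised by the transaction test below.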
@@ -904,5 +904,82 @@ SELECT amname FROM pg_am WHERE oid IN (SELECT relam FROM pg_class WHERE relname
  columnar
 (1 row)
 
+-- verify that alter_distributed_table works if it has dependent views and materialized views
+-- set colocate_with explicitly so as not to affect other tables
+CREATE SCHEMA schema_to_test_alter_dist_table;
+SET search_path to schema_to_test_alter_dist_table;
+CREATE TABLE test_alt_dist_table_1(a int, b int);
+SELECT create_distributed_table('test_alt_dist_table_1', 'a', colocate_with => 'None');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+CREATE TABLE test_alt_dist_table_2(a int, b int);
+SELECT create_distributed_table('test_alt_dist_table_2', 'a', colocate_with => 'test_alt_dist_table_1');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+CREATE VIEW dependent_view_1 AS SELECT test_alt_dist_table_2.* FROM test_alt_dist_table_2;
+CREATE VIEW dependent_view_2 AS SELECT test_alt_dist_table_2.* FROM test_alt_dist_table_2 JOIN test_alt_dist_table_1 USING(a);
+CREATE MATERIALIZED VIEW dependent_mat_view_1 AS SELECT test_alt_dist_table_2.* FROM test_alt_dist_table_2;
+-- Alter owner to make sure that alter_distributed_table doesn't change the view's owner
 SET client_min_messages TO WARNING;
+CREATE USER alter_dist_table_test_user;
+SELECT 1 FROM run_command_on_workers($$CREATE USER alter_dist_table_test_user$$);
+ ?column?
+---------------------------------------------------------------------
+        1
+        1
+(2 rows)
+
+ALTER VIEW dependent_view_1 OWNER TO alter_dist_table_test_user;
+ALTER VIEW dependent_view_2 OWNER TO alter_dist_table_test_user;
+ALTER MATERIALIZED VIEW dependent_mat_view_1 OWNER TO alter_dist_table_test_user;
+SELECT alter_distributed_table('test_alt_dist_table_1', shard_count:=12, cascade_to_colocated:=true);
+ alter_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT viewowner FROM pg_views WHERE viewname IN ('dependent_view_1', 'dependent_view_2');
+         viewowner
+---------------------------------------------------------------------
+ alter_dist_table_test_user
+ alter_dist_table_test_user
+(2 rows)
+
+SELECT matviewowner FROM pg_matviews WHERE matviewname = 'dependent_mat_view_1';
+        matviewowner
+---------------------------------------------------------------------
+ alter_dist_table_test_user
+(1 row)
+
+-- Check the existence of the view on the worker node as well
+SELECT run_command_on_workers($$SELECT viewowner FROM pg_views WHERE viewname = 'dependent_view_1'$$);
+            run_command_on_workers
+---------------------------------------------------------------------
+ (localhost,57637,t,alter_dist_table_test_user)
+ (localhost,57638,t,alter_dist_table_test_user)
+(2 rows)
+
+SELECT run_command_on_workers($$SELECT viewowner FROM pg_views WHERE viewname = 'dependent_view_2'$$);
+            run_command_on_workers
+---------------------------------------------------------------------
+ (localhost,57637,t,alter_dist_table_test_user)
+ (localhost,57638,t,alter_dist_table_test_user)
+(2 rows)
+
+-- The materialized view is not expected to exist on the worker nodes
+SELECT run_command_on_workers($$SELECT count(*) FROM pg_matviews WHERE matviewname = 'dependent_mat_view_1';$$);
+ run_command_on_workers
+---------------------------------------------------------------------
+ (localhost,57637,t,0)
+ (localhost,57638,t,0)
+(2 rows)
+
+RESET search_path;
 DROP SCHEMA alter_distributed_table CASCADE;
+DROP SCHEMA schema_to_test_alter_dist_table CASCADE;
@@ -564,6 +564,35 @@ ALTER VIEW IF EXISTS non_existing_view RENAME val2 TO val1;
 NOTICE: relation "non_existing_view" does not exist, skipping
 ALTER VIEW IF EXISTS non_existing_view SET SCHEMA view_prop_schema;
 NOTICE: relation "non_existing_view" does not exist, skipping
+-- Show that create view and alter view commands can be run from the same transaction,
+-- but drop view cannot, since we cannot use the metadata connection for drop view commands
+BEGIN;
+SET LOCAL citus.force_max_query_parallelization TO ON;
+CREATE TABLE table_1_to_view_in_transaction(a int);
+SELECT create_distributed_table('table_1_to_view_in_transaction', 'a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+CREATE TABLE table_2_to_view_in_transaction(a int);
+SELECT create_distributed_table('table_2_to_view_in_transaction', 'a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- we can create/alter views even in parallel mode, but not drop them
+CREATE VIEW view_in_transaction AS SELECT table_1_to_view_in_transaction.* FROM table_2_to_view_in_transaction JOIN table_1_to_view_in_transaction USING (a);
+ALTER TABLE view_in_transaction SET (security_barrier);
+ALTER VIEW view_in_transaction SET SCHEMA public;
+ALTER VIEW public.view_in_transaction SET SCHEMA view_prop_schema_inner;
+ALTER TABLE view_prop_schema_inner.view_in_transaction RENAME COLUMN a TO b;
+DROP VIEW view_prop_schema_inner.view_in_transaction;
+ERROR: cannot run view command because there was a parallel operation on a distributed table in the transaction
+DETAIL: When running command on/for a distributed view, Citus needs to perform all operations over a single connection per node to ensure consistency.
+HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
+ROLLBACK;
 SET client_min_messages TO ERROR;
 DROP SCHEMA view_prop_schema_inner CASCADE;
 DROP SCHEMA view_prop_schema CASCADE;
@@ -300,5 +300,41 @@ CREATE MATERIALIZED VIEW test_mat_view_am USING COLUMNAR AS SELECT count(*), a F
 SELECT alter_distributed_table('test_am_matview', shard_count:= 52);
 SELECT amname FROM pg_am WHERE oid IN (SELECT relam FROM pg_class WHERE relname ='test_mat_view_am');
 
+-- verify that alter_distributed_table works if it has dependent views and materialized views
+-- set colocate_with explicitly so as not to affect other tables
+CREATE SCHEMA schema_to_test_alter_dist_table;
+SET search_path to schema_to_test_alter_dist_table;
+
+CREATE TABLE test_alt_dist_table_1(a int, b int);
+SELECT create_distributed_table('test_alt_dist_table_1', 'a', colocate_with => 'None');
+
+CREATE TABLE test_alt_dist_table_2(a int, b int);
+SELECT create_distributed_table('test_alt_dist_table_2', 'a', colocate_with => 'test_alt_dist_table_1');
+
+CREATE VIEW dependent_view_1 AS SELECT test_alt_dist_table_2.* FROM test_alt_dist_table_2;
+CREATE VIEW dependent_view_2 AS SELECT test_alt_dist_table_2.* FROM test_alt_dist_table_2 JOIN test_alt_dist_table_1 USING(a);
+
+CREATE MATERIALIZED VIEW dependent_mat_view_1 AS SELECT test_alt_dist_table_2.* FROM test_alt_dist_table_2;
+
+-- Alter owner to make sure that alter_distributed_table doesn't change the view's owner
 SET client_min_messages TO WARNING;
+CREATE USER alter_dist_table_test_user;
+SELECT 1 FROM run_command_on_workers($$CREATE USER alter_dist_table_test_user$$);
+
+ALTER VIEW dependent_view_1 OWNER TO alter_dist_table_test_user;
+ALTER VIEW dependent_view_2 OWNER TO alter_dist_table_test_user;
+ALTER MATERIALIZED VIEW dependent_mat_view_1 OWNER TO alter_dist_table_test_user;
+
+SELECT alter_distributed_table('test_alt_dist_table_1', shard_count:=12, cascade_to_colocated:=true);
+SELECT viewowner FROM pg_views WHERE viewname IN ('dependent_view_1', 'dependent_view_2');
+SELECT matviewowner FROM pg_matviews WHERE matviewname = 'dependent_mat_view_1';
+
+-- Check the existence of the view on the worker node as well
+SELECT run_command_on_workers($$SELECT viewowner FROM pg_views WHERE viewname = 'dependent_view_1'$$);
+SELECT run_command_on_workers($$SELECT viewowner FROM pg_views WHERE viewname = 'dependent_view_2'$$);
+-- The materialized view is not expected to exist on the worker nodes
+SELECT run_command_on_workers($$SELECT count(*) FROM pg_matviews WHERE matviewname = 'dependent_mat_view_1';$$);
+RESET search_path;
+
 DROP SCHEMA alter_distributed_table CASCADE;
+DROP SCHEMA schema_to_test_alter_dist_table CASCADE;
@@ -356,6 +356,25 @@ ALTER VIEW IF EXISTS non_existing_view RENAME COLUMN val1 TO val2;
 ALTER VIEW IF EXISTS non_existing_view RENAME val2 TO val1;
 ALTER VIEW IF EXISTS non_existing_view SET SCHEMA view_prop_schema;
 
+-- Show that create view and alter view commands can be run from the same transaction,
+-- but drop view cannot, since we cannot use the metadata connection for drop view commands
+BEGIN;
+SET LOCAL citus.force_max_query_parallelization TO ON;
+CREATE TABLE table_1_to_view_in_transaction(a int);
+SELECT create_distributed_table('table_1_to_view_in_transaction', 'a');
+
+CREATE TABLE table_2_to_view_in_transaction(a int);
+SELECT create_distributed_table('table_2_to_view_in_transaction', 'a');
+
+-- we can create/alter views even in parallel mode, but not drop them
+CREATE VIEW view_in_transaction AS SELECT table_1_to_view_in_transaction.* FROM table_2_to_view_in_transaction JOIN table_1_to_view_in_transaction USING (a);
+ALTER TABLE view_in_transaction SET (security_barrier);
+ALTER VIEW view_in_transaction SET SCHEMA public;
+ALTER VIEW public.view_in_transaction SET SCHEMA view_prop_schema_inner;
+ALTER TABLE view_prop_schema_inner.view_in_transaction RENAME COLUMN a TO b;
+DROP VIEW view_prop_schema_inner.view_in_transaction;
+ROLLBACK;
+
 SET client_min_messages TO ERROR;
 DROP SCHEMA view_prop_schema_inner CASCADE;
 DROP SCHEMA view_prop_schema CASCADE;