Merge pull request #2486 from citusdata/fix/create-distributed-table-as-owner

Fix create_distributed_table as non-table-owner
Nils Dijk 2018-11-29 16:16:34 +01:00 committed by GitHub
commit e17d98b0e3
17 changed files with 485 additions and 178 deletions


@ -77,7 +77,6 @@ OBJS = src/backend/distributed/shared_library_init.o \
src/backend/distributed/test/distribution_metadata.o \
src/backend/distributed/test/fake_fdw.o \
src/backend/distributed/test/foreign_key_relationship_query.o \
src/backend/distributed/test/generate_ddl_commands.o \
src/backend/distributed/test/metadata_sync.o \
src/backend/distributed/test/partitioning_utils.o \
src/backend/distributed/test/progress_utils.o \


@ -622,6 +622,7 @@ GetTableCreationCommands(Oid relationId, bool includeSequenceDefaults)
char *tableSchemaDef = NULL;
char *tableColumnOptionsDef = NULL;
char *createSchemaCommand = NULL;
char *tableOwnerDef = NULL;
Oid schemaId = InvalidOid;
/*
@ -666,6 +667,12 @@ GetTableCreationCommands(Oid relationId, bool includeSequenceDefaults)
tableDDLEventList = lappend(tableDDLEventList, tableColumnOptionsDef);
}
tableOwnerDef = TableOwnerResetCommand(relationId);
if (tableOwnerDef != NULL)
{
tableDDLEventList = lappend(tableDDLEventList, tableOwnerDef);
}
/* revert back to original search_path */
PopOverrideSearchPath();
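
The tableOwnerDef added here appends an explicit owner-reset command to the table's DDL event list; this is where the ALTER TABLE ... OWNER TO lines in the regression output further down come from. A minimal sketch of what such a generator could look like, assuming only the TableOwner() helper that also appears in the CreateShardsOnWorkers hunk below (the actual TableOwnerResetCommand() in Citus may differ in details):

/* A minimal sketch, not the actual Citus implementation. */
#include "postgres.h"
#include "lib/stringinfo.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
#include "utils/ruleutils.h"

extern char *TableOwner(Oid relationId);    /* existing Citus helper, see below */

static char *
TableOwnerResetCommandSketch(Oid relationId)
{
    StringInfo ownerCommand = makeStringInfo();
    char *schemaName = get_namespace_name(get_rel_namespace(relationId));
    char *qualifiedName = quote_qualified_identifier(schemaName,
                                                     get_rel_name(relationId));

    /* e.g. "ALTER TABLE public.simple_table OWNER TO postgres" */
    appendStringInfo(ownerCommand, "ALTER TABLE %s OWNER TO %s",
                     qualifiedName, quote_identifier(TableOwner(relationId)));

    return ownerCommand->data;
}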


@ -487,7 +487,7 @@ CreateShardsOnWorkers(Oid distributedRelationId, List *shardPlacements,
bool useExclusiveConnection, bool colocatedShard)
{
DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(distributedRelationId);
char *placementOwner = TableOwner(distributedRelationId);
bool includeSequenceDefaults = false;
List *ddlCommandList = GetTableDDLEvents(distributedRelationId,
includeSequenceDefaults);
@ -562,12 +562,12 @@ CreateShardsOnWorkers(Oid distributedRelationId, List *shardPlacements,
relationShardList);
connection = GetPlacementListConnection(connectionFlags, placementAccessList,
placementOwner);
NULL);
}
else
{
connection = GetPlacementConnection(connectionFlags, shardPlacement,
placementOwner);
NULL);
}
if (useExclusiveConnection)
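
Passing NULL instead of placementOwner means the shards are now created over a connection for the current user rather than one opened as the table owner; ownership on the worker is instead restored by the ALTER TABLE ... OWNER TO command added to the DDL events above. A hedged sketch of the fallback this relies on, with illustrative names rather than Citus' actual connection API:

#include "postgres.h"
#include "miscadmin.h"

/*
 * Illustrative only: when no explicit placement user is requested, fall back
 * to the user running the command; Citus' real connection layer is more
 * involved than this.
 */
static char *
EffectivePlacementUser(char *requestedUser)
{
    if (requestedUser != NULL)
    {
        return requestedUser;
    }

    return GetUserNameFromId(GetUserId(), false);
}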


@ -1,68 +0,0 @@
/*-------------------------------------------------------------------------
*
* test/src/generate_ddl_commands.c
*
* This file contains functions to exercise DDL generation functionality
* within Citus.
*
* Copyright (c) 2014-2016, Citus Data, Inc.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "c.h"
#include "fmgr.h"
#include <stddef.h>
#include "catalog/pg_type.h"
#include "distributed/listutils.h"
#include "distributed/master_protocol.h"
#include "lib/stringinfo.h"
#include "nodes/makefuncs.h"
#include "nodes/nodes.h"
#include "nodes/parsenodes.h"
#include "nodes/pg_list.h"
#include "nodes/value.h"
#include "utils/array.h"
#include "utils/builtins.h"
#include "utils/palloc.h"
/* declarations for dynamic loading */
PG_FUNCTION_INFO_V1(table_ddl_command_array);
/*
* table_ddl_command_array returns an array of strings, each of which is a DDL
* command required to recreate a table (specified by OID).
*/
Datum
table_ddl_command_array(PG_FUNCTION_ARGS)
{
Oid distributedTableId = PG_GETARG_OID(0);
ArrayType *ddlCommandArrayType = NULL;
bool includeSequenceDefaults = true;
List *ddlCommandList = GetTableDDLEvents(distributedTableId, includeSequenceDefaults);
int ddlCommandCount = list_length(ddlCommandList);
Datum *ddlCommandDatumArray = palloc0(ddlCommandCount * sizeof(Datum));
ListCell *ddlCommandCell = NULL;
int ddlCommandIndex = 0;
Oid ddlCommandTypeId = TEXTOID;
foreach(ddlCommandCell, ddlCommandList)
{
char *ddlCommand = (char *) lfirst(ddlCommandCell);
Datum ddlCommandDatum = CStringGetTextDatum(ddlCommand);
ddlCommandDatumArray[ddlCommandIndex] = ddlCommandDatum;
ddlCommandIndex++;
}
ddlCommandArrayType = DatumArrayToArrayType(ddlCommandDatumArray, ddlCommandCount,
ddlCommandTypeId);
PG_RETURN_ARRAYTYPE_P(ddlCommandArrayType);
}


@ -36,6 +36,8 @@ SELECT citus.dump_network_traffic();
(0,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=postgres)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=postgres)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']")
(0,coordinator,"[""Query(query=SELECT worker_apply_shard_ddl_command (100400, 'CREATE TABLE public.copy_test (key integer, value integer)'))""]")
(0,worker,"[""RowDescription(fieldcount=1,fields=['F(name=worker_apply_shard_ddl_command,tableoid=0,colattrnum=0,typoid=2278,typlen=4,typmod=-1,format_code=0)'])"", 'DataRow(columncount=1,columns=[""C(length=0,value=b\\'\\')""])', 'CommandComplete(command=SELECT 1)', 'ReadyForQuery(state=idle)']")
(0,coordinator,"[""Query(query=SELECT worker_apply_shard_ddl_command (100400, 'ALTER TABLE public.copy_test OWNER TO postgres'))""]")
(0,worker,"[""RowDescription(fieldcount=1,fields=['F(name=worker_apply_shard_ddl_command,tableoid=0,colattrnum=0,typoid=2278,typlen=4,typmod=-1,format_code=0)'])"", 'DataRow(columncount=1,columns=[""C(length=0,value=b\\'\\')""])', 'CommandComplete(command=SELECT 1)', 'ReadyForQuery(state=idle)']")
(0,coordinator,"[""Query(query=BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(0, XX, 'XXXX-XX-XX XX:XX:XX.XXXXXX-XX');)""]")
(0,worker,"['CommandComplete(command=BEGIN)', ""RowDescription(fieldcount=1,fields=['F(name=assign_distributed_transaction_id,tableoid=0,colattrnum=0,typoid=2278,typlen=4,typmod=-1,format_code=0)'])"", 'DataRow(columncount=1,columns=[""C(length=0,value=b\\'\\')""])', 'CommandComplete(command=SELECT 1)', 'ReadyForQuery(state=in_transaction_block)']")
(0,coordinator,"['Query(query=COPY public.copy_test_XXXXXX FROM STDIN WITH (FORMAT BINARY))']")
@ -52,7 +54,7 @@ SELECT citus.dump_network_traffic();
(1,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=postgres)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=postgres)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']")
(1,coordinator,"['Query(query=COPY (SELECT count(1) AS count FROM copy_test_100400 copy_test WHERE true) TO STDOUT)']")
(1,worker,"[""CopyOutResponse(format=0,columncount=1,columns=['Anonymous(format=0)'])"", ""CopyData(data=b'4\\\\n')"", 'CopyDone()', 'CommandComplete(command=COPY 1)', 'ReadyForQuery(state=idle)']")
(20 rows)
(22 rows)
---- all of the following tests test behavior with 2 shard placements ----
SHOW citus.shard_replication_factor;


@ -634,7 +634,8 @@ SELECT * FROM master_get_table_ddl_events('unlogged_table');
master_get_table_ddl_events
--------------------------------------------------------------------
CREATE UNLOGGED TABLE public.unlogged_table (key text, value text)
(1 row)
ALTER TABLE public.unlogged_table OWNER TO postgres
(2 rows)
\c - - - :worker_1_port
SELECT relpersistence FROM pg_class WHERE relname LIKE 'unlogged_table_%';


@ -1,12 +1,5 @@
SET citus.next_shard_id TO 610000;
-- ===================================================================
-- create test functions
-- ===================================================================
CREATE FUNCTION table_ddl_command_array(regclass)
RETURNS text[]
AS 'citus'
LANGUAGE C STRICT;
-- ===================================================================
-- test ddl command generation functionality
-- ===================================================================
-- first make sure a simple table works
@ -15,30 +8,34 @@ CREATE TABLE simple_table (
last_name text,
id bigint
);
SELECT table_ddl_command_array('simple_table');
table_ddl_command_array
-----------------------------------------------------------------------------------
{"CREATE TABLE public.simple_table (first_name text, last_name text, id bigint)"}
(1 row)
SELECT master_get_table_ddl_events('simple_table');
master_get_table_ddl_events
-------------------------------------------------------------------------------
CREATE TABLE public.simple_table (first_name text, last_name text, id bigint)
ALTER TABLE public.simple_table OWNER TO postgres
(2 rows)
-- ensure not-null constraints are propagated
CREATE TABLE not_null_table (
city text,
id bigint not null
);
SELECT table_ddl_command_array('not_null_table');
table_ddl_command_array
------------------------------------------------------------------------
{"CREATE TABLE public.not_null_table (city text, id bigint NOT NULL)"}
(1 row)
SELECT master_get_table_ddl_events('not_null_table');
master_get_table_ddl_events
--------------------------------------------------------------------
CREATE TABLE public.not_null_table (city text, id bigint NOT NULL)
ALTER TABLE public.not_null_table OWNER TO postgres
(2 rows)
-- ensure tables not in search path are schema-prefixed
CREATE SCHEMA not_in_path CREATE TABLE simple_table (id bigint);
SELECT table_ddl_command_array('not_in_path.simple_table');
table_ddl_command_array
------------------------------------------------------------------------------------------------------------------------
{"CREATE SCHEMA IF NOT EXISTS not_in_path AUTHORIZATION postgres","CREATE TABLE not_in_path.simple_table (id bigint)"}
(1 row)
SELECT master_get_table_ddl_events('not_in_path.simple_table');
master_get_table_ddl_events
----------------------------------------------------------------
CREATE SCHEMA IF NOT EXISTS not_in_path AUTHORIZATION postgres
CREATE TABLE not_in_path.simple_table (id bigint)
ALTER TABLE not_in_path.simple_table OWNER TO postgres
(3 rows)
-- even more complex constraints should be preserved...
CREATE TABLE column_constraint_table (
@ -46,11 +43,12 @@ CREATE TABLE column_constraint_table (
last_name text,
age int CONSTRAINT non_negative_age CHECK (age >= 0)
);
SELECT table_ddl_command_array('column_constraint_table');
table_ddl_command_array
----------------------------------------------------------------------------------------------------------------------------------------------
{"CREATE TABLE public.column_constraint_table (first_name text, last_name text, age integer, CONSTRAINT non_negative_age CHECK (age >= 0))"}
(1 row)
SELECT master_get_table_ddl_events('column_constraint_table');
master_get_table_ddl_events
------------------------------------------------------------------------------------------------------------------------------------------
CREATE TABLE public.column_constraint_table (first_name text, last_name text, age integer, CONSTRAINT non_negative_age CHECK (age >= 0))
ALTER TABLE public.column_constraint_table OWNER TO postgres
(2 rows)
-- including table constraints
CREATE TABLE table_constraint_table (
@ -59,22 +57,24 @@ CREATE TABLE table_constraint_table (
max_bid decimal not null,
CONSTRAINT bids_ordered CHECK (min_bid > max_bid)
);
SELECT table_ddl_command_array('table_constraint_table');
table_ddl_command_array
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
{"CREATE TABLE public.table_constraint_table (bid_item_id bigint, min_bid numeric NOT NULL, max_bid numeric NOT NULL, CONSTRAINT bids_ordered CHECK (min_bid > max_bid))"}
(1 row)
SELECT master_get_table_ddl_events('table_constraint_table');
master_get_table_ddl_events
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
CREATE TABLE public.table_constraint_table (bid_item_id bigint, min_bid numeric NOT NULL, max_bid numeric NOT NULL, CONSTRAINT bids_ordered CHECK (min_bid > max_bid))
ALTER TABLE public.table_constraint_table OWNER TO postgres
(2 rows)
-- default values are supported
CREATE TABLE default_value_table (
name text,
price decimal default 0.00
);
SELECT table_ddl_command_array('default_value_table');
table_ddl_command_array
-------------------------------------------------------------------------------------
{"CREATE TABLE public.default_value_table (name text, price numeric DEFAULT 0.00)"}
(1 row)
SELECT master_get_table_ddl_events('default_value_table');
master_get_table_ddl_events
---------------------------------------------------------------------------------
CREATE TABLE public.default_value_table (name text, price numeric DEFAULT 0.00)
ALTER TABLE public.default_value_table OWNER TO postgres
(2 rows)
-- of course primary keys work...
CREATE TABLE pkey_table (
@ -82,22 +82,26 @@ CREATE TABLE pkey_table (
last_name text,
id bigint PRIMARY KEY
);
SELECT table_ddl_command_array('pkey_table');
table_ddl_command_array
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------
{"CREATE TABLE public.pkey_table (first_name text, last_name text, id bigint NOT NULL)","ALTER TABLE public.pkey_table ADD CONSTRAINT pkey_table_pkey PRIMARY KEY (id)"}
(1 row)
SELECT master_get_table_ddl_events('pkey_table');
master_get_table_ddl_events
--------------------------------------------------------------------------------------
CREATE TABLE public.pkey_table (first_name text, last_name text, id bigint NOT NULL)
ALTER TABLE public.pkey_table OWNER TO postgres
ALTER TABLE public.pkey_table ADD CONSTRAINT pkey_table_pkey PRIMARY KEY (id)
(3 rows)
-- as do unique indexes...
CREATE TABLE unique_table (
user_id bigint not null,
username text UNIQUE not null
);
SELECT table_ddl_command_array('unique_table');
table_ddl_command_array
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
{"CREATE TABLE public.unique_table (user_id bigint NOT NULL, username text NOT NULL)","ALTER TABLE public.unique_table ADD CONSTRAINT unique_table_username_key UNIQUE (username)"}
(1 row)
SELECT master_get_table_ddl_events('unique_table');
master_get_table_ddl_events
--------------------------------------------------------------------------------------------
CREATE TABLE public.unique_table (user_id bigint NOT NULL, username text NOT NULL)
ALTER TABLE public.unique_table OWNER TO postgres
ALTER TABLE public.unique_table ADD CONSTRAINT unique_table_username_key UNIQUE (username)
(3 rows)
-- and indexes used for clustering
CREATE TABLE clustered_table (
@ -106,11 +110,14 @@ CREATE TABLE clustered_table (
);
CREATE INDEX clustered_time_idx ON clustered_table (received_at);
CLUSTER clustered_table USING clustered_time_idx;
SELECT table_ddl_command_array('clustered_table');
table_ddl_command_array
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
{"CREATE TABLE public.clustered_table (data json NOT NULL, received_at timestamp without time zone NOT NULL)","CREATE INDEX clustered_time_idx ON public.clustered_table USING btree (received_at) TABLESPACE pg_default","ALTER TABLE public.clustered_table CLUSTER ON clustered_time_idx"}
(1 row)
SELECT master_get_table_ddl_events('clustered_table');
master_get_table_ddl_events
------------------------------------------------------------------------------------------------------------
CREATE TABLE public.clustered_table (data json NOT NULL, received_at timestamp without time zone NOT NULL)
ALTER TABLE public.clustered_table OWNER TO postgres
CREATE INDEX clustered_time_idx ON public.clustered_table USING btree (received_at) TABLESPACE pg_default
ALTER TABLE public.clustered_table CLUSTER ON clustered_time_idx
(4 rows)
-- fiddly things like storage type and statistics also work
CREATE TABLE fiddly_table (
@ -125,27 +132,31 @@ ALTER TABLE fiddly_table
ALTER ip_addr SET STORAGE EXTENDED,
ALTER traceroute SET STORAGE EXTERNAL,
ALTER ip_addr SET STATISTICS 500;
SELECT table_ddl_command_array('fiddly_table');
table_ddl_command_array
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
{"CREATE TABLE public.fiddly_table (hostname character(255) NOT NULL, os character(255) NOT NULL, ip_addr inet NOT NULL, traceroute text NOT NULL)","ALTER TABLE ONLY public.fiddly_table ALTER COLUMN hostname SET STORAGE PLAIN, ALTER COLUMN os SET STORAGE MAIN, ALTER COLUMN ip_addr SET STORAGE EXTENDED, ALTER COLUMN ip_addr SET STATISTICS 500, ALTER COLUMN traceroute SET STORAGE EXTERNAL"}
(1 row)
SELECT master_get_table_ddl_events('fiddly_table');
master_get_table_ddl_events
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
CREATE TABLE public.fiddly_table (hostname character(255) NOT NULL, os character(255) NOT NULL, ip_addr inet NOT NULL, traceroute text NOT NULL)
ALTER TABLE ONLY public.fiddly_table ALTER COLUMN hostname SET STORAGE PLAIN, ALTER COLUMN os SET STORAGE MAIN, ALTER COLUMN ip_addr SET STORAGE EXTENDED, ALTER COLUMN ip_addr SET STATISTICS 500, ALTER COLUMN traceroute SET STORAGE EXTERNAL
ALTER TABLE public.fiddly_table OWNER TO postgres
(3 rows)
-- test foreign tables using fake FDW
CREATE FOREIGN TABLE foreign_table (
id bigint not null,
full_name text not null default ''
) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true');
SELECT table_ddl_command_array('foreign_table');
SELECT master_get_table_ddl_events('foreign_table');
NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined
table_ddl_command_array
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
{"CREATE SERVER fake_fdw_server FOREIGN DATA WRAPPER fake_fdw","CREATE FOREIGN TABLE public.foreign_table (id bigint NOT NULL, full_name text DEFAULT ''::text NOT NULL) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true')"}
(1 row)
master_get_table_ddl_events
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
CREATE SERVER fake_fdw_server FOREIGN DATA WRAPPER fake_fdw
CREATE FOREIGN TABLE public.foreign_table (id bigint NOT NULL, full_name text DEFAULT ''::text NOT NULL) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true')
ALTER TABLE public.foreign_table OWNER TO postgres
(3 rows)
-- propagating views is not supported
CREATE VIEW local_view AS SELECT * FROM simple_table;
SELECT table_ddl_command_array('local_view');
SELECT master_get_table_ddl_events('local_view');
ERROR: local_view is not a regular, foreign or partitioned table
-- clean up
DROP VIEW IF EXISTS local_view;


@ -14,9 +14,10 @@ SELECT * FROM master_get_table_ddl_events('lineitem');
master_get_table_ddl_events
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
CREATE TABLE public.lineitem (l_orderkey bigint NOT NULL, l_partkey integer NOT NULL, l_suppkey integer NOT NULL, l_linenumber integer NOT NULL, l_quantity numeric(15,2) NOT NULL, l_extendedprice numeric(15,2) NOT NULL, l_discount numeric(15,2) NOT NULL, l_tax numeric(15,2) NOT NULL, l_returnflag character(1) NOT NULL, l_linestatus character(1) NOT NULL, l_shipdate date NOT NULL, l_commitdate date NOT NULL, l_receiptdate date NOT NULL, l_shipinstruct character(25) NOT NULL, l_shipmode character(10) NOT NULL, l_comment character varying(44) NOT NULL)
ALTER TABLE public.lineitem OWNER TO postgres
CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate) TABLESPACE pg_default
ALTER TABLE public.lineitem ADD CONSTRAINT lineitem_pkey PRIMARY KEY (l_orderkey, l_linenumber)
(3 rows)
(4 rows)
SELECT * FROM master_get_new_shardid();
master_get_new_shardid


@ -61,13 +61,14 @@ SELECT unnest(master_metadata_snapshot());
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres
CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL)
ALTER TABLE public.mx_test_table OWNER TO postgres
ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
ALTER TABLE public.mx_test_table OWNER TO postgres
INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('public.mx_test_table'::regclass, 'h', column_name_to_column('public.mx_test_table','col_1'), 0, 's')
SELECT worker_create_truncate_trigger('public.mx_test_table')
INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007)
INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('public.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('public.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('public.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('public.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('public.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('public.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('public.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('public.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647')
(12 rows)
(13 rows)
-- Show that CREATE INDEX commands are included in the metadata snapshot
CREATE INDEX mx_index ON mx_test_table(col_2);
@ -80,6 +81,7 @@ SELECT unnest(master_metadata_snapshot());
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres
CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL)
ALTER TABLE public.mx_test_table OWNER TO postgres
CREATE INDEX mx_index ON public.mx_test_table USING btree (col_2) TABLESPACE pg_default
ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
ALTER TABLE public.mx_test_table OWNER TO postgres
@ -87,7 +89,7 @@ SELECT unnest(master_metadata_snapshot());
SELECT worker_create_truncate_trigger('public.mx_test_table')
INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007)
INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('public.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('public.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('public.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('public.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('public.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('public.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('public.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('public.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647')
(13 rows)
(14 rows)
-- Show that schema changes are included in the metadata snapshot
CREATE SCHEMA mx_testing_schema;
@ -105,6 +107,7 @@ SELECT unnest(master_metadata_snapshot());
ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres
CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL)
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) TABLESPACE pg_default
ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
@ -112,7 +115,7 @@ SELECT unnest(master_metadata_snapshot());
SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007)
INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('mx_testing_schema.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('mx_testing_schema.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('mx_testing_schema.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('mx_testing_schema.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('mx_testing_schema.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('mx_testing_schema.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('mx_testing_schema.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647')
(15 rows)
(16 rows)
-- Show that append distributed tables are not included in the metadata snapshot
CREATE TABLE non_mx_test_table (col_1 int, col_2 text);
@ -134,6 +137,7 @@ SELECT unnest(master_metadata_snapshot());
ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres
CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL)
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) TABLESPACE pg_default
ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
@ -141,7 +145,7 @@ SELECT unnest(master_metadata_snapshot());
SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007)
INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('mx_testing_schema.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('mx_testing_schema.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('mx_testing_schema.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('mx_testing_schema.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('mx_testing_schema.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('mx_testing_schema.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('mx_testing_schema.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647')
(15 rows)
(16 rows)
-- Show that range distributed tables are not included in the metadata snapshot
UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass;
@ -156,6 +160,7 @@ SELECT unnest(master_metadata_snapshot());
ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres
CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL)
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) TABLESPACE pg_default
ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
@ -163,7 +168,7 @@ SELECT unnest(master_metadata_snapshot());
SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007)
INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('mx_testing_schema.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('mx_testing_schema.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('mx_testing_schema.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('mx_testing_schema.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('mx_testing_schema.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('mx_testing_schema.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('mx_testing_schema.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647')
(15 rows)
(16 rows)
-- Test start_metadata_sync_to_node UDF
-- Ensure that hasmetadata=false for all nodes


@ -47,8 +47,16 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
CREATE USER no_access;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE ROLE some_role;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
GRANT some_role TO full_access;
GRANT some_role TO read_access;
GRANT ALL ON TABLE test TO full_access;
GRANT SELECT ON TABLE test TO read_access;
CREATE SCHEMA full_access_user_schema;
REVOKE ALL ON SCHEMA full_access_user_schema FROM PUBLIC;
GRANT USAGE ON SCHEMA full_access_user_schema TO full_access;
SET citus.enable_ddl_propagation TO DEFAULT;
\c - - - :worker_1_port
CREATE USER full_access;
@ -60,10 +68,18 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
CREATE USER no_access;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE ROLE some_role;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
GRANT some_role TO full_access;
GRANT some_role TO read_access;
GRANT ALL ON TABLE test_1420000 TO full_access;
GRANT SELECT ON TABLE test_1420000 TO read_access;
GRANT ALL ON TABLE test_1420002 TO full_access;
GRANT SELECT ON TABLE test_1420002 TO read_access;
CREATE SCHEMA full_access_user_schema;
REVOKE ALL ON SCHEMA full_access_user_schema FROM PUBLIC;
GRANT USAGE ON SCHEMA full_access_user_schema TO full_access;
\c - - - :worker_2_port
CREATE USER full_access;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
@ -74,10 +90,18 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
CREATE USER no_access;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE ROLE some_role;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
GRANT some_role TO full_access;
GRANT some_role TO read_access;
GRANT ALL ON TABLE test_1420001 TO full_access;
GRANT SELECT ON TABLE test_1420001 TO read_access;
GRANT ALL ON TABLE test_1420003 TO full_access;
GRANT SELECT ON TABLE test_1420003 TO read_access;
CREATE SCHEMA full_access_user_schema;
REVOKE ALL ON SCHEMA full_access_user_schema FROM PUBLIC;
GRANT USAGE ON SCHEMA full_access_user_schema TO full_access;
\c - - - :master_port
-- create prepare tests
PREPARE prepare_insert AS INSERT INTO test VALUES ($1);
@ -282,7 +306,110 @@ SELECT task_tracker_cleanup_job(1);
(1 row)
DROP TABLE my_table, singleshard, test, test_coloc;
-- a table should be distributable by the super user even when it already contains data
SET ROLE full_access;
CREATE TABLE my_table_with_data (id integer, val integer);
INSERT INTO my_table_with_data VALUES (1,2);
RESET ROLE;
SELECT create_distributed_table('my_table_with_data', 'id');
NOTICE: Copying data from local table...
create_distributed_table
--------------------------
(1 row)
SELECT count(*) FROM my_table_with_data;
count
-------
1
(1 row)
-- a table owned by a role should be distributable by a user that has been granted that role,
-- but not by a user who has not been granted the role
SET ROLE full_access;
CREATE TABLE my_role_table_with_data (id integer, val integer);
ALTER TABLE my_role_table_with_data OWNER TO some_role;
INSERT INTO my_role_table_with_data VALUES (1,2);
RESET ROLE;
-- we first try to distribute it with a user that does not have the role so we can reuse the table
SET ROLE no_access;
SELECT create_distributed_table('my_role_table_with_data', 'id');
ERROR: must be owner of table my_role_table_with_data
RESET ROLE;
-- then we try to distribute it with a user that has the role but is different from the one that created it
SET ROLE read_access;
SELECT create_distributed_table('my_role_table_with_data', 'id');
NOTICE: Copying data from local table...
create_distributed_table
--------------------------
(1 row)
RESET ROLE;
-- lastly we verify that the table owner is set to the role, not the user that distributed the table
SELECT result FROM run_command_on_workers($cmd$
SELECT tableowner FROM pg_tables WHERE tablename LIKE 'my_role_table_with_data%' LIMIT 1;
$cmd$);
result
-----------
some_role
some_role
(2 rows)
-- we want to verify a user without CREATE access cannot distribute its table, but can get
-- its table distributed by the super user
-- we want to make sure the schema and user are set up in such a way that the user can't
-- create a table
SET ROLE full_access;
CREATE TABLE full_access_user_schema.t1 (id int);
ERROR: permission denied for schema full_access_user_schema
LINE 1: CREATE TABLE full_access_user_schema.t1 (id int);
^
RESET ROLE;
-- now we create the table for the user
CREATE TABLE full_access_user_schema.t1 (id int);
ALTER TABLE full_access_user_schema.t1 OWNER TO full_access;
-- make sure we can insert data
SET ROLE full_access;
INSERT INTO full_access_user_schema.t1 VALUES (1),(2),(3);
-- distributing the table should fail on the worker node since the user is not
-- allowed to create a table there
SELECT create_distributed_table('full_access_user_schema.t1', 'id');
ERROR: permission denied for database regression
CONTEXT: while executing command on localhost:57638
RESET ROLE;
-- now we distribute the table as super user
SELECT create_distributed_table('full_access_user_schema.t1', 'id');
NOTICE: Copying data from local table...
create_distributed_table
--------------------------
(1 row)
-- verify the owner of the shards for the distributed tables
SELECT result FROM run_command_on_workers($cmd$
SELECT tableowner FROM pg_tables WHERE
true
AND schemaname = 'full_access_user_schema'
AND tablename LIKE 't1_%'
LIMIT 1;
$cmd$);
result
-------------
full_access
full_access
(2 rows)
DROP SCHEMA full_access_user_schema CASCADE;
NOTICE: drop cascades to table full_access_user_schema.t1
DROP TABLE
my_table,
my_table_with_data,
my_role_table_with_data,
singleshard,
test,
test_coloc;
DROP USER full_access;
DROP USER read_access;
DROP USER no_access;
DROP ROLE some_role;


@ -47,8 +47,16 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
CREATE USER no_access;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE ROLE some_role;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
GRANT some_role TO full_access;
GRANT some_role TO read_access;
GRANT ALL ON TABLE test TO full_access;
GRANT SELECT ON TABLE test TO read_access;
CREATE SCHEMA full_access_user_schema;
REVOKE ALL ON SCHEMA full_access_user_schema FROM PUBLIC;
GRANT USAGE ON SCHEMA full_access_user_schema TO full_access;
SET citus.enable_ddl_propagation TO DEFAULT;
\c - - - :worker_1_port
CREATE USER full_access;
@ -60,10 +68,18 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
CREATE USER no_access;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE ROLE some_role;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
GRANT some_role TO full_access;
GRANT some_role TO read_access;
GRANT ALL ON TABLE test_1420000 TO full_access;
GRANT SELECT ON TABLE test_1420000 TO read_access;
GRANT ALL ON TABLE test_1420002 TO full_access;
GRANT SELECT ON TABLE test_1420002 TO read_access;
CREATE SCHEMA full_access_user_schema;
REVOKE ALL ON SCHEMA full_access_user_schema FROM PUBLIC;
GRANT USAGE ON SCHEMA full_access_user_schema TO full_access;
\c - - - :worker_2_port
CREATE USER full_access;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
@ -74,10 +90,18 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
CREATE USER no_access;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE ROLE some_role;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
GRANT some_role TO full_access;
GRANT some_role TO read_access;
GRANT ALL ON TABLE test_1420001 TO full_access;
GRANT SELECT ON TABLE test_1420001 TO read_access;
GRANT ALL ON TABLE test_1420003 TO full_access;
GRANT SELECT ON TABLE test_1420003 TO read_access;
CREATE SCHEMA full_access_user_schema;
REVOKE ALL ON SCHEMA full_access_user_schema FROM PUBLIC;
GRANT USAGE ON SCHEMA full_access_user_schema TO full_access;
\c - - - :master_port
-- create prepare tests
PREPARE prepare_insert AS INSERT INTO test VALUES ($1);
@ -282,7 +306,110 @@ SELECT task_tracker_cleanup_job(1);
(1 row)
DROP TABLE my_table, singleshard, test, test_coloc;
-- a table should be distributable by the super user even when it already contains data
SET ROLE full_access;
CREATE TABLE my_table_with_data (id integer, val integer);
INSERT INTO my_table_with_data VALUES (1,2);
RESET ROLE;
SELECT create_distributed_table('my_table_with_data', 'id');
NOTICE: Copying data from local table...
create_distributed_table
--------------------------
(1 row)
SELECT count(*) FROM my_table_with_data;
count
-------
1
(1 row)
-- a table owned by a role should be distributable by a user that has been granted that role,
-- but not by a user who has not been granted the role
SET ROLE full_access;
CREATE TABLE my_role_table_with_data (id integer, val integer);
ALTER TABLE my_role_table_with_data OWNER TO some_role;
INSERT INTO my_role_table_with_data VALUES (1,2);
RESET ROLE;
-- we first try to distribute it with a user that does not have the role so we can reuse the table
SET ROLE no_access;
SELECT create_distributed_table('my_role_table_with_data', 'id');
ERROR: must be owner of relation my_role_table_with_data
RESET ROLE;
-- then we try to distribute it with a user that has the role but is different from the one that created it
SET ROLE read_access;
SELECT create_distributed_table('my_role_table_with_data', 'id');
NOTICE: Copying data from local table...
create_distributed_table
--------------------------
(1 row)
RESET ROLE;
-- lastly we verify that the table owner is set to the role, not the user that distributed the table
SELECT result FROM run_command_on_workers($cmd$
SELECT tableowner FROM pg_tables WHERE tablename LIKE 'my_role_table_with_data%' LIMIT 1;
$cmd$);
result
-----------
some_role
some_role
(2 rows)
-- we want to verify a user without CREATE access cannot distribute its table, but can get
-- its table distributed by the super user
-- we want to make sure the schema and user are set up in such a way that the user can't
-- create a table
SET ROLE full_access;
CREATE TABLE full_access_user_schema.t1 (id int);
ERROR: permission denied for schema full_access_user_schema
LINE 1: CREATE TABLE full_access_user_schema.t1 (id int);
^
RESET ROLE;
-- now we create the table for the user
CREATE TABLE full_access_user_schema.t1 (id int);
ALTER TABLE full_access_user_schema.t1 OWNER TO full_access;
-- make sure we can insert data
SET ROLE full_access;
INSERT INTO full_access_user_schema.t1 VALUES (1),(2),(3);
-- distributing the table should fail on the worker node since the user is not
-- allowed to create a table there
SELECT create_distributed_table('full_access_user_schema.t1', 'id');
ERROR: permission denied for database regression
CONTEXT: while executing command on localhost:57638
RESET ROLE;
-- now we distribute the table as super user
SELECT create_distributed_table('full_access_user_schema.t1', 'id');
NOTICE: Copying data from local table...
create_distributed_table
--------------------------
(1 row)
-- verify the owner of the shards for the distributed tables
SELECT result FROM run_command_on_workers($cmd$
SELECT tableowner FROM pg_tables WHERE
true
AND schemaname = 'full_access_user_schema'
AND tablename LIKE 't1_%'
LIMIT 1;
$cmd$);
result
-------------
full_access
full_access
(2 rows)
DROP SCHEMA full_access_user_schema CASCADE;
NOTICE: drop cascades to table full_access_user_schema.t1
DROP TABLE
my_table,
my_table_with_data,
my_role_table_with_data,
singleshard,
test,
test_coloc;
DROP USER full_access;
DROP USER read_access;
DROP USER no_access;
DROP ROLE some_role;


@ -99,7 +99,8 @@ SELECT master_get_table_ddl_events('date_partitioned_table');
master_get_table_ddl_events
---------------------------------------------------------------------------------------------------
CREATE TABLE public.date_partitioned_table (id integer, "time" date) PARTITION BY RANGE ("time")
(1 row)
ALTER TABLE public.date_partitioned_table OWNER TO postgres
(2 rows)
-- now create the partitions
CREATE TABLE date_partition_2006 PARTITION OF date_partitioned_table FOR VALUES FROM ('2006-01-01') TO ('2007-01-01');
@ -180,7 +181,8 @@ SELECT master_get_table_ddl_events('date_partition_2007_100');
master_get_table_ddl_events
-----------------------------------------------------------------------
CREATE TABLE public.date_partition_2007_100 (id integer, "time" date)
(1 row)
ALTER TABLE public.date_partition_2007_100 OWNER TO postgres
(2 rows)
-- now break the partitioning hierarchy
SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public',
@ -327,7 +329,8 @@ SELECT master_get_table_ddl_events('multi_column_partitioned');
master_get_table_ddl_events
------------------------------------------------------------------------------------------------------------------------------------------------------
CREATE TABLE public.multi_column_partitioned (a integer, b integer, c text) PARTITION BY RANGE (a, (((a + b) + 1)), public.some_function(upper(c)))
(1 row)
ALTER TABLE public.multi_column_partitioned OWNER TO postgres
(2 rows)
SELECT drop_and_recreate_partitioned_table('multi_column_partitioned');
drop_and_recreate_partitioned_table
@ -368,7 +371,8 @@ SELECT master_get_table_ddl_events('list_partitioned');
master_get_table_ddl_events
-------------------------------------------------------------------------------------------------------------------------
CREATE TABLE public.list_partitioned (col1 numeric, col2 numeric, col3 character varying(10)) PARTITION BY LIST (col1)
(1 row)
ALTER TABLE public.list_partitioned OWNER TO postgres
(2 rows)
SELECT drop_and_recreate_partitioned_table('list_partitioned');
drop_and_recreate_partitioned_table


@ -99,7 +99,8 @@ SELECT master_get_table_ddl_events('date_partitioned_table');
master_get_table_ddl_events
---------------------------------------------------------------------------------------------------
CREATE TABLE public.date_partitioned_table (id integer, "time" date) PARTITION BY RANGE ("time")
(1 row)
ALTER TABLE public.date_partitioned_table OWNER TO postgres
(2 rows)
-- now create the partitions
CREATE TABLE date_partition_2006 PARTITION OF date_partitioned_table FOR VALUES FROM ('2006-01-01') TO ('2007-01-01');
@ -180,7 +181,8 @@ SELECT master_get_table_ddl_events('date_partition_2007_100');
master_get_table_ddl_events
-----------------------------------------------------------------------
CREATE TABLE public.date_partition_2007_100 (id integer, "time" date)
(1 row)
ALTER TABLE public.date_partition_2007_100 OWNER TO postgres
(2 rows)
-- now break the partitioning hierarchy
SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public',
@ -326,7 +328,8 @@ SELECT master_get_table_ddl_events('multi_column_partitioned');
master_get_table_ddl_events
------------------------------------------------------------------------------------------------------------------------------------------------------
CREATE TABLE public.multi_column_partitioned (a integer, b integer, c text) PARTITION BY RANGE (a, (((a + b) + 1)), public.some_function(upper(c)))
(1 row)
ALTER TABLE public.multi_column_partitioned OWNER TO postgres
(2 rows)
SELECT drop_and_recreate_partitioned_table('multi_column_partitioned');
drop_and_recreate_partitioned_table
@ -367,7 +370,8 @@ SELECT master_get_table_ddl_events('list_partitioned');
master_get_table_ddl_events
-------------------------------------------------------------------------------------------------------------------------
CREATE TABLE public.list_partitioned (col1 numeric, col2 numeric, col3 character varying(10)) PARTITION BY LIST (col1)
(1 row)
ALTER TABLE public.list_partitioned OWNER TO postgres
(2 rows)
SELECT drop_and_recreate_partitioned_table('list_partitioned');
drop_and_recreate_partitioned_table


@ -148,7 +148,8 @@ SELECT master_get_table_ddl_events('date_partition_2007_100');
master_get_table_ddl_events
-----------------------------------------------------------------------
CREATE TABLE public.date_partition_2007_100 (id integer, "time" date)
(1 row)
ALTER TABLE public.date_partition_2007_100 OWNER TO postgres
(2 rows)
-- now break the partitioning hierarchy
SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public',


@ -1438,7 +1438,8 @@ SELECT master_get_table_ddl_events('reference_schema.reference_table_ddl');
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
CREATE SCHEMA IF NOT EXISTS reference_schema AUTHORIZATION postgres
CREATE TABLE reference_schema.reference_table_ddl (value_2 double precision DEFAULT 25.0, value_3 text NOT NULL, value_4 timestamp without time zone, value_5 double precision)
(2 rows)
ALTER TABLE reference_schema.reference_table_ddl OWNER TO postgres
(3 rows)
-- in reality, we wouldn't need to repair any reference table shard placements
-- however, the test could be relevant for other purposes


@ -1,16 +1,6 @@
SET citus.next_shard_id TO 610000;
-- ===================================================================
-- create test functions
-- ===================================================================
CREATE FUNCTION table_ddl_command_array(regclass)
RETURNS text[]
AS 'citus'
LANGUAGE C STRICT;
-- ===================================================================
-- test ddl command generation functionality
-- ===================================================================
@ -22,7 +12,7 @@ CREATE TABLE simple_table (
id bigint
);
SELECT table_ddl_command_array('simple_table');
SELECT master_get_table_ddl_events('simple_table');
-- ensure not-null constraints are propagated
CREATE TABLE not_null_table (
@ -30,12 +20,12 @@ CREATE TABLE not_null_table (
id bigint not null
);
SELECT table_ddl_command_array('not_null_table');
SELECT master_get_table_ddl_events('not_null_table');
-- ensure tables not in search path are schema-prefixed
CREATE SCHEMA not_in_path CREATE TABLE simple_table (id bigint);
SELECT table_ddl_command_array('not_in_path.simple_table');
SELECT master_get_table_ddl_events('not_in_path.simple_table');
-- even more complex constraints should be preserved...
CREATE TABLE column_constraint_table (
@ -44,7 +34,7 @@ CREATE TABLE column_constraint_table (
age int CONSTRAINT non_negative_age CHECK (age >= 0)
);
SELECT table_ddl_command_array('column_constraint_table');
SELECT master_get_table_ddl_events('column_constraint_table');
-- including table constraints
CREATE TABLE table_constraint_table (
@ -54,7 +44,7 @@ CREATE TABLE table_constraint_table (
CONSTRAINT bids_ordered CHECK (min_bid > max_bid)
);
SELECT table_ddl_command_array('table_constraint_table');
SELECT master_get_table_ddl_events('table_constraint_table');
-- default values are supported
CREATE TABLE default_value_table (
@ -62,7 +52,7 @@ CREATE TABLE default_value_table (
price decimal default 0.00
);
SELECT table_ddl_command_array('default_value_table');
SELECT master_get_table_ddl_events('default_value_table');
-- of course primary keys work...
CREATE TABLE pkey_table (
@ -71,7 +61,7 @@ CREATE TABLE pkey_table (
id bigint PRIMARY KEY
);
SELECT table_ddl_command_array('pkey_table');
SELECT master_get_table_ddl_events('pkey_table');
-- as do unique indexes...
CREATE TABLE unique_table (
@ -79,7 +69,7 @@ CREATE TABLE unique_table (
username text UNIQUE not null
);
SELECT table_ddl_command_array('unique_table');
SELECT master_get_table_ddl_events('unique_table');
-- and indexes used for clustering
CREATE TABLE clustered_table (
@ -91,7 +81,7 @@ CREATE INDEX clustered_time_idx ON clustered_table (received_at);
CLUSTER clustered_table USING clustered_time_idx;
SELECT table_ddl_command_array('clustered_table');
SELECT master_get_table_ddl_events('clustered_table');
-- fiddly things like storage type and statistics also work
CREATE TABLE fiddly_table (
@ -108,7 +98,7 @@ ALTER TABLE fiddly_table
ALTER traceroute SET STORAGE EXTERNAL,
ALTER ip_addr SET STATISTICS 500;
SELECT table_ddl_command_array('fiddly_table');
SELECT master_get_table_ddl_events('fiddly_table');
-- test foreign tables using fake FDW
CREATE FOREIGN TABLE foreign_table (
@ -116,12 +106,12 @@ CREATE FOREIGN TABLE foreign_table (
full_name text not null default ''
) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true');
SELECT table_ddl_command_array('foreign_table');
SELECT master_get_table_ddl_events('foreign_table');
-- propagating views is not supported
CREATE VIEW local_view AS SELECT * FROM simple_table;
SELECT table_ddl_command_array('local_view');
SELECT master_get_table_ddl_events('local_view');
-- clean up
DROP VIEW IF EXISTS local_view;


@ -29,16 +29,26 @@ SET citus.enable_ddl_propagation TO off;
CREATE USER full_access;
CREATE USER read_access;
CREATE USER no_access;
CREATE ROLE some_role;
GRANT some_role TO full_access;
GRANT some_role TO read_access;
GRANT ALL ON TABLE test TO full_access;
GRANT SELECT ON TABLE test TO read_access;
CREATE SCHEMA full_access_user_schema;
REVOKE ALL ON SCHEMA full_access_user_schema FROM PUBLIC;
GRANT USAGE ON SCHEMA full_access_user_schema TO full_access;
SET citus.enable_ddl_propagation TO DEFAULT;
\c - - - :worker_1_port
CREATE USER full_access;
CREATE USER read_access;
CREATE USER no_access;
CREATE ROLE some_role;
GRANT some_role TO full_access;
GRANT some_role TO read_access;
GRANT ALL ON TABLE test_1420000 TO full_access;
GRANT SELECT ON TABLE test_1420000 TO read_access;
@ -46,10 +56,17 @@ GRANT SELECT ON TABLE test_1420000 TO read_access;
GRANT ALL ON TABLE test_1420002 TO full_access;
GRANT SELECT ON TABLE test_1420002 TO read_access;
CREATE SCHEMA full_access_user_schema;
REVOKE ALL ON SCHEMA full_access_user_schema FROM PUBLIC;
GRANT USAGE ON SCHEMA full_access_user_schema TO full_access;
\c - - - :worker_2_port
CREATE USER full_access;
CREATE USER read_access;
CREATE USER no_access;
CREATE ROLE some_role;
GRANT some_role TO full_access;
GRANT some_role TO read_access;
GRANT ALL ON TABLE test_1420001 TO full_access;
GRANT SELECT ON TABLE test_1420001 TO read_access;
@ -57,6 +74,10 @@ GRANT SELECT ON TABLE test_1420001 TO read_access;
GRANT ALL ON TABLE test_1420003 TO full_access;
GRANT SELECT ON TABLE test_1420003 TO read_access;
CREATE SCHEMA full_access_user_schema;
REVOKE ALL ON SCHEMA full_access_user_schema FROM PUBLIC;
GRANT USAGE ON SCHEMA full_access_user_schema TO full_access;
\c - - - :master_port
-- create prepare tests
@ -173,7 +194,81 @@ SELECT create_distributed_table('my_table', 'id');
SELECT result FROM run_command_on_workers($$SELECT tableowner FROM pg_tables WHERE tablename LIKE 'my_table_%' LIMIT 1$$);
SELECT task_tracker_cleanup_job(1);
DROP TABLE my_table, singleshard, test, test_coloc;
-- a table should be distributable by the super user even when it already contains data
SET ROLE full_access;
CREATE TABLE my_table_with_data (id integer, val integer);
INSERT INTO my_table_with_data VALUES (1,2);
RESET ROLE;
SELECT create_distributed_table('my_table_with_data', 'id');
SELECT count(*) FROM my_table_with_data;
-- a table owned by a role should be distributable by a user that has been granted that role,
-- but not by a user who has not been granted the role
SET ROLE full_access;
CREATE TABLE my_role_table_with_data (id integer, val integer);
ALTER TABLE my_role_table_with_data OWNER TO some_role;
INSERT INTO my_role_table_with_data VALUES (1,2);
RESET ROLE;
-- we first try to distribute it with a user that does not have the role so we can reuse the table
SET ROLE no_access;
SELECT create_distributed_table('my_role_table_with_data', 'id');
RESET ROLE;
-- then we try to distribute it with a user that has the role but is different from the one that created it
SET ROLE read_access;
SELECT create_distributed_table('my_role_table_with_data', 'id');
RESET ROLE;
-- lastly we verify that the table owner is set to the role, not the user that distributed the table
SELECT result FROM run_command_on_workers($cmd$
SELECT tableowner FROM pg_tables WHERE tablename LIKE 'my_role_table_with_data%' LIMIT 1;
$cmd$);
-- we want to verify a user without CREATE access cannot distribute its table, but can get
-- its table distributed by the super user
-- we want to make sure the schema and user are set up in such a way that the user can't
-- create a table
SET ROLE full_access;
CREATE TABLE full_access_user_schema.t1 (id int);
RESET ROLE;
-- now we create the table for the user
CREATE TABLE full_access_user_schema.t1 (id int);
ALTER TABLE full_access_user_schema.t1 OWNER TO full_access;
-- make sure we can insert data
SET ROLE full_access;
INSERT INTO full_access_user_schema.t1 VALUES (1),(2),(3);
-- distributing the table should fail on the worker node since the user is not
-- allowed to create a table there
SELECT create_distributed_table('full_access_user_schema.t1', 'id');
RESET ROLE;
-- now we distribute the table as super user
SELECT create_distributed_table('full_access_user_schema.t1', 'id');
-- verify the owner of the shards for the distributed tables
SELECT result FROM run_command_on_workers($cmd$
SELECT tableowner FROM pg_tables WHERE
true
AND schemaname = 'full_access_user_schema'
AND tablename LIKE 't1_%'
LIMIT 1;
$cmd$);
DROP SCHEMA full_access_user_schema CASCADE;
DROP TABLE
my_table,
my_table_with_data,
my_role_table_with_data,
singleshard,
test,
test_coloc;
DROP USER full_access;
DROP USER read_access;
DROP USER no_access;
DROP ROLE some_role;