Merge branch 'main', remote-tracking branch 'origin' into circleci-gha-migration

pull/7154/head
Gokhan Gulbiz 2023-08-28 13:19:03 +03:00
commit c272bb5112
No known key found for this signature in database
GPG Key ID: 608EF06B6BD1B45B
32 changed files with 2336 additions and 596 deletions

View File

@ -6,6 +6,10 @@ on:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
get_postgres_versions_from_file:
@ -119,7 +123,6 @@ jobs:
- debian-buster-all
- debian-bookworm-all
- debian-bullseye-all
- ubuntu-bionic-all
- ubuntu-focal-all
- ubuntu-jammy-all
- ubuntu-kinetic-all

View File

@ -11,7 +11,7 @@ endif
include Makefile.global
all: extension pg_send_cancellation
all: extension
# build columnar only
@ -40,22 +40,14 @@ clean-full:
install-downgrades:
$(MAKE) -C src/backend/distributed/ install-downgrades
install-all: install-headers install-pg_send_cancellation
install-all: install-headers
$(MAKE) -C src/backend/columnar/ install-all
$(MAKE) -C src/backend/distributed/ install-all
# build citus_send_cancellation binary
pg_send_cancellation:
$(MAKE) -C src/bin/pg_send_cancellation/ all
install-pg_send_cancellation: pg_send_cancellation
$(MAKE) -C src/bin/pg_send_cancellation/ install
clean-pg_send_cancellation:
$(MAKE) -C src/bin/pg_send_cancellation/ clean
.PHONY: pg_send_cancellation install-pg_send_cancellation clean-pg_send_cancellation
# Add to generic targets
install: install-extension install-headers install-pg_send_cancellation
clean: clean-extension clean-pg_send_cancellation
install: install-extension install-headers
clean: clean-extension
# apply or check style
reindent:

View File

@ -31,6 +31,8 @@
static AlterOwnerStmt * RecreateAlterDatabaseOwnerStmt(Oid databaseOid);
static Oid get_database_owner(Oid db_oid);
List * PreprocessGrantOnDatabaseStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext);
/* controlled via GUC */
bool EnableAlterDatabaseOwner = true;
@ -107,3 +109,41 @@ get_database_owner(Oid db_oid)
return dba;
}
/*
 * PreprocessGrantOnDatabaseStmt is executed before the statement is applied to the local
 * postgres instance.
 *
 * In this stage we can prepare the commands that need to be run on all workers to grant
 * on databases.
 */
List *
PreprocessGrantOnDatabaseStmt(Node *node, const char *queryString,
							  ProcessUtilityContext processUtilityContext)
{
	if (!ShouldPropagate())
	{
		return NIL;
	}

	GrantStmt *grantStmt = castNode(GrantStmt, node);
	Assert(grantStmt->objtype == OBJECT_DATABASE);

	/* nothing to propagate when no databases are listed */
	if (list_length(grantStmt->objects) == 0)
	{
		return NIL;
	}

	EnsureCoordinator();

	/* wrap the deparsed statement so it does not propagate recursively on workers */
	char *grantCommand = DeparseTreeNode((Node *) grantStmt);
	List *workerCommands = list_make3(DISABLE_DDL_PROPAGATION,
									  (void *) grantCommand,
									  ENABLE_DDL_PROPAGATION);

	return NodeDDLTaskList(NON_COORDINATOR_NODES, workerCommands);
}

View File

@ -432,6 +432,18 @@ static DistributeObjectOps Database_AlterOwner = {
.address = AlterDatabaseOwnerObjectAddress,
.markDistributed = false,
};
static DistributeObjectOps Database_Grant = {
.deparse = DeparseGrantOnDatabaseStmt,
.qualify = NULL,
.preprocess = PreprocessGrantOnDatabaseStmt,
.postprocess = NULL,
.objectType = OBJECT_DATABASE,
.operationType = DIST_OPS_ALTER,
.address = NULL,
.markDistributed = false,
};
static DistributeObjectOps Domain_Alter = {
.deparse = DeparseAlterDomainStmt,
.qualify = QualifyAlterDomainStmt,
@ -1911,6 +1923,11 @@ GetDistributeObjectOps(Node *node)
return &Routine_Grant;
}
case OBJECT_DATABASE:
{
return &Database_Grant;
}
default:
{
return &Any_Grant;

View File

@ -425,7 +425,8 @@ EnsureCopyCanRunOnRelation(Oid relationId)
*/
if (RecoveryInProgress() && WritableStandbyCoordinator)
{
ereport(ERROR, (errmsg("COPY command to Citus tables is not allowed in "
ereport(ERROR, (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
errmsg("COPY command to Citus tables is not allowed in "
"read-only mode"),
errhint("All COPY commands to citus tables happen via 2PC, "
"and 2PC requires the database to be in a writable state."),

View File

@ -77,6 +77,14 @@ PreprocessCreateStatisticsStmt(Node *node, const char *queryString,
EnsureCoordinator();
if (!(stmt->defnames))
{
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot create statistics without a name on a "
"Citus table"),
errhint("Consider specifying a name for the statistics")));
}
QualifyTreeNode((Node *) stmt);
Oid statsOid = get_statistics_object_oid(stmt->defnames, true);

View File

@ -49,3 +49,79 @@ AppendAlterDatabaseOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt)
quote_identifier(strVal((String *) stmt->object)),
RoleSpecString(stmt->newowner, true));
}
/*
 * AppendGrantDatabases appends the " ON DATABASE name1, name2" clause of a
 * GRANT/REVOKE statement to buf, quoting each database name.
 */
static void
AppendGrantDatabases(StringInfo buf, GrantStmt *stmt)
{
	appendStringInfo(buf, " ON DATABASE ");

	bool isFirstDatabase = true;
	ListCell *databaseCell = NULL;
	foreach(databaseCell, stmt->objects)
	{
		if (!isFirstDatabase)
		{
			appendStringInfo(buf, ", ");
		}
		isFirstDatabase = false;

		char *databaseName = strVal(lfirst(databaseCell));
		appendStringInfoString(buf, quote_identifier(databaseName));
	}
}
/*
 * AppendGrantOnDatabaseStmt deparses a GRANT/REVOKE ... ON DATABASE statement
 * into buf, including grant-option, RESTRICT/CASCADE, and GRANTED BY clauses.
 */
static void
AppendGrantOnDatabaseStmt(StringInfo buf, GrantStmt *stmt)
{
	Assert(stmt->objtype == OBJECT_DATABASE);

	const char *commandKeyword = stmt->is_grant ? "GRANT" : "REVOKE";
	appendStringInfo(buf, "%s ", commandKeyword);

	/* REVOKE GRANT OPTION FOR ... revokes only the grant option itself */
	if (!stmt->is_grant && stmt->grant_option)
	{
		appendStringInfo(buf, "GRANT OPTION FOR ");
	}

	AppendGrantPrivileges(buf, stmt);
	AppendGrantDatabases(buf, stmt);
	AppendGrantGrantees(buf, stmt);

	if (stmt->is_grant && stmt->grant_option)
	{
		appendStringInfo(buf, " WITH GRANT OPTION");
	}

	/* RESTRICT/CASCADE only apply to REVOKE */
	if (!stmt->is_grant)
	{
		switch (stmt->behavior)
		{
			case DROP_RESTRICT:
			{
				appendStringInfo(buf, " RESTRICT");
				break;
			}

			case DROP_CASCADE:
			{
				appendStringInfo(buf, " CASCADE");
				break;
			}

			default:
			{
				break;
			}
		}
	}

	if (stmt->grantor)
	{
		appendStringInfo(buf, " GRANTED BY %s", RoleSpecString(stmt->grantor, true));
	}

	appendStringInfo(buf, ";");
}
/*
 * DeparseGrantOnDatabaseStmt returns the SQL text for a GRANT/REVOKE ... ON
 * DATABASE parse node. The returned string is palloc'd in the current context.
 */
char *
DeparseGrantOnDatabaseStmt(Node *node)
{
	GrantStmt *grantStmt = castNode(GrantStmt, node);
	Assert(grantStmt->objtype == OBJECT_DATABASE);

	StringInfoData buf = { 0 };
	initStringInfo(&buf);

	AppendGrantOnDatabaseStmt(&buf, grantStmt);

	return buf.data;
}

View File

@ -354,7 +354,8 @@ EnsureModificationsCanRun(void)
{
if (RecoveryInProgress() && !WritableStandbyCoordinator)
{
ereport(ERROR, (errmsg("writing to worker nodes is not currently allowed"),
ereport(ERROR, (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
errmsg("writing to worker nodes is not currently allowed"),
errdetail("the database is read-only")));
}
@ -415,7 +416,8 @@ EnsureModificationsCanRunOnRelation(Oid relationId)
if (modifiedTableReplicated)
{
ereport(ERROR, (errmsg("writing to worker nodes is not currently "
ereport(ERROR, (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
errmsg("writing to worker nodes is not currently "
"allowed for replicated tables such as reference "
"tables or hash distributed tables with replication "
"factor greater than 1."),

View File

@ -992,12 +992,18 @@ BuildRemoteExplainQuery(char *queryString, ExplainState *es)
appendStringInfo(explainQuery,
"EXPLAIN (ANALYZE %s, VERBOSE %s, "
"COSTS %s, BUFFERS %s, WAL %s, "
#if PG_VERSION_NUM >= PG_VERSION_16
"GENERIC_PLAN %s, "
#endif
"TIMING %s, SUMMARY %s, FORMAT %s) %s",
es->analyze ? "TRUE" : "FALSE",
es->verbose ? "TRUE" : "FALSE",
es->costs ? "TRUE" : "FALSE",
es->buffers ? "TRUE" : "FALSE",
es->wal ? "TRUE" : "FALSE",
#if PG_VERSION_NUM >= PG_VERSION_16
es->generic ? "TRUE" : "FALSE",
#endif
es->timing ? "TRUE" : "FALSE",
es->summary ? "TRUE" : "FALSE",
formatStr,

View File

@ -1,70 +0,0 @@
/*-------------------------------------------------------------------------
*
* pg_send_cancellation.c
*
* This file contains functions to test setting pg_send_cancellation.
*
* Copyright (c) Citus Data, Inc.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "miscadmin.h"
#include "fmgr.h"
#include "port.h"
#include "postmaster/postmaster.h"
#define PG_SEND_CANCELLATION_VERSION \
"pg_send_cancellation (PostgreSQL) " PG_VERSION "\n"
/* exports for SQL callable functions */
PG_FUNCTION_INFO_V1(get_cancellation_key);
PG_FUNCTION_INFO_V1(run_pg_send_cancellation);
/*
 * get_cancellation_key returns the cancellation key of the current process
 * as an integer.
 *
 * Exposes the backend-private MyCancelKey so regression tests can build a
 * valid cancellation request for this backend.
 */
Datum
get_cancellation_key(PG_FUNCTION_ARGS)
{
PG_RETURN_INT32(MyCancelKey);
}
/*
 * run_pg_send_cancellation runs the pg_send_cancellation program with
 * the specified arguments.
 *
 * Arguments: pid (int4) and cancellation key (int4) of the target backend.
 * The cancellation is sent to localhost on this server's PostPortNumber.
 * Errors out if the binary cannot be located or the command exits non-zero.
 */
Datum
run_pg_send_cancellation(PG_FUNCTION_ARGS)
{
int pid = PG_GETARG_INT32(0);
int cancelKey = PG_GETARG_INT32(1);
char sendCancellationPath[MAXPGPATH];
char command[1024];
/* locate the pg_send_cancellation binary relative to the running postgres executable */
if (find_other_exec(my_exec_path, "pg_send_cancellation",
PG_SEND_CANCELLATION_VERSION,
sendCancellationPath) < 0)
{
ereport(ERROR, (errmsg("could not locate pg_send_cancellation")));
}
/*
 * NOTE(review): arguments are interpolated into a shell command and run
 * via system(); tolerable only because this is test-only code and the
 * interpolated values are integers, not user-controlled strings.
 */
pg_snprintf(command, sizeof(command), "%s %d %d %s %d",
sendCancellationPath, pid, cancelKey, "localhost", PostPortNumber);
if (system(command) != 0)
{
ereport(ERROR, (errmsg("failed to run command: %s", command)));
}
PG_RETURN_VOID();
}

View File

@ -1 +0,0 @@
pg_send_cancellation

View File

@ -1,24 +0,0 @@
citus_top_builddir = ../../..
PROGRAM = pg_send_cancellation
PGFILEDESC = "pg_send_cancellation sends a custom cancellation message"
OBJS = $(citus_abs_srcdir)/src/bin/pg_send_cancellation/pg_send_cancellation.o
PG_CPPFLAGS = -I$(libpq_srcdir)
PG_LIBS_INTERNAL = $(libpq_pgport)
PG_LDFLAGS += $(LDFLAGS)
include $(citus_top_builddir)/Makefile.global
# We reuse all the Citus flags (incl. security flags), but we are building a program not a shared library
# We sometimes build Citus with a newer version of gcc than Postgres was built
# with and this breaks LTO (link-time optimization). Even if disabling it can
# have some perf impact this is ok because pg_send_cancellation is only used
# for tests anyway.
override CFLAGS := $(filter-out -shared, $(CFLAGS)) -fno-lto
# Filter out unneeded dependencies
override LIBS := $(filter-out -lz -lreadline -ledit -ltermcap -lncurses -lcurses -lpam, $(LIBS))
clean: clean-pg_send_cancellation
clean-pg_send_cancellation:
rm -f $(PROGRAM) $(OBJS)

View File

@ -1,47 +0,0 @@
# pg_send_cancellation
pg_send_cancellation is a program for manually sending a cancellation
to a Postgres endpoint. It is effectively a command-line version of
PQcancel in libpq, but it can use any PID or cancellation key.
We use pg_send_cancellation primarily to propagate cancellations between pgbouncers
behind a load balancer. Since the cancellation protocol involves
opening a new connection, the new connection may go to a different
node that does not recognize the cancellation key. To handle that
scenario, we modified pgbouncer to pass unrecognized cancellation
keys to a shell command.
Users can configure the cancellation_command, which will be run with:
```
<cancellation_command> <client ip> <client port> <pid> <cancel key>
```
Note that pgbouncer does not use actual PIDs. Instead, it generates the PID and cancellation key together as a random 8-byte number. This makes the chance of collisions exceedingly small.
By providing pg_send_cancellation as part of Citus, we can use a shell script that pgbouncer invokes to propagate the cancellation to all *other* worker nodes in the same cluster, for example:
```bash
#!/bin/sh
remote_ip=$1
remote_port=$2
pid=$3
cancel_key=$4
postgres_path=/usr/pgsql-14/bin
pgbouncer_port=6432
nodes_query="select nodename from pg_dist_node where groupid > 0 and groupid not in (select groupid from pg_dist_local_group) and nodecluster = current_setting('citus.cluster_name')"
# Get hostnames of other worker nodes in the cluster, and send cancellation to their pgbouncers
$postgres_path/psql -c "$nodes_query" -tAX | xargs -n 1 sh -c "$postgres_path/pg_send_cancellation $pid $cancel_key \$0 $pgbouncer_port"
```
One thing we need to be careful about is that the cancellations do not get forwarded
back-and-forth. This is handled in pgbouncer by setting the last bit of all generated
cancellation keys (sent to clients) to 1, and setting the last bit of all forwarded keys to 0.
That way, when a pgbouncer receives a cancellation key with the last bit set to 0,
it knows it is from another pgbouncer and should not forward further, and should set
the last bit to 1 when comparing to stored cancellation keys.
Another thing we need to be careful about is that the integers should be encoded
as big endian on the wire.

View File

@ -1,261 +0,0 @@
/*
* pg_send_cancellation is a program for manually sending a cancellation
* to a Postgres endpoint. It is effectively a command-line version of
* PQcancel in libpq, but it can use any PID or cancellation key.
*
* Portions Copyright (c) Citus Data, Inc.
*
* For the internal_cancel function:
*
* Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* Permission to use, copy, modify, and distribute this software and its
* documentation for any purpose, without fee, and without a written agreement
* is hereby granted, provided that the above copyright notice and this
* paragraph and the following two paragraphs appear in all copies.
*
* IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
* LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
* DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
*/
#include "postgres_fe.h"
#include <sys/stat.h>
#include <fcntl.h>
#include <ctype.h>
#include <time.h>
#include <unistd.h>
#include "common/ip.h"
#include "common/link-canary.h"
#include "common/scram-common.h"
#include "common/string.h"
#include "libpq-fe.h"
#include "libpq-int.h"
#include "mb/pg_wchar.h"
#include "port/pg_bswap.h"
#define ERROR_BUFFER_SIZE 256
static int internal_cancel(SockAddr *raddr, int be_pid, int be_key,
char *errbuf, int errbufsize);
/*
 * main entry point into the pg_send_cancellation program.
 *
 * Usage: pg_send_cancellation <pid> <cancel key> <hostname> [port]
 * Resolves the host name, then dispatches a cancellation request carrying
 * the given backend PID and cancellation key. Exits 0 on success, 1 on
 * any failure.
 */
int
main(int argc, char *argv[])
{
/* print version string and exit; format must match PG_SEND_CANCELLATION_VERSION */
if (argc == 2 && strcmp(argv[1], "-V") == 0)
{
pg_fprintf(stdout, "pg_send_cancellation (PostgreSQL) " PG_VERSION "\n");
return 0;
}
if (argc < 4 || argc > 5)
{
char *program = argv[0];
pg_fprintf(stderr, "%s requires 4 arguments\n\n", program);
pg_fprintf(stderr, "Usage: %s <pid> <cancel key> <hostname> [port]\n", program);
return 1;
}
char *pidString = argv[1];
char *cancelKeyString = argv[2];
char *host = argv[3];
char *portString = "5432"; /* default Postgres port when no [port] argument given */
if (argc >= 5)
{
portString = argv[4];
}
/*
 * Parse the PID and cancellation key.
 * NOTE(review): strtol failures are not detected here; non-numeric input
 * silently becomes 0.
 */
int pid = strtol(pidString, NULL, 10);
int cancelAuthCode = strtol(cancelKeyString, NULL, 10);
char errorBuffer[ERROR_BUFFER_SIZE] = { 0 };
struct addrinfo *ipAddressList;
struct addrinfo hint;
int ipAddressListFamily = AF_UNSPEC;
SockAddr socketAddress;
memset(&hint, 0, sizeof(hint));
hint.ai_socktype = SOCK_STREAM;
hint.ai_family = ipAddressListFamily;
/* resolve the hostname to an IP */
int ret = pg_getaddrinfo_all(host, portString, &hint, &ipAddressList);
if (ret || !ipAddressList)
{
pg_fprintf(stderr, "could not translate host name \"%s\" to address: %s\n",
host, gai_strerror(ret));
return 1;
}
if (ipAddressList->ai_addrlen > sizeof(socketAddress.addr))
{
pg_fprintf(stderr, "invalid address length");
return 1;
}
/*
 * Explanation of IGNORE-BANNED:
 * memcpy with a length obtained from getaddrinfo is a common, accepted
 * pattern; the length check above guarantees ai_addrlen fits within
 * socketAddress.addr, so this copy cannot overflow.
 */
memcpy(&socketAddress.addr, ipAddressList->ai_addr, ipAddressList->ai_addrlen); /* IGNORE-BANNED */
socketAddress.salen = ipAddressList->ai_addrlen;
/* send the cancellation; on failure internal_cancel fills errorBuffer */
bool cancelSucceeded = internal_cancel(&socketAddress, pid, cancelAuthCode,
errorBuffer, sizeof(errorBuffer));
if (!cancelSucceeded)
{
pg_fprintf(stderr, "sending cancellation to %s:%s failed: %s",
host, portString, errorBuffer);
return 1;
}
/* NOTE(review): ipAddressList is not freed on the error paths above; harmless since the process exits immediately */
pg_freeaddrinfo_all(ipAddressListFamily, ipAddressList);
return 0;
}
/* *INDENT-OFF* */
/*
 * internal_cancel is copied from fe-connect.c
 *
 * The return value is true if the cancel request was successfully
 * dispatched, false if not (in which case an error message is available).
 * Note: successful dispatch is no guarantee that there will be any effect at
 * the backend. The application must read the operation result as usual.
 *
 * CAUTION: we want this routine to be safely callable from a signal handler
 * (for example, an application might want to call it in a SIGINT handler).
 * This means we cannot use any C library routine that might be non-reentrant.
 * malloc/free are often non-reentrant, and anything that might call them is
 * just as dangerous. We avoid sprintf here for that reason. Building up
 * error messages with strcpy/strcat is tedious but should be quite safe.
 * We also save/restore errno in case the signal handler support doesn't.
 *
 * internal_cancel() is an internal helper function to make code-sharing
 * between the two versions of the cancel function possible.
 *
 * NOTE(review): declared int but returns true/false; kept as-is to remain
 * close to the upstream libpq code it was copied from.
 */
static int
internal_cancel(SockAddr *raddr, int be_pid, int be_key,
char *errbuf, int errbufsize)
{
int save_errno = SOCK_ERRNO;
pgsocket tmpsock = PGINVALID_SOCKET;
char sebuf[PG_STRERROR_R_BUFLEN];
int maxlen;
/* wire format: big-endian length word followed by the CancelRequest payload */
struct
{
uint32 packetlen;
CancelRequestPacket cp;
} crp;
/*
 * We need to open a temporary connection to the postmaster. Do this with
 * only kernel calls.
 */
if ((tmpsock = socket(raddr->addr.ss_family, SOCK_STREAM, 0)) == PGINVALID_SOCKET)
{
strlcpy(errbuf, "PQcancel() -- socket() failed: ", errbufsize);
goto cancel_errReturn;
}
retry3:
if (connect(tmpsock, (struct sockaddr *) &raddr->addr, raddr->salen) < 0)
{
if (SOCK_ERRNO == EINTR)
/* Interrupted system call - we'll just try again */
goto retry3;
strlcpy(errbuf, "PQcancel() -- connect() failed: ", errbufsize);
goto cancel_errReturn;
}
/*
 * We needn't set nonblocking I/O or NODELAY options here.
 */
/* Create and send the cancel request packet. */
crp.packetlen = pg_hton32((uint32) sizeof(crp));
crp.cp.cancelRequestCode = (MsgType) pg_hton32(CANCEL_REQUEST_CODE);
crp.cp.backendPID = pg_hton32(be_pid);
crp.cp.cancelAuthCode = pg_hton32(be_key);
retry4:
if (send(tmpsock, (char *) &crp, sizeof(crp), 0) != (int) sizeof(crp))
{
if (SOCK_ERRNO == EINTR)
/* Interrupted system call - we'll just try again */
goto retry4;
strlcpy(errbuf, "PQcancel() -- send() failed: ", errbufsize);
goto cancel_errReturn;
}
/*
 * Wait for the postmaster to close the connection, which indicates that
 * it's processed the request. Without this delay, we might issue another
 * command only to find that our cancel zaps that command instead of the
 * one we thought we were canceling. Note we don't actually expect this
 * read to obtain any data, we are just waiting for EOF to be signaled.
 */
retry5:
if (recv(tmpsock, (char *) &crp, 1, 0) < 0)
{
if (SOCK_ERRNO == EINTR)
/* Interrupted system call - we'll just try again */
goto retry5;
/* we ignore other error conditions */
}
/* All done */
closesocket(tmpsock);
SOCK_ERRNO_SET(save_errno);
return true;
cancel_errReturn:
/*
 * Make sure we don't overflow the error buffer. Leave space for the \n at
 * the end, and for the terminating zero.
 */
maxlen = errbufsize - strlen(errbuf) - 2;
if (maxlen >= 0)
{
/*
 * Explanation of IGNORE-BANNED:
 * This is well-tested libpq code that we would like to preserve in its
 * original form. The appropriate length calculation is done above.
 */
strncat(errbuf, SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)), /* IGNORE-BANNED */
maxlen);
strcat(errbuf, "\n"); /* IGNORE-BANNED */
}
if (tmpsock != PGINVALID_SOCKET)
closesocket(tmpsock);
SOCK_ERRNO_SET(save_errno);
return false;
}
/* *INDENT-ON* */

View File

@ -220,6 +220,9 @@ extern List * AlterDatabaseOwnerObjectAddress(Node *node, bool missing_ok, bool
isPostprocess);
extern List * DatabaseOwnerDDLCommands(const ObjectAddress *address);
extern List * PreprocessGrantOnDatabaseStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext);
/* domain.c - forward declarations */
extern List * CreateDomainStmtObjectAddress(Node *node, bool missing_ok, bool
isPostprocess);
@ -235,6 +238,7 @@ extern List * RenameDomainStmtObjectAddress(Node *node, bool missing_ok, bool
extern CreateDomainStmt * RecreateDomainStmt(Oid domainOid);
extern Oid get_constraint_typid(Oid conoid);
/* extension.c - forward declarations */
extern bool IsDropCitusExtensionStmt(Node *parsetree);
extern List * GetDependentFDWsToExtension(Oid extensionId);

View File

@ -210,6 +210,7 @@ extern char * DeparseAlterExtensionStmt(Node *stmt);
/* forward declarations for deparse_database_stmts.c */
extern char * DeparseAlterDatabaseOwnerStmt(Node *node);
extern char * DeparseGrantOnDatabaseStmt(Node *node);
/* forward declaration for deparse_publication_stmts.c */
extern char * DeparseCreatePublicationStmt(Node *stmt);

View File

@ -10,7 +10,6 @@ test: isolation_move_placement_vs_modification
test: isolation_move_placement_vs_modification_fk
test: isolation_tenant_isolation_with_fkey_to_reference
test: isolation_ref2ref_foreign_keys_enterprise
test: isolation_pg_send_cancellation
test: isolation_shard_move_vs_start_metadata_sync
test: isolation_tenant_isolation
test: isolation_tenant_isolation_nonblocking

View File

@ -0,0 +1,967 @@
-- Public role has connect,temp,temporary privileges on database
-- To test these scenarios, we need to revoke these privileges from public role
-- since public role privileges are inherited by new roles/users
revoke connect,temp,temporary on database regression from public;
CREATE SCHEMA grant_on_database_propagation;
SET search_path TO grant_on_database_propagation;
-- test grant/revoke CREATE privilege propagation on database
create user myuser;
grant create on database regression to myuser;
select has_database_privilege('myuser','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
\c - - - :worker_1_port;
select has_database_privilege('myuser','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
\c - - - :master_port
revoke create on database regression from myuser;
select has_database_privilege('myuser','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
\c - - - :worker_1_port
select has_database_privilege('myuser','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
\c - - - :master_port
drop user myuser;
---------------------------------------------------------------------
-- test grant/revoke CONNECT privilege propagation on database
create user myuser;
grant CONNECT on database regression to myuser;
select has_database_privilege('myuser','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
\c - - - :worker_1_port;
select has_database_privilege('myuser','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
\c - - - :master_port
revoke connect on database regression from myuser;
select has_database_privilege('myuser','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
\c - - - :worker_1_port
select has_database_privilege('myuser','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
\c - - - :master_port
drop user myuser;
---------------------------------------------------------------------
-- test grant/revoke TEMP privilege propagation on database
create user myuser;
-- test grant/revoke temp on database
grant TEMP on database regression to myuser;
select has_database_privilege('myuser','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
\c - - - :worker_1_port;
select has_database_privilege('myuser','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
\c - - - :master_port
revoke TEMP on database regression from myuser;
select has_database_privilege('myuser','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
\c - - - :worker_1_port
select has_database_privilege('myuser','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
\c - - - :master_port
drop user myuser;
---------------------------------------------------------------------
-- test temporary privilege on database
create user myuser;
-- test grant/revoke temporary on database
grant TEMPORARY on database regression to myuser;
select has_database_privilege('myuser','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
\c - - - :worker_1_port;
select has_database_privilege('myuser','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
\c - - - :master_port
revoke TEMPORARY on database regression from myuser;
select has_database_privilege('myuser','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
\c - - - :worker_1_port
select has_database_privilege('myuser','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
\c - - - :master_port
drop user myuser;
---------------------------------------------------------------------
-- test ALL privileges with ALL statement on database
create user myuser;
grant ALL on database regression to myuser;
select has_database_privilege('myuser','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
\c - - - :worker_1_port;
select has_database_privilege('myuser','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
\c - - - :master_port
revoke ALL on database regression from myuser;
select has_database_privilege('myuser','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
\c - - - :worker_1_port
select has_database_privilege('myuser','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
\c - - - :master_port
drop user myuser;
---------------------------------------------------------------------
-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database
create user myuser;
grant CREATE,CONNECT,TEMP,TEMPORARY on database regression to myuser;
select has_database_privilege('myuser','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
\c - - - :worker_1_port;
select has_database_privilege('myuser','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
\c - - - :master_port
RESET ROLE;
revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser;
select has_database_privilege('myuser','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
\c - - - :worker_1_port
select has_database_privilege('myuser','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
\c - - - :master_port
drop user myuser;
---------------------------------------------------------------------
-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database with grant option
create user myuser;
create user myuser_1;
grant CREATE,CONNECT,TEMP,TEMPORARY on database regression to myuser;
set role myuser;
--here since myuser does not have grant option, it should fail
grant CREATE,CONNECT,TEMP,TEMPORARY on database regression to myuser_1;
WARNING: no privileges were granted for "regression"
select has_database_privilege('myuser_1','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
\c - - - :worker_1_port
select has_database_privilege('myuser_1','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
\c - - - :master_port
RESET ROLE;
grant CREATE,CONNECT,TEMP,TEMPORARY on database regression to myuser with grant option;
set role myuser;
--here since myuser have grant option, it should succeed
grant CREATE,CONNECT,TEMP,TEMPORARY on database regression to myuser_1 granted by myuser;
select has_database_privilege('myuser_1','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
\c - - - :worker_1_port
select has_database_privilege('myuser_1','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
\c - - - :master_port
RESET ROLE;
--below test should fail and should throw an error since myuser_1 still have the dependent privileges
revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser restrict;
ERROR: dependent privileges exist
HINT: Use CASCADE to revoke them too.
--below test should fail and should throw an error since myuser_1 still have the dependent privileges
revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser restrict ;
ERROR: dependent privileges exist
HINT: Use CASCADE to revoke them too.
--below test should succeed and should not throw any error since myuser_1 privileges are revoked with cascade
revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser cascade ;
--here we test if myuser still have the privileges after revoke grant option for
select has_database_privilege('myuser','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
\c - - - :worker_1_port
select has_database_privilege('myuser','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
\c - - - :master_port
reset role;
revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser;
revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser_1;
drop user myuser_1;
drop user myuser;
---------------------------------------------------------------------
-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database multi database
-- and multi user
create user myuser;
create user myuser_1;
create database test_db;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
SELECT result FROM run_command_on_workers($$create database test_db$$);
result
---------------------------------------------------------------------
CREATE DATABASE
CREATE DATABASE
(2 rows)
revoke connect,temp,temporary on database test_db from public;
grant CREATE,CONNECT,TEMP,TEMPORARY on database regression,test_db to myuser,myuser_1;
select has_database_privilege('myuser','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','test_db', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','test_db', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','test_db', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','test_db', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','test_db', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','test_db', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','test_db', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','test_db', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
\c - - - :worker_1_port
select has_database_privilege('myuser','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','test_db', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','test_db', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','test_db', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser','test_db', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','test_db', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','test_db', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','test_db', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
select has_database_privilege('myuser_1','test_db', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
t
(1 row)
\c - - - :master_port
RESET ROLE;
--below test should fail and should throw an error
revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression,test_db from myuser ;
--below test should succeed and should not throw any error
revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression,test_db from myuser_1;
--below test should succeed and should not throw any error
revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression,test_db from myuser cascade;
select has_database_privilege('myuser','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','test_db', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','test_db', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','test_db', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','test_db', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','test_db', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','test_db', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','test_db', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','test_db', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
\c - - - :worker_1_port
select has_database_privilege('myuser','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','test_db', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','test_db', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','test_db', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser','test_db', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','regression', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','regression', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','regression', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','regression', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','test_db', 'CREATE');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','test_db', 'CONNECT');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','test_db', 'TEMP');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
select has_database_privilege('myuser_1','test_db', 'TEMPORARY');
has_database_privilege
---------------------------------------------------------------------
f
(1 row)
\c - - - :master_port
reset role;
drop user myuser_1;
drop user myuser;
drop database test_db;
SELECT result FROM run_command_on_workers($$drop database test_db$$);
result
---------------------------------------------------------------------
DROP DATABASE
DROP DATABASE
(2 rows)
---------------------------------------------------------------------
-- rollbacks public role database privileges to original state
grant connect,temp,temporary on database regression to public;
SET client_min_messages TO ERROR;
DROP SCHEMA grant_on_database_propagation CASCADE;
---------------------------------------------------------------------

View File

@ -572,13 +572,6 @@ WARNING: Query could not find the intermediate result file "squares_2", it was
-- test refreshing mat views
SET client_min_messages TO ERROR;
CREATE USER some_other_user;
SELECT run_command_on_workers($$GRANT ALL ON DATABASE regression TO some_other_user;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,GRANT)
(localhost,57638,t,GRANT)
(2 rows)
GRANT ALL ON DATABASE regression TO some_other_user;
RESET client_min_messages;
\c - some_other_user

View File

@ -1,42 +0,0 @@
Parsed test spec with 2 sessions
starting permutation: s1-register s2-lock s1-lock s2-wrong-cancel-1 s2-wrong-cancel-2 s2-cancel
step s1-register:
INSERT INTO cancel_table VALUES (pg_backend_pid(), get_cancellation_key());
step s2-lock:
BEGIN;
LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;
step s1-lock:
BEGIN;
LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;
END;
<waiting ...>
step s2-wrong-cancel-1:
SELECT run_pg_send_cancellation(pid + 1, cancel_key) FROM cancel_table;
run_pg_send_cancellation
---------------------------------------------------------------------
(1 row)
step s2-wrong-cancel-2:
SELECT run_pg_send_cancellation(pid, cancel_key + 1) FROM cancel_table;
run_pg_send_cancellation
---------------------------------------------------------------------
(1 row)
step s2-cancel:
SELECT run_pg_send_cancellation(pid, cancel_key) FROM cancel_table;
END;
run_pg_send_cancellation
---------------------------------------------------------------------
(1 row)
step s1-lock: <... completed>
ERROR: canceling statement due to user request

View File

@ -12,6 +12,8 @@ SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
t
(1 row)
CREATE SCHEMA multi_deparse_shard_query;
SET search_path TO multi_deparse_shard_query;
SET citus.next_shard_id TO 13100000;
SET citus.shard_replication_factor TO 1;
CREATE FUNCTION deparse_shard_query_test(text)
@ -74,7 +76,7 @@ SELECT deparse_shard_query_test('
INSERT INTO raw_events_1
SELECT * FROM raw_events_1;
');
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at) SELECT raw_events_1_1.tenant_id, raw_events_1_1.value_1, raw_events_1_1.value_2, raw_events_1_1.value_3, raw_events_1_1.value_4, raw_events_1_1.value_5, raw_events_1_1.value_6, raw_events_1_1.value_7, raw_events_1_1.event_at FROM public.raw_events_1 raw_events_1_1
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at) SELECT raw_events_1_1.tenant_id, raw_events_1_1.value_1, raw_events_1_1.value_2, raw_events_1_1.value_3, raw_events_1_1.value_4, raw_events_1_1.value_5, raw_events_1_1.value_6, raw_events_1_1.value_7, raw_events_1_1.event_at FROM multi_deparse_shard_query.raw_events_1 raw_events_1_1
deparse_shard_query_test
---------------------------------------------------------------------
@ -87,7 +89,7 @@ SELECT
FROM
raw_events_1;
');
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, event_at) SELECT raw_events_1_1.tenant_id, raw_events_1_1.value_4, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1 raw_events_1_1
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_4, value_6, event_at) SELECT raw_events_1_1.tenant_id, raw_events_1_1.value_4, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1 raw_events_1_1
deparse_shard_query_test
---------------------------------------------------------------------
@ -101,7 +103,7 @@ SELECT
FROM
raw_events_1;
');
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT raw_events_1_1.tenant_id, (raw_events_1_1.value_5)::integer AS value_5, raw_events_1_1.value_4, (raw_events_1_1.value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1 raw_events_1_1
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT raw_events_1_1.tenant_id, (raw_events_1_1.value_5)::integer AS value_5, raw_events_1_1.value_4, (raw_events_1_1.value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1 raw_events_1_1
deparse_shard_query_test
---------------------------------------------------------------------
@ -115,7 +117,7 @@ SELECT
FROM
raw_events_2;
');
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT raw_events_2.tenant_id, (raw_events_2.value_5)::integer AS value_5, raw_events_2.value_4, (raw_events_2.value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_2
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT raw_events_2.tenant_id, (raw_events_2.value_5)::integer AS value_5, raw_events_2.value_4, (raw_events_2.value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_2
deparse_shard_query_test
---------------------------------------------------------------------
@ -131,7 +133,7 @@ FROM
GROUP BY
tenant_id, date_trunc(\'hour\', event_at)
');
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, average_value_3, sum_value_4, average_value_6, rollup_hour) SELECT raw_events_1.tenant_id, sum(raw_events_1.value_1) AS sum, avg(raw_events_1.value_3) AS avg, sum(raw_events_1.value_4) AS sum, avg(raw_events_1.value_6) AS avg, date_trunc('hour'::text, (raw_events_1.event_at)::timestamp with time zone) AS date_trunc FROM public.raw_events_1 GROUP BY raw_events_1.tenant_id, (date_trunc('hour'::text, (raw_events_1.event_at)::timestamp with time zone))
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1, average_value_3, sum_value_4, average_value_6, rollup_hour) SELECT raw_events_1.tenant_id, sum(raw_events_1.value_1) AS sum, avg(raw_events_1.value_3) AS avg, sum(raw_events_1.value_4) AS sum, avg(raw_events_1.value_6) AS avg, date_trunc('hour'::text, (raw_events_1.event_at)::timestamp with time zone) AS date_trunc FROM multi_deparse_shard_query.raw_events_1 GROUP BY raw_events_1.tenant_id, (date_trunc('hour'::text, (raw_events_1.event_at)::timestamp with time zone))
deparse_shard_query_test
---------------------------------------------------------------------
@ -148,7 +150,7 @@ FROM
WHERE
raw_events_1.tenant_id = raw_events_2.tenant_id;
');
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT raw_events_1_1.tenant_id, raw_events_2.value_3, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1 raw_events_1_1, public.raw_events_2 WHERE (raw_events_1_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT raw_events_1_1.tenant_id, raw_events_2.value_3, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1 raw_events_1_1, multi_deparse_shard_query.raw_events_2 WHERE (raw_events_1_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)
deparse_shard_query_test
---------------------------------------------------------------------
@ -164,7 +166,7 @@ FROM
WHERE
raw_events_1.tenant_id = raw_events_2.tenant_id GROUP BY raw_events_1.event_at
');
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT avg(raw_events_1_1.value_3) AS avg, max(raw_events_2.value_3) AS max, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1 raw_events_1_1, public.raw_events_2 WHERE (raw_events_1_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id) GROUP BY raw_events_1_1.event_at
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT avg(raw_events_1_1.value_3) AS avg, max(raw_events_2.value_3) AS max, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1 raw_events_1_1, multi_deparse_shard_query.raw_events_2 WHERE (raw_events_1_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id) GROUP BY raw_events_1_1.event_at
deparse_shard_query_test
---------------------------------------------------------------------
@ -184,7 +186,7 @@ GROUP BY
ORDER BY
r2.event_at DESC;
');
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_4) SELECT r3.tenant_id, max(r1.value_4) AS max FROM public.raw_events_1 r1, public.raw_events_2 r2, public.raw_events_1 r3 WHERE ((r1.tenant_id OPERATOR(pg_catalog.=) r2.tenant_id) AND (r2.tenant_id OPERATOR(pg_catalog.=) r3.tenant_id)) GROUP BY r1.value_1, r3.tenant_id, r2.event_at ORDER BY r2.event_at DESC
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_4) SELECT r3.tenant_id, max(r1.value_4) AS max FROM multi_deparse_shard_query.raw_events_1 r1, multi_deparse_shard_query.raw_events_2 r2, multi_deparse_shard_query.raw_events_1 r3 WHERE ((r1.tenant_id OPERATOR(pg_catalog.=) r2.tenant_id) AND (r2.tenant_id OPERATOR(pg_catalog.=) r3.tenant_id)) GROUP BY r1.value_1, r3.tenant_id, r2.event_at ORDER BY r2.event_at DESC
deparse_shard_query_test
---------------------------------------------------------------------
@ -201,7 +203,7 @@ FROM
GROUP BY
event_at, tenant_id;
');
INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) INSERT INTO public.aggregated_events (tenant_id, sum_value_5, rollup_hour) SELECT raw_events_1.tenant_id, sum((raw_events_1.value_5)::integer) AS sum, raw_events_1.event_at FROM public.raw_events_1 GROUP BY raw_events_1.event_at, raw_events_1.tenant_id
INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM multi_deparse_shard_query.raw_events_1) INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_5, rollup_hour) SELECT raw_events_1.tenant_id, sum((raw_events_1.value_5)::integer) AS sum, raw_events_1.event_at FROM multi_deparse_shard_query.raw_events_1 GROUP BY raw_events_1.event_at, raw_events_1.tenant_id
deparse_shard_query_test
---------------------------------------------------------------------
@ -217,7 +219,7 @@ FROM
GROUP BY
event_at, tenant_id;
');
INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT raw_events_1.tenant_id, sum((raw_events_1.value_5)::integer) AS sum FROM public.raw_events_1 GROUP BY raw_events_1.event_at, raw_events_1.tenant_id
INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM multi_deparse_shard_query.raw_events_1) INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_5) SELECT raw_events_1.tenant_id, sum((raw_events_1.value_5)::integer) AS sum FROM multi_deparse_shard_query.raw_events_1 GROUP BY raw_events_1.event_at, raw_events_1.tenant_id
deparse_shard_query_test
---------------------------------------------------------------------
@ -236,7 +238,7 @@ WITH RECURSIVE hierarchy as (
h.value_1 = re.value_6))
SELECT * FROM hierarchy WHERE LEVEL <= 2;
');
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) WITH RECURSIVE hierarchy AS (SELECT raw_events_1.value_1, 1 AS level, raw_events_1.tenant_id FROM public.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) 1) UNION SELECT re.value_2, (h.level OPERATOR(pg_catalog.+) 1), re.tenant_id FROM (hierarchy h JOIN public.raw_events_1 re ON (((h.tenant_id OPERATOR(pg_catalog.=) re.tenant_id) AND (h.value_1 OPERATOR(pg_catalog.=) re.value_6))))) SELECT hierarchy.tenant_id, hierarchy.value_1, hierarchy.level FROM hierarchy WHERE (hierarchy.level OPERATOR(pg_catalog.<=) 2)
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1, sum_value_5) WITH RECURSIVE hierarchy AS (SELECT raw_events_1.value_1, 1 AS level, raw_events_1.tenant_id FROM multi_deparse_shard_query.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) 1) UNION SELECT re.value_2, (h.level OPERATOR(pg_catalog.+) 1), re.tenant_id FROM (hierarchy h JOIN multi_deparse_shard_query.raw_events_1 re ON (((h.tenant_id OPERATOR(pg_catalog.=) re.tenant_id) AND (h.value_1 OPERATOR(pg_catalog.=) re.value_6))))) SELECT hierarchy.tenant_id, hierarchy.value_1, hierarchy.level FROM hierarchy WHERE (hierarchy.level OPERATOR(pg_catalog.<=) 2)
deparse_shard_query_test
---------------------------------------------------------------------
@ -249,7 +251,7 @@ SELECT
FROM
raw_events_1;
');
INFO: query: INSERT INTO public.aggregated_events (sum_value_1) SELECT DISTINCT raw_events_1.value_1 FROM public.raw_events_1
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (sum_value_1) SELECT DISTINCT raw_events_1.value_1 FROM multi_deparse_shard_query.raw_events_1
deparse_shard_query_test
---------------------------------------------------------------------
@ -262,7 +264,7 @@ SELECT value_3, value_2, tenant_id
FROM raw_events_1
WHERE (value_5 like \'%s\' or value_5 like \'%a\') and (tenant_id = 1) and (value_6 < 3000 or value_3 > 8000);
');
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT raw_events_1.tenant_id, raw_events_1.value_2, raw_events_1.value_3 FROM public.raw_events_1 WHERE (((raw_events_1.value_5 OPERATOR(pg_catalog.~~) '%s'::text) OR (raw_events_1.value_5 OPERATOR(pg_catalog.~~) '%a'::text)) AND (raw_events_1.tenant_id OPERATOR(pg_catalog.=) 1) AND ((raw_events_1.value_6 OPERATOR(pg_catalog.<) 3000) OR (raw_events_1.value_3 OPERATOR(pg_catalog.>) (8000)::double precision)))
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT raw_events_1.tenant_id, raw_events_1.value_2, raw_events_1.value_3 FROM multi_deparse_shard_query.raw_events_1 WHERE (((raw_events_1.value_5 OPERATOR(pg_catalog.~~) '%s'::text) OR (raw_events_1.value_5 OPERATOR(pg_catalog.~~) '%a'::text)) AND (raw_events_1.tenant_id OPERATOR(pg_catalog.=) 1) AND ((raw_events_1.value_6 OPERATOR(pg_catalog.<) 3000) OR (raw_events_1.value_3 OPERATOR(pg_catalog.>) (8000)::double precision)))
deparse_shard_query_test
---------------------------------------------------------------------
@ -274,7 +276,7 @@ SELECT rank() OVER (PARTITION BY tenant_id ORDER BY value_6), tenant_id
FROM raw_events_1
WHERE event_at = now();
');
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT raw_events_1.tenant_id, rank() OVER (PARTITION BY raw_events_1.tenant_id ORDER BY raw_events_1.value_6) AS rank FROM public.raw_events_1 WHERE (raw_events_1.event_at OPERATOR(pg_catalog.=) now())
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_5) SELECT raw_events_1.tenant_id, rank() OVER (PARTITION BY raw_events_1.tenant_id ORDER BY raw_events_1.value_6) AS rank FROM multi_deparse_shard_query.raw_events_1 WHERE (raw_events_1.event_at OPERATOR(pg_catalog.=) now())
deparse_shard_query_test
---------------------------------------------------------------------
@ -287,7 +289,7 @@ SELECT random(), int4eq(1, max(value_1))::int, value_6
WHERE event_at = now()
GROUP BY event_at, value_7, value_6;
');
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_4, sum_value_5) SELECT (int4eq(1, max(raw_events_1.value_1)))::integer AS int4eq, raw_events_1.value_6, random() AS random FROM public.raw_events_1 WHERE (raw_events_1.event_at OPERATOR(pg_catalog.=) now()) GROUP BY raw_events_1.event_at, raw_events_1.value_7, raw_events_1.value_6
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_4, sum_value_5) SELECT (int4eq(1, max(raw_events_1.value_1)))::integer AS int4eq, raw_events_1.value_6, random() AS random FROM multi_deparse_shard_query.raw_events_1 WHERE (raw_events_1.event_at OPERATOR(pg_catalog.=) now()) GROUP BY raw_events_1.event_at, raw_events_1.value_7, raw_events_1.value_6
deparse_shard_query_test
---------------------------------------------------------------------
@ -308,7 +310,7 @@ SELECT
FROM
raw_events_1;
');
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1) SELECT max(raw_events_1.tenant_id) AS max, count(DISTINCT CASE WHEN (raw_events_1.value_1 OPERATOR(pg_catalog.>) 100) THEN raw_events_1.tenant_id ELSE (raw_events_1.value_6)::bigint END) AS c FROM public.raw_events_1
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1) SELECT max(raw_events_1.tenant_id) AS max, count(DISTINCT CASE WHEN (raw_events_1.value_1 OPERATOR(pg_catalog.>) 100) THEN raw_events_1.tenant_id ELSE (raw_events_1.value_6)::bigint END) AS c FROM multi_deparse_shard_query.raw_events_1
deparse_shard_query_test
---------------------------------------------------------------------
@ -325,7 +327,7 @@ FROM
raw_events_2
) as foo
');
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_6, value_7, event_at) SELECT foo.tenant_id, foo.value_1, 10 AS value_6, foo.value_7, (now())::date AS event_at FROM (SELECT raw_events_2.tenant_id, raw_events_2.value_2 AS value_7, raw_events_2.value_1 FROM public.raw_events_2) foo
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_1, value_6, value_7, event_at) SELECT foo.tenant_id, foo.value_1, 10 AS value_6, foo.value_7, (now())::date AS event_at FROM (SELECT raw_events_2.tenant_id, raw_events_2.value_2 AS value_7, raw_events_2.value_1 FROM multi_deparse_shard_query.raw_events_2) foo
deparse_shard_query_test
---------------------------------------------------------------------
@ -346,7 +348,7 @@ FROM
GROUP BY
tenant_id, date_trunc(\'hour\', event_at)
');
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT foo.tenant_id, sum(foo.value_1) AS sum, sum((foo.value_5)::bigint) AS sum FROM (SELECT raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1 FROM public.raw_events_2, public.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)) foo GROUP BY foo.tenant_id, (date_trunc('hour'::text, (foo.event_at)::timestamp with time zone))
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT foo.tenant_id, sum(foo.value_1) AS sum, sum((foo.value_5)::bigint) AS sum FROM (SELECT raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1 FROM multi_deparse_shard_query.raw_events_2, multi_deparse_shard_query.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)) foo GROUP BY foo.tenant_id, (date_trunc('hour'::text, (foo.event_at)::timestamp with time zone))
deparse_shard_query_test
---------------------------------------------------------------------
@ -363,7 +365,7 @@ FROM
raw_events_1
) as foo
');
INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT foo.tenant_id, foo.value_1, foo.value_2, foo.value_3, foo.value_4, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT foo.tenant_id, foo.value_1, foo.value_2, foo.value_3, foo.value_4, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM multi_deparse_shard_query.raw_events_1) foo
deparse_shard_query_test
---------------------------------------------------------------------
@ -380,7 +382,7 @@ FROM
raw_events_1
) as foo
');
INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT foo.value_2, foo.value_4, foo.value_1, foo.value_3, foo.tenant_id, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT foo.value_2, foo.value_4, foo.value_1, foo.value_3, foo.tenant_id, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM multi_deparse_shard_query.raw_events_1) foo
deparse_shard_query_test
---------------------------------------------------------------------
@ -396,7 +398,7 @@ FROM
ORDER BY
value_2, value_1;
');
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT raw_events_1_1.tenant_id, raw_events_1_1.value_7, 10 AS value_6, raw_events_1_1.value_7, (now())::date AS event_at FROM public.raw_events_1 raw_events_1_1 ORDER BY raw_events_1_1.value_2, raw_events_1_1.value_1
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT raw_events_1_1.tenant_id, raw_events_1_1.value_7, 10 AS value_6, raw_events_1_1.value_7, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1 raw_events_1_1 ORDER BY raw_events_1_1.value_2, raw_events_1_1.value_1
deparse_shard_query_test
---------------------------------------------------------------------
@ -411,9 +413,11 @@ SELECT
FROM
raw_events_1;
');
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT raw_events_1_1.tenant_id, raw_events_1_1.value_4, 10 AS value_6, raw_events_1_1.value_7, (now())::date AS event_at FROM public.raw_events_1 raw_events_1_1
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT raw_events_1_1.tenant_id, raw_events_1_1.value_4, 10 AS value_6, raw_events_1_1.value_7, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1 raw_events_1_1
deparse_shard_query_test
---------------------------------------------------------------------
(1 row)
SET client_min_messages TO ERROR;
DROP SCHEMA multi_deparse_shard_query CASCADE;

View File

@ -12,6 +12,8 @@ SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
f
(1 row)
CREATE SCHEMA multi_deparse_shard_query;
SET search_path TO multi_deparse_shard_query;
SET citus.next_shard_id TO 13100000;
SET citus.shard_replication_factor TO 1;
CREATE FUNCTION deparse_shard_query_test(text)
@ -74,7 +76,7 @@ SELECT deparse_shard_query_test('
INSERT INTO raw_events_1
SELECT * FROM raw_events_1;
');
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at FROM public.raw_events_1
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at FROM multi_deparse_shard_query.raw_events_1
deparse_shard_query_test
---------------------------------------------------------------------
@ -87,7 +89,7 @@ SELECT
FROM
raw_events_1;
');
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, event_at) SELECT tenant_id, value_4, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_4, value_6, event_at) SELECT tenant_id, value_4, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1
deparse_shard_query_test
---------------------------------------------------------------------
@ -101,7 +103,7 @@ SELECT
FROM
raw_events_1;
');
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1
deparse_shard_query_test
---------------------------------------------------------------------
@ -115,7 +117,7 @@ SELECT
FROM
raw_events_2;
');
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_2
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_2
deparse_shard_query_test
---------------------------------------------------------------------
@ -131,7 +133,7 @@ FROM
GROUP BY
tenant_id, date_trunc(\'hour\', event_at)
');
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, average_value_3, sum_value_4, average_value_6, rollup_hour) SELECT tenant_id, sum(value_1) AS sum, avg(value_3) AS avg, sum(value_4) AS sum, avg(value_6) AS avg, date_trunc('hour'::text, (event_at)::timestamp with time zone) AS date_trunc FROM public.raw_events_1 GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone))
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1, average_value_3, sum_value_4, average_value_6, rollup_hour) SELECT tenant_id, sum(value_1) AS sum, avg(value_3) AS avg, sum(value_4) AS sum, avg(value_6) AS avg, date_trunc('hour'::text, (event_at)::timestamp with time zone) AS date_trunc FROM multi_deparse_shard_query.raw_events_1 GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone))
deparse_shard_query_test
---------------------------------------------------------------------
@ -148,7 +150,7 @@ FROM
WHERE
raw_events_1.tenant_id = raw_events_2.tenant_id;
');
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT raw_events_1.tenant_id, raw_events_2.value_3, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1, public.raw_events_2 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT raw_events_1.tenant_id, raw_events_2.value_3, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1, multi_deparse_shard_query.raw_events_2 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)
deparse_shard_query_test
---------------------------------------------------------------------
@ -164,7 +166,7 @@ FROM
WHERE
raw_events_1.tenant_id = raw_events_2.tenant_id GROUP BY raw_events_1.event_at
');
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT avg(raw_events_1.value_3) AS avg, max(raw_events_2.value_3) AS max, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1, public.raw_events_2 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id) GROUP BY raw_events_1.event_at
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT avg(raw_events_1.value_3) AS avg, max(raw_events_2.value_3) AS max, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1, multi_deparse_shard_query.raw_events_2 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id) GROUP BY raw_events_1.event_at
deparse_shard_query_test
---------------------------------------------------------------------
@ -184,7 +186,7 @@ GROUP BY
ORDER BY
r2.event_at DESC;
');
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_4) SELECT r3.tenant_id, max(r1.value_4) AS max FROM public.raw_events_1 r1, public.raw_events_2 r2, public.raw_events_1 r3 WHERE ((r1.tenant_id OPERATOR(pg_catalog.=) r2.tenant_id) AND (r2.tenant_id OPERATOR(pg_catalog.=) r3.tenant_id)) GROUP BY r1.value_1, r3.tenant_id, r2.event_at ORDER BY r2.event_at DESC
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_4) SELECT r3.tenant_id, max(r1.value_4) AS max FROM multi_deparse_shard_query.raw_events_1 r1, multi_deparse_shard_query.raw_events_2 r2, multi_deparse_shard_query.raw_events_1 r3 WHERE ((r1.tenant_id OPERATOR(pg_catalog.=) r2.tenant_id) AND (r2.tenant_id OPERATOR(pg_catalog.=) r3.tenant_id)) GROUP BY r1.value_1, r3.tenant_id, r2.event_at ORDER BY r2.event_at DESC
deparse_shard_query_test
---------------------------------------------------------------------
@ -201,7 +203,7 @@ FROM
GROUP BY
event_at, tenant_id;
');
INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) INSERT INTO public.aggregated_events (tenant_id, sum_value_5, rollup_hour) SELECT tenant_id, sum((value_5)::integer) AS sum, event_at FROM public.raw_events_1 GROUP BY event_at, tenant_id
INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM multi_deparse_shard_query.raw_events_1) INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_5, rollup_hour) SELECT tenant_id, sum((value_5)::integer) AS sum, event_at FROM multi_deparse_shard_query.raw_events_1 GROUP BY event_at, tenant_id
deparse_shard_query_test
---------------------------------------------------------------------
@ -217,7 +219,7 @@ FROM
GROUP BY
event_at, tenant_id;
');
INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, sum((value_5)::integer) AS sum FROM public.raw_events_1 GROUP BY event_at, tenant_id
INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM multi_deparse_shard_query.raw_events_1) INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, sum((value_5)::integer) AS sum FROM multi_deparse_shard_query.raw_events_1 GROUP BY event_at, tenant_id
deparse_shard_query_test
---------------------------------------------------------------------
@ -236,7 +238,7 @@ WITH RECURSIVE hierarchy as (
h.value_1 = re.value_6))
SELECT * FROM hierarchy WHERE LEVEL <= 2;
');
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) WITH RECURSIVE hierarchy AS (SELECT raw_events_1.value_1, 1 AS level, raw_events_1.tenant_id FROM public.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) 1) UNION SELECT re.value_2, (h.level OPERATOR(pg_catalog.+) 1), re.tenant_id FROM (hierarchy h JOIN public.raw_events_1 re ON (((h.tenant_id OPERATOR(pg_catalog.=) re.tenant_id) AND (h.value_1 OPERATOR(pg_catalog.=) re.value_6))))) SELECT tenant_id, value_1, level FROM hierarchy WHERE (level OPERATOR(pg_catalog.<=) 2)
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1, sum_value_5) WITH RECURSIVE hierarchy AS (SELECT raw_events_1.value_1, 1 AS level, raw_events_1.tenant_id FROM multi_deparse_shard_query.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) 1) UNION SELECT re.value_2, (h.level OPERATOR(pg_catalog.+) 1), re.tenant_id FROM (hierarchy h JOIN multi_deparse_shard_query.raw_events_1 re ON (((h.tenant_id OPERATOR(pg_catalog.=) re.tenant_id) AND (h.value_1 OPERATOR(pg_catalog.=) re.value_6))))) SELECT tenant_id, value_1, level FROM hierarchy WHERE (level OPERATOR(pg_catalog.<=) 2)
deparse_shard_query_test
---------------------------------------------------------------------
@ -249,7 +251,7 @@ SELECT
FROM
raw_events_1;
');
INFO: query: INSERT INTO public.aggregated_events (sum_value_1) SELECT DISTINCT value_1 FROM public.raw_events_1
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (sum_value_1) SELECT DISTINCT value_1 FROM multi_deparse_shard_query.raw_events_1
deparse_shard_query_test
---------------------------------------------------------------------
@ -262,7 +264,7 @@ SELECT value_3, value_2, tenant_id
FROM raw_events_1
WHERE (value_5 like \'%s\' or value_5 like \'%a\') and (tenant_id = 1) and (value_6 < 3000 or value_3 > 8000);
');
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, value_2, value_3 FROM public.raw_events_1 WHERE (((value_5 OPERATOR(pg_catalog.~~) '%s'::text) OR (value_5 OPERATOR(pg_catalog.~~) '%a'::text)) AND (tenant_id OPERATOR(pg_catalog.=) 1) AND ((value_6 OPERATOR(pg_catalog.<) 3000) OR (value_3 OPERATOR(pg_catalog.>) (8000)::double precision)))
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, value_2, value_3 FROM multi_deparse_shard_query.raw_events_1 WHERE (((value_5 OPERATOR(pg_catalog.~~) '%s'::text) OR (value_5 OPERATOR(pg_catalog.~~) '%a'::text)) AND (tenant_id OPERATOR(pg_catalog.=) 1) AND ((value_6 OPERATOR(pg_catalog.<) 3000) OR (value_3 OPERATOR(pg_catalog.>) (8000)::double precision)))
deparse_shard_query_test
---------------------------------------------------------------------
@ -274,7 +276,7 @@ SELECT rank() OVER (PARTITION BY tenant_id ORDER BY value_6), tenant_id
FROM raw_events_1
WHERE event_at = now();
');
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, rank() OVER (PARTITION BY tenant_id ORDER BY value_6) AS rank FROM public.raw_events_1 WHERE (event_at OPERATOR(pg_catalog.=) now())
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, rank() OVER (PARTITION BY tenant_id ORDER BY value_6) AS rank FROM multi_deparse_shard_query.raw_events_1 WHERE (event_at OPERATOR(pg_catalog.=) now())
deparse_shard_query_test
---------------------------------------------------------------------
@ -287,7 +289,7 @@ SELECT random(), int4eq(1, max(value_1))::int, value_6
WHERE event_at = now()
GROUP BY event_at, value_7, value_6;
');
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_4, sum_value_5) SELECT (int4eq(1, max(value_1)))::integer AS int4eq, value_6, random() AS random FROM public.raw_events_1 WHERE (event_at OPERATOR(pg_catalog.=) now()) GROUP BY event_at, value_7, value_6
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_4, sum_value_5) SELECT (int4eq(1, max(value_1)))::integer AS int4eq, value_6, random() AS random FROM multi_deparse_shard_query.raw_events_1 WHERE (event_at OPERATOR(pg_catalog.=) now()) GROUP BY event_at, value_7, value_6
deparse_shard_query_test
---------------------------------------------------------------------
@ -308,7 +310,7 @@ SELECT
FROM
raw_events_1;
');
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1) SELECT max(tenant_id) AS max, count(DISTINCT CASE WHEN (value_1 OPERATOR(pg_catalog.>) 100) THEN tenant_id ELSE (value_6)::bigint END) AS c FROM public.raw_events_1
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1) SELECT max(tenant_id) AS max, count(DISTINCT CASE WHEN (value_1 OPERATOR(pg_catalog.>) 100) THEN tenant_id ELSE (value_6)::bigint END) AS c FROM multi_deparse_shard_query.raw_events_1
deparse_shard_query_test
---------------------------------------------------------------------
@ -325,7 +327,7 @@ FROM
raw_events_2
) as foo
');
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_6, value_7, event_at) SELECT tenant_id, value_1, 10 AS value_6, value_7, (now())::date AS event_at FROM (SELECT raw_events_2.tenant_id, raw_events_2.value_2 AS value_7, raw_events_2.value_1 FROM public.raw_events_2) foo
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_1, value_6, value_7, event_at) SELECT tenant_id, value_1, 10 AS value_6, value_7, (now())::date AS event_at FROM (SELECT raw_events_2.tenant_id, raw_events_2.value_2 AS value_7, raw_events_2.value_1 FROM multi_deparse_shard_query.raw_events_2) foo
deparse_shard_query_test
---------------------------------------------------------------------
@ -341,12 +343,12 @@ FROM
FROM
raw_events_2, raw_events_1
WHERE
raw_events_1.tenant_id = raw_events_2.tenant_id
raw_events_1.tenant_id = raw_events_2.tenant_id
) as foo
GROUP BY
tenant_id, date_trunc(\'hour\', event_at)
');
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, sum(value_1) AS sum, sum((value_5)::bigint) AS sum FROM (SELECT raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1 FROM public.raw_events_2, public.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)) foo GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone))
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, sum(value_1) AS sum, sum((value_5)::bigint) AS sum FROM (SELECT raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1 FROM multi_deparse_shard_query.raw_events_2, multi_deparse_shard_query.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)) foo GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone))
deparse_shard_query_test
---------------------------------------------------------------------
@ -363,7 +365,7 @@ FROM
raw_events_1
) as foo
');
INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM multi_deparse_shard_query.raw_events_1) foo
deparse_shard_query_test
---------------------------------------------------------------------
@ -380,7 +382,7 @@ FROM
raw_events_1
) as foo
');
INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT value_2, value_4, value_1, value_3, tenant_id, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT value_2, value_4, value_1, value_3, tenant_id, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM multi_deparse_shard_query.raw_events_1) foo
deparse_shard_query_test
---------------------------------------------------------------------
@ -396,7 +398,7 @@ FROM
ORDER BY
value_2, value_1;
');
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_7, 10 AS value_6, value_7, (now())::date AS event_at FROM public.raw_events_1 ORDER BY value_2, value_1
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_7, 10 AS value_6, value_7, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1 ORDER BY value_2, value_1
deparse_shard_query_test
---------------------------------------------------------------------
@ -411,9 +413,11 @@ SELECT
FROM
raw_events_1;
');
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_4, 10 AS value_6, value_7, (now())::date AS event_at FROM public.raw_events_1
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_4, 10 AS value_6, value_7, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1
deparse_shard_query_test
---------------------------------------------------------------------
(1 row)
SET client_min_messages TO ERROR;
DROP SCHEMA multi_deparse_shard_query CASCADE;

View File

@ -11,6 +11,7 @@ SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
CREATE SCHEMA pg16;
SET search_path TO pg16;
SET citus.next_shard_id TO 950000;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1400000;
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
-- test the new vacuum and analyze options
@ -65,6 +66,482 @@ SET citus.log_remote_commands TO OFF;
-- only verifying it works and not printing log
-- remote commands because it can be flaky
VACUUM (ONLY_DATABASE_STATS);
-- New GENERIC_PLAN option in EXPLAIN
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/3c05284
CREATE TABLE tenk1 (
unique1 int4,
unique2 int4,
thousand int4
);
SELECT create_distributed_table('tenk1', 'unique1');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SET citus.log_remote_commands TO on;
EXPLAIN (GENERIC_PLAN) SELECT unique1 FROM tenk1 WHERE thousand = 1000;
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SAVEPOINT citus_explain_savepoint
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing EXPLAIN (ANALYZE FALSE, VERBOSE FALSE, COSTS TRUE, BUFFERS FALSE, WAL FALSE, GENERIC_PLAN TRUE, TIMING FALSE, SUMMARY FALSE, FORMAT TEXT) SELECT unique1 FROM pg16.tenk1_950001 tenk1 WHERE (thousand OPERATOR(pg_catalog.=) 1000)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ROLLBACK TO SAVEPOINT citus_explain_savepoint
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on tenk1_950001 tenk1 (cost=0.00..35.50 rows=10 width=4)
Filter: (thousand = 1000)
(7 rows)
EXPLAIN (GENERIC_PLAN, ANALYZE) SELECT unique1 FROM tenk1 WHERE thousand = 1000;
ERROR: EXPLAIN options ANALYZE and GENERIC_PLAN cannot be used together
SET citus.log_remote_commands TO off;
-- Proper error when creating statistics without a name on a Citus table
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/624aa2a13bd02dd584bb0995c883b5b93b2152df
CREATE TABLE test_stats (
a int,
b int
);
SELECT create_distributed_table('test_stats', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE STATISTICS (dependencies) ON a, b FROM test_stats;
ERROR: cannot create statistics without a name on a Citus table
HINT: Consider specifying a name for the statistics
CREATE STATISTICS (ndistinct, dependencies) on a, b from test_stats;
ERROR: cannot create statistics without a name on a Citus table
HINT: Consider specifying a name for the statistics
CREATE STATISTICS (ndistinct, dependencies, mcv) on a, b from test_stats;
ERROR: cannot create statistics without a name on a Citus table
HINT: Consider specifying a name for the statistics
-- STORAGE option in CREATE is already propagated by Citus
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/784cedd
CREATE TABLE test_storage (a text, c text STORAGE plain);
SELECT create_distributed_table('test_storage', 'a', shard_count := 2);
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT result FROM run_command_on_all_nodes
($$ SELECT array_agg(DISTINCT (attname, attstorage)) FROM pg_attribute
WHERE attrelid::regclass::text ILIKE 'pg16.test_storage%' AND attnum > 0;$$) ORDER BY 1;
result
---------------------------------------------------------------------
{"(a,x)","(c,p)"}
{"(a,x)","(c,p)"}
{"(a,x)","(c,p)"}
(3 rows)
SELECT alter_distributed_table('test_storage', shard_count := 4);
NOTICE: creating a new table for pg16.test_storage
NOTICE: moving the data of pg16.test_storage
NOTICE: dropping the old pg16.test_storage
NOTICE: renaming the new table to pg16.test_storage
alter_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT result FROM run_command_on_all_nodes
($$ SELECT array_agg(DISTINCT (attname, attstorage)) FROM pg_attribute
WHERE attrelid::regclass::text ILIKE 'pg16.test_storage%' AND attnum > 0;$$) ORDER BY 1;
result
---------------------------------------------------------------------
{"(a,x)","(c,p)"}
{"(a,x)","(c,p)"}
{"(a,x)","(c,p)"}
(3 rows)
SELECT undistribute_table('test_storage');
NOTICE: creating a new table for pg16.test_storage
NOTICE: moving the data of pg16.test_storage
NOTICE: dropping the old pg16.test_storage
NOTICE: renaming the new table to pg16.test_storage
undistribute_table
---------------------------------------------------------------------
(1 row)
SELECT result FROM run_command_on_all_nodes
($$ SELECT array_agg(DISTINCT (attname, attstorage)) FROM pg_attribute
WHERE attrelid::regclass::text ILIKE 'pg16.test_storage%' AND attnum > 0;$$) ORDER BY 1;
result
---------------------------------------------------------------------
{"(a,x)","(c,p)"}
(3 rows)
-- New option to change storage to DEFAULT in PG16
-- ALTER TABLE .. ALTER COLUMN .. SET STORAGE is already
-- not supported by Citus, so this is also not supported
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/b9424d0
SELECT create_distributed_table('test_storage', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
ALTER TABLE test_storage ALTER a SET STORAGE default;
ERROR: alter table command is currently unsupported
DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported.
--
-- COPY FROM ... DEFAULT
-- Already supported in Citus, adding all PG tests with a distributed table
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/9f8377f
CREATE TABLE copy_default (
id integer PRIMARY KEY,
text_value text NOT NULL DEFAULT 'test',
ts_value timestamp without time zone NOT NULL DEFAULT '2022-07-05'
);
SELECT create_distributed_table('copy_default', 'id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- if DEFAULT is not specified, then the marker will be regular data
COPY copy_default FROM stdin;
SELECT * FROM copy_default ORDER BY id;
id | text_value | ts_value
---------------------------------------------------------------------
1 | value | Mon Jul 04 00:00:00 2022
2 | D | Tue Jul 05 00:00:00 2022
(2 rows)
TRUNCATE copy_default;
COPY copy_default FROM stdin WITH (format csv);
SELECT * FROM copy_default ORDER BY id;
id | text_value | ts_value
---------------------------------------------------------------------
1 | value | Mon Jul 04 00:00:00 2022
2 | \D | Tue Jul 05 00:00:00 2022
(2 rows)
TRUNCATE copy_default;
-- DEFAULT cannot be used in binary mode
COPY copy_default FROM stdin WITH (format binary, default '\D');
ERROR: cannot specify DEFAULT in BINARY mode
-- DEFAULT cannot be new line nor carriage return
COPY copy_default FROM stdin WITH (default E'\n');
ERROR: COPY default representation cannot use newline or carriage return
COPY copy_default FROM stdin WITH (default E'\r');
ERROR: COPY default representation cannot use newline or carriage return
-- DELIMITER cannot appear in DEFAULT spec
COPY copy_default FROM stdin WITH (delimiter ';', default 'test;test');
ERROR: COPY delimiter must not appear in the DEFAULT specification
-- CSV quote cannot appear in DEFAULT spec
COPY copy_default FROM stdin WITH (format csv, quote '"', default 'test"test');
ERROR: CSV quote character must not appear in the DEFAULT specification
-- NULL and DEFAULT spec must be different
COPY copy_default FROM stdin WITH (default '\N');
ERROR: NULL specification and DEFAULT specification cannot be the same
-- cannot use DEFAULT marker in column that has no DEFAULT value
COPY copy_default FROM stdin WITH (default '\D');
ERROR: unexpected default marker in COPY data
DETAIL: Column "id" has no default value.
CONTEXT: COPY copy_default, line 1: "\D value '2022-07-04'"
COPY copy_default FROM stdin WITH (format csv, default '\D');
ERROR: unexpected default marker in COPY data
DETAIL: Column "id" has no default value.
CONTEXT: COPY copy_default, line 1: "\D,value,2022-07-04"
-- The DEFAULT marker must be unquoted and unescaped or it's not recognized
COPY copy_default FROM stdin WITH (default '\D');
SELECT * FROM copy_default ORDER BY id;
id | text_value | ts_value
---------------------------------------------------------------------
1 | test | Mon Jul 04 00:00:00 2022
2 | \D | Mon Jul 04 00:00:00 2022
3 | "D" | Mon Jul 04 00:00:00 2022
(3 rows)
TRUNCATE copy_default;
COPY copy_default FROM stdin WITH (format csv, default '\D');
SELECT * FROM copy_default ORDER BY id;
id | text_value | ts_value
---------------------------------------------------------------------
1 | test | Mon Jul 04 00:00:00 2022
2 | \\D | Mon Jul 04 00:00:00 2022
3 | \D | Mon Jul 04 00:00:00 2022
(3 rows)
TRUNCATE copy_default;
-- successful usage of DEFAULT option in COPY
COPY copy_default FROM stdin WITH (default '\D');
SELECT * FROM copy_default ORDER BY id;
id | text_value | ts_value
---------------------------------------------------------------------
1 | value | Mon Jul 04 00:00:00 2022
2 | test | Sun Jul 03 00:00:00 2022
3 | test | Tue Jul 05 00:00:00 2022
(3 rows)
TRUNCATE copy_default;
COPY copy_default FROM stdin WITH (format csv, default '\D');
SELECT * FROM copy_default ORDER BY id;
id | text_value | ts_value
---------------------------------------------------------------------
1 | value | Mon Jul 04 00:00:00 2022
2 | test | Sun Jul 03 00:00:00 2022
3 | test | Tue Jul 05 00:00:00 2022
(3 rows)
TRUNCATE copy_default;
\c - - - :worker_1_port
COPY pg16.copy_default FROM stdin WITH (format csv, default '\D');
SELECT * FROM pg16.copy_default ORDER BY id;
id | text_value | ts_value
---------------------------------------------------------------------
1 | value | Mon Jul 04 00:00:00 2022
2 | test | Sun Jul 03 00:00:00 2022
3 | test | Tue Jul 05 00:00:00 2022
(3 rows)
\c - - - :master_port
TRUNCATE pg16.copy_default;
\c - - - :worker_2_port
COPY pg16.copy_default FROM stdin WITH (format csv, default '\D');
SELECT * FROM pg16.copy_default ORDER BY id;
id | text_value | ts_value
---------------------------------------------------------------------
1 | value | Mon Jul 04 00:00:00 2022
2 | test | Sun Jul 03 00:00:00 2022
3 | test | Tue Jul 05 00:00:00 2022
(3 rows)
\c - - - :master_port
SET search_path TO pg16;
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
-- DEFAULT cannot be used in COPY TO
COPY (select 1 as test) TO stdout WITH (default '\D');
ERROR: COPY DEFAULT only available using COPY FROM
-- Tests for SQL/JSON: support the IS JSON predicate
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/6ee30209
CREATE TABLE test_is_json (id bigserial, js text);
SELECT create_distributed_table('test_is_json', 'id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO test_is_json(js) VALUES
(NULL),
(''),
('123'),
('"aaa "'),
('true'),
('null'),
('[]'),
('[1, "2", {}]'),
('{}'),
('{ "a": 1, "b": null }'),
('{ "a": 1, "a": null }'),
('{ "a": 1, "b": [{ "a": 1 }, { "a": 2 }] }'),
('{ "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] }'),
('aaa'),
('{a:1}'),
('["a",]');
-- run IS JSON predicate in the worker nodes
SELECT
js,
js IS JSON "JSON",
js IS NOT JSON "NOT JSON",
js IS JSON VALUE "VALUE",
js IS JSON OBJECT "OBJECT",
js IS JSON ARRAY "ARRAY",
js IS JSON SCALAR "SCALAR",
js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
FROM
test_is_json ORDER BY js;
js | JSON | NOT JSON | VALUE | OBJECT | ARRAY | SCALAR | WITHOUT UNIQUE | WITH UNIQUE
---------------------------------------------------------------------
| f | t | f | f | f | f | f | f
"aaa " | t | f | t | f | f | t | t | t
123 | t | f | t | f | f | t | t | t
["a",] | f | t | f | f | f | f | f | f
[1, "2", {}] | t | f | t | f | t | f | t | t
[] | t | f | t | f | t | f | t | t
aaa | f | t | f | f | f | f | f | f
null | t | f | t | f | f | t | t | t
true | t | f | t | f | f | t | t | t
{ "a": 1, "a": null } | t | f | t | t | f | f | t | f
{ "a": 1, "b": [{ "a": 1 }, { "a": 2 }] } | t | f | t | t | f | f | t | t
{ "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] } | t | f | t | t | f | f | t | f
{ "a": 1, "b": null } | t | f | t | t | f | f | t | t
{a:1} | f | t | f | f | f | f | f | f
{} | t | f | t | t | f | f | t | t
| | | | | | | |
(16 rows)
-- pull the data, and run IS JSON predicate in the coordinator
WITH pulled_data as (SELECT js FROM test_is_json OFFSET 0)
SELECT
js,
js IS JSON "IS JSON",
js IS NOT JSON "IS NOT JSON",
js IS JSON VALUE "IS VALUE",
js IS JSON OBJECT "IS OBJECT",
js IS JSON ARRAY "IS ARRAY",
js IS JSON SCALAR "IS SCALAR",
js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
FROM
pulled_data ORDER BY js;
js | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE
---------------------------------------------------------------------
| f | t | f | f | f | f | f | f
"aaa " | t | f | t | f | f | t | t | t
123 | t | f | t | f | f | t | t | t
["a",] | f | t | f | f | f | f | f | f
[1, "2", {}] | t | f | t | f | t | f | t | t
[] | t | f | t | f | t | f | t | t
aaa | f | t | f | f | f | f | f | f
null | t | f | t | f | f | t | t | t
true | t | f | t | f | f | t | t | t
{ "a": 1, "a": null } | t | f | t | t | f | f | t | f
{ "a": 1, "b": [{ "a": 1 }, { "a": 2 }] } | t | f | t | t | f | f | t | t
{ "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] } | t | f | t | t | f | f | t | f
{ "a": 1, "b": null } | t | f | t | t | f | f | t | t
{a:1} | f | t | f | f | f | f | f | f
{} | t | f | t | t | f | f | t | t
| | | | | | | |
(16 rows)
SELECT
js,
js IS JSON "IS JSON",
js IS NOT JSON "IS NOT JSON",
js IS JSON VALUE "IS VALUE",
js IS JSON OBJECT "IS OBJECT",
js IS JSON ARRAY "IS ARRAY",
js IS JSON SCALAR "IS SCALAR",
js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
FROM
(SELECT js::json FROM test_is_json WHERE js IS JSON) foo(js);
js | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE
---------------------------------------------------------------------
123 | t | f | t | f | f | t | t | t
"aaa " | t | f | t | f | f | t | t | t
true | t | f | t | f | f | t | t | t
null | t | f | t | f | f | t | t | t
[] | t | f | t | f | t | f | t | t
[1, "2", {}] | t | f | t | f | t | f | t | t
{} | t | f | t | t | f | f | t | t
{ "a": 1, "b": null } | t | f | t | t | f | f | t | t
{ "a": 1, "a": null } | t | f | t | t | f | f | t | f
{ "a": 1, "b": [{ "a": 1 }, { "a": 2 }] } | t | f | t | t | f | f | t | t
{ "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] } | t | f | t | t | f | f | t | f
(11 rows)
SELECT
js0,
js IS JSON "IS JSON",
js IS NOT JSON "IS NOT JSON",
js IS JSON VALUE "IS VALUE",
js IS JSON OBJECT "IS OBJECT",
js IS JSON ARRAY "IS ARRAY",
js IS JSON SCALAR "IS SCALAR",
js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
FROM
(SELECT js, js::bytea FROM test_is_json WHERE js IS JSON) foo(js0, js);
js0 | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE
---------------------------------------------------------------------
123 | t | f | t | f | f | t | t | t
"aaa " | t | f | t | f | f | t | t | t
true | t | f | t | f | f | t | t | t
null | t | f | t | f | f | t | t | t
[] | t | f | t | f | t | f | t | t
[1, "2", {}] | t | f | t | f | t | f | t | t
{} | t | f | t | t | f | f | t | t
{ "a": 1, "b": null } | t | f | t | t | f | f | t | t
{ "a": 1, "a": null } | t | f | t | t | f | f | t | f
{ "a": 1, "b": [{ "a": 1 }, { "a": 2 }] } | t | f | t | t | f | f | t | t
{ "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] } | t | f | t | t | f | f | t | f
(11 rows)
SELECT
js,
js IS JSON "IS JSON",
js IS NOT JSON "IS NOT JSON",
js IS JSON VALUE "IS VALUE",
js IS JSON OBJECT "IS OBJECT",
js IS JSON ARRAY "IS ARRAY",
js IS JSON SCALAR "IS SCALAR",
js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
FROM
(SELECT js::jsonb FROM test_is_json WHERE js IS JSON) foo(js);
js | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE
---------------------------------------------------------------------
123 | t | f | t | f | f | t | t | t
"aaa " | t | f | t | f | f | t | t | t
true | t | f | t | f | f | t | t | t
null | t | f | t | f | f | t | t | t
[] | t | f | t | f | t | f | t | t
[1, "2", {}] | t | f | t | f | t | f | t | t
{} | t | f | t | t | f | f | t | t
{"a": 1, "b": null} | t | f | t | t | f | f | t | t
{"a": null} | t | f | t | t | f | f | t | t
{"a": 1, "b": [{"a": 1}, {"a": 2}]} | t | f | t | t | f | f | t | t
{"a": 1, "b": [{"a": 2, "b": 0}]} | t | f | t | t | f | f | t | t
(11 rows)
-- SYSTEM_USER
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/0823d061
CREATE TABLE table_name_for_view(id int, val_1 text);
SELECT create_distributed_table('table_name_for_view', 'id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO table_name_for_view VALUES (1, 'test');
-- define a view that uses SYSTEM_USER keyword
CREATE VIEW prop_view_1 AS
SELECT *, SYSTEM_USER AS su FROM table_name_for_view;
SELECT * FROM prop_view_1;
id | val_1 | su
---------------------------------------------------------------------
1 | test |
(1 row)
-- check definition with SYSTEM_USER is correctly propagated to workers
\c - - - :worker_1_port
SELECT pg_get_viewdef('pg16.prop_view_1', true);
pg_get_viewdef
---------------------------------------------------------------------
SELECT id, +
val_1, +
SYSTEM_USER AS su +
FROM pg16.table_name_for_view;
(1 row)
\c - - - :master_port
SET search_path TO pg16;
\set VERBOSITY terse
SET client_min_messages TO ERROR;
DROP SCHEMA pg16 CASCADE;

View File

@ -1345,13 +1345,6 @@ DROP USER test_other_super_user;
CREATE ROLE test_non_super_user WITH LOGIN;
ALTER ROLE test_non_super_user NOSUPERUSER;
GRANT CREATE ON DATABASE regression TO test_non_super_user;
SELECT result FROM run_command_on_workers($$GRANT CREATE ON DATABASE regression TO test_non_super_user$$);
result
---------------------------------------------------------------------
GRANT
GRANT
(2 rows)
GRANT CREATE ON SCHEMA public TO test_non_super_user ;
\c - test_non_super_user
SET search_path TO regular_schema;
@ -1487,13 +1480,6 @@ $$);
\c - postgres
REVOKE CREATE ON DATABASE regression FROM test_non_super_user;
SELECT result FROM run_command_on_workers($$REVOKE CREATE ON DATABASE regression FROM test_non_super_user$$);
result
---------------------------------------------------------------------
REVOKE
REVOKE
(2 rows)
REVOKE CREATE ON SCHEMA public FROM test_non_super_user;
DROP ROLE test_non_super_user;
\c - - - :worker_1_port

View File

@ -50,6 +50,8 @@ test: multi_metadata_attributes
test: multi_read_from_secondaries
test: grant_on_database_propagation
# ----------
# multi_citus_tools tests utility functions written for citus tools
# ----------

View File

@ -1,65 +0,0 @@
setup
{
CREATE FUNCTION run_pg_send_cancellation(int,int)
RETURNS void
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION get_cancellation_key()
RETURNS int
AS 'citus'
LANGUAGE C STRICT;
CREATE TABLE cancel_table (pid int, cancel_key int);
}
teardown
{
DROP TABLE IF EXISTS cancel_table;
}
session "s1"
/* store the PID and cancellation key of session 1 */
step "s1-register"
{
INSERT INTO cancel_table VALUES (pg_backend_pid(), get_cancellation_key());
}
/* lock the table from session 1, will block and get cancelled */
step "s1-lock"
{
BEGIN;
LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;
END;
}
session "s2"
/* lock the table from session 2 to block session 1 */
step "s2-lock"
{
BEGIN;
LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;
}
/* PID mismatch */
step "s2-wrong-cancel-1"
{
SELECT run_pg_send_cancellation(pid + 1, cancel_key) FROM cancel_table;
}
/* cancellation key mismatch */
step "s2-wrong-cancel-2"
{
SELECT run_pg_send_cancellation(pid, cancel_key + 1) FROM cancel_table;
}
/* cancel the LOCK statement in session 1 */
step "s2-cancel"
{
SELECT run_pg_send_cancellation(pid, cancel_key) FROM cancel_table;
END;
}
permutation "s1-register" "s2-lock" "s1-lock" "s2-wrong-cancel-1" "s2-wrong-cancel-2" "s2-cancel"

View File

@ -0,0 +1,378 @@
-- Public role has connect,temp,temporary privileges on database
-- To test these scenarios, we need to revoke these privileges from public role
-- since public role privileges are inherited by new roles/users
revoke connect,temp,temporary on database regression from public;
CREATE SCHEMA grant_on_database_propagation;
SET search_path TO grant_on_database_propagation;
-- test grant/revoke CREATE privilege propagation on database
create user myuser;
grant create on database regression to myuser;
select has_database_privilege('myuser','regression', 'CREATE');
\c - - - :worker_1_port;
select has_database_privilege('myuser','regression', 'CREATE');
\c - - - :master_port
revoke create on database regression from myuser;
select has_database_privilege('myuser','regression', 'CREATE');
\c - - - :worker_1_port
select has_database_privilege('myuser','regression', 'CREATE');
\c - - - :master_port
drop user myuser;
-----------------------------------------------------------------------
-- test grant/revoke CONNECT privilege propagation on database
create user myuser;
grant CONNECT on database regression to myuser;
select has_database_privilege('myuser','regression', 'CONNECT');
\c - - - :worker_1_port;
select has_database_privilege('myuser','regression', 'CONNECT');
\c - - - :master_port
revoke connect on database regression from myuser;
select has_database_privilege('myuser','regression', 'CONNECT');
\c - - - :worker_1_port
select has_database_privilege('myuser','regression', 'CONNECT');
\c - - - :master_port
drop user myuser;
-----------------------------------------------------------------------
-- test grant/revoke TEMP privilege propagation on database
create user myuser;
-- test grant/revoke temp on database
grant TEMP on database regression to myuser;
select has_database_privilege('myuser','regression', 'TEMP');
\c - - - :worker_1_port;
select has_database_privilege('myuser','regression', 'TEMP');
\c - - - :master_port
revoke TEMP on database regression from myuser;
select has_database_privilege('myuser','regression', 'TEMP');
\c - - - :worker_1_port
select has_database_privilege('myuser','regression', 'TEMP');
\c - - - :master_port
drop user myuser;
-----------------------------------------------------------------------
-- test temporary privilege on database
create user myuser;
-- test grant/revoke temporary on database
grant TEMPORARY on database regression to myuser;
select has_database_privilege('myuser','regression', 'TEMPORARY');
\c - - - :worker_1_port;
select has_database_privilege('myuser','regression', 'TEMPORARY');
\c - - - :master_port
revoke TEMPORARY on database regression from myuser;
select has_database_privilege('myuser','regression', 'TEMPORARY');
\c - - - :worker_1_port
select has_database_privilege('myuser','regression', 'TEMPORARY');
\c - - - :master_port
drop user myuser;
-----------------------------------------------------------------------
-- test ALL privileges with ALL statement on database
create user myuser;
grant ALL on database regression to myuser;
select has_database_privilege('myuser','regression', 'CREATE');
select has_database_privilege('myuser','regression', 'CONNECT');
select has_database_privilege('myuser','regression', 'TEMP');
select has_database_privilege('myuser','regression', 'TEMPORARY');
\c - - - :worker_1_port;
select has_database_privilege('myuser','regression', 'CREATE');
select has_database_privilege('myuser','regression', 'CONNECT');
select has_database_privilege('myuser','regression', 'TEMP');
select has_database_privilege('myuser','regression', 'TEMPORARY');
\c - - - :master_port
revoke ALL on database regression from myuser;
select has_database_privilege('myuser','regression', 'CREATE');
select has_database_privilege('myuser','regression', 'CONNECT');
select has_database_privilege('myuser','regression', 'TEMP');
select has_database_privilege('myuser','regression', 'TEMPORARY');
\c - - - :worker_1_port
select has_database_privilege('myuser','regression', 'CREATE');
select has_database_privilege('myuser','regression', 'CONNECT');
select has_database_privilege('myuser','regression', 'TEMP');
select has_database_privilege('myuser','regression', 'TEMPORARY');
\c - - - :master_port
drop user myuser;
-----------------------------------------------------------------------
-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database
create user myuser;
grant CREATE,CONNECT,TEMP,TEMPORARY on database regression to myuser;
select has_database_privilege('myuser','regression', 'CREATE');
select has_database_privilege('myuser','regression', 'CONNECT');
select has_database_privilege('myuser','regression', 'TEMP');
select has_database_privilege('myuser','regression', 'TEMPORARY');
\c - - - :worker_1_port;
select has_database_privilege('myuser','regression', 'CREATE');
select has_database_privilege('myuser','regression', 'CONNECT');
select has_database_privilege('myuser','regression', 'TEMP');
select has_database_privilege('myuser','regression', 'TEMPORARY');
\c - - - :master_port
RESET ROLE;
revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser;
select has_database_privilege('myuser','regression', 'CREATE');
select has_database_privilege('myuser','regression', 'CONNECT');
select has_database_privilege('myuser','regression', 'TEMP');
select has_database_privilege('myuser','regression', 'TEMPORARY');
\c - - - :worker_1_port
select has_database_privilege('myuser','regression', 'CREATE');
select has_database_privilege('myuser','regression', 'CONNECT');
select has_database_privilege('myuser','regression', 'TEMP');
select has_database_privilege('myuser','regression', 'TEMPORARY');
\c - - - :master_port
drop user myuser;
-----------------------------------------------------------------------
-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database with grant option
create user myuser;
create user myuser_1;
grant CREATE,CONNECT,TEMP,TEMPORARY on database regression to myuser;
set role myuser;
--here since myuser does not have grant option, it should fail
grant CREATE,CONNECT,TEMP,TEMPORARY on database regression to myuser_1;
select has_database_privilege('myuser_1','regression', 'CREATE');
select has_database_privilege('myuser_1','regression', 'CONNECT');
select has_database_privilege('myuser_1','regression', 'TEMP');
select has_database_privilege('myuser_1','regression', 'TEMPORARY');
\c - - - :worker_1_port
select has_database_privilege('myuser_1','regression', 'CREATE');
select has_database_privilege('myuser_1','regression', 'CONNECT');
select has_database_privilege('myuser_1','regression', 'TEMP');
select has_database_privilege('myuser_1','regression', 'TEMPORARY');
\c - - - :master_port
RESET ROLE;
grant CREATE,CONNECT,TEMP,TEMPORARY on database regression to myuser with grant option;
set role myuser;
--here since myuser have grant option, it should succeed
grant CREATE,CONNECT,TEMP,TEMPORARY on database regression to myuser_1 granted by myuser;
select has_database_privilege('myuser_1','regression', 'CREATE');
select has_database_privilege('myuser_1','regression', 'CONNECT');
select has_database_privilege('myuser_1','regression', 'TEMP');
select has_database_privilege('myuser_1','regression', 'TEMPORARY');
\c - - - :worker_1_port
select has_database_privilege('myuser_1','regression', 'CREATE');
select has_database_privilege('myuser_1','regression', 'CONNECT');
select has_database_privilege('myuser_1','regression', 'TEMP');
select has_database_privilege('myuser_1','regression', 'TEMPORARY');
\c - - - :master_port
RESET ROLE;
--below test should fail and should throw an error since myuser_1 still have the dependent privileges
revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser restrict;
--below test should fail and should throw an error since myuser_1 still have the dependent privileges
revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser restrict ;
--below test should succeed and should not throw any error since myuser_1 privileges are revoked with cascade
revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser cascade ;
--here we test if myuser still have the privileges after revoke grant option for
select has_database_privilege('myuser','regression', 'CREATE');
select has_database_privilege('myuser','regression', 'CONNECT');
select has_database_privilege('myuser','regression', 'TEMP');
select has_database_privilege('myuser','regression', 'TEMPORARY');
\c - - - :worker_1_port
select has_database_privilege('myuser','regression', 'CREATE');
select has_database_privilege('myuser','regression', 'CONNECT');
select has_database_privilege('myuser','regression', 'TEMP');
select has_database_privilege('myuser','regression', 'TEMPORARY');
\c - - - :master_port
reset role;
revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser;
revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser_1;
drop user myuser_1;
drop user myuser;
-----------------------------------------------------------------------
-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database multi database
-- and multi user
create user myuser;
create user myuser_1;
create database test_db;
SELECT result FROM run_command_on_workers($$create database test_db$$);
revoke connect,temp,temporary on database test_db from public;
grant CREATE,CONNECT,TEMP,TEMPORARY on database regression,test_db to myuser,myuser_1;
select has_database_privilege('myuser','regression', 'CREATE');
select has_database_privilege('myuser','regression', 'CONNECT');
select has_database_privilege('myuser','regression', 'TEMP');
select has_database_privilege('myuser','regression', 'TEMPORARY');
select has_database_privilege('myuser','test_db', 'CREATE');
select has_database_privilege('myuser','test_db', 'CONNECT');
select has_database_privilege('myuser','test_db', 'TEMP');
select has_database_privilege('myuser','test_db', 'TEMPORARY');
select has_database_privilege('myuser_1','regression', 'CREATE');
select has_database_privilege('myuser_1','regression', 'CONNECT');
select has_database_privilege('myuser_1','regression', 'TEMP');
select has_database_privilege('myuser_1','regression', 'TEMPORARY');
select has_database_privilege('myuser_1','test_db', 'CREATE');
select has_database_privilege('myuser_1','test_db', 'CONNECT');
select has_database_privilege('myuser_1','test_db', 'TEMP');
select has_database_privilege('myuser_1','test_db', 'TEMPORARY');
\c - - - :worker_1_port
select has_database_privilege('myuser','regression', 'CREATE');
select has_database_privilege('myuser','regression', 'CONNECT');
select has_database_privilege('myuser','regression', 'TEMP');
select has_database_privilege('myuser','regression', 'TEMPORARY');
select has_database_privilege('myuser','test_db', 'CREATE');
select has_database_privilege('myuser','test_db', 'CONNECT');
select has_database_privilege('myuser','test_db', 'TEMP');
select has_database_privilege('myuser','test_db', 'TEMPORARY');
select has_database_privilege('myuser_1','regression', 'CREATE');
select has_database_privilege('myuser_1','regression', 'CONNECT');
select has_database_privilege('myuser_1','regression', 'TEMP');
select has_database_privilege('myuser_1','regression', 'TEMPORARY');
select has_database_privilege('myuser_1','test_db', 'CREATE');
select has_database_privilege('myuser_1','test_db', 'CONNECT');
select has_database_privilege('myuser_1','test_db', 'TEMP');
select has_database_privilege('myuser_1','test_db', 'TEMPORARY');
\c - - - :master_port
RESET ROLE;
--below test should fail and should throw an error
revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression,test_db from myuser ;
--below test should succeed and should not throw any error
revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression,test_db from myuser_1;
--below test should succeed and should not throw any error
revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression,test_db from myuser cascade;
select has_database_privilege('myuser','regression', 'CREATE');
select has_database_privilege('myuser','regression', 'CONNECT');
select has_database_privilege('myuser','regression', 'TEMP');
select has_database_privilege('myuser','regression', 'TEMPORARY');
select has_database_privilege('myuser','test_db', 'CREATE');
select has_database_privilege('myuser','test_db', 'CONNECT');
select has_database_privilege('myuser','test_db', 'TEMP');
select has_database_privilege('myuser','test_db', 'TEMPORARY');
select has_database_privilege('myuser_1','regression', 'CREATE');
select has_database_privilege('myuser_1','regression', 'CONNECT');
select has_database_privilege('myuser_1','regression', 'TEMP');
select has_database_privilege('myuser_1','regression', 'TEMPORARY');
select has_database_privilege('myuser_1','test_db', 'CREATE');
select has_database_privilege('myuser_1','test_db', 'CONNECT');
select has_database_privilege('myuser_1','test_db', 'TEMP');
select has_database_privilege('myuser_1','test_db', 'TEMPORARY');
\c - - - :worker_1_port
select has_database_privilege('myuser','regression', 'CREATE');
select has_database_privilege('myuser','regression', 'CONNECT');
select has_database_privilege('myuser','regression', 'TEMP');
select has_database_privilege('myuser','regression', 'TEMPORARY');
select has_database_privilege('myuser','test_db', 'CREATE');
select has_database_privilege('myuser','test_db', 'CONNECT');
select has_database_privilege('myuser','test_db', 'TEMP');
select has_database_privilege('myuser','test_db', 'TEMPORARY');
select has_database_privilege('myuser_1','regression', 'CREATE');
select has_database_privilege('myuser_1','regression', 'CONNECT');
select has_database_privilege('myuser_1','regression', 'TEMP');
select has_database_privilege('myuser_1','regression', 'TEMPORARY');
select has_database_privilege('myuser_1','test_db', 'CREATE');
select has_database_privilege('myuser_1','test_db', 'CONNECT');
select has_database_privilege('myuser_1','test_db', 'TEMP');
select has_database_privilege('myuser_1','test_db', 'TEMPORARY');
\c - - - :master_port
reset role;
drop user myuser_1;
drop user myuser;
drop database test_db;
SELECT result FROM run_command_on_workers($$drop database test_db$$);
---------------------------------------------------------------------------
-- rollbacks public role database privileges to original state
grant connect,temp,temporary on database regression to public;
SET client_min_messages TO ERROR;
DROP SCHEMA grant_on_database_propagation CASCADE;
---------------------------------------------------------------------------

View File

@ -255,7 +255,6 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[],
-- test refreshing mat views
SET client_min_messages TO ERROR;
-- Create a separate role and grant it full database access; used below to
-- exercise materialized-view refresh as a non-owner user.
CREATE USER some_other_user;
SELECT run_command_on_workers($$GRANT ALL ON DATABASE regression TO some_other_user;$$);
GRANT ALL ON DATABASE regression TO some_other_user;
RESET client_min_messages;

View File

@ -8,6 +8,9 @@
-- Capture the server version and emit whether it is PG15+, so the test can
-- use alternative expected-output files keyed off this boolean.
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
-- Use a dedicated schema and a deterministic shard id range so the
-- expected output stays stable across runs.
CREATE SCHEMA multi_deparse_shard_query;
SET search_path TO multi_deparse_shard_query;
SET citus.next_shard_id TO 13100000;
SET citus.shard_replication_factor TO 1;
@ -304,3 +307,6 @@ SELECT
FROM
raw_events_1;
');
-- Clean up every object created by this test file in one cascaded drop.
SET client_min_messages TO ERROR;
DROP SCHEMA multi_deparse_shard_query CASCADE;

View File

@ -12,6 +12,7 @@ SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
-- PG16 feature tests on a Citus cluster: dedicated schema plus pinned shard
-- and colocation id sequences so the expected output is deterministic.
CREATE SCHEMA pg16;
SET search_path TO pg16;
SET citus.next_shard_id TO 950000;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1400000;
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
@ -45,6 +46,292 @@ SET citus.log_remote_commands TO OFF;
-- remote commands because it can be flaky
VACUUM (ONLY_DATABASE_STATS);
-- New GENERIC_PLAN option in EXPLAIN
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/3c05284
CREATE TABLE tenk1 (
unique1 int4,
unique2 int4,
thousand int4
);
SELECT create_distributed_table('tenk1', 'unique1');
-- Log remote commands to show how GENERIC_PLAN is deparsed for the workers.
-- GENERIC_PLAN cannot be combined with ANALYZE (per PG16 EXPLAIN docs), so
-- the second statement is expected to error out.
SET citus.log_remote_commands TO on;
EXPLAIN (GENERIC_PLAN) SELECT unique1 FROM tenk1 WHERE thousand = 1000;
EXPLAIN (GENERIC_PLAN, ANALYZE) SELECT unique1 FROM tenk1 WHERE thousand = 1000;
SET citus.log_remote_commands TO off;
-- Proper error when creating statistics without a name on a Citus table
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/624aa2a13bd02dd584bb0995c883b5b93b2152df
CREATE TABLE test_stats (
a int,
b int
);
SELECT create_distributed_table('test_stats', 'a');
-- All three are nameless CREATE STATISTICS variants on a distributed table.
CREATE STATISTICS (dependencies) ON a, b FROM test_stats;
CREATE STATISTICS (ndistinct, dependencies) on a, b from test_stats;
CREATE STATISTICS (ndistinct, dependencies, mcv) on a, b from test_stats;
-- STORAGE option in CREATE is already propagated by Citus
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/784cedd
CREATE TABLE test_storage (a text, c text STORAGE plain);
SELECT create_distributed_table('test_storage', 'a', shard_count := 2);
-- Verify the per-column storage setting survives distribution, a shard-count
-- change, and undistribution, by inspecting pg_attribute on every node.
SELECT result FROM run_command_on_all_nodes
($$ SELECT array_agg(DISTINCT (attname, attstorage)) FROM pg_attribute
WHERE attrelid::regclass::text ILIKE 'pg16.test_storage%' AND attnum > 0;$$) ORDER BY 1;
SELECT alter_distributed_table('test_storage', shard_count := 4);
SELECT result FROM run_command_on_all_nodes
($$ SELECT array_agg(DISTINCT (attname, attstorage)) FROM pg_attribute
WHERE attrelid::regclass::text ILIKE 'pg16.test_storage%' AND attnum > 0;$$) ORDER BY 1;
SELECT undistribute_table('test_storage');
SELECT result FROM run_command_on_all_nodes
($$ SELECT array_agg(DISTINCT (attname, attstorage)) FROM pg_attribute
WHERE attrelid::regclass::text ILIKE 'pg16.test_storage%' AND attnum > 0;$$) ORDER BY 1;
-- New option to change storage to DEFAULT in PG16
-- ALTER TABLE .. ALTER COLUMN .. SET STORAGE is already
-- not supported by Citus, so this is also not supported
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/b9424d0
SELECT create_distributed_table('test_storage', 'a');
ALTER TABLE test_storage ALTER a SET STORAGE default;
--
-- COPY FROM ... DEFAULT
-- Already supported in Citus, adding all PG tests with a distributed table
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/9f8377f
CREATE TABLE copy_default (
id integer PRIMARY KEY,
text_value text NOT NULL DEFAULT 'test',
ts_value timestamp without time zone NOT NULL DEFAULT '2022-07-05'
);
SELECT create_distributed_table('copy_default', 'id');
-- if DEFAULT is not specified, then the marker will be regular data
COPY copy_default FROM stdin;
1 value '2022-07-04'
2 \D '2022-07-05'
\.
SELECT * FROM copy_default ORDER BY id;
TRUNCATE copy_default;
COPY copy_default FROM stdin WITH (format csv);
1,value,2022-07-04
2,\D,2022-07-05
\.
SELECT * FROM copy_default ORDER BY id;
TRUNCATE copy_default;
-- DEFAULT cannot be used in binary mode
COPY copy_default FROM stdin WITH (format binary, default '\D');
-- DEFAULT cannot be new line nor carriage return
COPY copy_default FROM stdin WITH (default E'\n');
COPY copy_default FROM stdin WITH (default E'\r');
-- DELIMITER cannot appear in DEFAULT spec
COPY copy_default FROM stdin WITH (delimiter ';', default 'test;test');
-- CSV quote cannot appear in DEFAULT spec
COPY copy_default FROM stdin WITH (format csv, quote '"', default 'test"test');
-- NULL and DEFAULT spec must be different
COPY copy_default FROM stdin WITH (default '\N');
-- cannot use DEFAULT marker in column that has no DEFAULT value
COPY copy_default FROM stdin WITH (default '\D');
\D value '2022-07-04'
2 \D '2022-07-05'
\.
COPY copy_default FROM stdin WITH (format csv, default '\D');
\D,value,2022-07-04
2,\D,2022-07-05
\.
-- The DEFAULT marker must be unquoted and unescaped or it's not recognized
COPY copy_default FROM stdin WITH (default '\D');
1 \D '2022-07-04'
2 \\D '2022-07-04'
3 "\D" '2022-07-04'
\.
SELECT * FROM copy_default ORDER BY id;
TRUNCATE copy_default;
COPY copy_default FROM stdin WITH (format csv, default '\D');
1,\D,2022-07-04
2,\\D,2022-07-04
3,"\D",2022-07-04
\.
SELECT * FROM copy_default ORDER BY id;
TRUNCATE copy_default;
-- successful usage of DEFAULT option in COPY
COPY copy_default FROM stdin WITH (default '\D');
1 value '2022-07-04'
2 \D '2022-07-03'
3 \D \D
\.
SELECT * FROM copy_default ORDER BY id;
TRUNCATE copy_default;
COPY copy_default FROM stdin WITH (format csv, default '\D');
1,value,2022-07-04
2,\D,2022-07-03
3,\D,\D
\.
SELECT * FROM copy_default ORDER BY id;
TRUNCATE copy_default;
\c - - - :worker_1_port
COPY pg16.copy_default FROM stdin WITH (format csv, default '\D');
1,value,2022-07-04
2,\D,2022-07-03
3,\D,\D
\.
SELECT * FROM pg16.copy_default ORDER BY id;
\c - - - :master_port
TRUNCATE pg16.copy_default;
\c - - - :worker_2_port
COPY pg16.copy_default FROM stdin WITH (format csv, default '\D');
1,value,2022-07-04
2,\D,2022-07-03
3,\D,\D
\.
SELECT * FROM pg16.copy_default ORDER BY id;
-- Back on the coordinator for the remaining tests; restore the session GUCs
-- lost by the reconnect.
\c - - - :master_port
SET search_path TO pg16;
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
-- DEFAULT cannot be used in COPY TO
COPY (select 1 as test) TO stdout WITH (default '\D');
-- Tests for SQL/JSON: support the IS JSON predicate
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/6ee30209
CREATE TABLE test_is_json (id bigserial, js text);
SELECT create_distributed_table('test_is_json', 'id');
-- Seed with a mix of valid JSON (scalars, arrays, objects -- including
-- duplicate keys) and invalid JSON ('aaa', '{a:1}', trailing comma),
-- plus NULL and the empty string.
INSERT INTO test_is_json(js) VALUES
(NULL),
(''),
('123'),
('"aaa "'),
('true'),
('null'),
('[]'),
('[1, "2", {}]'),
('{}'),
('{ "a": 1, "b": null }'),
('{ "a": 1, "a": null }'),
('{ "a": 1, "b": [{ "a": 1 }, { "a": 2 }] }'),
('{ "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] }'),
('aaa'),
('{a:1}'),
('["a",]');
-- run IS JSON predicate in the worker nodes
-- Each IS JSON variant is aliased so the expected output has stable
-- column names.
SELECT
js,
js IS JSON "JSON",
js IS NOT JSON "NOT JSON",
js IS JSON VALUE "VALUE",
js IS JSON OBJECT "OBJECT",
js IS JSON ARRAY "ARRAY",
js IS JSON SCALAR "SCALAR",
js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
FROM
test_is_json ORDER BY js;
-- pull the data, and run IS JSON predicate in the coordinator
-- (OFFSET 0 in the CTE is a fence that forces the rows to be pulled to the
-- coordinator before the predicates are evaluated)
WITH pulled_data as (SELECT js FROM test_is_json OFFSET 0)
SELECT
js,
js IS JSON "IS JSON",
js IS NOT JSON "IS NOT JSON",
js IS JSON VALUE "IS VALUE",
js IS JSON OBJECT "IS OBJECT",
js IS JSON ARRAY "IS ARRAY",
js IS JSON SCALAR "IS SCALAR",
js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
FROM
pulled_data ORDER BY js;
-- Same predicates over the rows cast to json; only rows that are valid
-- JSON are selected, so the cast cannot fail.
SELECT
js,
js IS JSON "IS JSON",
js IS NOT JSON "IS NOT JSON",
js IS JSON VALUE "IS VALUE",
js IS JSON OBJECT "IS OBJECT",
js IS JSON ARRAY "IS ARRAY",
js IS JSON SCALAR "IS SCALAR",
js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
FROM
(SELECT js::json FROM test_is_json WHERE js IS JSON) foo(js);
-- IS JSON also applies to bytea input; the original text value (js0) is
-- kept alongside to make the output readable.
SELECT
js0,
js IS JSON "IS JSON",
js IS NOT JSON "IS NOT JSON",
js IS JSON VALUE "IS VALUE",
js IS JSON OBJECT "IS OBJECT",
js IS JSON ARRAY "IS ARRAY",
js IS JSON SCALAR "IS SCALAR",
js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
FROM
(SELECT js, js::bytea FROM test_is_json WHERE js IS JSON) foo(js0, js);
-- Same predicates over the rows cast to jsonb.
SELECT
js,
js IS JSON "IS JSON",
js IS NOT JSON "IS NOT JSON",
js IS JSON VALUE "IS VALUE",
js IS JSON OBJECT "IS OBJECT",
js IS JSON ARRAY "IS ARRAY",
js IS JSON SCALAR "IS SCALAR",
js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
FROM
(SELECT js::jsonb FROM test_is_json WHERE js IS JSON) foo(js);
-- SYSTEM_USER
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/0823d061
CREATE TABLE table_name_for_view(id int, val_1 text);
SELECT create_distributed_table('table_name_for_view', 'id');
INSERT INTO table_name_for_view VALUES (1, 'test');
-- define a view that uses SYSTEM_USER keyword
CREATE VIEW prop_view_1 AS
SELECT *, SYSTEM_USER AS su FROM table_name_for_view;
SELECT * FROM prop_view_1;
-- check definition with SYSTEM_USER is correctly propagated to workers
\c - - - :worker_1_port
SELECT pg_get_viewdef('pg16.prop_view_1', true);
-- Clean up: back on the coordinator, drop the whole test schema.
\c - - - :master_port
SET search_path TO pg16;
\set VERBOSITY terse
SET client_min_messages TO ERROR;
DROP SCHEMA pg16 CASCADE;

View File

@ -905,7 +905,6 @@ CREATE ROLE test_non_super_user WITH LOGIN;
-- Make the role a plain (non-superuser) role and grant it CREATE on the
-- database -- on the coordinator and, explicitly, on the workers -- plus
-- CREATE on the public schema.
ALTER ROLE test_non_super_user NOSUPERUSER;
GRANT CREATE ON DATABASE regression TO test_non_super_user;
SELECT result FROM run_command_on_workers($$GRANT CREATE ON DATABASE regression TO test_non_super_user$$);
GRANT CREATE ON SCHEMA public TO test_non_super_user ;
@ -997,7 +996,6 @@ $$);
-- Reconnect as the superuser and revoke the privileges granted above,
-- mirroring each GRANT (coordinator and workers) with a REVOKE.
\c - postgres
REVOKE CREATE ON DATABASE regression FROM test_non_super_user;
SELECT result FROM run_command_on_workers($$REVOKE CREATE ON DATABASE regression FROM test_non_super_user$$);
REVOKE CREATE ON SCHEMA public FROM test_non_super_user;