mirror of https://github.com/citusdata/citus.git
commit a336e4bd1e: Merge branch 'main' into create_user_2pc
@@ -68,7 +68,7 @@ USER citus
 # build postgres versions separately for effective parrallelism and caching of already built versions when changing only certain versions
 FROM base AS pg14
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.10
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.11
 RUN rm .pgenv/src/*.tar*
 RUN make -C .pgenv/src/postgresql-*/ clean
 RUN make -C .pgenv/src/postgresql-*/src/include install

@@ -80,7 +80,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
 RUN rm .pgenv-staging/config/default.conf

 FROM base AS pg15
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.5
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.6
 RUN rm .pgenv/src/*.tar*
 RUN make -C .pgenv/src/postgresql-*/ clean
 RUN make -C .pgenv/src/postgresql-*/src/include install

@@ -92,7 +92,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
 RUN rm .pgenv-staging/config/default.conf

 FROM base AS pg16
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.1
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.2
 RUN rm .pgenv/src/*.tar*
 RUN make -C .pgenv/src/postgresql-*/ clean
 RUN make -C .pgenv/src/postgresql-*/src/include install

@@ -210,7 +210,7 @@ COPY --chown=citus:citus .psqlrc .
 RUN sudo chown --from=root:root citus:citus -R ~

 # sets default pg version
-RUN pgenv switch 16.1
+RUN pgenv switch 16.2

 # make connecting to the coordinator easy
 ENV PGPORT=9700
@@ -31,11 +31,11 @@ jobs:
     pgupgrade_image_name: "citus/pgupgradetester"
     style_checker_image_name: "citus/stylechecker"
     style_checker_tools_version: "0.8.18"
-    image_suffix: "-v19b671f"
-    pg14_version: '{ "major": "14", "full": "14.10" }'
-    pg15_version: '{ "major": "15", "full": "15.5" }'
-    pg16_version: '{ "major": "16", "full": "16.1" }'
-    upgrade_pg_versions: "14.10-15.5-16.1"
+    image_suffix: "-v390dab3"
+    pg14_version: '{ "major": "14", "full": "14.11" }'
+    pg15_version: '{ "major": "15", "full": "15.6" }'
+    pg16_version: '{ "major": "16", "full": "16.2" }'
+    upgrade_pg_versions: "14.11-15.6-16.2"
   steps:
   # Since GHA jobs needs at least one step we use a noop step here.
   - name: Set up parameters
CHANGELOG.md (114 additions)

@@ -1,3 +1,117 @@
+### citus v11.0.10 (February 15, 2024) ###
+
+* Removes pg_send_cancellation and all references (#7135)
+
+### citus v12.1.2 (February 12, 2024) ###
+
+* Fixes the incorrect column count after ALTER TABLE (#7379)
+
+### citus v12.0.1 (July 11, 2023) ###
+
+* Fixes incorrect default value assumption for VACUUM(PROCESS_TOAST) (#7122)
+
+* Fixes a bug that causes an unexpected error when adding a column
+  with a NULL constraint (#7093)
+
+* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152)
+
+* Fixes a bug with deleting colocation groups (#6929)
+
+* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236)
+
+* Fixes shard size bug with too many shards (#7018)
+
+* Fixes the incorrect column count after ALTER TABLE (#7379)
+
+* Improves citus_tables view performance (#7050)
+
+* Makes sure to disallow creating a replicated distributed table
+  concurrently (#7219)
+
+* Removes pg_send_cancellation and all references (#7135)
+
+### citus v11.3.1 (February 12, 2024) ###
+
+* Disallows MERGE when the query prunes down to zero shards (#6946)
+
+* Fixes a bug related to non-existent objects in DDL commands (#6984)
+
+* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152)
+
+* Fixes a bug with deleting colocation groups (#6929)
+
+* Fixes incorrect results on fetching scrollable with hold cursors (#7014)
+
+* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236)
+
+* Fixes replicate reference tables task fail when user is superuser (#6930)
+
+* Fixes the incorrect column count after ALTER TABLE (#7379)
+
+* Improves citus_shard_sizes performance (#7050)
+
+* Makes sure to disallow creating a replicated distributed table
+  concurrently (#7219)
+
+* Removes pg_send_cancellation and all references (#7135)
+
+### citus v11.2.2 (February 12, 2024) ###
+
+* Fixes a bug in background shard rebalancer where the replicate
+  reference tables task fails if the current user is not a superuser (#6930)
+
+* Fixes a bug related to non-existent objects in DDL commands (#6984)
+
+* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152)
+
+* Fixes a bug with deleting colocation groups (#6929)
+
+* Fixes incorrect results on fetching scrollable with hold cursors (#7014)
+
+* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236)
+
+* Fixes the incorrect column count after ALTER TABLE (#7379)
+
+* Improves failure handling of distributed execution (#7090)
+
+* Makes sure to disallow creating a replicated distributed table
+  concurrently (#7219)
+
+* Removes pg_send_cancellation (#7135)
+
+### citus v11.1.7 (February 12, 2024) ###
+
+* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236)
+
+* Fixes a bug related to non-existent objects in DDL commands (#6984)
+
+* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152)
+
+* Fixes a bug with deleting colocation groups (#6929)
+
+* Fixes incorrect results on fetching scrollable with hold cursors (#7014)
+
+* Fixes the incorrect column count after ALTER TABLE (#7379)
+
+* Improves failure handling of distributed execution (#7090)
+
+* Makes sure to disallow creating a replicated distributed table
+  concurrently (#7219)
+
+* Removes pg_send_cancellation and all references (#7135)
+
+### citus v11.0.9 (February 12, 2024) ###
+
+* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152)
+
+* Fixes a bug with deleting colocation groups (#6929)
+
+* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236)
+
+* Fixes the incorrect column count after ALTER TABLE (#7462)
+
+* Improves failure handling of distributed execution (#7090)
+
 ### citus v12.1.1 (November 9, 2023) ###

 * Fixes leaking of memory and memory contexts in Citus foreign key cache
@@ -20,6 +20,6 @@ tail -n +$RegisterCitusConfigVariables_begin_linenumber src/backend/distributed/

 # extract citus gucs in the form of <tab><tab>"citus.X"
 grep -P "^[\t][\t]\"citus\.[a-zA-Z_0-9]+\"" RegisterCitusConfigVariables_func_def.out > gucs.out
-sort -c gucs.out
+LC_COLLATE=C sort -c gucs.out
 rm gucs.out
 rm RegisterCitusConfigVariables_func_def.out
@@ -1749,8 +1749,6 @@ The reason for handling dependencies and deparsing in post-process step is that

 Not all table DDL is currently deparsed. In that case, the original command sent by the client is used. That is a shortcoming in our DDL logic that causes user-facing issues and should be addressed. We do not directly construct a separate DDL command for each shard. Instead, we call the `worker_apply_shard_ddl_command(shardid bigint, ddl_command text)` function, which parses the DDL command, replaces the table names with shard names in the parse tree according to the shard ID, and then executes the command. That also has some shortcomings, because we cannot support more complex DDL commands in this manner (e.g. adding multiple foreign keys). Ideally, all DDL would be deparsed, and for table DDL the deparsed query string would have shard names, similar to regular queries.

-`markDistributed` is used to indicate whether we add a record to `pg_dist_object` to mark the object as "distributed".
-
 ## Defining a new DDL command

 All commands that are propagated by Citus should be defined in the DistributeObjectOps struct. Below is a sample DistributeObjectOps for the ALTER DATABASE command, defined in [distribute_object_ops.c](commands/distribute_object_ops.c).
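The sample entry referenced above is elided by the hunk boundary; for orientation, here is a minimal sketch of what such an entry looks like, assuming the field names discussed in this README (the exact field set in distribute_object_ops.c may differ):

```c
/* Sketch of a DistributeObjectOps entry for ALTER DATABASE; field names follow
 * the pattern described in this README, but treat them as an approximation of
 * the current source rather than a verbatim copy. */
static DistributeObjectOps Database_Alter = {
    .deparse = DeparseAlterDatabaseStmt,       /* builds the SQL string to send */
    .qualify = NULL,                           /* no name qualification needed */
    .preprocess = PreprocessAlterDatabaseStmt, /* runs before local execution */
    .postprocess = NULL,                       /* nothing to do afterwards */
    .operationType = DIST_OPS_ALTER,
    .address = NULL,                           /* no object address resolution */
    .markDistributed = false,                  /* ALTER adds nothing to pg_dist_object */
};
```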
@@ -1810,6 +1808,14 @@ GetDistributeObjectOps(Node *node)
 ...
 ```

+Finally, when adding support for propagation of a new DDL command, you also need to make sure that:
+* You use `quote_identifier()` or `quote_literal_cstr()` for fields that might need characters or bare quotes escaped when deparsing a DDL command (see the sketch below).
+* The code is tolerant to nullable fields within the given `Stmt *` object, i.e., the ones that Postgres allows not specifying at all.
+* You register the object into `pg_dist_object` if it's a CREATE command and you delete the object from `pg_dist_object` if it's a DROP command.
+* Node activation (e.g., `citus_add_node()`) properly propagates the object and its dependencies to new nodes.
+* You add test cases for all the scenarios noted above.
+* You add test cases for different options that can be specified for the settings. For example, `CREATE DATABASE .. IS_TEMPLATE = TRUE` and `CREATE DATABASE .. IS_TEMPLATE = FALSE` should be tested separately.
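As an illustration of the first bullet, this is roughly what escaping looks like during deparse; `quote_identifier()` and `quote_literal_cstr()` are PostgreSQL helpers, while the surrounding function is hypothetical:

```c
#include "postgres.h"
#include "lib/stringinfo.h"
#include "utils/builtins.h"
#include "utils/ruleutils.h"

/* Hypothetical deparse helper: identifiers go through quote_identifier(),
 * string values through quote_literal_cstr(), so names containing quotes
 * or special characters survive the round trip to the worker. */
static void
AppendSecLabelSketch(StringInfo buf, const char *roleName, const char *label)
{
    appendStringInfo(buf, "SECURITY LABEL ON ROLE %s IS %s",
                     quote_identifier(roleName),
                     quote_literal_cstr(label));
}
```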

 ## Object & dependency propagation

 These two topics are closely related, so we'll discuss them together. You can start the topic by reading [Nils' blog](https://www.citusdata.com/blog/2020/06/25/using-custom-types-with-citus-and-postgres/) on the topic.
@@ -1885,7 +1891,7 @@ Generally, the process is straightforward: When a new object is created, Citus a

 Citus employs a universal strategy for dealing with objects. Every object creation, alteration, or deletion event (like custom types, tables, or extensions) is represented by the C struct `DistributeObjectOps`. You can find a list of all supported object types in [`distribute_object_ops.c`](https://github.com/citusdata/citus/blob/2c190d068918d1c457894adf97f550e5b3739184/src/backend/distributed/commands/distribute_object_ops.c#L4). As of Citus 12.1, most Postgres objects are supported, although there are a few exceptions.

-Whenever `DistributeObjectOps->markDistributed` is set to true—usually during `CREATE` operations—Citus calls `MarkObjectDistributed()`. Citus also labels the same objects as distributed across all nodes via the `citus_internal_add_object_metadata()` UDF.
+Whenever `DistributeObjectOps->markDistributed` is set to true—usually during `CREATE` operations—Citus calls `MarkObjectDistributed()`. Citus also labels the same objects as distributed across all nodes via the `citus_internal.add_object_metadata()` UDF.

 Here's a simple example:
@@ -1895,7 +1901,7 @@ CREATE TYPE type_test AS (a int, b int);
 ...
 NOTICE: issuing SELECT worker_create_or_replace_object('CREATE TYPE public.type_test AS (a integer, b integer);');
 ...
-WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
 ...

 -- Then, check pg_dist_object. This should be consistent across all nodes.
@@ -2423,7 +2429,7 @@ Cleanup records always need to be committed before creating the actual object. I

 PostgreSQL supports change data capture (CDC) via the logical decoding interface. The basic idea behind logical decoding is that you make a replication connection (a special type of postgres connection), start replication, and then the backend process reads through the WAL, decodes the WAL records, and emits them over the wire in a format defined by the output plugin. If we were to use regular logical decoding on the nodes of a Citus cluster, we would see the name of the shard in each write, and internal data transfers such as shard moves would result in inserts being emitted. We use several techniques to avoid this.

-All writes in PostgreSQL are marked with a replication origin (0 by default) and the decoder can make decisions on whether to emit the change based on the replication origin. We use this to filter out internal data transfers. If `citus.enable_change_data_capture` is enabled, all internal data transfers are marked with the special DoNotReplicateId replication origin by calling the `citus_internal_start_replication_origin_tracking()` UDF before writing the data. This replication origin ID is special in the sense that it does not need to be created (which prevents locking issues, especially when dropping replication origins). It is still up to the output plugin to decide what to do with changes marked as DoNotReplicateId.
+All writes in PostgreSQL are marked with a replication origin (0 by default) and the decoder can make decisions on whether to emit the change based on the replication origin. We use this to filter out internal data transfers. If `citus.enable_change_data_capture` is enabled, all internal data transfers are marked with the special DoNotReplicateId replication origin by calling the `citus_internal.start_replication_origin_tracking()` UDF before writing the data. This replication origin ID is special in the sense that it does not need to be created (which prevents locking issues, especially when dropping replication origins). It is still up to the output plugin to decide what to do with changes marked as DoNotReplicateId.

 We have very minimal control over replication commands like `CREATE_REPLICATION_SLOT`, since there are no direct hooks, and decoder names (e.g. "pgoutput") are typically hard-coded in the client. The only method we found of overriding logical decoding behaviour is to overload the output plugin name in the dynamic library path.
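For context, a minimal sketch of how an output plugin can honor the origin-based filtering described above, assuming PostgreSQL's `filter_by_origin_cb` output-plugin callback and Citus' `DoNotReplicateId` define:

```c
#include "postgres.h"
#include "replication/logical.h"
#include "replication/output_plugin.h"

/* Returning true tells the decoder to skip (filter out) the change.
 * Citus-internal data transfers carry the DoNotReplicateId origin, so a
 * CDC-aware output plugin can drop them here and emit everything else. */
static bool
sketch_filter_by_origin_cb(LogicalDecodingContext *ctx, RepOriginId origin_id)
{
    return origin_id == DoNotReplicateId;
}
```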
@@ -397,7 +397,7 @@ AdjustClocksToTransactionHighest(List *nodeConnectionList,

     /* Set the clock value on participating worker nodes */
     appendStringInfo(queryToSend,
-                     "SELECT pg_catalog.citus_internal_adjust_local_clock_to_remote"
+                     "SELECT citus_internal.adjust_local_clock_to_remote"
                      "('(%lu, %u)'::pg_catalog.cluster_clock);",
                      transactionClockValue->logical, transactionClockValue->counter);
@@ -481,9 +481,7 @@ PreprocessCreateDatabaseStmt(Node *node, const char *queryString,
 /*
  * PostprocessCreateDatabaseStmt is executed after the statement is applied to the local
  * postgres instance. In this stage we prepare the commands that need to be run on
- * all workers to create the database. Since the CREATE DATABASE statement gives error
- * in a transaction block, we need to use NontransactionalNodeDDLTaskList to send the
- * CREATE DATABASE statement to the workers.
+ * all workers to create the database.
  *
  */
 List *
@@ -508,20 +506,25 @@ PostprocessCreateDatabaseStmt(Node *node, const char *queryString)

     char *createDatabaseCommand = DeparseTreeNode(node);

-    List *commands = list_make3(DISABLE_DDL_PROPAGATION,
-                                (void *) createDatabaseCommand,
-                                ENABLE_DDL_PROPAGATION);
+    List *createDatabaseCommands = list_make3(DISABLE_DDL_PROPAGATION,
+                                              (void *) createDatabaseCommand,
+                                              ENABLE_DDL_PROPAGATION);

-    return NontransactionalNodeDDLTaskList(REMOTE_NODES, commands);
+    /*
+     * Since the CREATE DATABASE statements cannot be executed in a transaction
+     * block, we need to use NontransactionalNodeDDLTaskList() to send the CREATE
+     * DATABASE statement to the workers.
+     */
+    List *createDatabaseDDLJobList =
+        NontransactionalNodeDDLTaskList(REMOTE_NODES, createDatabaseCommands);
+    return createDatabaseDDLJobList;
 }


 /*
  * PreprocessDropDatabaseStmt is executed before the statement is applied to the local
  * postgres instance. In this stage we can prepare the commands that need to be run on
- * all workers to drop the database. Since the DROP DATABASE statement gives error in
- * transaction context, we need to use NontransactionalNodeDDLTaskList to send the
- * DROP DATABASE statement to the workers.
+ * all workers to drop the database.
  *
  * We also serialize database commands globally by acquiring a Citus specific advisory
  * lock based on OCLASS_DATABASE on the first primary worker node.
@@ -559,11 +562,18 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString,

     char *dropDatabaseCommand = DeparseTreeNode(node);

-    List *commands = list_make3(DISABLE_DDL_PROPAGATION,
-                                (void *) dropDatabaseCommand,
-                                ENABLE_DDL_PROPAGATION);
+    List *dropDatabaseCommands = list_make3(DISABLE_DDL_PROPAGATION,
+                                            (void *) dropDatabaseCommand,
+                                            ENABLE_DDL_PROPAGATION);

-    return NontransactionalNodeDDLTaskList(REMOTE_NODES, commands);
+    /*
+     * Due to same reason stated in PostprocessCreateDatabaseStmt(), we need to
+     * use NontransactionalNodeDDLTaskList() to send the DROP DATABASE statement
+     * to the workers.
+     */
+    List *dropDatabaseDDLJobList =
+        NontransactionalNodeDDLTaskList(REMOTE_NODES, dropDatabaseCommands);
+    return dropDatabaseDDLJobList;
 }
@@ -890,7 +900,7 @@ CreateDatabaseDDLCommand(Oid dbId)

     /* Generate the CREATE DATABASE statement */
     appendStringInfo(outerDbStmt,
-                     "SELECT pg_catalog.citus_internal_database_command(%s)",
+                     "SELECT citus_internal.database_command(%s)",
                      quote_literal_cstr(createStmt));

     ReleaseSysCache(tuple);
@@ -2663,7 +2663,6 @@ CreateLocalColocatedIntermediateFile(CitusCopyDestReceiver *copyDest,
     CreateIntermediateResultsDirectory();

     const int fileFlags = (O_CREAT | O_RDWR | O_TRUNC);
-    const int fileMode = (S_IRUSR | S_IWUSR);

     StringInfo filePath = makeStringInfo();
     appendStringInfo(filePath, "%s_%ld", copyDest->colocatedIntermediateResultIdPrefix,

@@ -2671,7 +2670,7 @@ CreateLocalColocatedIntermediateFile(CitusCopyDestReceiver *copyDest,

     const char *fileName = QueryResultFileName(filePath->data);
     shardState->fileDest =
-        FileCompatFromFileStart(FileOpenForTransmit(fileName, fileFlags, fileMode));
+        FileCompatFromFileStart(FileOpenForTransmit(fileName, fileFlags));

     CopyOutState localFileCopyOutState = shardState->copyOutState;
     bool isBinaryCopy = localFileCopyOutState->binary;
@@ -886,6 +886,14 @@ GenerateGrantRoleStmtsOfRole(Oid roleid)
     {
         Form_pg_auth_members membership = (Form_pg_auth_members) GETSTRUCT(tuple);

+        ObjectAddress *roleAddress = palloc0(sizeof(ObjectAddress));
+        ObjectAddressSet(*roleAddress, AuthIdRelationId, membership->grantor);
+        if (!IsAnyObjectDistributed(list_make1(roleAddress)))
+        {
+            /* we only need to propagate the grant if the grantor is distributed */
+            continue;
+        }
+
         GrantRoleStmt *grantRoleStmt = makeNode(GrantRoleStmt);
         grantRoleStmt->is_grant = true;

@@ -901,7 +909,11 @@ GenerateGrantRoleStmtsOfRole(Oid roleid)
         granteeRole->rolename = GetUserNameFromId(membership->member, true);
         grantRoleStmt->grantee_roles = list_make1(granteeRole);

-        grantRoleStmt->grantor = NULL;
+        RoleSpec *grantorRole = makeNode(RoleSpec);
+        grantorRole->roletype = ROLESPEC_CSTRING;
+        grantorRole->location = -1;
+        grantorRole->rolename = GetUserNameFromId(membership->grantor, false);
+        grantRoleStmt->grantor = grantorRole;

 #if PG_VERSION_NUM >= PG_VERSION_16
@@ -1241,12 +1253,6 @@ PreprocessGrantRoleStmt(Node *node, const char *queryString,
         return NIL;
     }

-    /*
-     * Postgres don't seem to use the grantor. Even dropping the grantor doesn't
-     * seem to affect the membership. If this changes, we might need to add grantors
-     * to the dependency resolution too. For now we just don't propagate it.
-     */
-    stmt->grantor = NULL;
     stmt->grantee_roles = distributedGranteeRoles;
     char *sql = DeparseTreeNode((Node *) stmt);
     stmt->grantee_roles = allGranteeRoles;
@@ -29,7 +29,7 @@
 List *
 PostprocessSecLabelStmt(Node *node, const char *queryString)
 {
-    if (!ShouldPropagate())
+    if (!EnableAlterRolePropagation || !ShouldPropagate())
     {
         return NIL;
     }
@@ -59,21 +59,17 @@ PostprocessSecLabelStmt(Node *node, const char *queryString)
         return NIL;
     }

-    if (!EnableCreateRolePropagation)
-    {
-        return NIL;
-    }
-
-    EnsureCoordinator();
+    EnsurePropagationToCoordinator();
     EnsureAllObjectDependenciesExistOnAllNodes(objectAddresses);

-    const char *sql = DeparseTreeNode((Node *) secLabelStmt);
+    const char *secLabelCommands = DeparseTreeNode((Node *) secLabelStmt);

     List *commandList = list_make3(DISABLE_DDL_PROPAGATION,
-                                   (void *) sql,
+                                   (void *) secLabelCommands,
                                    ENABLE_DDL_PROPAGATION);

-    return NodeDDLTaskList(NON_COORDINATOR_NODES, commandList);
+    return NodeDDLTaskList(REMOTE_NODES, commandList);
 }
@@ -43,7 +43,6 @@
 #include "foreign/foreign.h"
 #include "lib/stringinfo.h"
 #include "nodes/makefuncs.h"
-#include "nodes/nodes.h"
 #include "nodes/parsenodes.h"
 #include "nodes/pg_list.h"
 #include "postmaster/postmaster.h"
@@ -143,6 +142,40 @@ static const NonMainDbDistributedStatementInfo NonMainDbSupportedStatements[] =
-      sizeof(supportedObjectTypesForGrantStmt) / sizeof(ObjectType) }
-};
+/*
+ * NonMainDbDistributedStatementInfo is used to determine whether a statement is
+ * supported from non-main databases and whether it should be marked as
+ * distributed explicitly (*).
+ *
+ * (*) We always have to mark such objects as "distributed" but while for some
+ * object types we can delegate this to main database, for some others we have
+ * to explicitly send a command to all nodes in this code-path to achieve this.
+ */
+typedef struct NonMainDbDistributedStatementInfo
+{
+    int statementType;
+    bool explicitlyMarkAsDistributed;
+
+    /*
+     * checkSupportedObjectTypes is a callback function that checks whether
+     * type of the object referred to by given statement is supported.
+     *
+     * Can be NULL if not applicable for the statement type.
+     */
+    bool (*checkSupportedObjectTypes)(Node *node);
+} NonMainDbDistributedStatementInfo;
+
+/*
+ * MarkObjectDistributedParams is used to pass parameters to the
+ * MarkObjectDistributedFromNonMainDb function.
+ */
+typedef struct MarkObjectDistributedParams
+{
+    char *name;
+    Oid id;
+    uint16 catalogRelId;
+} MarkObjectDistributedParams;
+
+
 bool EnableDDLPropagation = true; /* ddl propagation is enabled */
 int CreateObjectPropagationMode = CREATE_OBJECT_PROPAGATION_IMMEDIATE;
@@ -171,17 +204,42 @@ static void PostStandardProcessUtility(Node *parsetree);
 static void DecrementUtilityHookCountersIfNecessary(Node *parsetree);
 static bool IsDropSchemaOrDB(Node *parsetree);
 static bool ShouldCheckUndistributeCitusLocalTables(void);
-static void RunPreprocessMainDBCommand(Node *parsetree, const char *queryString);


+/*
+ * Functions to support commands used to manage node-wide objects from non-main
+ * databases.
+ */
+static bool IsCommandToCreateOrDropMainDB(Node *parsetree);
+static void RunPreprocessMainDBCommand(Node *parsetree);
 static void RunPostprocessMainDBCommand(Node *parsetree);
-static bool IsObjectTypeSupported(Node *parsetree, NonMainDbDistributedStatementInfo
-                                  nonMainDbDistributedStatementInfo);
-static bool IsStatementSupportedInNonMainDb(Node *parsetree);
+static bool IsStatementSupportedFromNonMainDb(Node *parsetree);
 static bool StatementRequiresMarkDistributedFromNonMainDb(Node *parsetree);
-static bool StatementRequiresUnmarkDistributedFromNonMainDb(Node *parsetree);
-static List * GetObjectInfoList(Node *parsetree);
-static void MarkObjectDistributedInNonMainDb(Node *parsetree, ObjectInfo *objectInfo);
-static void UnmarkObjectDistributedInNonMainDb(List *objectInfoList);
+static void MarkObjectDistributedFromNonMainDb(Node *parsetree);
+static MarkObjectDistributedParams GetMarkObjectDistributedParams(Node *parsetree);

+/*
+ * checkSupportedObjectTypes callbacks for
+ * NonMainDbDistributedStatementInfo objects.
+ */
+static bool NonMainDbCheckSupportedObjectTypeForGrant(Node *node);


 /*
  * NonMainDbSupportedStatements is an array of statements that are supported
  * from non-main databases.
  */
-ObjectType supportedObjectTypesForGrantStmt[] = { OBJECT_DATABASE };
+static const NonMainDbDistributedStatementInfo NonMainDbSupportedStatements[] = {
+    { T_GrantRoleStmt, false, NULL },
+    { T_CreateRoleStmt, true, NULL },
+    { T_GrantStmt, false, NonMainDbCheckSupportedObjectTypeForGrant },
+    { T_CreatedbStmt, false, NULL },
+    { T_DropdbStmt, false, NULL },
+};


 /*
  * ProcessUtilityParseTree is a convenience method to create a PlannedStmt out of
  * pieces of a utility statement before invoking ProcessUtility.
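For orientation, registering one more statement type in this table would look roughly like the following; the SECURITY LABEL row and its callback are purely hypothetical and not part of this patch:

```c
/* Hypothetical object-type filter, mirroring
 * NonMainDbCheckSupportedObjectTypeForGrant above. */
static bool
NonMainDbCheckSupportedObjectTypeForSecLabel(Node *node)
{
    SecLabelStmt *stmt = castNode(SecLabelStmt, node);
    return stmt->objtype == OBJECT_ROLE;
}

/* ...and one extra row in the table (hypothetical):
 * { T_SecLabelStmt, false, NonMainDbCheckSupportedObjectTypeForSecLabel },
 * where "false" means the main database handles pg_dist_object bookkeeping. */
```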
@@ -312,9 +370,24 @@ citus_ProcessUtility(PlannedStmt *pstmt,

     if (!CitusHasBeenLoaded())
     {
-        if (!IsMainDB)
+        /*
+         * We always execute CREATE/DROP DATABASE from the main database. There are no
+         * transactional visibility issues, since these commands are non-transactional.
+         * And this way we only have to consider one codepath when creating databases.
+         * We don't try to send the query to the main database if the CREATE/DROP DATABASE
+         * command is for the main database itself, this is a very rare case but it's
+         * exercised by our test suite.
+         */
+        if (!IsMainDB &&
+            !IsCommandToCreateOrDropMainDB(parsetree))
         {
-            RunPreprocessMainDBCommand(parsetree, queryString);
+            RunPreprocessMainDBCommand(parsetree);
+
+            if (IsA(parsetree, CreatedbStmt) ||
+                IsA(parsetree, DropdbStmt))
+            {
+                return;
+            }
         }

         /*
@@ -795,6 +868,13 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt,
                  errhint("Connect to other nodes directly to manually create all"
                          " necessary users and roles.")));
     }
+    else if (IsA(parsetree, SecLabelStmt) && !EnableAlterRolePropagation)
+    {
+        ereport(NOTICE, (errmsg("not propagating SECURITY LABEL commands to other"
+                                " nodes"),
+                         errhint("Connect to other nodes directly to manually assign"
+                                 " necessary labels.")));
+    }

     /*
      * Make sure that on DROP EXTENSION we terminate the background daemon
@@ -1653,18 +1733,61 @@ DropSchemaOrDBInProgress(void)
 }


+/*
+ * IsCommandToCreateOrDropMainDB checks if this query creates or drops the
+ * main database, so we can make an exception and not send this query to
+ * the main database.
+ */
+static bool
+IsCommandToCreateOrDropMainDB(Node *parsetree)
+{
+    if (IsA(parsetree, CreatedbStmt))
+    {
+        CreatedbStmt *createdbStmt = castNode(CreatedbStmt, parsetree);
+        return strcmp(createdbStmt->dbname, MainDb) == 0;
+    }
+    else if (IsA(parsetree, DropdbStmt))
+    {
+        DropdbStmt *dropdbStmt = castNode(DropdbStmt, parsetree);
+        return strcmp(dropdbStmt->dbname, MainDb) == 0;
+    }
+
+    return false;
+}
+
+
 /*
  * RunPreprocessMainDBCommand runs the necessary commands for a query, in main
  * database before query is run on the local node with PrevProcessUtility
  */
 static void
-RunPreprocessMainDBCommand(Node *parsetree, const char *queryString)
+RunPreprocessMainDBCommand(Node *parsetree)
 {
-    if (!IsStatementSupportedInNonMainDb(parsetree))
+    if (!IsStatementSupportedFromNonMainDb(parsetree))
     {
         return;
     }

+    char *queryString = DeparseTreeNode(parsetree);
+
+    if (IsA(parsetree, CreatedbStmt) ||
+        IsA(parsetree, DropdbStmt))
+    {
+        IsMainDBCommandInXact = false;
+        RunCitusMainDBQuery((char *) queryString);
+        return;
+    }
+
+    if (StatementRequiresUnmarkDistributedFromNonMainDb(parsetree))
+    {
+        List *objectInfoList = GetObjectInfoList(parsetree);
+
+        UnmarkObjectDistributedInNonMainDb(objectInfoList);
+    }
+
+    IsMainDBCommandInXact = true;
+
     StringInfo mainDBQuery = makeStringInfo();
     appendStringInfo(mainDBQuery,
                      START_MANAGEMENT_TRANSACTION,

@@ -1675,14 +1798,6 @@ RunPreprocessMainDBCommand(Node *parsetree, const char *queryString)
                      EXECUTE_COMMAND_ON_REMOTE_NODES_AS_USER,
                      quote_literal_cstr(queryString),
                      quote_literal_cstr(CurrentUserName()));

-    if (StatementRequiresUnmarkDistributedFromNonMainDb(parsetree))
-    {
-        List *objectInfoList = GetObjectInfoList(parsetree);
-
-        UnmarkObjectDistributedInNonMainDb(objectInfoList);
-    }
-
     RunCitusMainDBQuery(mainDBQuery->data);
 }
@@ -1694,158 +1809,39 @@ RunPreprocessMainDBCommand(Node *parsetree, const char *queryString)
 static void
 RunPostprocessMainDBCommand(Node *parsetree)
 {
-    if (IsStatementSupportedInNonMainDb(parsetree) &&
+    if (IsStatementSupportedFromNonMainDb(parsetree) &&
         StatementRequiresMarkDistributedFromNonMainDb(parsetree))
     {
-        List *objectInfoList = GetObjectInfoList(parsetree);
-        ObjectInfo *objectInfo = NULL;
-
-        if (objectInfoList != NIL)
-        {
-            objectInfo = (ObjectInfo *) linitial(objectInfoList);
-            MarkObjectDistributedInNonMainDb(parsetree, objectInfo);
-        }
+        MarkObjectDistributedFromNonMainDb(parsetree);
     }
 }


-/*
- * GetObjectInfoList returns the names and oids of the object in the given parsetree.
- */
-List *
-GetObjectInfoList(Node *parsetree)
-{
-    List *infoList = NIL;
-
-    if (IsA(parsetree, CreateRoleStmt))
-    {
-        CreateRoleStmt *stmt = castNode(CreateRoleStmt, parsetree);
-        ObjectInfo *info = palloc(sizeof(ObjectInfo));
-        info->name = stmt->role;
-        info->id = get_role_oid(stmt->role, false);
-        infoList = lappend(infoList, info);
-    }
-    else if (IsA(parsetree, DropRoleStmt))
-    {
-        DropRoleStmt *stmt = castNode(DropRoleStmt, parsetree);
-        RoleSpec *roleSpec;
-        foreach_ptr(roleSpec, stmt->roles)
-        {
-            ObjectInfo *info = palloc(sizeof(ObjectInfo));
-            info->name = roleSpec->rolename;
-            info->id = get_role_oid(roleSpec->rolename, false);
-            infoList = lappend(infoList, info);
-        }
-    }
-
-    /* Add else if branches for other statement types */
-
-    return infoList;
-}
-
-
-/*
- * MarkObjectDistributedInNonMainDb marks the given object as distributed on the
- * non-main database.
- */
-static void
-MarkObjectDistributedInNonMainDb(Node *parsetree, ObjectInfo *objectInfo)
-{
-    StringInfo mainDBQuery = makeStringInfo();
-    appendStringInfo(mainDBQuery,
-                     MARK_OBJECT_DISTRIBUTED,
-                     AuthIdRelationId,
-                     quote_literal_cstr(objectInfo->name),
-                     objectInfo->id,
-                     quote_literal_cstr(CurrentUserName()));
-    RunCitusMainDBQuery(mainDBQuery->data);
-}
-
-
-/*
- * UnmarkObjectDistributedInNonMainDb unmarks the given object as distributed on the
- * non-main database.
- */
-static void
-UnmarkObjectDistributedInNonMainDb(List *objectInfoList)
-{
-    ObjectInfo *objectInfo = NULL;
-    int subObjectId = 0;
-    char *checkObjectExistence = "false";
-    foreach_ptr(objectInfo, objectInfoList)
-    {
-        StringInfo query = makeStringInfo();
-        appendStringInfo(query,
-                         UNMARK_OBJECT_DISTRIBUTED,
-                         AuthIdRelationId,
-                         objectInfo->id,
-                         subObjectId, checkObjectExistence);
-        RunCitusMainDBQuery(query->data);
-    }
-}
-
-
 /*
- * IsStatementSupportedIn2Pc returns true if the statement is supported from a
+ * IsStatementSupportedFromNonMainDb returns true if the statement is supported from a
  * non-main database.
  */
 static bool
-IsStatementSupportedInNonMainDb(Node *parsetree)
+IsStatementSupportedFromNonMainDb(Node *parsetree)
 {
     NodeTag type = nodeTag(parsetree);

     for (int i = 0; i < sizeof(NonMainDbSupportedStatements) /
                     sizeof(NonMainDbSupportedStatements[0]); i++)
     {
-        if (type == NonMainDbSupportedStatements[i].statementType)
+        if (type != NonMainDbSupportedStatements[i].statementType)
         {
-            if (NonMainDbSupportedStatements[i].supportedObjectTypes == NULL)
-            {
-                return true;
-            }
-            else
-            {
-                if (type == T_GrantStmt)
-                {
-                    return IsObjectTypeSupported(parsetree,
-                                                 NonMainDbSupportedStatements[i]);
-                }
-            }
+            continue;
         }

+        return !NonMainDbSupportedStatements[i].checkSupportedObjectTypes ||
+               NonMainDbSupportedStatements[i].checkSupportedObjectTypes(parsetree);
     }

     return false;
 }


-/*
- * IsObjectTypeSupported returns true if the object type is supported in 2pc
- */
-bool
-IsObjectTypeSupported(Node *parsetree, NonMainDbDistributedStatementInfo
-                      nonMainDbDistributedStatementInfo)
-{
-    NodeTag type = nodeTag(parsetree);
-    if (type == T_GrantStmt)
-    {
-        GrantStmt *stmt = castNode(GrantStmt, parsetree);
-
-        /* check if stmt->objtype is in supportedObjectTypes */
-        for (int j = 0; j < nonMainDbDistributedStatementInfo.supportedObjectTypesSize;
-             j++)
-        {
-            if (stmt->objtype ==
-                nonMainDbDistributedStatementInfo.supportedObjectTypes[j])
-            {
-                return true;
-            }
-        }
-        return false;
-    }
-    return false;
-}
-

 /*
  * StatementRequiresMarkDistributedFromNonMainDb returns true if the statement should be marked
  * as distributed when executed from a non-main database.
@@ -1860,8 +1856,7 @@ StatementRequiresMarkDistributedFromNonMainDb(Node *parsetree)
     {
         if (type == NonMainDbSupportedStatements[i].statementType)
         {
-            return NonMainDbSupportedStatements[i].distributedOperation ==
-                   MARK_DISTRIBUTED;
+            return NonMainDbSupportedStatements[i].explicitlyMarkAsDistributed;
         }
     }
@@ -1870,23 +1865,57 @@ StatementRequiresMarkDistributedFromNonMainDb(Node *parsetree)


 /*
- * StatementRequiresUnmarkDistributedFromNonMainDb returns true if the statement should be marked
- * as distributed when executed from a non-main database.
+ * MarkObjectDistributedFromNonMainDb marks the given object as distributed on the
+ * non-main database.
  */
-static bool
-StatementRequiresUnmarkDistributedFromNonMainDb(Node *parsetree)
+static void
+MarkObjectDistributedFromNonMainDb(Node *parsetree)
 {
-    NodeTag type = nodeTag(parsetree);
+    MarkObjectDistributedParams markObjectDistributedParams =
+        GetMarkObjectDistributedParams(parsetree);
+    StringInfo mainDBQuery = makeStringInfo();
+    appendStringInfo(mainDBQuery,
+                     MARK_OBJECT_DISTRIBUTED,
+                     markObjectDistributedParams.catalogRelId,
+                     quote_literal_cstr(markObjectDistributedParams.name),
+                     markObjectDistributedParams.id,
+                     quote_literal_cstr(CurrentUserName()));
+    RunCitusMainDBQuery(mainDBQuery->data);
+}

-    for (int i = 0; i < sizeof(NonMainDbSupportedStatements) /
-                    sizeof(NonMainDbSupportedStatements[0]); i++)

+/*
+ * GetMarkObjectDistributedParams returns MarkObjectDistributedParams for the target
+ * object of given parsetree.
+ */
+static MarkObjectDistributedParams
+GetMarkObjectDistributedParams(Node *parsetree)
 {
+    if (IsA(parsetree, CreateRoleStmt))
     {
-        if (type == NonMainDbSupportedStatements[i].statementType)
-        {
-            return NonMainDbSupportedStatements[i].distributedOperation ==
-                   UNMARK_DISTRIBUTED;
-        }
+        CreateRoleStmt *stmt = castNode(CreateRoleStmt, parsetree);
+        MarkObjectDistributedParams info = {
+            .name = stmt->role,
+            .catalogRelId = AuthIdRelationId,
+            .id = get_role_oid(stmt->role, false)
+        };
+
+        return info;
     }

-    return false;
+    /* Add else if branches for other statement types */
+
+    elog(ERROR, "unsupported statement type");
 }


+/*
+ * NonMainDbCheckSupportedObjectTypeForGrant implements checkSupportedObjectTypes
+ * callback for GrantStmt.
+ */
+static bool
+NonMainDbCheckSupportedObjectTypeForGrant(Node *node)
+{
+    GrantStmt *stmt = castNode(GrantStmt, node);
+    return stmt->objtype == OBJECT_DATABASE;
+}
@@ -74,7 +74,7 @@ AppendGrantRestrictAndCascade(StringInfo buf, GrantStmt *stmt)
 void
 AppendGrantedByInGrantForRoleSpec(StringInfo buf, RoleSpec *grantor, bool isGrant)
 {
-    if (isGrant && grantor)
+    if (grantor)
     {
         appendStringInfo(buf, " GRANTED BY %s", RoleSpecString(grantor, true));
     }
@@ -488,7 +488,6 @@ AppendGrantRoleStmt(StringInfo buf, GrantRoleStmt *stmt)
     AppendGrantWithAdminOption(buf, stmt);
     AppendGrantedByInGrantForRoleSpec(buf, stmt->grantor, stmt->is_grant);
     AppendGrantRestrictAndCascadeForRoleSpec(buf, stmt->behavior, stmt->is_grant);
-    AppendGrantedByInGrantForRoleSpec(buf, stmt->grantor, stmt->is_grant);
     appendStringInfo(buf, ";");
 }
@@ -295,7 +295,6 @@ PrepareIntermediateResultBroadcast(RemoteFileDestReceiver *resultDest)
     if (resultDest->writeLocalFile)
     {
         const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY);
-        const int fileMode = (S_IRUSR | S_IWUSR);

         /* make sure the directory exists */
         CreateIntermediateResultsDirectory();

@@ -303,8 +302,7 @@ PrepareIntermediateResultBroadcast(RemoteFileDestReceiver *resultDest)
         const char *fileName = QueryResultFileName(resultId);

         resultDest->fileCompat = FileCompatFromFileStart(FileOpenForTransmit(fileName,
-                                                                             fileFlags,
-                                                                             fileMode));
+                                                                             fileFlags));
     }

     WorkerNode *workerNode = NULL;

@@ -606,7 +604,7 @@ CreateIntermediateResultsDirectory(void)
 {
     char *resultDirectory = IntermediateResultsDirectory();

-    int makeOK = mkdir(resultDirectory, S_IRWXU);
+    int makeOK = MakePGDirectory(resultDirectory);
     if (makeOK != 0)
     {
         if (errno == EEXIST)

@@ -976,7 +974,6 @@ FetchRemoteIntermediateResult(MultiConnection *connection, char *resultId)

     StringInfo copyCommand = makeStringInfo();
     const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY);
-    const int fileMode = (S_IRUSR | S_IWUSR);

     PGconn *pgConn = connection->pgConn;
     int socket = PQsocket(pgConn);

@@ -998,7 +995,7 @@ FetchRemoteIntermediateResult(MultiConnection *connection, char *resultId)

     PQclear(result);

-    File fileDesc = FileOpenForTransmit(localPath, fileFlags, fileMode);
+    File fileDesc = FileOpenForTransmit(localPath, fileFlags);
     FileCompat fileCompat = FileCompatFromFileStart(fileDesc);

     while (true)
@@ -17,6 +17,7 @@
 #include "pgstat.h"

 #include "commands/defrem.h"
+#include "common/file_perm.h"
 #include "libpq/libpq.h"
 #include "libpq/pqformat.h"
 #include "storage/fd.h"

@@ -48,8 +49,7 @@ RedirectCopyDataToRegularFile(const char *filename)
 {
     StringInfo copyData = makeStringInfo();
     const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY);
-    const int fileMode = (S_IRUSR | S_IWUSR);
-    File fileDesc = FileOpenForTransmit(filename, fileFlags, fileMode);
+    File fileDesc = FileOpenForTransmit(filename, fileFlags);
     FileCompat fileCompat = FileCompatFromFileStart(fileDesc);

     SendCopyInStart();

@@ -92,7 +92,7 @@ SendRegularFile(const char *filename)
     const int fileMode = 0;

     /* we currently do not check if the caller has permissions for this file */
-    File fileDesc = FileOpenForTransmit(filename, fileFlags, fileMode);
+    File fileDesc = FileOpenForTransmitPerm(filename, fileFlags, fileMode);
     FileCompat fileCompat = FileCompatFromFileStart(fileDesc);

     /*
@@ -136,12 +136,23 @@ FreeStringInfo(StringInfo stringInfo)


 /*
- * FileOpenForTransmit opens file with the given filename and flags. On success,
- * the function returns the internal file handle for the opened file. On failure
- * the function errors out.
+ * Open a file with FileOpenForTransmitPerm() and pass default file mode for
+ * the fileMode parameter.
  */
 File
-FileOpenForTransmit(const char *filename, int fileFlags, int fileMode)
+FileOpenForTransmit(const char *filename, int fileFlags)
+{
+    return FileOpenForTransmitPerm(filename, fileFlags, pg_file_create_mode);
+}
+
+
+/*
+ * FileOpenForTransmitPerm opens file with the given filename and flags. On
+ * success, the function returns the internal file handle for the opened file.
+ * On failure the function errors out.
+ */
+File
+FileOpenForTransmitPerm(const char *filename, int fileFlags, int fileMode)
 {
     struct stat fileStat;
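The net effect, as a usage sketch (paths illustrative): callers that accept PostgreSQL's default `pg_file_create_mode` use the two-argument wrapper, while callers that need an explicit mode, like `SendRegularFile()` with mode `0`, call the `Perm` variant:

```c
const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY);

/* common case: creation mode defaults to PostgreSQL's pg_file_create_mode */
File resultFile = FileOpenForTransmit("illustrative/path/result.data", fileFlags);

/* explicit-mode case, as SendRegularFile() does with fileMode 0 */
File regularFile = FileOpenForTransmitPerm("illustrative/path/file.data",
                                           fileFlags, 0);
```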
@@ -999,7 +999,7 @@ MarkObjectsDistributedCreateCommand(List *addresses,
     appendStringInfo(insertDistributedObjectsCommand, ") ");

     appendStringInfo(insertDistributedObjectsCommand,
-                     "SELECT citus_internal_add_object_metadata("
+                     "SELECT citus_internal.add_object_metadata("
                      "typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) "
                      "FROM distributed_object_data;");

@@ -1134,7 +1134,7 @@ DistributionCreateCommand(CitusTableCacheEntry *cacheEntry)
     }

     appendStringInfo(insertDistributionCommand,
-                     "SELECT citus_internal_add_partition_metadata "
+                     "SELECT citus_internal.add_partition_metadata "
                      "(%s::regclass, '%c', %s, %d, '%c')",
                      quote_literal_cstr(qualifiedRelationName),
                      distributionMethod,

@@ -1176,7 +1176,7 @@ DistributionDeleteMetadataCommand(Oid relationId)
     char *qualifiedRelationName = generate_qualified_relation_name(relationId);

     appendStringInfo(deleteCommand,
-                     "SELECT pg_catalog.citus_internal_delete_partition_metadata(%s)",
+                     "SELECT citus_internal.delete_partition_metadata(%s)",
                      quote_literal_cstr(qualifiedRelationName));

     return deleteCommand->data;

@@ -1259,7 +1259,7 @@ ShardListInsertCommand(List *shardIntervalList)
     appendStringInfo(insertPlacementCommand, ") ");

     appendStringInfo(insertPlacementCommand,
-                     "SELECT citus_internal_add_placement_metadata("
+                     "SELECT citus_internal.add_placement_metadata("
                      "shardid, shardlength, groupid, placementid) "
                      "FROM placement_data;");

@@ -1315,7 +1315,7 @@ ShardListInsertCommand(List *shardIntervalList)
     appendStringInfo(insertShardCommand, ") ");

     appendStringInfo(insertShardCommand,
-                     "SELECT citus_internal_add_shard_metadata(relationname, shardid, "
+                     "SELECT citus_internal.add_shard_metadata(relationname, shardid, "
                      "storagetype, shardminvalue, shardmaxvalue) "
                      "FROM shard_data;");

@@ -1354,7 +1354,7 @@ ShardDeleteCommandList(ShardInterval *shardInterval)

     StringInfo deleteShardCommand = makeStringInfo();
     appendStringInfo(deleteShardCommand,
-                     "SELECT citus_internal_delete_shard_metadata(%ld);", shardId);
+                     "SELECT citus_internal.delete_shard_metadata(%ld);", shardId);

     return list_make1(deleteShardCommand->data);
 }

@@ -1424,7 +1424,7 @@ ColocationIdUpdateCommand(Oid relationId, uint32 colocationId)
     StringInfo command = makeStringInfo();
     char *qualifiedRelationName = generate_qualified_relation_name(relationId);
     appendStringInfo(command,
-                     "SELECT citus_internal_update_relation_colocation(%s::regclass, %d)",
+                     "SELECT citus_internal.update_relation_colocation(%s::regclass, %d)",
                      quote_literal_cstr(qualifiedRelationName), colocationId);

     return command->data;

@@ -4056,7 +4056,7 @@ citus_internal_database_command(PG_FUNCTION_ARGS)
     }
     else
     {
-        ereport(ERROR, (errmsg("citus_internal_database_command() can only be used "
+        ereport(ERROR, (errmsg("citus_internal.database_command() can only be used "
                                "for CREATE DATABASE command by Citus.")));
     }

@@ -4209,7 +4209,7 @@ ColocationGroupDeleteCommand(uint32 colocationId)
     StringInfo deleteColocationCommand = makeStringInfo();

     appendStringInfo(deleteColocationCommand,
-                     "SELECT pg_catalog.citus_internal_delete_colocation_metadata(%d)",
+                     "SELECT citus_internal.delete_colocation_metadata(%d)",
                      colocationId);

     return deleteColocationCommand->data;

@@ -4225,7 +4225,7 @@ TenantSchemaInsertCommand(Oid schemaId, uint32 colocationId)
 {
     StringInfo command = makeStringInfo();
     appendStringInfo(command,
-                     "SELECT pg_catalog.citus_internal_add_tenant_schema(%s, %u)",
+                     "SELECT citus_internal.add_tenant_schema(%s, %u)",
                      RemoteSchemaIdExpressionById(schemaId), colocationId);

     return command->data;

@@ -4241,7 +4241,7 @@ TenantSchemaDeleteCommand(char *schemaName)
 {
     StringInfo command = makeStringInfo();
     appendStringInfo(command,
-                     "SELECT pg_catalog.citus_internal_delete_tenant_schema(%s)",
+                     "SELECT citus_internal.delete_tenant_schema(%s)",
                      RemoteSchemaIdExpressionByName(schemaName));

     return command->data;

@@ -4258,7 +4258,7 @@ UpdateNoneDistTableMetadataCommand(Oid relationId, char replicationModel,
 {
     StringInfo command = makeStringInfo();
     appendStringInfo(command,
-                     "SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(%s, '%c', %u, %s)",
+                     "SELECT citus_internal.update_none_dist_table_metadata(%s, '%c', %u, %s)",
                      RemoteTableIdExpression(relationId), replicationModel, colocationId,
                      autoConverted ? "true" : "false");

@@ -4276,7 +4276,7 @@ AddPlacementMetadataCommand(uint64 shardId, uint64 placementId,
 {
     StringInfo command = makeStringInfo();
     appendStringInfo(command,
-                     "SELECT citus_internal_add_placement_metadata(%ld, %ld, %d, %ld)",
+                     "SELECT citus_internal.add_placement_metadata(%ld, %ld, %d, %ld)",
                      shardId, shardLength, groupId, placementId);
     return command->data;
 }

@@ -4291,7 +4291,7 @@ DeletePlacementMetadataCommand(uint64 placementId)
 {
     StringInfo command = makeStringInfo();
     appendStringInfo(command,
-                     "SELECT pg_catalog.citus_internal_delete_placement_metadata(%ld)",
+                     "SELECT citus_internal.delete_placement_metadata(%ld)",
                      placementId);
     return command->data;
 }

@@ -4957,7 +4957,7 @@ SendTenantSchemaMetadataCommands(MetadataSyncContext *context)

     StringInfo insertTenantSchemaCommand = makeStringInfo();
     appendStringInfo(insertTenantSchemaCommand,
-                     "SELECT pg_catalog.citus_internal_add_tenant_schema(%s, %u)",
+                     "SELECT citus_internal.add_tenant_schema(%s, %u)",
                      RemoteSchemaIdExpressionById(tenantSchemaForm->schemaid),
                      tenantSchemaForm->colocationid);
@@ -507,7 +507,13 @@ citus_disable_node(PG_FUNCTION_ARGS)
 {
     text *nodeNameText = PG_GETARG_TEXT_P(0);
     int32 nodePort = PG_GETARG_INT32(1);
-    bool synchronousDisableNode = PG_GETARG_BOOL(2);
+
+    bool synchronousDisableNode = 1;
+    Assert(PG_NARGS() == 2 || PG_NARGS() == 3);
+    if (PG_NARGS() == 3)
+    {
+        synchronousDisableNode = PG_GETARG_BOOL(2);
+    }

     char *nodeName = text_to_cstring(nodeNameText);
     WorkerNode *workerNode = ModifiableWorkerNode(nodeName, nodePort);

@@ -1692,7 +1698,7 @@ EnsureParentSessionHasExclusiveLockOnPgDistNode(pid_t parentSessionPid)
     if (!parentHasExclusiveLock)
     {
         ereport(ERROR, (errmsg("lock is not held by the caller. Unexpected caller "
-                               "for citus_internal_mark_node_not_synced")));
+                               "for citus_internal.mark_node_not_synced")));
     }
 }
@@ -426,10 +426,9 @@ ExecuteDropShardPlacementCommandRemotely(ShardPlacement *shardPlacement,
                           errdetail("Marking this shard placement for "
                                     "deletion")));

-        InsertCleanupRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
-                                                shardRelationName,
-                                                shardPlacement->groupId,
-                                                CLEANUP_DEFERRED_ON_SUCCESS);
+        InsertCleanupOnSuccessRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
+                                                         shardRelationName,
+                                                         shardPlacement->groupId);

         return;
     }
@@ -452,15 +452,15 @@ CompareCleanupRecordsByObjectType(const void *leftElement, const void *rightElem


 /*
- * InsertCleanupRecordInCurrentTransaction inserts a new pg_dist_cleanup entry
+ * InsertCleanupOnSuccessRecordInCurrentTransaction inserts a new pg_dist_cleanup entry
  * as part of the current transaction. This is primarily useful for deferred drop scenarios,
- * since these records would roll back in case of operation failure.
+ * since these records would roll back in case of operation failure. And for the same reason,
+ * always sets the policy type to CLEANUP_DEFERRED_ON_SUCCESS.
  */
 void
-InsertCleanupRecordInCurrentTransaction(CleanupObject objectType,
-                                        char *objectName,
-                                        int nodeGroupId,
-                                        CleanupPolicy policy)
+InsertCleanupOnSuccessRecordInCurrentTransaction(CleanupObject objectType,
+                                                 char *objectName,
+                                                 int nodeGroupId)
 {
     /* We must have a valid OperationId. Any operation requring cleanup
      * will call RegisterOperationNeedingCleanup.

@@ -482,7 +482,8 @@ InsertCleanupRecordInCurrentTransaction(CleanupObject objectType,
     values[Anum_pg_dist_cleanup_object_type - 1] = Int32GetDatum(objectType);
     values[Anum_pg_dist_cleanup_object_name - 1] = CStringGetTextDatum(objectName);
     values[Anum_pg_dist_cleanup_node_group_id - 1] = Int32GetDatum(nodeGroupId);
-    values[Anum_pg_dist_cleanup_policy_type - 1] = Int32GetDatum(policy);
+    values[Anum_pg_dist_cleanup_policy_type - 1] =
+        Int32GetDatum(CLEANUP_DEFERRED_ON_SUCCESS);

     /* open cleanup relation and insert new tuple */
     Oid relationId = DistCleanupRelationId();

@@ -499,23 +500,27 @@ InsertCleanupRecordInCurrentTransaction(CleanupObject objectType,


 /*
- * InsertCleanupRecordInSubtransaction inserts a new pg_dist_cleanup entry in a
+ * InsertCleanupRecordOutsideTransaction inserts a new pg_dist_cleanup entry in a
  * separate transaction to ensure the record persists after rollback. We should
  * delete these records if the operation completes successfully.
  *
- * For failure scenarios, use a subtransaction (direct insert via localhost).
+ * This is used in scenarios where we need to cleanup resources on operation
+ * completion (CLEANUP_ALWAYS) or on failure (CLEANUP_ON_FAILURE).
  */
 void
-InsertCleanupRecordInSubtransaction(CleanupObject objectType,
-                                    char *objectName,
-                                    int nodeGroupId,
-                                    CleanupPolicy policy)
+InsertCleanupRecordOutsideTransaction(CleanupObject objectType,
+                                      char *objectName,
+                                      int nodeGroupId,
+                                      CleanupPolicy policy)
 {
     /* We must have a valid OperationId. Any operation requring cleanup
      * will call RegisterOperationNeedingCleanup.
      */
     Assert(CurrentOperationId != INVALID_OPERATION_ID);

+    /* assert the circumstance noted in function comment */
+    Assert(policy == CLEANUP_ALWAYS || policy == CLEANUP_ON_FAILURE);
+
     StringInfo sequenceName = makeStringInfo();
     appendStringInfo(sequenceName, "%s.%s",
                      PG_CATALOG,
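Taken together, the renamed helpers separate the two cleanup flows. A usage sketch (shard name and group id are illustrative, not from the patch):

```c
char *shardName = "public.my_table_102008"; /* illustrative */
int groupId = 2;                            /* illustrative */

/* Deferred drop: the record must roll back together with the operation, so
 * it is inserted in the current transaction and always gets the
 * CLEANUP_DEFERRED_ON_SUCCESS policy. */
InsertCleanupOnSuccessRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
                                                 shardName, groupId);

/* Always/on-failure cleanup: the record must survive a rollback, so it is
 * written outside the transaction with an explicit policy. */
InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
                                      shardName, groupId, CLEANUP_ON_FAILURE);
```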
@@ -733,11 +733,11 @@ CreateSplitShardsForShardGroup(List *shardGroupSplitIntervalListList,
                                       workerPlacementNode->workerPort)));
         }

-        InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
-                                            ConstructQualifiedShardName(
-                                                shardInterval),
-                                            workerPlacementNode->groupId,
-                                            CLEANUP_ON_FAILURE);
+        InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
+                                              ConstructQualifiedShardName(
+                                                  shardInterval),
+                                              workerPlacementNode->groupId,
+                                              CLEANUP_ON_FAILURE);

         /* Create new split child shard on the specified placement list */
         CreateObjectOnPlacement(splitShardCreationCommandList,

@@ -1314,7 +1314,7 @@ DropShardListMetadata(List *shardIntervalList)
     {
         ListCell *commandCell = NULL;

-        /* send the commands one by one (calls citus_internal_delete_shard_metadata internally) */
+        /* send the commands one by one (calls citus_internal.delete_shard_metadata internally) */
         List *shardMetadataDeleteCommandList = ShardDeleteCommandList(shardInterval);
         foreach(commandCell, shardMetadataDeleteCommandList)
         {

@@ -1717,11 +1717,11 @@ CreateDummyShardsForShardGroup(HTAB *mapOfPlacementToDummyShardList,
         /* Log shard in pg_dist_cleanup. Given dummy shards are transient resources,
          * we want to cleanup irrespective of operation success or failure.
          */
-        InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
-                                            ConstructQualifiedShardName(
-                                                shardInterval),
-                                            workerPlacementNode->groupId,
-                                            CLEANUP_ALWAYS);
+        InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
+                                              ConstructQualifiedShardName(
+                                                  shardInterval),
+                                              workerPlacementNode->groupId,
+                                              CLEANUP_ALWAYS);

         /* Create dummy source shard on the specified placement list */
         CreateObjectOnPlacement(splitShardCreationCommandList,

@@ -1780,11 +1780,11 @@ CreateDummyShardsForShardGroup(HTAB *mapOfPlacementToDummyShardList,
         /* Log shard in pg_dist_cleanup. Given dummy shards are transient resources,
          * we want to cleanup irrespective of operation success or failure.
          */
-        InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
-                                            ConstructQualifiedShardName(
-                                                shardInterval),
-                                            sourceWorkerNode->groupId,
-                                            CLEANUP_ALWAYS);
+        InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
+                                              ConstructQualifiedShardName(
+                                                  shardInterval),
+                                              sourceWorkerNode->groupId,
+                                              CLEANUP_ALWAYS);

         /* Create dummy split child shard on source worker node */
         CreateObjectOnPlacement(splitShardCreationCommandList, sourceWorkerNode);
@ -294,6 +294,17 @@ citus_move_shard_placement(PG_FUNCTION_ARGS)
    CheckCitusVersion(ERROR);
    EnsureCoordinator();

    List *referenceTableIdList = NIL;

    if (HasNodesWithMissingReferenceTables(&referenceTableIdList))
    {
        ereport(ERROR, (errmsg("there are missing reference tables on some nodes"),
                        errhint("Copy reference tables first with "
                                "replicate_reference_tables() or use "
                                "citus_rebalance_start() that will do it automatically."
                                )));
    }

    int64 shardId = PG_GETARG_INT64(0);
    char *sourceNodeName = text_to_cstring(PG_GETARG_TEXT_P(1));
    int32 sourceNodePort = PG_GETARG_INT32(2);
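Per the hint above, either remedy can be run from the coordinator before retrying the move. The function names come straight from the error hint; the shard id and node names below are invented for illustration:

-- Option 1: copy reference tables to every node first.
SELECT replicate_reference_tables();

-- Option 2: let the rebalancer replicate them automatically.
SELECT citus_rebalance_start();

-- Then retry the move (shard id and node names are hypothetical).
SELECT citus_move_shard_placement(102008, 'worker-1', 5432, 'worker-2', 5432);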
@ -593,10 +604,10 @@ InsertDeferredDropCleanupRecordsForShards(List *shardIntervalList)
         * We also log cleanup record in the current transaction. If the current transaction rolls back,
         * we do not generate a record at all.
         */
        InsertCleanupRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
                                                qualifiedShardName,
                                                placement->groupId,
                                                CLEANUP_DEFERRED_ON_SUCCESS);
        InsertCleanupOnSuccessRecordInCurrentTransaction(
            CLEANUP_OBJECT_SHARD_PLACEMENT,
            qualifiedShardName,
            placement->groupId);
        }
    }
}

@ -623,10 +634,9 @@ InsertCleanupRecordsForShardPlacementsOnNode(List *shardIntervalList,
         * We also log cleanup record in the current transaction. If the current transaction rolls back,
         * we do not generate a record at all.
         */
        InsertCleanupRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
                                                qualifiedShardName,
                                                groupId,
                                                CLEANUP_DEFERRED_ON_SUCCESS);
        InsertCleanupOnSuccessRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
                                                         qualifiedShardName,
                                                         groupId);
    }
}

@ -1382,10 +1392,11 @@ CopyShardTablesViaLogicalReplication(List *shardIntervalList, char *sourceNodeNa
        char *tableOwner = TableOwner(shardInterval->relationId);

        /* drop the shard we created on the target, in case of failure */
        InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
                                            ConstructQualifiedShardName(shardInterval),
                                            GroupForNode(targetNodeName, targetNodePort),
                                            CLEANUP_ON_FAILURE);
        InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
                                              ConstructQualifiedShardName(shardInterval),
                                              GroupForNode(targetNodeName,
                                                           targetNodePort),
                                              CLEANUP_ON_FAILURE);

        SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort,
                                                  tableOwner,

@ -1455,10 +1466,11 @@ CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName,
        char *tableOwner = TableOwner(shardInterval->relationId);

        /* drop the shard we created on the target, in case of failure */
        InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
                                            ConstructQualifiedShardName(shardInterval),
                                            GroupForNode(targetNodeName, targetNodePort),
                                            CLEANUP_ON_FAILURE);
        InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
                                              ConstructQualifiedShardName(shardInterval),
                                              GroupForNode(targetNodeName,
                                                           targetNodePort),
                                              CLEANUP_ON_FAILURE);

        SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort,
                                                  tableOwner, ddlCommandList);
@ -2035,7 +2047,7 @@ UpdateColocatedShardPlacementMetadataOnWorkers(int64 shardId,
        StringInfo updateCommand = makeStringInfo();

        appendStringInfo(updateCommand,
                         "SELECT citus_internal_update_placement_metadata(%ld, %d, %d)",
                         "SELECT citus_internal.update_placement_metadata(%ld, %d, %d)",
                         colocatedShard->shardId,
                         sourceGroupId, targetGroupId);
        SendCommandToWorkersWithMetadata(updateCommand->data);
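Substituting the format arguments gives a feel for the command that reaches each metadata worker; the ids below are made up:

-- Example of the generated command (shard id and group ids are hypothetical):
SELECT citus_internal.update_placement_metadata(102008, 1, 2);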
@ -1097,8 +1097,8 @@ RecursivelyPlanCTEs(Query *query, RecursivePlanningContext *planningContext)
    if (query->hasRecursive)
    {
        return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
                             "recursive CTEs are not supported in distributed "
                             "queries",
                             "recursive CTEs are only supported when they "
                             "contain a filter on the distribution column",
                             NULL, NULL);
    }

@ -1335,10 +1335,10 @@ CreatePublications(MultiConnection *connection,

        WorkerNode *worker = FindWorkerNode(connection->hostname,
                                            connection->port);
        InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_PUBLICATION,
                                            entry->name,
                                            worker->groupId,
                                            CLEANUP_ALWAYS);
        InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_PUBLICATION,
                                              entry->name,
                                              worker->groupId,
                                              CLEANUP_ALWAYS);

        ExecuteCriticalRemoteCommand(connection, DISABLE_DDL_PROPAGATION);
        ExecuteCriticalRemoteCommand(connection, createPublicationCommand->data);

@ -1435,10 +1435,10 @@ CreateReplicationSlots(MultiConnection *sourceConnection,

        WorkerNode *worker = FindWorkerNode(sourceConnection->hostname,
                                            sourceConnection->port);
        InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_REPLICATION_SLOT,
                                            replicationSlot->name,
                                            worker->groupId,
                                            CLEANUP_ALWAYS);
        InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_REPLICATION_SLOT,
                                              replicationSlot->name,
                                              worker->groupId,
                                              CLEANUP_ALWAYS);

        if (!firstReplicationSlot)
        {

@ -1506,10 +1506,10 @@ CreateSubscriptions(MultiConnection *sourceConnection,
                            quote_identifier(GetUserNameFromId(ownerId, false))
                            )));

        InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_USER,
                                            target->subscriptionOwnerName,
                                            worker->groupId,
                                            CLEANUP_ALWAYS);
        InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_USER,
                                              target->subscriptionOwnerName,
                                              worker->groupId,
                                              CLEANUP_ALWAYS);

        StringInfo conninfo = makeStringInfo();
        appendStringInfo(conninfo, "host='%s' port=%d user='%s' dbname='%s' "

@ -1567,10 +1567,10 @@ CreateSubscriptions(MultiConnection *sourceConnection,
        pfree(createSubscriptionCommand->data);
        pfree(createSubscriptionCommand);

        InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SUBSCRIPTION,
                                            target->subscriptionName,
                                            worker->groupId,
                                            CLEANUP_ALWAYS);
        InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SUBSCRIPTION,
                                              target->subscriptionName,
                                              worker->groupId,
                                              CLEANUP_ALWAYS);

        ExecuteCriticalRemoteCommand(target->superuserConnection, psprintf(
                                         "ALTER SUBSCRIPTION %s OWNER TO %s",
@ -895,22 +895,13 @@ DecrementExternalClientBackendCounterAtExit(int code, Datum arg)
static void
CreateRequiredDirectories(void)
{
    const char *subdirs[] = {
        "pg_foreign_file",
        "pg_foreign_file/cached",
        ("base/" PG_JOB_CACHE_DIR)
    };
    const char *subdir = ("base/" PG_JOB_CACHE_DIR);

    for (int dirNo = 0; dirNo < lengthof(subdirs); dirNo++)
    if (MakePGDirectory(subdir) != 0 && errno != EEXIST)
    {
        int ret = mkdir(subdirs[dirNo], S_IRWXU);

        if (ret != 0 && errno != EEXIST)
        {
            ereport(ERROR, (errcode_for_file_access(),
                            errmsg("could not create directory \"%s\": %m",
                                   subdirs[dirNo])));
        }
        ereport(ERROR, (errcode_for_file_access(),
                        errmsg("could not create directory \"%s\": %m",
                               subdir)));
    }
}

@ -28,3 +28,28 @@ REVOKE ALL ON FUNCTION citus_internal.start_management_transaction FROM PUBLIC;

#include "udfs/citus_internal_add_colocation_metadata/12.2-1.sql"
#include "udfs/citus_unmark_object_distributed/12.2-1.sql"
#include "udfs/citus_internal_add_object_metadata/12.2-1.sql"
#include "udfs/citus_internal_add_partition_metadata/12.2-1.sql"
#include "udfs/citus_internal_add_placement_metadata/12.2-1.sql"
#include "udfs/citus_internal_add_shard_metadata/12.2-1.sql"
#include "udfs/citus_internal_add_tenant_schema/12.2-1.sql"
#include "udfs/citus_internal_adjust_local_clock_to_remote/12.2-1.sql"
#include "udfs/citus_internal_delete_colocation_metadata/12.2-1.sql"
#include "udfs/citus_internal_delete_partition_metadata/12.2-1.sql"
#include "udfs/citus_internal_delete_placement_metadata/12.2-1.sql"
#include "udfs/citus_internal_delete_shard_metadata/12.2-1.sql"
#include "udfs/citus_internal_delete_tenant_schema/12.2-1.sql"
#include "udfs/citus_internal_local_blocked_processes/12.2-1.sql"
#include "udfs/citus_internal_global_blocked_processes/12.2-1.sql"
#include "udfs/citus_blocking_pids/12.2-1.sql"
#include "udfs/citus_isolation_test_session_is_blocked/12.2-1.sql"
DROP VIEW IF EXISTS pg_catalog.citus_lock_waits;
#include "udfs/citus_lock_waits/12.2-1.sql"

#include "udfs/citus_internal_mark_node_not_synced/12.2-1.sql"
#include "udfs/citus_internal_unregister_tenant_schema_globally/12.2-1.sql"
#include "udfs/citus_drop_trigger/12.2-1.sql"
#include "udfs/citus_internal_update_none_dist_table_metadata/12.2-1.sql"
#include "udfs/citus_internal_update_placement_metadata/12.2-1.sql"
#include "udfs/citus_internal_update_relation_colocation/12.2-1.sql"
#include "udfs/repl_origin_helper/12.2-1.sql"
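The upgrade script above re-creates each internal UDF under the citus_internal schema while keeping the old pg_catalog name in place, so callers migrate from the flat name to the schema-qualified one. A hedged before/after sketch, with an invented shard id:

-- 12.1 and earlier: flat name resolved via pg_catalog
SELECT citus_internal_delete_shard_metadata(102008);

-- 12.2: schema-qualified name
SELECT citus_internal.delete_shard_metadata(102008);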
@ -1,6 +1,6 @@
-- citus--12.2-1--12.1-1

DROP FUNCTION pg_catalog.citus_internal_database_command(text);
DROP FUNCTION citus_internal.database_command(text);
DROP FUNCTION citus_internal.acquire_citus_advisory_object_class_lock(int, cstring);

#include "../udfs/citus_add_rebalance_strategy/10.1-1.sql"

@ -24,6 +24,33 @@ ALTER TABLE pg_catalog.pg_dist_transaction DROP COLUMN outer_xid;
REVOKE USAGE ON SCHEMA citus_internal FROM PUBLIC;

DROP FUNCTION citus_internal.add_colocation_metadata(int, int, int, regtype, oid);
DROP FUNCTION citus_internal.add_object_metadata(text, text[], text[], integer, integer, boolean);
DROP FUNCTION citus_internal.add_partition_metadata(regclass, "char", text, integer, "char");
DROP FUNCTION citus_internal.add_placement_metadata(bigint, bigint, integer, bigint);
DROP FUNCTION citus_internal.add_shard_metadata(regclass, bigint, "char", text, text);
DROP FUNCTION citus_internal.add_tenant_schema(oid, integer);
DROP FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock);
DROP FUNCTION citus_internal.delete_colocation_metadata(int);
DROP FUNCTION citus_internal.delete_partition_metadata(regclass);
DROP FUNCTION citus_internal.delete_placement_metadata(bigint);
DROP FUNCTION citus_internal.delete_shard_metadata(bigint);
DROP FUNCTION citus_internal.delete_tenant_schema(oid);
DROP FUNCTION citus_internal.local_blocked_processes();
#include "../udfs/citus_blocking_pids/11.0-1.sql"
#include "../udfs/citus_isolation_test_session_is_blocked/11.1-1.sql"
DROP VIEW IF EXISTS pg_catalog.citus_lock_waits;
#include "../udfs/citus_lock_waits/11.0-1.sql"
DROP FUNCTION citus_internal.global_blocked_processes();

DROP FUNCTION citus_internal.mark_node_not_synced(int, int);
DROP FUNCTION citus_internal.unregister_tenant_schema_globally(oid, text);
#include "../udfs/citus_drop_trigger/12.0-1.sql"
DROP FUNCTION citus_internal.update_none_dist_table_metadata(oid, "char", bigint, boolean);
DROP FUNCTION citus_internal.update_placement_metadata(bigint, integer, integer);
DROP FUNCTION citus_internal.update_relation_colocation(oid, int);
DROP FUNCTION citus_internal.start_replication_origin_tracking();
DROP FUNCTION citus_internal.stop_replication_origin_tracking();
DROP FUNCTION citus_internal.is_replication_origin_tracking_active();

#include "../udfs/citus_internal_add_colocation_metadata/11.0-1.sql"
@ -0,0 +1,34 @@
DROP FUNCTION pg_catalog.citus_blocking_pids;
CREATE FUNCTION pg_catalog.citus_blocking_pids(pBlockedPid integer)
RETURNS int4[] AS $$
  DECLARE
    mLocalBlockingPids int4[];
    mRemoteBlockingPids int4[];
    mLocalGlobalPid int8;
  BEGIN
    SELECT pg_catalog.old_pg_blocking_pids(pBlockedPid) INTO mLocalBlockingPids;

    IF (array_length(mLocalBlockingPids, 1) > 0) THEN
      RETURN mLocalBlockingPids;
    END IF;

    -- pg says we're not blocked locally; check whether we're blocked globally.
    SELECT global_pid INTO mLocalGlobalPid
      FROM get_all_active_transactions() WHERE process_id = pBlockedPid;

    SELECT array_agg(global_pid) INTO mRemoteBlockingPids FROM (
      WITH activeTransactions AS (
        SELECT global_pid FROM get_all_active_transactions()
      ), blockingTransactions AS (
        SELECT blocking_global_pid FROM citus_internal.global_blocked_processes()
        WHERE waiting_global_pid = mLocalGlobalPid
      )
      SELECT activeTransactions.global_pid FROM activeTransactions, blockingTransactions
      WHERE activeTransactions.global_pid = blockingTransactions.blocking_global_pid
    ) AS sub;

    RETURN mRemoteBlockingPids;
  END;
$$ LANGUAGE plpgsql;

REVOKE ALL ON FUNCTION citus_blocking_pids(integer) FROM PUBLIC;
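A quick smoke test of the call shape, using the current backend's own pid (which should come back empty since the session asking is not blocked):

-- Ask which global PIDs are blocking a given backend.
SELECT pg_catalog.citus_blocking_pids(pg_backend_pid());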
@ -20,7 +20,7 @@ RETURNS int4[] AS $$
      WITH activeTransactions AS (
        SELECT global_pid FROM get_all_active_transactions()
      ), blockingTransactions AS (
        SELECT blocking_global_pid FROM citus_internal_global_blocked_processes()
        SELECT blocking_global_pid FROM citus_internal.global_blocked_processes()
        WHERE waiting_global_pid = mLocalGlobalPid
      )
      SELECT activeTransactions.global_pid FROM activeTransactions, blockingTransactions
@ -0,0 +1,68 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_drop_trigger()
    RETURNS event_trigger
    LANGUAGE plpgsql
    SET search_path = pg_catalog
    AS $cdbdt$
DECLARE
    constraint_event_count INTEGER;
    v_obj record;
    dropped_table_is_a_partition boolean := false;
BEGIN
    FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects()
                 WHERE object_type IN ('table', 'foreign table')
    LOOP
        -- first drop the table and metadata on the workers
        -- then drop all the shards on the workers
        -- finally remove the pg_dist_partition entry on the coordinator
        PERFORM master_remove_distributed_table_metadata_from_workers(v_obj.objid, v_obj.schema_name, v_obj.object_name);

        -- If both original and normal values are false, the dropped table was a partition
        -- that was dropped as a result of its parent being dropped
        -- NOTE: the other way around is not true:
        -- the table being a partition doesn't imply both original and normal values are false
        SELECT (v_obj.original = false AND v_obj.normal = false) INTO dropped_table_is_a_partition;

        -- The partition's shards will be dropped when dropping the parent's shards, so we can skip:
        -- i.e. we call citus_drop_all_shards with drop_shards_metadata_only parameter set to true
        IF dropped_table_is_a_partition
        THEN
            PERFORM citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := true);
        ELSE
            PERFORM citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false);
        END IF;

        PERFORM master_remove_partition_metadata(v_obj.objid, v_obj.schema_name, v_obj.object_name);
    END LOOP;

    FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects()
    LOOP
        -- Remove entries from pg_catalog.pg_dist_schema for all dropped tenant schemas.
        -- Also delete the corresponding colocation group from pg_catalog.pg_dist_colocation.
        --
        -- Although normally we automatically delete the colocation groups when they become empty,
        -- we don't do so for the colocation groups that are created for tenant schemas. For this
        -- reason, here we need to delete the colocation group when the tenant schema is dropped.
        IF v_obj.object_type = 'schema' AND EXISTS (SELECT 1 FROM pg_catalog.pg_dist_schema WHERE schemaid = v_obj.objid)
        THEN
            PERFORM citus_internal.unregister_tenant_schema_globally(v_obj.objid, v_obj.object_name);
        END IF;

        -- remove entries from citus.pg_dist_object for all dropped root (objsubid = 0) objects
        PERFORM master_unmark_object_distributed(v_obj.classid, v_obj.objid, v_obj.objsubid);
    END LOOP;

    SELECT COUNT(*) INTO constraint_event_count
    FROM pg_event_trigger_dropped_objects()
    WHERE object_type IN ('table constraint');

    IF constraint_event_count > 0
    THEN
        -- Tell utility hook that a table constraint is dropped so we might
        -- need to undistribute some of the citus local tables that are not
        -- connected to any reference tables.
        PERFORM notify_constraint_dropped();
    END IF;
END;
$cdbdt$;
COMMENT ON FUNCTION pg_catalog.citus_drop_trigger()
    IS 'perform checks and actions at the end of DROP actions';
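This function only fires once attached to DROP events via an event trigger. The wiring is not part of this diff; a sketch of what it looks like, with the trigger name as an assumption:

-- Sketch only: trigger name assumed, not taken from this commit.
CREATE EVENT TRIGGER citus_cascade_to_partition
    ON SQL_DROP
    EXECUTE FUNCTION pg_catalog.citus_drop_trigger();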
@ -44,7 +44,7 @@ BEGIN
        -- reason, here we need to delete the colocation group when the tenant schema is dropped.
        IF v_obj.object_type = 'schema' AND EXISTS (SELECT 1 FROM pg_catalog.pg_dist_schema WHERE schemaid = v_obj.objid)
        THEN
            PERFORM pg_catalog.citus_internal_unregister_tenant_schema_globally(v_obj.objid, v_obj.object_name);
            PERFORM citus_internal.unregister_tenant_schema_globally(v_obj.objid, v_obj.object_name);
        END IF;

        -- remove entries from citus.pg_dist_object for all dropped root (objsubid = 0) objects
@ -11,3 +11,17 @@ CREATE OR REPLACE FUNCTION citus_internal.add_colocation_metadata(

COMMENT ON FUNCTION citus_internal.add_colocation_metadata(int,int,int,regtype,oid) IS
    'Inserts a co-location group into pg_dist_colocation';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_colocation_metadata(
    colocation_id int,
    shard_count int,
    replication_factor int,
    distribution_column_type regtype,
    distribution_column_collation oid)
    RETURNS void
    LANGUAGE C
    STRICT
    AS 'MODULE_PATHNAME';

COMMENT ON FUNCTION pg_catalog.citus_internal_add_colocation_metadata(int,int,int,regtype,oid) IS
    'Inserts a co-location group into pg_dist_colocation';

@ -11,3 +11,17 @@ CREATE OR REPLACE FUNCTION citus_internal.add_colocation_metadata(

COMMENT ON FUNCTION citus_internal.add_colocation_metadata(int,int,int,regtype,oid) IS
    'Inserts a co-location group into pg_dist_colocation';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_colocation_metadata(
    colocation_id int,
    shard_count int,
    replication_factor int,
    distribution_column_type regtype,
    distribution_column_collation oid)
    RETURNS void
    LANGUAGE C
    STRICT
    AS 'MODULE_PATHNAME';

COMMENT ON FUNCTION pg_catalog.citus_internal_add_colocation_metadata(int,int,int,regtype,oid) IS
    'Inserts a co-location group into pg_dist_colocation';
src/backend/distributed/sql/udfs/citus_internal_add_object_metadata/12.2-1.sql (generated, new file, 29 lines)
@ -0,0 +1,29 @@
CREATE OR REPLACE FUNCTION citus_internal.add_object_metadata(
    typeText text,
    objNames text[],
    objArgs text[],
    distribution_argument_index int,
    colocationid int,
    force_delegation bool)
    RETURNS void
    LANGUAGE C
    STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_add_object_metadata$$;

COMMENT ON FUNCTION citus_internal.add_object_metadata(text,text[],text[],int,int,bool) IS
    'Inserts distributed object into pg_dist_object';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_object_metadata(
    typeText text,
    objNames text[],
    objArgs text[],
    distribution_argument_index int,
    colocationid int,
    force_delegation bool)
    RETURNS void
    LANGUAGE C
    STRICT
    AS 'MODULE_PATHNAME';

COMMENT ON FUNCTION pg_catalog.citus_internal_add_object_metadata(text,text[],text[],int,int,bool) IS
    'Inserts distributed object into pg_dist_object';

@ -1,3 +1,18 @@
CREATE OR REPLACE FUNCTION citus_internal.add_object_metadata(
    typeText text,
    objNames text[],
    objArgs text[],
    distribution_argument_index int,
    colocationid int,
    force_delegation bool)
    RETURNS void
    LANGUAGE C
    STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_add_object_metadata$$;

COMMENT ON FUNCTION citus_internal.add_object_metadata(text,text[],text[],int,int,bool) IS
    'Inserts distributed object into pg_dist_object';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_object_metadata(
    typeText text,
    objNames text[],
src/backend/distributed/sql/udfs/citus_internal_add_partition_metadata/12.2-1.sql (generated, new file, 22 lines)
@ -0,0 +1,22 @@
CREATE OR REPLACE FUNCTION citus_internal.add_partition_metadata(
    relation_id regclass, distribution_method "char",
    distribution_column text, colocation_id integer,
    replication_model "char")
    RETURNS void
    LANGUAGE C
    AS 'MODULE_PATHNAME', $$citus_internal_add_partition_metadata$$;

COMMENT ON FUNCTION citus_internal.add_partition_metadata(regclass, "char", text, integer, "char") IS
    'Inserts into pg_dist_partition with user checks';


CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_partition_metadata(
    relation_id regclass, distribution_method "char",
    distribution_column text, colocation_id integer,
    replication_model "char")
    RETURNS void
    LANGUAGE C
    AS 'MODULE_PATHNAME';

COMMENT ON FUNCTION pg_catalog.citus_internal_add_partition_metadata(regclass, "char", text, integer, "char") IS
    'Inserts into pg_dist_partition with user checks';

@ -1,3 +1,15 @@
CREATE OR REPLACE FUNCTION citus_internal.add_partition_metadata(
    relation_id regclass, distribution_method "char",
    distribution_column text, colocation_id integer,
    replication_model "char")
    RETURNS void
    LANGUAGE C
    AS 'MODULE_PATHNAME', $$citus_internal_add_partition_metadata$$;

COMMENT ON FUNCTION citus_internal.add_partition_metadata(regclass, "char", text, integer, "char") IS
    'Inserts into pg_dist_partition with user checks';


CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_partition_metadata(
    relation_id regclass, distribution_method "char",
    distribution_column text, colocation_id integer,
src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/12.2-1.sql (generated, new file, 36 lines)
@ -0,0 +1,36 @@
-- create a new function, without shardstate
CREATE OR REPLACE FUNCTION citus_internal.add_placement_metadata(
    shard_id bigint,
    shard_length bigint, group_id integer,
    placement_id bigint)
    RETURNS void
    LANGUAGE C STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata$$;

COMMENT ON FUNCTION citus_internal.add_placement_metadata(bigint, bigint, integer, bigint) IS
    'Inserts into pg_dist_shard_placement with user checks';

-- create a new function, without shardstate
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_placement_metadata(
    shard_id bigint,
    shard_length bigint, group_id integer,
    placement_id bigint)
    RETURNS void
    LANGUAGE C STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata$$;

COMMENT ON FUNCTION pg_catalog.citus_internal_add_placement_metadata(bigint, bigint, integer, bigint) IS
    'Inserts into pg_dist_shard_placement with user checks';

-- replace the old one so it would call the old C function with shard_state
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_placement_metadata(
    shard_id bigint, shard_state integer,
    shard_length bigint, group_id integer,
    placement_id bigint)
    RETURNS void
    LANGUAGE C STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata_legacy$$;

COMMENT ON FUNCTION pg_catalog.citus_internal_add_placement_metadata(bigint, integer, bigint, integer, bigint) IS
    'Inserts into pg_dist_shard_placement with user checks';

@ -1,3 +1,15 @@
-- create a new function, without shardstate
CREATE OR REPLACE FUNCTION citus_internal.add_placement_metadata(
    shard_id bigint,
    shard_length bigint, group_id integer,
    placement_id bigint)
    RETURNS void
    LANGUAGE C STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata$$;

COMMENT ON FUNCTION citus_internal.add_placement_metadata(bigint, bigint, integer, bigint) IS
    'Inserts into pg_dist_shard_placement with user checks';

-- create a new function, without shardstate
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_placement_metadata(
    shard_id bigint,
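The new signature drops the shard_state argument, while the legacy five-argument overload stays callable and is routed to the *_legacy C symbol. Side by side, with invented ids:

-- New four-argument form: shard_id, shard_length, group_id, placement_id.
SELECT citus_internal.add_placement_metadata(102008, 0, 1, 100001);

-- Legacy five-argument form; the extra second argument is the old shard_state.
SELECT pg_catalog.citus_internal_add_placement_metadata(102008, 1, 0, 1, 100001);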
src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/12.2-1.sql (generated, new file, 21 lines)
@ -0,0 +1,21 @@
CREATE OR REPLACE FUNCTION citus_internal.add_shard_metadata(
    relation_id regclass, shard_id bigint,
    storage_type "char", shard_min_value text,
    shard_max_value text)
    RETURNS void
    LANGUAGE C
    AS 'MODULE_PATHNAME', $$citus_internal_add_shard_metadata$$;
COMMENT ON FUNCTION citus_internal.add_shard_metadata(regclass, bigint, "char", text, text) IS
    'Inserts into pg_dist_shard with user checks';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_shard_metadata(
    relation_id regclass, shard_id bigint,
    storage_type "char", shard_min_value text,
    shard_max_value text)
    RETURNS void
    LANGUAGE C
    AS 'MODULE_PATHNAME';
COMMENT ON FUNCTION pg_catalog.citus_internal_add_shard_metadata(regclass, bigint, "char", text, text) IS
    'Inserts into pg_dist_shard with user checks';

@ -1,3 +1,14 @@
CREATE OR REPLACE FUNCTION citus_internal.add_shard_metadata(
    relation_id regclass, shard_id bigint,
    storage_type "char", shard_min_value text,
    shard_max_value text)
    RETURNS void
    LANGUAGE C
    AS 'MODULE_PATHNAME', $$citus_internal_add_shard_metadata$$;
COMMENT ON FUNCTION citus_internal.add_shard_metadata(regclass, bigint, "char", text, text) IS
    'Inserts into pg_dist_shard with user checks';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_shard_metadata(
    relation_id regclass, shard_id bigint,
    storage_type "char", shard_min_value text,
src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/12.2-1.sql (generated, new file, 17 lines)
@ -0,0 +1,17 @@
CREATE OR REPLACE FUNCTION citus_internal.add_tenant_schema(schema_id Oid, colocation_id int)
    RETURNS void
    LANGUAGE C
    VOLATILE
    AS 'MODULE_PATHNAME', $$citus_internal_add_tenant_schema$$;

COMMENT ON FUNCTION citus_internal.add_tenant_schema(Oid, int) IS
    'insert given tenant schema into pg_dist_schema with given colocation id';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_tenant_schema(schema_id Oid, colocation_id int)
    RETURNS void
    LANGUAGE C
    VOLATILE
    AS 'MODULE_PATHNAME';

COMMENT ON FUNCTION pg_catalog.citus_internal_add_tenant_schema(Oid, int) IS
    'insert given tenant schema into pg_dist_schema with given colocation id';

@ -1,3 +1,12 @@
CREATE OR REPLACE FUNCTION citus_internal.add_tenant_schema(schema_id Oid, colocation_id int)
    RETURNS void
    LANGUAGE C
    VOLATILE
    AS 'MODULE_PATHNAME', $$citus_internal_add_tenant_schema$$;

COMMENT ON FUNCTION citus_internal.add_tenant_schema(Oid, int) IS
    'insert given tenant schema into pg_dist_schema with given colocation id';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_tenant_schema(schema_id Oid, colocation_id int)
    RETURNS void
    LANGUAGE C
src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/12.2-1.sql (generated, new file, 17 lines)
@ -0,0 +1,17 @@
CREATE OR REPLACE FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock)
    RETURNS void
    LANGUAGE C STABLE PARALLEL SAFE STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_adjust_local_clock_to_remote$$;
COMMENT ON FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock)
    IS 'Internal UDF used to adjust the local clock to the maximum of nodes in the cluster';

REVOKE ALL ON FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) FROM PUBLIC;

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_adjust_local_clock_to_remote(pg_catalog.cluster_clock)
    RETURNS void
    LANGUAGE C STABLE PARALLEL SAFE STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_adjust_local_clock_to_remote$$;
COMMENT ON FUNCTION pg_catalog.citus_internal_adjust_local_clock_to_remote(pg_catalog.cluster_clock)
    IS 'Internal UDF used to adjust the local clock to the maximum of nodes in the cluster';

REVOKE ALL ON FUNCTION pg_catalog.citus_internal_adjust_local_clock_to_remote(pg_catalog.cluster_clock) FROM PUBLIC;

@ -1,3 +1,12 @@
CREATE OR REPLACE FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock)
    RETURNS void
    LANGUAGE C STABLE PARALLEL SAFE STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_adjust_local_clock_to_remote$$;
COMMENT ON FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock)
    IS 'Internal UDF used to adjust the local clock to the maximum of nodes in the cluster';

REVOKE ALL ON FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) FROM PUBLIC;

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_adjust_local_clock_to_remote(pg_catalog.cluster_clock)
    RETURNS void
    LANGUAGE C STABLE PARALLEL SAFE STRICT
|
@ -1,10 +1,10 @@
|
|||
--
|
||||
-- citus_internal_database_command run given database command without transaction block restriction.
|
||||
-- citus_internal.database_command run given database command without transaction block restriction.
|
||||
|
||||
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_database_command(command text)
|
||||
CREATE OR REPLACE FUNCTION citus_internal.database_command(command text)
|
||||
RETURNS void
|
||||
LANGUAGE C
|
||||
VOLATILE
|
||||
AS 'MODULE_PATHNAME', $$citus_internal_database_command$$;
|
||||
COMMENT ON FUNCTION pg_catalog.citus_internal_database_command(text) IS
|
||||
COMMENT ON FUNCTION citus_internal.database_command(text) IS
|
||||
'run a database command without transaction block restrictions';
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
--
|
||||
-- citus_internal_database_command run given database command without transaction block restriction.
|
||||
-- citus_internal.database_command run given database command without transaction block restriction.
|
||||
|
||||
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_database_command(command text)
|
||||
CREATE OR REPLACE FUNCTION citus_internal.database_command(command text)
|
||||
RETURNS void
|
||||
LANGUAGE C
|
||||
VOLATILE
|
||||
AS 'MODULE_PATHNAME', $$citus_internal_database_command$$;
|
||||
COMMENT ON FUNCTION pg_catalog.citus_internal_database_command(text) IS
|
||||
COMMENT ON FUNCTION citus_internal.database_command(text) IS
|
||||
'run a database command without transaction block restrictions';
|
||||
|
|
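As the comment says, the wrapper exists for commands PostgreSQL refuses inside a transaction block; a hedged example, with a made-up database name:

-- Run CREATE DATABASE through the internal wrapper, sidestepping the
-- usual transaction-block restriction ('analytics' is invented).
SELECT citus_internal.database_command('CREATE DATABASE analytics');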
src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/12.2-1.sql (generated, new file, 19 lines)
@ -0,0 +1,19 @@
CREATE OR REPLACE FUNCTION citus_internal.delete_colocation_metadata(
    colocation_id int)
    RETURNS void
    LANGUAGE C
    STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_delete_colocation_metadata$$;

COMMENT ON FUNCTION citus_internal.delete_colocation_metadata(int) IS
    'deletes a co-location group from pg_dist_colocation';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_colocation_metadata(
    colocation_id int)
    RETURNS void
    LANGUAGE C
    STRICT
    AS 'MODULE_PATHNAME';

COMMENT ON FUNCTION pg_catalog.citus_internal_delete_colocation_metadata(int) IS
    'deletes a co-location group from pg_dist_colocation';

@ -1,3 +1,13 @@
CREATE OR REPLACE FUNCTION citus_internal.delete_colocation_metadata(
    colocation_id int)
    RETURNS void
    LANGUAGE C
    STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_delete_colocation_metadata$$;

COMMENT ON FUNCTION citus_internal.delete_colocation_metadata(int) IS
    'deletes a co-location group from pg_dist_colocation';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_colocation_metadata(
    colocation_id int)
    RETURNS void
src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/12.2-1.sql (generated, new file, 14 lines)
@ -0,0 +1,14 @@
CREATE OR REPLACE FUNCTION citus_internal.delete_partition_metadata(table_name regclass)
    RETURNS void
    LANGUAGE C STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_delete_partition_metadata$$;
COMMENT ON FUNCTION citus_internal.delete_partition_metadata(regclass) IS
    'Deletes a row from pg_dist_partition with table ownership checks';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_partition_metadata(table_name regclass)
    RETURNS void
    LANGUAGE C STRICT
    AS 'MODULE_PATHNAME';
COMMENT ON FUNCTION pg_catalog.citus_internal_delete_partition_metadata(regclass) IS
    'Deletes a row from pg_dist_partition with table ownership checks';

@ -1,3 +1,10 @@
CREATE OR REPLACE FUNCTION citus_internal.delete_partition_metadata(table_name regclass)
    RETURNS void
    LANGUAGE C STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_delete_partition_metadata$$;
COMMENT ON FUNCTION citus_internal.delete_partition_metadata(regclass) IS
    'Deletes a row from pg_dist_partition with table ownership checks';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_partition_metadata(table_name regclass)
    RETURNS void
    LANGUAGE C STRICT
src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/12.2-1.sql (generated, new file, 19 lines)
@ -0,0 +1,19 @@
CREATE OR REPLACE FUNCTION citus_internal.delete_placement_metadata(
    placement_id bigint)
    RETURNS void
    LANGUAGE C
    VOLATILE
    AS 'MODULE_PATHNAME',
    $$citus_internal_delete_placement_metadata$$;
COMMENT ON FUNCTION citus_internal.delete_placement_metadata(bigint)
    IS 'Delete placement with given id from pg_dist_placement metadata table.';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_placement_metadata(
    placement_id bigint)
    RETURNS void
    LANGUAGE C
    VOLATILE
    AS 'MODULE_PATHNAME',
    $$citus_internal_delete_placement_metadata$$;
COMMENT ON FUNCTION pg_catalog.citus_internal_delete_placement_metadata(bigint)
    IS 'Delete placement with given id from pg_dist_placement metadata table.';

@ -1,3 +1,13 @@
CREATE OR REPLACE FUNCTION citus_internal.delete_placement_metadata(
    placement_id bigint)
    RETURNS void
    LANGUAGE C
    VOLATILE
    AS 'MODULE_PATHNAME',
    $$citus_internal_delete_placement_metadata$$;
COMMENT ON FUNCTION citus_internal.delete_placement_metadata(bigint)
    IS 'Delete placement with given id from pg_dist_placement metadata table.';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_placement_metadata(
    placement_id bigint)
    RETURNS void
src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/12.2-1.sql (generated, new file, 14 lines)
@ -0,0 +1,14 @@
CREATE OR REPLACE FUNCTION citus_internal.delete_shard_metadata(shard_id bigint)
    RETURNS void
    LANGUAGE C STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_delete_shard_metadata$$;
COMMENT ON FUNCTION citus_internal.delete_shard_metadata(bigint) IS
    'Deletes rows from pg_dist_shard and pg_dist_shard_placement with user checks';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_shard_metadata(shard_id bigint)
    RETURNS void
    LANGUAGE C STRICT
    AS 'MODULE_PATHNAME';
COMMENT ON FUNCTION pg_catalog.citus_internal_delete_shard_metadata(bigint) IS
    'Deletes rows from pg_dist_shard and pg_dist_shard_placement with user checks';

@ -1,3 +1,10 @@
CREATE OR REPLACE FUNCTION citus_internal.delete_shard_metadata(shard_id bigint)
    RETURNS void
    LANGUAGE C STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_delete_shard_metadata$$;
COMMENT ON FUNCTION citus_internal.delete_shard_metadata(bigint) IS
    'Deletes rows from pg_dist_shard and pg_dist_shard_placement with user checks';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_shard_metadata(shard_id bigint)
    RETURNS void
    LANGUAGE C STRICT
src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/12.2-1.sql (generated, new file, 17 lines)
@ -0,0 +1,17 @@
CREATE OR REPLACE FUNCTION citus_internal.delete_tenant_schema(schema_id Oid)
    RETURNS void
    LANGUAGE C
    VOLATILE
    AS 'MODULE_PATHNAME', $$citus_internal_delete_tenant_schema$$;

COMMENT ON FUNCTION citus_internal.delete_tenant_schema(Oid) IS
    'delete given tenant schema from pg_dist_schema';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_tenant_schema(schema_id Oid)
    RETURNS void
    LANGUAGE C
    VOLATILE
    AS 'MODULE_PATHNAME';

COMMENT ON FUNCTION pg_catalog.citus_internal_delete_tenant_schema(Oid) IS
    'delete given tenant schema from pg_dist_schema';

@ -1,3 +1,12 @@
CREATE OR REPLACE FUNCTION citus_internal.delete_tenant_schema(schema_id Oid)
    RETURNS void
    LANGUAGE C
    VOLATILE
    AS 'MODULE_PATHNAME', $$citus_internal_delete_tenant_schema$$;

COMMENT ON FUNCTION citus_internal.delete_tenant_schema(Oid) IS
    'delete given tenant schema from pg_dist_schema';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_tenant_schema(schema_id Oid)
    RETURNS void
    LANGUAGE C
src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/12.2-1.sql (generated, new file, 35 lines)
@ -0,0 +1,35 @@
CREATE OR REPLACE FUNCTION citus_internal.global_blocked_processes(
    OUT waiting_global_pid int8,
    OUT waiting_pid int4,
    OUT waiting_node_id int4,
    OUT waiting_transaction_num int8,
    OUT waiting_transaction_stamp timestamptz,
    OUT blocking_global_pid int8,
    OUT blocking_pid int4,
    OUT blocking_node_id int4,
    OUT blocking_transaction_num int8,
    OUT blocking_transaction_stamp timestamptz,
    OUT blocking_transaction_waiting bool)
    RETURNS SETOF RECORD
    LANGUAGE C STRICT
    AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$;
COMMENT ON FUNCTION citus_internal.global_blocked_processes()
    IS 'returns a global list of blocked backends originating from this node';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_global_blocked_processes(
    OUT waiting_global_pid int8,
    OUT waiting_pid int4,
    OUT waiting_node_id int4,
    OUT waiting_transaction_num int8,
    OUT waiting_transaction_stamp timestamptz,
    OUT blocking_global_pid int8,
    OUT blocking_pid int4,
    OUT blocking_node_id int4,
    OUT blocking_transaction_num int8,
    OUT blocking_transaction_stamp timestamptz,
    OUT blocking_transaction_waiting bool)
    RETURNS SETOF RECORD
    LANGUAGE C STRICT
    AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$;
COMMENT ON FUNCTION pg_catalog.citus_internal_global_blocked_processes()
    IS 'returns a global list of blocked backends originating from this node';

@ -1,3 +1,21 @@
CREATE OR REPLACE FUNCTION citus_internal.global_blocked_processes(
    OUT waiting_global_pid int8,
    OUT waiting_pid int4,
    OUT waiting_node_id int4,
    OUT waiting_transaction_num int8,
    OUT waiting_transaction_stamp timestamptz,
    OUT blocking_global_pid int8,
    OUT blocking_pid int4,
    OUT blocking_node_id int4,
    OUT blocking_transaction_num int8,
    OUT blocking_transaction_stamp timestamptz,
    OUT blocking_transaction_waiting bool)
    RETURNS SETOF RECORD
    LANGUAGE C STRICT
    AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$;
COMMENT ON FUNCTION citus_internal.global_blocked_processes()
    IS 'returns a global list of blocked backends originating from this node';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_global_blocked_processes(
    OUT waiting_global_pid int8,
    OUT waiting_pid int4,
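Since the OUT parameters double as the result row shape, a straightforward wait-graph query needs no column aliases:

-- Show who waits on whom across the cluster; column names come from the
-- OUT parameters declared above.
SELECT waiting_global_pid, blocking_global_pid, blocking_transaction_waiting
FROM citus_internal.global_blocked_processes();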
src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/12.2-1.sql (generated, new file, 35 lines)
@ -0,0 +1,35 @@
CREATE OR REPLACE FUNCTION citus_internal.local_blocked_processes(
    OUT waiting_global_pid int8,
    OUT waiting_pid int4,
    OUT waiting_node_id int4,
    OUT waiting_transaction_num int8,
    OUT waiting_transaction_stamp timestamptz,
    OUT blocking_global_pid int8,
    OUT blocking_pid int4,
    OUT blocking_node_id int4,
    OUT blocking_transaction_num int8,
    OUT blocking_transaction_stamp timestamptz,
    OUT blocking_transaction_waiting bool)
    RETURNS SETOF RECORD
    LANGUAGE C STRICT
    AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$;
COMMENT ON FUNCTION citus_internal.local_blocked_processes()
    IS 'returns all local lock wait chains, that start from any citus backend';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_local_blocked_processes(
    OUT waiting_global_pid int8,
    OUT waiting_pid int4,
    OUT waiting_node_id int4,
    OUT waiting_transaction_num int8,
    OUT waiting_transaction_stamp timestamptz,
    OUT blocking_global_pid int8,
    OUT blocking_pid int4,
    OUT blocking_node_id int4,
    OUT blocking_transaction_num int8,
    OUT blocking_transaction_stamp timestamptz,
    OUT blocking_transaction_waiting bool)
    RETURNS SETOF RECORD
    LANGUAGE C STRICT
    AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$;
COMMENT ON FUNCTION pg_catalog.citus_internal_local_blocked_processes()
    IS 'returns all local lock wait chains, that start from any citus backend';

@ -1,3 +1,21 @@
CREATE OR REPLACE FUNCTION citus_internal.local_blocked_processes(
    OUT waiting_global_pid int8,
    OUT waiting_pid int4,
    OUT waiting_node_id int4,
    OUT waiting_transaction_num int8,
    OUT waiting_transaction_stamp timestamptz,
    OUT blocking_global_pid int8,
    OUT blocking_pid int4,
    OUT blocking_node_id int4,
    OUT blocking_transaction_num int8,
    OUT blocking_transaction_stamp timestamptz,
    OUT blocking_transaction_waiting bool)
    RETURNS SETOF RECORD
    LANGUAGE C STRICT
    AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$;
COMMENT ON FUNCTION citus_internal.local_blocked_processes()
    IS 'returns all local lock wait chains, that start from any citus backend';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_local_blocked_processes(
    OUT waiting_global_pid int8,
    OUT waiting_pid int4,
src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/12.2-1.sql (generated, new file, 13 lines)
@ -0,0 +1,13 @@
CREATE OR REPLACE FUNCTION citus_internal.mark_node_not_synced(parent_pid int, nodeid int)
    RETURNS VOID
    LANGUAGE C STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_mark_node_not_synced$$;
COMMENT ON FUNCTION citus_internal.mark_node_not_synced(int, int)
    IS 'marks given node not synced by unsetting metadatasynced column at the start of the nontransactional sync.';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_mark_node_not_synced(parent_pid int, nodeid int)
    RETURNS VOID
    LANGUAGE C STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_mark_node_not_synced$$;
COMMENT ON FUNCTION citus_internal_mark_node_not_synced(int, int)
    IS 'marks given node not synced by unsetting metadatasynced column at the start of the nontransactional sync.';

@ -1,3 +1,10 @@
CREATE OR REPLACE FUNCTION citus_internal.mark_node_not_synced(parent_pid int, nodeid int)
    RETURNS VOID
    LANGUAGE C STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_mark_node_not_synced$$;
COMMENT ON FUNCTION citus_internal.mark_node_not_synced(int, int)
    IS 'marks given node not synced by unsetting metadatasynced column at the start of the nontransactional sync.';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_mark_node_not_synced(parent_pid int, nodeid int)
    RETURNS VOID
    LANGUAGE C STRICT
src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/12.2-1.sql (generated, new file, 15 lines)
@ -0,0 +1,15 @@
CREATE OR REPLACE FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text)
    RETURNS void
    LANGUAGE C
    VOLATILE
    AS 'MODULE_PATHNAME', $$citus_internal_unregister_tenant_schema_globally$$;
COMMENT ON FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text) IS
    'Delete a tenant schema and the corresponding colocation group from metadata tables.';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(schema_id Oid, schema_name text)
    RETURNS void
    LANGUAGE C
    VOLATILE
    AS 'MODULE_PATHNAME';
COMMENT ON FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(schema_id Oid, schema_name text) IS
    'Delete a tenant schema and the corresponding colocation group from metadata tables.';

@ -1,3 +1,11 @@
CREATE OR REPLACE FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text)
    RETURNS void
    LANGUAGE C
    VOLATILE
    AS 'MODULE_PATHNAME', $$citus_internal_unregister_tenant_schema_globally$$;
COMMENT ON FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text) IS
    'Delete a tenant schema and the corresponding colocation group from metadata tables.';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(schema_id Oid, schema_name text)
    RETURNS void
    LANGUAGE C
src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/12.2-1.sql (generated, new file, 23 lines)
@ -0,0 +1,23 @@
CREATE OR REPLACE FUNCTION citus_internal.update_none_dist_table_metadata(
    relation_id oid,
    replication_model "char",
    colocation_id bigint,
    auto_converted boolean)
    RETURNS void
    LANGUAGE C
    VOLATILE
    AS 'MODULE_PATHNAME', $$citus_internal_update_none_dist_table_metadata$$;
COMMENT ON FUNCTION citus_internal.update_none_dist_table_metadata(oid, "char", bigint, boolean)
    IS 'Update pg_dist_partition metadata table for given none-distributed table, to convert it to another type of none-distributed table.';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata(
    relation_id oid,
    replication_model "char",
    colocation_id bigint,
    auto_converted boolean)
    RETURNS void
    LANGUAGE C
    VOLATILE
    AS 'MODULE_PATHNAME';
COMMENT ON FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata(oid, "char", bigint, boolean)
    IS 'Update pg_dist_partition metadata table for given none-distributed table, to convert it to another type of none-distributed table.';

@ -1,3 +1,15 @@
CREATE OR REPLACE FUNCTION citus_internal.update_none_dist_table_metadata(
    relation_id oid,
    replication_model "char",
    colocation_id bigint,
    auto_converted boolean)
    RETURNS void
    LANGUAGE C
    VOLATILE
    AS 'MODULE_PATHNAME', $$citus_internal_update_none_dist_table_metadata$$;
COMMENT ON FUNCTION citus_internal.update_none_dist_table_metadata(oid, "char", bigint, boolean)
    IS 'Update pg_dist_partition metadata table for given none-distributed table, to convert it to another type of none-distributed table.';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata(
    relation_id oid,
    replication_model "char",
src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/12.2-1.sql (generated, new file, 19 lines)
@ -0,0 +1,19 @@
CREATE OR REPLACE FUNCTION citus_internal.update_placement_metadata(
    shard_id bigint, source_group_id integer,
    target_group_id integer)
    RETURNS void
    LANGUAGE C STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_update_placement_metadata$$;

COMMENT ON FUNCTION citus_internal.update_placement_metadata(bigint, integer, integer) IS
    'Updates into pg_dist_placement with user checks';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_placement_metadata(
    shard_id bigint, source_group_id integer,
    target_group_id integer)
    RETURNS void
    LANGUAGE C STRICT
    AS 'MODULE_PATHNAME';

COMMENT ON FUNCTION pg_catalog.citus_internal_update_placement_metadata(bigint, integer, integer) IS
    'Updates into pg_dist_placement with user checks';

@ -1,3 +1,13 @@
CREATE OR REPLACE FUNCTION citus_internal.update_placement_metadata(
    shard_id bigint, source_group_id integer,
    target_group_id integer)
    RETURNS void
    LANGUAGE C STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_update_placement_metadata$$;

COMMENT ON FUNCTION citus_internal.update_placement_metadata(bigint, integer, integer) IS
    'Updates into pg_dist_placement with user checks';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_placement_metadata(
    shard_id bigint, source_group_id integer,
    target_group_id integer)
src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/12.2-1.sql (generated, new file, 14 lines)
@ -0,0 +1,14 @@
CREATE OR REPLACE FUNCTION citus_internal.update_relation_colocation(relation_id Oid, target_colocation_id int)
    RETURNS void
    LANGUAGE C STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_update_relation_colocation$$;
COMMENT ON FUNCTION citus_internal.update_relation_colocation(oid, int) IS
    'Updates colocationId field of pg_dist_partition for the relation_id';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_relation_colocation(relation_id Oid, target_colocation_id int)
    RETURNS void
    LANGUAGE C STRICT
    AS 'MODULE_PATHNAME';
COMMENT ON FUNCTION pg_catalog.citus_internal_update_relation_colocation(oid, int) IS
    'Updates colocationId field of pg_dist_partition for the relation_id';

@ -1,3 +1,10 @@
CREATE OR REPLACE FUNCTION citus_internal.update_relation_colocation(relation_id Oid, target_colocation_id int)
    RETURNS void
    LANGUAGE C STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_update_relation_colocation$$;
COMMENT ON FUNCTION citus_internal.update_relation_colocation(oid, int) IS
    'Updates colocationId field of pg_dist_partition for the relation_id';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_relation_colocation(relation_id Oid, target_colocation_id int)
    RETURNS void
    LANGUAGE C STRICT
src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/12.2-1.sql (generated, new file, 45 lines)
@ -0,0 +1,45 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_isolation_test_session_is_blocked(pBlockedPid integer, pInterestingPids integer[])
RETURNS boolean AS $$
  DECLARE
    mBlockedGlobalPid int8;
    workerProcessId integer := current_setting('citus.isolation_test_session_remote_process_id');
    coordinatorProcessId integer := current_setting('citus.isolation_test_session_process_id');
  BEGIN
    IF pg_catalog.old_pg_isolation_test_session_is_blocked(pBlockedPid, pInterestingPids) THEN
      RETURN true;
    END IF;

    -- pg says we're not blocked locally; check whether we're blocked globally.
    -- Note that a worker process may be blocked or waiting for a lock, so we need to
    -- get the transaction number for both cases. The following IF provides the
    -- transaction number when the worker process is waiting for another session.
    IF EXISTS (SELECT 1 FROM get_global_active_transactions()
               WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId) THEN
      SELECT global_pid INTO mBlockedGlobalPid FROM get_global_active_transactions()
      WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId;
    ELSE
      -- Check whether transactions initiated from the coordinator get locked
      SELECT global_pid INTO mBlockedGlobalPid
        FROM get_all_active_transactions() WHERE process_id = pBlockedPid;
    END IF;

    -- We convert the blocking_global_pid to a regular pid and only look at
    -- blocks caused by the interesting pids, or the workerProcessPid. If we
    -- don't do that we might find unrelated blocks caused by some random
    -- other processes that are not involved in this isolation test. Because we
    -- run our isolation tests on a single physical machine, the PID part of
    -- the GPID is known to be unique within the whole cluster.
    RETURN EXISTS (
      SELECT 1 FROM citus_internal.global_blocked_processes()
        WHERE waiting_global_pid = mBlockedGlobalPid
        AND (
          citus_pid_for_gpid(blocking_global_pid) in (
              select * from unnest(pInterestingPids)
          )
          OR citus_pid_for_gpid(blocking_global_pid) = workerProcessId
        )
    );
  END;
$$ LANGUAGE plpgsql;

REVOKE ALL ON FUNCTION citus_isolation_test_session_is_blocked(integer,integer[]) FROM PUBLIC;
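The wait-edge source the function consults can also be queried directly. A sketch, assuming a running Citus cluster and a role that is allowed to call the internal function:

SELECT waiting_global_pid,
       blocking_global_pid,
       citus_pid_for_gpid(blocking_global_pid) AS blocking_pid
FROM citus_internal.global_blocked_processes();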
@ -30,7 +30,7 @@ RETURNS boolean AS $$
    -- run our isolation tests on a single physical machine, the PID part of
    -- the GPID is known to be unique within the whole cluster.
    RETURN EXISTS (
      SELECT 1 FROM citus_internal_global_blocked_processes()
      SELECT 1 FROM citus_internal.global_blocked_processes()
        WHERE waiting_global_pid = mBlockedGlobalPid
        AND (
          citus_pid_for_gpid(blocking_global_pid) in (
@ -0,0 +1,47 @@
SET search_path = 'pg_catalog';

CREATE VIEW citus.citus_lock_waits AS
WITH
unique_global_wait_edges_with_calculated_gpids AS (
    SELECT
       -- if global_pid is NULL, it is most likely that a backend is blocked on a DDL
       -- also, for legacy reasons citus_internal.global_blocked_processes() returns groupId; we replace that with nodeIds
       case WHEN waiting_global_pid != 0 THEN waiting_global_pid ELSE citus_calculate_gpid(get_nodeid_for_groupid(waiting_node_id), waiting_pid) END waiting_global_pid,
       case WHEN blocking_global_pid != 0 THEN blocking_global_pid ELSE citus_calculate_gpid(get_nodeid_for_groupid(blocking_node_id), blocking_pid) END blocking_global_pid,

       -- citus_internal.global_blocked_processes returns groupId; we replace it here with the actual
       -- nodeId to be consistent with the other views
       get_nodeid_for_groupid(blocking_node_id) as blocking_node_id,
       get_nodeid_for_groupid(waiting_node_id) as waiting_node_id,

       blocking_transaction_waiting

    FROM citus_internal.global_blocked_processes()
),
unique_global_wait_edges AS
(
    SELECT DISTINCT ON(waiting_global_pid, blocking_global_pid) * FROM unique_global_wait_edges_with_calculated_gpids
),
citus_dist_stat_activity_with_calculated_gpids AS
(
    -- if global_pid is NULL, it is most likely that a backend is blocked on a DDL
    SELECT CASE WHEN global_pid != 0 THEN global_pid ELSE citus_calculate_gpid(nodeid, pid) END global_pid, nodeid, pid, query FROM citus_dist_stat_activity
)
SELECT
    waiting.global_pid as waiting_gpid,
    blocking.global_pid as blocking_gpid,
    waiting.query AS blocked_statement,
    blocking.query AS current_statement_in_blocking_process,
    waiting.nodeid AS waiting_nodeid,
    blocking.nodeid AS blocking_nodeid
FROM
    unique_global_wait_edges
        JOIN
    citus_dist_stat_activity_with_calculated_gpids waiting ON (unique_global_wait_edges.waiting_global_pid = waiting.global_pid)
        JOIN
    citus_dist_stat_activity_with_calculated_gpids blocking ON (unique_global_wait_edges.blocking_global_pid = blocking.global_pid);

ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC;

RESET search_path;
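Since the view is granted to PUBLIC, any session can inspect distributed lock waits; for example:

SELECT waiting_gpid, blocking_gpid,
       blocked_statement, current_statement_in_blocking_process
FROM pg_catalog.citus_lock_waits;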
@ -5,18 +5,18 @@ WITH
unique_global_wait_edges_with_calculated_gpids AS (
    SELECT
       -- if global_pid is NULL, it is most likely that a backend is blocked on a DDL
       -- also, for legacy reasons citus_internal_global_blocked_processes() returns groupId; we replace that with nodeIds
       -- also, for legacy reasons citus_internal.global_blocked_processes() returns groupId; we replace that with nodeIds
       case WHEN waiting_global_pid != 0 THEN waiting_global_pid ELSE citus_calculate_gpid(get_nodeid_for_groupid(waiting_node_id), waiting_pid) END waiting_global_pid,
       case WHEN blocking_global_pid != 0 THEN blocking_global_pid ELSE citus_calculate_gpid(get_nodeid_for_groupid(blocking_node_id), blocking_pid) END blocking_global_pid,

       -- citus_internal_global_blocked_processes returns groupId; we replace it here with the actual
       -- citus_internal.global_blocked_processes returns groupId; we replace it here with the actual
       -- nodeId to be consistent with the other views
       get_nodeid_for_groupid(blocking_node_id) as blocking_node_id,
       get_nodeid_for_groupid(waiting_node_id) as waiting_node_id,

       blocking_transaction_waiting

    FROM citus_internal_global_blocked_processes()
    FROM citus_internal.global_blocked_processes()
),
unique_global_wait_edges AS
(
@ -0,0 +1,41 @@
CREATE OR REPLACE FUNCTION citus_internal.start_replication_origin_tracking()
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_start_replication_origin_tracking$$;
COMMENT ON FUNCTION citus_internal.start_replication_origin_tracking()
    IS 'To start replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC';

CREATE OR REPLACE FUNCTION citus_internal.stop_replication_origin_tracking()
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_stop_replication_origin_tracking$$;
COMMENT ON FUNCTION citus_internal.stop_replication_origin_tracking()
    IS 'To stop replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC';

CREATE OR REPLACE FUNCTION citus_internal.is_replication_origin_tracking_active()
RETURNS boolean
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_is_replication_origin_tracking_active$$;
COMMENT ON FUNCTION citus_internal.is_replication_origin_tracking_active()
    IS 'To check if replication origin tracking is active for skipping publishing of duplicated events during internal data movements for CDC';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_start_replication_origin_tracking()
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_start_replication_origin_tracking$$;
COMMENT ON FUNCTION pg_catalog.citus_internal_start_replication_origin_tracking()
    IS 'To start replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_stop_replication_origin_tracking()
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_stop_replication_origin_tracking$$;
COMMENT ON FUNCTION pg_catalog.citus_internal_stop_replication_origin_tracking()
    IS 'To stop replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_is_replication_origin_tracking_active()
RETURNS boolean
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_is_replication_origin_tracking_active$$;
COMMENT ON FUNCTION pg_catalog.citus_internal_is_replication_origin_tracking_active()
    IS 'To check if replication origin tracking is active for skipping publishing of duplicated events during internal data movements for CDC';
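These three UDFs bracket internal data movements on a remote session, mirroring what the connection-management code below sends. A sketch of the session protocol; the comment in the middle stands in for the actual shard move or split:

SELECT citus_internal.start_replication_origin_tracking();
-- ... internal data movement copies rows here; CDC skips the duplicated events ...
SELECT citus_internal.is_replication_origin_tracking_active();  -- returns true
SELECT citus_internal.stop_replication_origin_tracking();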
@ -1,3 +1,24 @@
CREATE OR REPLACE FUNCTION citus_internal.start_replication_origin_tracking()
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_start_replication_origin_tracking$$;
COMMENT ON FUNCTION citus_internal.start_replication_origin_tracking()
    IS 'To start replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC';

CREATE OR REPLACE FUNCTION citus_internal.stop_replication_origin_tracking()
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_stop_replication_origin_tracking$$;
COMMENT ON FUNCTION citus_internal.stop_replication_origin_tracking()
    IS 'To stop replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC';

CREATE OR REPLACE FUNCTION citus_internal.is_replication_origin_tracking_active()
RETURNS boolean
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_is_replication_origin_tracking_active$$;
COMMENT ON FUNCTION citus_internal.is_replication_origin_tracking_active()
    IS 'To check if replication origin tracking is active for skipping publishing of duplicated events during internal data movements for CDC';

CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_start_replication_origin_tracking()
RETURNS void
LANGUAGE C STRICT
@ -192,7 +192,7 @@ BuildGlobalWaitGraph(bool onlyDistributedTx)
						 "waiting_node_id, waiting_transaction_num, waiting_transaction_stamp, "
						 "blocking_global_pid,blocking_pid, blocking_node_id, "
						 "blocking_transaction_num, blocking_transaction_stamp, blocking_transaction_waiting "
						 "FROM citus_internal_local_blocked_processes()");
						 "FROM citus_internal.local_blocked_processes()");
	}

	int querySent = SendRemoteCommand(connection, queryString->data);
@ -226,7 +226,7 @@ BuildGlobalWaitGraph(bool onlyDistributedTx)
		else if (!onlyDistributedTx && colCount != 11)
		{
			ereport(WARNING, (errmsg("unexpected number of columns from "
									 "citus_internal_local_blocked_processes")));
									 "citus_internal.local_blocked_processes")));
			continue;
		}
@ -107,6 +107,12 @@ bool IsMainDB = true;
 */
char *SuperuserRole = NULL;

/*
 * IsMainDBCommandInXact shows whether the query sent to the main database
 * requires a transaction.
 */
bool IsMainDBCommandInXact = true;


/*
 * start_management_transaction starts a management transaction
@ -190,7 +196,11 @@ RunCitusMainDBQuery(char *query)
										 PostPortNumber,
										 SuperuserRole,
										 MainDb);
		RemoteTransactionBegin(MainDBConnection);

		if (IsMainDBCommandInXact)
		{
			RemoteTransactionBegin(MainDBConnection);
		}
	}

	SendRemoteCommand(MainDBConnection, query);
@ -333,7 +333,7 @@ CoordinatedTransactionCallback(XactEvent event, void *arg)
			 * If this is a non-Citus main database we should try to commit the prepared
			 * transactions created by the Citus main database on the worker nodes.
			 */
			if (!IsMainDB && MainDBConnection != NULL)
			if (!IsMainDB && MainDBConnection != NULL && IsMainDBCommandInXact)
			{
				RunCitusMainDBQuery(COMMIT_MANAGEMENT_COMMAND_2PC);
				CleanCitusMainDBConnection();
@ -533,7 +533,7 @@ CoordinatedTransactionCallback(XactEvent event, void *arg)
			 * main database query. So if some error happens on the distributed main
			 * database query we wouldn't have committed the current query.
			 */
			if (!IsMainDB && MainDBConnection != NULL)
			if (!IsMainDB && MainDBConnection != NULL && IsMainDBCommandInXact)
			{
				RunCitusMainDBQuery("COMMIT");
			}
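Putting the pieces together: a non-main database delegating a command now only wraps the main-database connection in an explicit transaction when IsMainDBCommandInXact is set. A sketch of the resulting traffic on that connection; the delegated statement is a made-up example:

BEGIN;                          -- from RemoteTransactionBegin, only when IsMainDBCommandInXact
CREATE USER application_user;   -- hypothetical delegated management command
COMMIT;                         -- sent from CoordinatedTransactionCallback at commit time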
@ -29,7 +29,7 @@ static bool FileIsLink(const char *filename, struct stat filestat);
void
CitusCreateDirectory(StringInfo directoryName)
{
	int makeOK = mkdir(directoryName->data, S_IRWXU);
	int makeOK = MakePGDirectory(directoryName->data);
	if (makeOK != 0)
	{
		ereport(ERROR, (errcode_for_file_access(),
@ -186,7 +186,7 @@ SetupReplicationOriginRemoteSession(MultiConnection *connection)
	{
		StringInfo replicationOriginSessionSetupQuery = makeStringInfo();
		appendStringInfo(replicationOriginSessionSetupQuery,
						 "select pg_catalog.citus_internal_start_replication_origin_tracking();");
						 "select citus_internal.start_replication_origin_tracking();");
		ExecuteCriticalRemoteCommand(connection,
									 replicationOriginSessionSetupQuery->data);
		connection->isReplicationOriginSessionSetup = true;
@ -205,7 +205,7 @@ ResetReplicationOriginRemoteSession(MultiConnection *connection)
	{
		StringInfo replicationOriginSessionResetQuery = makeStringInfo();
		appendStringInfo(replicationOriginSessionResetQuery,
						 "select pg_catalog.citus_internal_stop_replication_origin_tracking();");
						 "select citus_internal.stop_replication_origin_tracking();");
		ExecuteCriticalRemoteCommand(connection,
									 replicationOriginSessionResetQuery->data);
		connection->isReplicationOriginSessionSetup = false;
@ -229,7 +229,7 @@ IsRemoteReplicationOriginSessionSetup(MultiConnection *connection)

	StringInfo isReplicationOriginSessionSetupQuery = makeStringInfo();
	appendStringInfo(isReplicationOriginSessionSetupQuery,
					 "SELECT pg_catalog.citus_internal_is_replication_origin_tracking_active()");
					 "SELECT citus_internal.is_replication_origin_tracking_active()");
	bool result =
		ExecuteRemoteCommandAndCheckResult(connection,
										   isReplicationOriginSessionSetupQuery->data,
@ -126,7 +126,6 @@ TaskFileDestReceiverStartup(DestReceiver *dest, int operation,
	const char *nullPrintCharacter = "\\N";

	const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY);
	const int fileMode = (S_IRUSR | S_IWUSR);

	/* use the memory context that was in place when the DestReceiver was created */
	MemoryContext oldContext = MemoryContextSwitchTo(taskFileDest->memoryContext);

@ -148,8 +147,7 @@ TaskFileDestReceiverStartup(DestReceiver *dest, int operation,

	taskFileDest->fileCompat = FileCompatFromFileStart(FileOpenForTransmit(
														   taskFileDest->filePath,
														   fileFlags,
														   fileMode));
														   fileFlags));

	if (copyOutState->binary)
	{
@ -189,7 +189,7 @@ extern void SendInterTableRelationshipCommands(MetadataSyncContext *context);
#define WORKER_DROP_ALL_SHELL_TABLES \
	"CALL pg_catalog.worker_drop_all_shell_tables(%s)"
#define CITUS_INTERNAL_MARK_NODE_NOT_SYNCED \
	"SELECT citus_internal_mark_node_not_synced(%d, %d)"
	"SELECT citus_internal.mark_node_not_synced(%d, %d)"

#define REMOVE_ALL_CITUS_TABLES_COMMAND \
	"SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition"
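At runtime the two %d placeholders in the macro above are filled with node identifiers before the command is sent. With made-up values, the resulting SQL is simply:

SELECT citus_internal.mark_node_not_synced(2, 5);  -- 2 and 5 are hypothetical ids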
@ -152,5 +152,6 @@ extern bool IsMainDB;
extern char *SuperuserRole;
extern char *MainDb;
extern struct MultiConnection *MainDBConnection;
extern bool IsMainDBCommandInXact;

#endif /* REMOTE_TRANSACTION_H */
@ -81,16 +81,16 @@ typedef enum CleanupPolicy
extern OperationId RegisterOperationNeedingCleanup(void);

/*
 * InsertCleanupRecordInCurrentTransaction inserts a new pg_dist_cleanup entry
 * InsertCleanupOnSuccessRecordInCurrentTransaction inserts a new pg_dist_cleanup entry
 * as part of the current transaction.
 *
 * This is primarily useful for deferred cleanup (CLEANUP_DEFERRED_ON_SUCCESS)
 * scenarios, since the records would roll back in case of failure.
 * scenarios, since the records would roll back in case of failure. And for the
 * same reason, always sets the policy type to CLEANUP_DEFERRED_ON_SUCCESS.
 */
extern void InsertCleanupRecordInCurrentTransaction(CleanupObject objectType,
													char *objectName,
													int nodeGroupId,
													CleanupPolicy policy);
extern void InsertCleanupOnSuccessRecordInCurrentTransaction(CleanupObject objectType,
															 char *objectName,
															 int nodeGroupId);

/*
 * InsertCleanupRecordInSeparateTransaction inserts a new pg_dist_cleanup entry

@ -99,10 +99,10 @@ extern void InsertCleanupRecordInCurrentTransaction(CleanupObject objectType,
 * This is used in scenarios where we need to cleanup resources on operation
 * completion (CLEANUP_ALWAYS) or on failure (CLEANUP_ON_FAILURE).
 */
extern void InsertCleanupRecordInSubtransaction(CleanupObject objectType,
												char *objectName,
												int nodeGroupId,
												CleanupPolicy policy);
extern void InsertCleanupRecordOutsideTransaction(CleanupObject objectType,
												  char *objectName,
												  int nodeGroupId,
												  CleanupPolicy policy);

/*
 * FinalizeOperationNeedingCleanupOnSuccess is called by an operation to signal
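Both helpers write rows into the pg_dist_cleanup catalog, which can be inspected directly. A sketch; the column list follows the Citus catalog and is best verified with \d pg_dist_cleanup on your version:

SELECT record_id, operation_id, object_type, object_name, node_group_id, policy_type
FROM pg_dist_cleanup;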
@ -21,7 +21,8 @@
/* Function declarations for transmitting files between two nodes */
extern void RedirectCopyDataToRegularFile(const char *filename);
extern void SendRegularFile(const char *filename);
extern File FileOpenForTransmit(const char *filename, int fileFlags, int fileMode);
extern File FileOpenForTransmit(const char *filename, int fileFlags);
extern File FileOpenForTransmitPerm(const char *filename, int fileFlags, int fileMode);

#endif /* TRANSMIT_H */
@ -16,6 +16,7 @@ pytest-timeout = "*"
pytest-xdist = "*"
pytest-repeat = "*"
pyyaml = "*"
werkzeug = "==2.3.7"

[dev-packages]
black = "*"
@ -1,7 +1,7 @@
{
    "_meta": {
        "hash": {
            "sha256": "b92bf682aeeea1a66a16beaf78584a5318fd0ae908ce85c7e2a4807aa2bee532"
            "sha256": "bf20354a2d9c93d46041ac4c6fa427588ebfe29343ea0b02138b9079f2d82f18"
        },
        "pipfile-spec": 6,
        "requires": {
@ -119,11 +119,11 @@
        },
        "certifi": {
            "hashes": [
                "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082",
                "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"
                "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f",
                "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"
            ],
            "markers": "python_version >= '3.6'",
            "version": "==2023.7.22"
            "version": "==2024.2.2"
        },
        "cffi": {
            "hashes": [
|
|||
"sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956",
|
||||
"sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"
|
||||
],
|
||||
"markers": "python_version >= '3.8'",
|
||||
"markers": "platform_python_implementation != 'PyPy'",
|
||||
"version": "==1.16.0"
|
||||
},
|
||||
"click": {
|
||||
|
@ -200,33 +200,42 @@
|
|||
},
|
||||
"cryptography": {
|
||||
"hashes": [
|
||||
"sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67",
|
||||
"sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311",
|
||||
"sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8",
|
||||
"sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13",
|
||||
"sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143",
|
||||
"sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f",
|
||||
"sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829",
|
||||
"sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd",
|
||||
"sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397",
|
||||
"sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac",
|
||||
"sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d",
|
||||
"sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a",
|
||||
"sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839",
|
||||
"sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e",
|
||||
"sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6",
|
||||
"sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9",
|
||||
"sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860",
|
||||
"sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca",
|
||||
"sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91",
|
||||
"sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d",
|
||||
"sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714",
|
||||
"sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb",
|
||||
"sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f"
|
||||
"sha256:04859aa7f12c2b5f7e22d25198ddd537391f1695df7057c8700f71f26f47a129",
|
||||
"sha256:069d2ce9be5526a44093a0991c450fe9906cdf069e0e7cd67d9dee49a62b9ebe",
|
||||
"sha256:0d3ec384058b642f7fb7e7bff9664030011ed1af8f852540c76a1317a9dd0d20",
|
||||
"sha256:0fab2a5c479b360e5e0ea9f654bcebb535e3aa1e493a715b13244f4e07ea8eec",
|
||||
"sha256:0fea01527d4fb22ffe38cd98951c9044400f6eff4788cf52ae116e27d30a1ba3",
|
||||
"sha256:1b797099d221df7cce5ff2a1d272761d1554ddf9a987d3e11f6459b38cd300fd",
|
||||
"sha256:1e935c2900fb53d31f491c0de04f41110351377be19d83d908c1fd502ae8daa5",
|
||||
"sha256:20100c22b298c9eaebe4f0b9032ea97186ac2555f426c3e70670f2517989543b",
|
||||
"sha256:20180da1b508f4aefc101cebc14c57043a02b355d1a652b6e8e537967f1e1b46",
|
||||
"sha256:25b09b73db78facdfd7dd0fa77a3f19e94896197c86e9f6dc16bce7b37a96504",
|
||||
"sha256:2619487f37da18d6826e27854a7f9d4d013c51eafb066c80d09c63cf24505306",
|
||||
"sha256:2eb6368d5327d6455f20327fb6159b97538820355ec00f8cc9464d617caecead",
|
||||
"sha256:35772a6cffd1f59b85cb670f12faba05513446f80352fe811689b4e439b5d89e",
|
||||
"sha256:39d5c93e95bcbc4c06313fc6a500cee414ee39b616b55320c1904760ad686938",
|
||||
"sha256:3d96ea47ce6d0055d5b97e761d37b4e84195485cb5a38401be341fabf23bc32a",
|
||||
"sha256:4dcab7c25e48fc09a73c3e463d09ac902a932a0f8d0c568238b3696d06bf377b",
|
||||
"sha256:5fbf0f3f0fac7c089308bd771d2c6c7b7d53ae909dce1db52d8e921f6c19bb3a",
|
||||
"sha256:6c25e1e9c2ce682d01fc5e2dde6598f7313027343bd14f4049b82ad0402e52cd",
|
||||
"sha256:762f3771ae40e111d78d77cbe9c1035e886ac04a234d3ee0856bf4ecb3749d54",
|
||||
"sha256:90147dad8c22d64b2ff7331f8d4cddfdc3ee93e4879796f837bdbb2a0b141e0c",
|
||||
"sha256:935cca25d35dda9e7bd46a24831dfd255307c55a07ff38fd1a92119cffc34857",
|
||||
"sha256:93fbee08c48e63d5d1b39ab56fd3fdd02e6c2431c3da0f4edaf54954744c718f",
|
||||
"sha256:9541c69c62d7446539f2c1c06d7046aef822940d248fa4b8962ff0302862cc1f",
|
||||
"sha256:c23f03cfd7d9826cdcbad7850de67e18b4654179e01fe9bc623d37c2638eb4ef",
|
||||
"sha256:c3d1f5a1d403a8e640fa0887e9f7087331abb3f33b0f2207d2cc7f213e4a864c",
|
||||
"sha256:d1998e545081da0ab276bcb4b33cce85f775adb86a516e8f55b3dac87f469548",
|
||||
"sha256:d5cf11bc7f0b71fb71af26af396c83dfd3f6eed56d4b6ef95d57867bf1e4ba65",
|
||||
"sha256:db0480ffbfb1193ac4e1e88239f31314fe4c6cdcf9c0b8712b55414afbf80db4",
|
||||
"sha256:de4ae486041878dc46e571a4c70ba337ed5233a1344c14a0790c4c4be4bbb8b4",
|
||||
"sha256:de5086cd475d67113ccb6f9fae6d8fe3ac54a4f9238fd08bfdb07b03d791ff0a",
|
||||
"sha256:df34312149b495d9d03492ce97471234fd9037aa5ba217c2a6ea890e9166f151",
|
||||
"sha256:ead69ba488f806fe1b1b4050febafdbf206b81fa476126f3e16110c818bac396"
|
||||
],
|
||||
"index": "pypi",
|
||||
"markers": "python_version >= '3.7'",
|
||||
"version": "==41.0.4"
|
||||
"version": "==42.0.3"
|
||||
},
|
||||
"docopt": {
|
||||
"hashes": [
|
||||
|
@ -237,11 +246,11 @@
        },
        "exceptiongroup": {
            "hashes": [
                "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9",
                "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3"
                "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14",
                "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"
            ],
            "markers": "python_version < '3.11'",
            "version": "==1.1.3"
            "version": "==1.2.0"
        },
        "execnet": {
            "hashes": [

@ -253,12 +262,12 @@
        },
        "filelock": {
            "hashes": [
                "sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4",
                "sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd"
                "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e",
                "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c"
            ],
            "index": "pypi",
            "markers": "python_version >= '3.8'",
            "version": "==3.12.4"
            "version": "==3.13.1"
        },
        "flask": {
            "hashes": [
@ -318,11 +327,11 @@
        },
        "jinja2": {
            "hashes": [
                "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852",
                "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"
                "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa",
                "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"
            ],
            "markers": "python_version >= '3.7'",
            "version": "==3.1.2"
            "version": "==3.1.3"
        },
        "kaitaistruct": {
            "hashes": [
@ -342,69 +351,69 @@
        },
        "markupsafe": {
            "hashes": [
                ... (60 sha256 entries for 2.1.3 replaced by 60 entries for 2.1.5)
            ],
            "markers": "python_version >= '3.7'",
            "version": "==2.1.3"
            "version": "==2.1.5"
        },
        "mitmproxy": {
            "editable": true,
@ -491,11 +500,11 @@
        },
        "pluggy": {
            "hashes": [
                "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12",
                "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"
                "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981",
                "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"
            ],
            "markers": "python_version >= '3.8'",
            "version": "==1.3.0"
            "version": "==1.4.0"
        },
        "protobuf": {
            "hashes": [
@ -526,12 +535,12 @@
        },
        "psycopg": {
            "hashes": [
                "sha256:7542c45810ea16356e5126c9b4291cbc3802aa326fcbba09ff154fe380de29be",
                "sha256:cd711edb64b07d7f8a233c365806caf7e55bbe7cbbd8d5c680f672bb5353c8d5"
                "sha256:31144d3fb4c17d78094d9e579826f047d4af1da6a10427d91dfcfb6ecdf6f12b",
                "sha256:4d5a0a5a8590906daa58ebd5f3cfc34091377354a1acced269dd10faf55da60e"
            ],
            "index": "pypi",
            "markers": "python_version >= '3.7'",
            "version": "==3.1.11"
            "version": "==3.1.18"
        },
        "publicsuffix2": {
            "hashes": [
@ -542,11 +551,11 @@
        },
        "pyasn1": {
            "hashes": [
                "sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57",
                "sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde"
                "sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58",
                "sha256:6d391a96e59b23130a5cfa74d6fd7f388dbbe26cc8f1edf39fdddf08d9d6676c"
            ],
            "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
            "version": "==0.5.0"
            "version": "==0.5.1"
        },
        "pycparser": {
            "hashes": [
@ -557,11 +566,11 @@
        },
        "pyopenssl": {
            "hashes": [
                "sha256:24f0dc5227396b3e831f4c7f602b950a5e9833d292c8e4a2e06b709292806ae2",
                "sha256:276f931f55a452e7dea69c7173e984eb2a4407ce413c918aa34b55f82f9b8bac"
                "sha256:6aa33039a93fffa4563e655b61d11364d01264be8ccb49906101e02a334530bf",
                "sha256:ba07553fb6fd6a7a2259adb9b84e12302a9a8a75c44046e8bb5d3e5ee887e3c3"
            ],
            "markers": "python_version >= '3.6'",
            "version": "==23.2.0"
            "markers": "python_version >= '3.7'",
            "version": "==24.0.0"
        },
        "pyparsing": {
            "hashes": [
@ -579,48 +588,48 @@
        },
        "pytest": {
            "hashes": [
                "sha256:1d881c6124e08ff0a1bb75ba3ec0bfd8b5354a01c194ddd5a0a870a48d99b002",
                "sha256:a766259cfab564a2ad52cb1aae1b881a75c3eb7e34ca3779697c23ed47c47069"
                "sha256:249b1b0864530ba251b7438274c4d251c58d868edaaec8762893ad4a0d71c36c",
                "sha256:50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6"
            ],
            "index": "pypi",
            "markers": "python_version >= '3.7'",
            "version": "==7.4.2"
            "markers": "python_version >= '3.8'",
            "version": "==8.0.0"
        },
        "pytest-asyncio": {
            "hashes": [
                "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d",
                "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b"
                "sha256:3a048872a9c4ba14c3e90cc1aa20cbc2def7d01c7c8db3777ec281ba9c057675",
                "sha256:4e7093259ba018d58ede7d5315131d21923a60f8a6e9ee266ce1589685c89eac"
            ],
            "index": "pypi",
            "markers": "python_version >= '3.7'",
            "version": "==0.21.1"
            "markers": "python_version >= '3.8'",
            "version": "==0.23.5"
        },
        "pytest-repeat": {
            "hashes": [
                "sha256:4474a7d9e9137f6d8cc8ae297f8c4168d33c56dd740aa78cfffe562557e6b96e",
                "sha256:5cd3289745ab3156d43eb9c8e7f7d00a926f3ae5c9cf425bec649b2fe15bad5b"
                "sha256:26ab2df18226af9d5ce441c858f273121e92ff55f5bb311d25755b8d7abdd8ed",
                "sha256:ffd3836dfcd67bb270bec648b330e20be37d2966448c4148c4092d1e8aba8185"
            ],
            "index": "pypi",
            "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
            "version": "==0.9.1"
            "markers": "python_version >= '3.7'",
            "version": "==0.9.3"
        },
        "pytest-timeout": {
            "hashes": [
                "sha256:c07ca07404c612f8abbe22294b23c368e2e5104b521c1790195561f37e1ac3d9",
                "sha256:f6f50101443ce70ad325ceb4473c4255e9d74e3c7cd0ef827309dfa4c0d975c6"
                "sha256:3b0b95dabf3cb50bac9ef5ca912fa0cfc286526af17afc806824df20c2f72c90",
                "sha256:bde531e096466f49398a59f2dde76fa78429a09a12411466f88a07213e220de2"
            ],
            "index": "pypi",
            "markers": "python_version >= '3.6'",
            "version": "==2.1.0"
            "markers": "python_version >= '3.7'",
            "version": "==2.2.0"
        },
        "pytest-xdist": {
            "hashes": [
                "sha256:d5ee0520eb1b7bcca50a60a518ab7a7707992812c578198f8b44fdfac78e8c93",
                "sha256:ff9daa7793569e6a68544850fd3927cd257cc03a7ef76c95e86915355e82b5f2"
                "sha256:cbb36f3d67e0c478baa57fa4edc8843887e0f6cfc42d677530a36d7472b32d8a",
                "sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24"
            ],
            "index": "pypi",
            "markers": "python_version >= '3.7'",
            "version": "==3.3.1"
            "version": "==3.5.0"
        },
        "pyyaml": {
            "hashes": [
@ -653,6 +662,7 @@
                "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4",
                "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba",
                "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8",
                "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef",
                "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5",
                "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd",
                "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3",
@ -693,36 +703,37 @@
                ... (36 context sha256 entries, with one new entry added in this hunk)
@ -731,7 +742,6 @@
                "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d",
                "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31",
                "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92",
                "sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd",
                "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5",
                "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28",
                "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d",
@ -760,28 +770,28 @@
        },
        "tornado": {
            "hashes": [
                ... (11 sha256 entries for 6.3.3 replaced by 11 entries for 6.4)
            ],
            "markers": "python_version >= '3.8'",
            "version": "==6.3.3"
            "version": "==6.4"
        },
        "typing-extensions": {
            "hashes": [
                "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0",
                "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"
                "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783",
                "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd"
            ],
            "markers": "python_version >= '3.8'",
            "version": "==4.8.0"
            "version": "==4.9.0"
        },
        "urwid": {
            "hashes": [
@ -791,12 +801,12 @@
        },
        "werkzeug": {
            "hashes": [
                "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc",
                "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10"
                "sha256:2b8c0e447b4b9dbcc85dd97b6eeb4dcbaf6c8b6c3be0bd654e25553e0a2157d8",
                "sha256:effc12dba7f3bd72e605ce49807bbe692bd729c3bb122a3b91747a6ae77df528"
            ],
            "index": "pypi",
            "markers": "python_version >= '3.8'",
            "version": "==3.0.1"
            "version": "==2.3.7"
        },
        "wsproto": {
            "hashes": [
@ -864,40 +874,40 @@
    "develop": {
        "attrs": {
            "hashes": [
                "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04",
                "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"
                "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30",
                "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"
            ],
            "markers": "python_version >= '3.7'",
            "version": "==23.1.0"
            "version": "==23.2.0"
        },
        "black": {
            "hashes": [
                ... (22 sha256 entries for 23.9.1 replaced by 22 entries for 24.2.0)
            ],
            "index": "pypi",
            "markers": "python_version >= '3.8'",
            "version": "==23.9.1"
            "version": "==24.2.0"
        },
        "click": {
            "hashes": [
@ -909,30 +919,30 @@
        },
        "flake8": {
            "hashes": [
                "sha256:d5b3857f07c030bdb5bf41c7f53799571d75c4491748a3adcd47de929e34cd23",
                "sha256:ffdfce58ea94c6580c77888a86506937f9a1a227dfcd15f245d694ae20a6b6e5"
                "sha256:33f96621059e65eec474169085dc92bf26e7b2d47366b70be2f67ab80dc25132",
                "sha256:a6dfbb75e03252917f2473ea9653f7cd799c3064e54d4c8140044c5c065f53c3"
            ],
            "index": "pypi",
            "markers": "python_full_version >= '3.8.1'",
            "version": "==6.1.0"
            "version": "==7.0.0"
        },
        "flake8-bugbear": {
            "hashes": [
                "sha256:90cf04b19ca02a682feb5aac67cae8de742af70538590509941ab10ae8351f71",
                "sha256:b182cf96ea8f7a8595b2f87321d7d9b28728f4d9c3318012d896543d19742cb5"
                "sha256:663ef5de80cd32aacd39d362212983bc4636435a6f83700b4ed35acbd0b7d1b8",
                "sha256:f9cb5f2a9e792dd80ff68e89a14c12eed8620af8b41a49d823b7a33064ac9658"
            ],
            "index": "pypi",
            "markers": "python_full_version >= '3.8.1'",
            "version": "==23.9.16"
            "version": "==24.2.6"
        },
        "isort": {
            "hashes": [
                "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504",
                "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6"
                "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109",
                "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"
            ],
            "index": "pypi",
            "markers": "python_full_version >= '3.8.0'",
            "version": "==5.12.0"
            "version": "==5.13.2"
        },
        "mccabe": {
            "hashes": [
@ -960,19 +970,19 @@
        },
        "pathspec": {
            "hashes": [
                "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20",
                "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3"
                "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08",
                "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"
            ],
            "markers": "python_version >= '3.7'",
            "version": "==0.11.2"
            "markers": "python_version >= '3.8'",
            "version": "==0.12.1"
        },
        "platformdirs": {
            "hashes": [
                "sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3",
                "sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e"
                "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068",
                "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"
            ],
            "markers": "python_version >= '3.7'",
            "version": "==3.11.0"
            "markers": "python_version >= '3.8'",
            "version": "==4.2.0"
        },
        "pycodestyle": {
            "hashes": [
@ -984,11 +994,11 @@
|
|||
},
|
||||
"pyflakes": {
|
||||
"hashes": [
|
||||
"sha256:4132f6d49cb4dae6819e5379898f2b8cce3c5f23994194c24b77d5da2e36f774",
|
||||
"sha256:a0aae034c444db0071aa077972ba4768d40c830d9539fd45bf4cd3f8f6992efc"
|
||||
"sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f",
|
||||
"sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a"
|
||||
],
|
||||
"markers": "python_version >= '3.8'",
|
||||
"version": "==3.1.0"
|
||||
"version": "==3.2.0"
|
||||
},
|
||||
"tomli": {
|
||||
"hashes": [
|
||||
|
@ -1000,11 +1010,11 @@
|
|||
},
|
||||
"typing-extensions": {
|
||||
"hashes": [
|
||||
"sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0",
|
||||
"sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"
|
||||
"sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783",
|
||||
"sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd"
|
||||
],
|
||||
"markers": "python_version >= '3.8'",
|
||||
"version": "==4.8.0"
|
||||
"version": "==4.9.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -212,6 +212,18 @@ DEPS = {
["columnar_create", "columnar_load"],
repeatable=False,
),
"multi_metadata_sync": TestDeps(
None,
[
"multi_sequence_default",
"alter_database_propagation",
"alter_role_propagation",
"grant_on_schema_propagation",
"multi_test_catalog_views",
"multi_drop_extension",
],
repeatable=False,
),
}
@ -3,7 +3,7 @@
-- For versions >= 15, pg15_create_drop_database_propagation.sql is used.
-- For versions >= 16, pg16_create_drop_database_propagation.sql is used.
-- Test the UDF that we use to issue database command during metadata sync.
SELECT pg_catalog.citus_internal_database_command(null);
SELECT citus_internal.database_command(null);
ERROR: This is an internal Citus function can only be used in a distributed transaction
CREATE ROLE test_db_commands WITH LOGIN;
ALTER SYSTEM SET citus.enable_manual_metadata_changes_for_user TO 'test_db_commands';

@ -21,22 +21,22 @@ SELECT pg_sleep(0.1);

SET ROLE test_db_commands;
-- fails on null input
SELECT pg_catalog.citus_internal_database_command(null);
SELECT citus_internal.database_command(null);
ERROR: command cannot be NULL
-- fails on non create / drop db command
SELECT pg_catalog.citus_internal_database_command('CREATE TABLE foo_bar(a int)');
ERROR: citus_internal_database_command() can only be used for CREATE DATABASE command by Citus.
SELECT pg_catalog.citus_internal_database_command('SELECT 1');
ERROR: citus_internal_database_command() can only be used for CREATE DATABASE command by Citus.
SELECT pg_catalog.citus_internal_database_command('asfsfdsg');
SELECT citus_internal.database_command('CREATE TABLE foo_bar(a int)');
ERROR: citus_internal.database_command() can only be used for CREATE DATABASE command by Citus.
SELECT citus_internal.database_command('SELECT 1');
ERROR: citus_internal.database_command() can only be used for CREATE DATABASE command by Citus.
SELECT citus_internal.database_command('asfsfdsg');
ERROR: syntax error at or near "asfsfdsg"
SELECT pg_catalog.citus_internal_database_command('');
SELECT citus_internal.database_command('');
ERROR: cannot execute multiple utility events
RESET ROLE;
ALTER ROLE test_db_commands nocreatedb;
SET ROLE test_db_commands;
-- make sure that pg_catalog.citus_internal_database_command doesn't cause privilege escalation
SELECT pg_catalog.citus_internal_database_command('CREATE DATABASE no_permissions');
-- make sure that citus_internal.database_command doesn't cause privilege escalation
SELECT citus_internal.database_command('CREATE DATABASE no_permissions');
ERROR: permission denied to create database
RESET ROLE;
DROP USER test_db_commands;
@ -369,9 +369,9 @@ ROLLBACK;
\set VERBOSITY DEFAULT
-- Test the UDFs that we use to convert Citus local tables to single-shard tables and
-- reference tables.
SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, true);
SELECT citus_internal.update_none_dist_table_metadata(1, 't', 1, true);
ERROR: This is an internal Citus function can only be used in a distributed transaction
SELECT pg_catalog.citus_internal_delete_placement_metadata(1);
SELECT citus_internal.delete_placement_metadata(1);
ERROR: This is an internal Citus function can only be used in a distributed transaction
CREATE ROLE test_user_create_ref_dist WITH LOGIN;
GRANT ALL ON SCHEMA create_ref_dist_from_citus_local TO test_user_create_ref_dist;

@ -393,15 +393,15 @@ SET citus.next_shard_id TO 1850000;
SET citus.next_placement_id TO 8510000;
SET citus.shard_replication_factor TO 1;
SET search_path TO create_ref_dist_from_citus_local;
SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(null, 't', 1, true);
SELECT citus_internal.update_none_dist_table_metadata(null, 't', 1, true);
ERROR: relation_id cannot be NULL
SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, null, 1, true);
SELECT citus_internal.update_none_dist_table_metadata(1, null, 1, true);
ERROR: replication_model cannot be NULL
SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', null, true);
SELECT citus_internal.update_none_dist_table_metadata(1, 't', null, true);
ERROR: colocation_id cannot be NULL
SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, null);
SELECT citus_internal.update_none_dist_table_metadata(1, 't', 1, null);
ERROR: auto_converted cannot be NULL
SELECT pg_catalog.citus_internal_delete_placement_metadata(null);
SELECT citus_internal.delete_placement_metadata(null);
ERROR: placement_id cannot be NULL
CREATE TABLE udf_test (col_1 int);
SELECT citus_add_local_table_to_metadata('udf_test');

@ -411,8 +411,8 @@ SELECT citus_add_local_table_to_metadata('udf_test');
(1 row)

BEGIN;
SELECT pg_catalog.citus_internal_update_none_dist_table_metadata('create_ref_dist_from_citus_local.udf_test'::regclass, 'k', 99999, true);
citus_internal_update_none_dist_table_metadata
SELECT citus_internal.update_none_dist_table_metadata('create_ref_dist_from_citus_local.udf_test'::regclass, 'k', 99999, true);
update_none_dist_table_metadata
---------------------------------------------------------------------

(1 row)

@ -426,8 +426,8 @@ BEGIN;

SELECT placementid AS udf_test_placementid FROM pg_dist_shard_placement
WHERE shardid = get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.udf_test') \gset
SELECT pg_catalog.citus_internal_delete_placement_metadata(:udf_test_placementid);
citus_internal_delete_placement_metadata
SELECT citus_internal.delete_placement_metadata(:udf_test_placementid);
delete_placement_metadata
---------------------------------------------------------------------

(1 row)
@ -196,6 +196,7 @@ SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::t
(1 row)

\c - - - :master_port
create role test_admin_role;
-- test grants with distributed and non-distributed roles
SELECT master_remove_node('localhost', :worker_2_port);
master_remove_node

@ -221,29 +222,72 @@ CREATE ROLE non_dist_role_4;
NOTICE: not propagating CREATE ROLE/USER commands to other nodes
HINT: Connect to other nodes directly to manually create all necessary users and roles.
SET citus.enable_create_role_propagation TO ON;
grant dist_role_3,dist_role_1 to test_admin_role with admin option;
SET ROLE dist_role_1;
GRANT non_dist_role_1 TO non_dist_role_2;
SET citus.enable_create_role_propagation TO OFF;
grant dist_role_1 to non_dist_role_1 with admin option;
SET ROLE non_dist_role_1;
GRANT dist_role_1 TO dist_role_2;
GRANT dist_role_1 TO dist_role_2 granted by non_dist_role_1;
RESET ROLE;
SET citus.enable_create_role_propagation TO ON;
GRANT dist_role_3 TO non_dist_role_3;
GRANT dist_role_3 TO non_dist_role_3 granted by test_admin_role;
GRANT non_dist_role_4 TO dist_role_4;
GRANT dist_role_3 TO dist_role_4 granted by test_admin_role;
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)

SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
SELECT result FROM run_command_on_all_nodes(
$$
SELECT json_agg(q.* ORDER BY member) FROM (
SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option
FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3'
) q;
$$
);
result
---------------------------------------------------------------------
[{"member":"dist_role_4","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, +
{"member":"non_dist_role_3","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, +
{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}]
[{"member":"dist_role_4","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, +
{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}]
[{"member":"dist_role_4","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, +
{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}]
(3 rows)

REVOKE dist_role_3 from dist_role_4 granted by test_admin_role cascade;
SELECT result FROM run_command_on_all_nodes(
$$
SELECT json_agg(q.* ORDER BY member) FROM (
SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option
FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3'
order by member::regrole::text
) q;
$$
);
result
---------------------------------------------------------------------
[{"member":"non_dist_role_3","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, +
{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}]
[{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}]
[{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}]
(3 rows)

SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1','test_admin_role')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
role | member | grantor | admin_option
---------------------------------------------------------------------
dist_role_1 | dist_role_2 | t | f
dist_role_1 | non_dist_role_1 | t | t
dist_role_1 | test_admin_role | t | t
dist_role_3 | non_dist_role_3 | t | f
dist_role_3 | test_admin_role | t | t
non_dist_role_1 | non_dist_role_2 | t | f
non_dist_role_4 | dist_role_4 | t | f
(4 rows)
(7 rows)

SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_%' ORDER BY 1;
objid

@ -255,6 +299,25 @@ SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::
non_dist_role_4
(5 rows)

REVOKE dist_role_3 from non_dist_role_3 granted by test_admin_role cascade;
SELECT result FROM run_command_on_all_nodes(
$$
SELECT json_agg(q.* ORDER BY member) FROM (
SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option
FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3'
order by member::regrole::text
) q;
$$
);
result
---------------------------------------------------------------------
[{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}]
[{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}]
[{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}]
(3 rows)

revoke dist_role_3,dist_role_1 from test_admin_role cascade;
drop role test_admin_role;
\c - - - :worker_1_port
SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
role | member | grantor | admin_option

@ -276,9 +339,8 @@ SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_%' ORDER BY 1;
SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
role | member | grantor | admin_option
---------------------------------------------------------------------
dist_role_1 | dist_role_2 | postgres | f
non_dist_role_4 | dist_role_4 | postgres | f
(2 rows)
(1 row)

SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_%' ORDER BY 1;
rolname
@ -354,8 +354,8 @@ NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.pa
NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.parent_xxxxx CASCADE
NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1')
NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1')
NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(1344400)
NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(1344400)
NOTICE: issuing SELECT citus_internal.delete_colocation_metadata(1344400)
NOTICE: issuing SELECT citus_internal.delete_colocation_metadata(1344400)
ROLLBACK;
NOTICE: issuing ROLLBACK
NOTICE: issuing ROLLBACK

@ -377,8 +377,8 @@ NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.parent_xxxxx CASCAD
NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1')
NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1')
NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.child1_xxxxx CASCADE
NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(1344400)
NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(1344400)
NOTICE: issuing SELECT citus_internal.delete_colocation_metadata(1344400)
NOTICE: issuing SELECT citus_internal.delete_colocation_metadata(1344400)
ROLLBACK;
NOTICE: issuing ROLLBACK
NOTICE: issuing ROLLBACK
@ -132,7 +132,7 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port;

-- Check failures on DDL command propagation
CREATE TABLE t2 (id int PRIMARY KEY);
SELECT citus.mitmproxy('conn.onParse(query="citus_internal_add_placement_metadata").kill()');
SELECT citus.mitmproxy('conn.onParse(query="citus_internal.add_placement_metadata").kill()');
mitmproxy
---------------------------------------------------------------------

@ -140,7 +140,7 @@ SELECT citus.mitmproxy('conn.onParse(query="citus_internal_add_placement_metadat

SELECT create_distributed_table('t2', 'id');
ERROR: connection not open
SELECT citus.mitmproxy('conn.onParse(query="citus_internal_add_shard_metadata").cancel(' || :pid || ')');
SELECT citus.mitmproxy('conn.onParse(query="citus_internal.add_shard_metadata").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------

@ -650,7 +650,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE mx_metadata_sync_multi_t
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open
-- Failure to add partition metadata
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_partition_metadata").cancel(' || :pid || ')');
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_partition_metadata").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------

@ -658,7 +658,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_partition_

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR: canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_partition_metadata").kill()');
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_partition_metadata").kill()');
mitmproxy
---------------------------------------------------------------------

@ -667,7 +667,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_partition_
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open
-- Failure to add shard metadata
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_metadata").cancel(' || :pid || ')');
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_shard_metadata").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------

@ -675,7 +675,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_meta

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR: canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_metadata").kill()');
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_shard_metadata").kill()');
mitmproxy
---------------------------------------------------------------------

@ -684,7 +684,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_meta
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open
-- Failure to add placement metadata
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_metadata").cancel(' || :pid || ')');
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_placement_metadata").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------

@ -692,7 +692,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR: canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_metadata").kill()');
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_placement_metadata").kill()');
mitmproxy
---------------------------------------------------------------------

@ -718,7 +718,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_colocation
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open
-- Failure to add distributed object metadata
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_metadata").cancel(' || :pid || ')');
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_object_metadata").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------

@ -726,7 +726,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_met

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR: canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_metadata").kill()');
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_object_metadata").kill()');
mitmproxy
---------------------------------------------------------------------
@ -0,0 +1,471 @@
-- Public role has connect,temp,temporary privileges on database
-- To test these scenarios, we need to revoke these privileges from public role
-- since public role privileges are inherited by new roles/users
set citus.enable_create_database_propagation to on;
create database test_2pc_db;
show citus.main_db;
citus.main_db
---------------------------------------------------------------------
regression
(1 row)

revoke connect,temp,temporary on database test_2pc_db from public;
CREATE SCHEMA grant_on_database_propagation_non_maindb;
SET search_path TO grant_on_database_propagation_non_maindb;
-- test grant/revoke CREATE privilege propagation on database
create user "myuser'_test";
\c test_2pc_db - - :master_port
grant create on database test_2pc_db to "myuser'_test";
\c regression - - :master_port;
select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']);
check_database_privileges
---------------------------------------------------------------------
(CREATE,t)
(CREATE,t)
(CREATE,t)
(3 rows)

\c test_2pc_db - - :master_port
revoke create on database test_2pc_db from "myuser'_test";
\c regression - - :master_port;
select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']);
check_database_privileges
---------------------------------------------------------------------
(CREATE,f)
(CREATE,f)
(CREATE,f)
(3 rows)

drop user "myuser'_test";
---------------------------------------------------------------------
-- test grant/revoke CONNECT privilege propagation on database
\c regression - - :master_port
create user myuser2;
\c test_2pc_db - - :master_port
grant CONNECT on database test_2pc_db to myuser2;
\c regression - - :master_port;
select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']);
check_database_privileges
---------------------------------------------------------------------
(CONNECT,t)
(CONNECT,t)
(CONNECT,t)
(3 rows)

\c test_2pc_db - - :master_port
revoke connect on database test_2pc_db from myuser2;
\c regression - - :master_port
select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']);
check_database_privileges
---------------------------------------------------------------------
(CONNECT,f)
(CONNECT,f)
(CONNECT,f)
(3 rows)

drop user myuser2;
---------------------------------------------------------------------
-- test grant/revoke TEMP privilege propagation on database
\c regression - - :master_port
create user myuser3;
-- test grant/revoke temp on database
\c test_2pc_db - - :master_port
grant TEMP on database test_2pc_db to myuser3;
\c regression - - :master_port;
select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']);
check_database_privileges
---------------------------------------------------------------------
(TEMP,t)
(TEMP,t)
(TEMP,t)
(3 rows)

\c test_2pc_db - - :worker_1_port
revoke TEMP on database test_2pc_db from myuser3;
\c regression - - :master_port;
select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']);
check_database_privileges
---------------------------------------------------------------------
(TEMP,f)
(TEMP,f)
(TEMP,f)
(3 rows)

drop user myuser3;
---------------------------------------------------------------------
\c regression - - :master_port
-- test temporary privilege on database
create user myuser4;
-- test grant/revoke temporary on database
\c test_2pc_db - - :worker_1_port
grant TEMPORARY on database test_2pc_db to myuser4;
\c regression - - :master_port
select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']);
check_database_privileges
---------------------------------------------------------------------
(TEMPORARY,t)
(TEMPORARY,t)
(TEMPORARY,t)
(3 rows)

\c test_2pc_db - - :master_port
revoke TEMPORARY on database test_2pc_db from myuser4;
\c regression - - :master_port;
select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']);
check_database_privileges
---------------------------------------------------------------------
(TEMPORARY,f)
(TEMPORARY,f)
(TEMPORARY,f)
(3 rows)

drop user myuser4;
---------------------------------------------------------------------
-- test ALL privileges with ALL statement on database
create user myuser5;
grant ALL on database test_2pc_db to myuser5;
\c regression - - :master_port
select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
check_database_privileges
---------------------------------------------------------------------
(CREATE,t)
(CREATE,t)
(CREATE,t)
(CONNECT,t)
(CONNECT,t)
(CONNECT,t)
(TEMP,t)
(TEMP,t)
(TEMP,t)
(TEMPORARY,t)
(TEMPORARY,t)
(TEMPORARY,t)
(12 rows)

\c test_2pc_db - - :master_port
revoke ALL on database test_2pc_db from myuser5;
\c regression - - :master_port
select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
check_database_privileges
---------------------------------------------------------------------
(CREATE,f)
(CREATE,f)
(CREATE,f)
(CONNECT,f)
(CONNECT,f)
(CONNECT,f)
(TEMP,f)
(TEMP,f)
(TEMP,f)
(TEMPORARY,f)
(TEMPORARY,f)
(TEMPORARY,f)
(12 rows)

drop user myuser5;
---------------------------------------------------------------------
-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database
create user myuser6;
\c test_2pc_db - - :master_port
grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser6;
\c regression - - :master_port
select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
check_database_privileges
---------------------------------------------------------------------
(CREATE,t)
(CREATE,t)
(CREATE,t)
(CONNECT,t)
(CONNECT,t)
(CONNECT,t)
(TEMP,t)
(TEMP,t)
(TEMP,t)
(TEMPORARY,t)
(TEMPORARY,t)
(TEMPORARY,t)
(12 rows)

\c test_2pc_db - - :master_port
revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser6;
\c regression - - :master_port
select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
check_database_privileges
---------------------------------------------------------------------
(CREATE,f)
(CREATE,f)
(CREATE,f)
(CONNECT,f)
(CONNECT,f)
(CONNECT,f)
(TEMP,f)
(TEMP,f)
(TEMP,f)
(TEMPORARY,f)
(TEMPORARY,f)
(TEMPORARY,f)
(12 rows)

drop user myuser6;
---------------------------------------------------------------------
-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database with grant option
create user myuser7;
create user myuser_1;
\c test_2pc_db - - :master_port
grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7;
set role myuser7;
--here since myuser7 does not have grant option, it should fail
grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1;
WARNING: no privileges were granted for "test_2pc_db"
\c regression - - :master_port
select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
check_database_privileges
---------------------------------------------------------------------
(CREATE,f)
(CREATE,f)
(CREATE,f)
(CONNECT,f)
(CONNECT,f)
(CONNECT,f)
(TEMP,f)
(TEMP,f)
(TEMP,f)
(TEMPORARY,f)
(TEMPORARY,f)
(TEMPORARY,f)
(12 rows)

\c test_2pc_db - - :master_port
RESET ROLE;
grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7 with grant option;
set role myuser7;
--here since myuser have grant option, it should succeed
grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1 granted by myuser7;
\c regression - - :master_port
select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
check_database_privileges
---------------------------------------------------------------------
(CREATE,t)
(CREATE,t)
(CREATE,t)
(CONNECT,t)
(CONNECT,t)
(CONNECT,t)
(TEMP,t)
(TEMP,t)
(TEMP,t)
(TEMPORARY,t)
(TEMPORARY,t)
(TEMPORARY,t)
(12 rows)

\c test_2pc_db - - :master_port
RESET ROLE;
--below test should fail and should throw an error since myuser_1 still have the dependent privileges
revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict;
ERROR: dependent privileges exist
HINT: Use CASCADE to revoke them too.
--below test should fail and should throw an error since myuser_1 still have the dependent privileges
revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict ;
ERROR: dependent privileges exist
HINT: Use CASCADE to revoke them too.
--below test should succeed and should not throw any error since myuser_1 privileges are revoked with cascade
revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 cascade ;
--here we test if myuser7 still have the privileges after revoke grant option for
\c regression - - :master_port
select check_database_privileges('myuser7','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
check_database_privileges
---------------------------------------------------------------------
(CREATE,t)
(CREATE,t)
(CREATE,t)
(CONNECT,t)
(CONNECT,t)
(CONNECT,t)
(TEMP,t)
(TEMP,t)
(TEMP,t)
(TEMPORARY,t)
(TEMPORARY,t)
(TEMPORARY,t)
(12 rows)

\c test_2pc_db - - :master_port
reset role;
revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7;
revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser_1;
\c regression - - :master_port
drop user myuser_1;
drop user myuser7;
---------------------------------------------------------------------
-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database multi database
-- and multi user
\c regression - - :master_port
create user myuser8;
create user myuser_2;
set citus.enable_create_database_propagation to on;
create database test_db;
revoke connect,temp,temporary on database test_db from public;
\c test_2pc_db - - :master_port
grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db to myuser8,myuser_2;
\c regression - - :master_port
select check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
check_database_privileges
---------------------------------------------------------------------
(CREATE,t)
(CREATE,t)
(CREATE,t)
(CONNECT,t)
(CONNECT,t)
(CONNECT,t)
(TEMP,t)
(TEMP,t)
(TEMP,t)
(TEMPORARY,t)
(TEMPORARY,t)
(TEMPORARY,t)
(12 rows)

select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
check_database_privileges
---------------------------------------------------------------------
(CREATE,t)
(CREATE,t)
(CREATE,t)
(CONNECT,t)
(CONNECT,t)
(CONNECT,t)
(TEMP,t)
(TEMP,t)
(TEMP,t)
(TEMPORARY,t)
(TEMPORARY,t)
(TEMPORARY,t)
(12 rows)

select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
check_database_privileges
---------------------------------------------------------------------
(CREATE,t)
(CREATE,t)
(CREATE,t)
(CONNECT,t)
(CONNECT,t)
(CONNECT,t)
(TEMP,t)
(TEMP,t)
(TEMP,t)
(TEMPORARY,t)
(TEMPORARY,t)
(TEMPORARY,t)
(12 rows)

select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
check_database_privileges
---------------------------------------------------------------------
(CREATE,t)
(CREATE,t)
(CREATE,t)
(CONNECT,t)
(CONNECT,t)
(CONNECT,t)
(TEMP,t)
(TEMP,t)
(TEMP,t)
(TEMPORARY,t)
(TEMPORARY,t)
(TEMPORARY,t)
(12 rows)

\c test_2pc_db - - :master_port
RESET ROLE;
revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 ;
--below test should succeed and should not throw any error
revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser_2;
--below test should succeed and should not throw any error
revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 cascade;
\c regression - - :master_port
select check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
check_database_privileges
---------------------------------------------------------------------
(CREATE,f)
(CREATE,f)
(CREATE,f)
(CONNECT,f)
(CONNECT,f)
(CONNECT,f)
(TEMP,f)
(TEMP,f)
(TEMP,f)
(TEMPORARY,f)
(TEMPORARY,f)
(TEMPORARY,f)
(12 rows)

select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
check_database_privileges
---------------------------------------------------------------------
(CREATE,f)
(CREATE,f)
(CREATE,f)
(CONNECT,f)
(CONNECT,f)
(CONNECT,f)
(TEMP,f)
(TEMP,f)
(TEMP,f)
(TEMPORARY,f)
(TEMPORARY,f)
(TEMPORARY,f)
(12 rows)

select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
check_database_privileges
---------------------------------------------------------------------
(CREATE,f)
(CREATE,f)
(CREATE,f)
(CONNECT,f)
(CONNECT,f)
(CONNECT,f)
(TEMP,f)
(TEMP,f)
(TEMP,f)
(TEMPORARY,f)
(TEMPORARY,f)
(TEMPORARY,f)
(12 rows)

select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
check_database_privileges
---------------------------------------------------------------------
(CREATE,f)
(CREATE,f)
(CREATE,f)
(CONNECT,f)
(CONNECT,f)
(CONNECT,f)
(TEMP,f)
(TEMP,f)
(TEMP,f)
(TEMPORARY,f)
(TEMPORARY,f)
(TEMPORARY,f)
(12 rows)

\c test_2pc_db - - :master_port
reset role;
\c regression - - :master_port
drop user myuser_2;
drop user myuser8;
set citus.enable_create_database_propagation to on;
drop database test_db;
---------------------------------------------------------------------
-- rollbacks public role database privileges to original state
grant connect,temp,temporary on database test_2pc_db to public;
drop database test_2pc_db;
set citus.enable_create_database_propagation to off;
DROP SCHEMA grant_on_database_propagation_non_maindb CASCADE;
reset citus.enable_create_database_propagation;
reset search_path;
---------------------------------------------------------------------
@ -0,0 +1,160 @@
CREATE SCHEMA grant_role2pc;
SET search_path TO grant_role2pc;
set citus.enable_create_database_propagation to on;
CREATE DATABASE grant_role2pc_db;
\c grant_role2pc_db
SHOW citus.main_db;
citus.main_db
---------------------------------------------------------------------
regression
(1 row)

SET citus.superuser TO 'postgres';
CREATE USER grant_role2pc_user1;
CREATE USER grant_role2pc_user2;
CREATE USER grant_role2pc_user3;
CREATE USER grant_role2pc_user4;
CREATE USER grant_role2pc_user5;
CREATE USER grant_role2pc_user6;
CREATE USER grant_role2pc_user7;
\c grant_role2pc_db
--test with empty superuser
SET citus.superuser TO '';
grant grant_role2pc_user1 to grant_role2pc_user2;
ERROR: No superuser role is given for Citus main database connection
HINT: Set citus.superuser to a superuser role name
SET citus.superuser TO 'postgres';
grant grant_role2pc_user1 to grant_role2pc_user2 with admin option granted by CURRENT_USER;
\c regression
select result FROM run_command_on_all_nodes(
$$
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option
FROM pg_auth_members
WHERE member::regrole::text = 'grant_role2pc_user2'
order by member::regrole::text, roleid::regrole::text
) t
$$
);
result
---------------------------------------------------------------------
[{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true}]
[{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true}]
[{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true}]
(3 rows)

\c grant_role2pc_db
--test grant under transactional context with multiple operations
BEGIN;
grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user3 WITH ADMIN OPTION;
grant grant_role2pc_user1 to grant_role2pc_user4 granted by grant_role2pc_user3 ;
COMMIT;
BEGIN;
grant grant_role2pc_user1 to grant_role2pc_user5 WITH ADMIN OPTION granted by grant_role2pc_user3;
grant grant_role2pc_user1 to grant_role2pc_user6;
ROLLBACK;
BEGIN;
grant grant_role2pc_user1 to grant_role2pc_user7;
SELECT 1/0;
ERROR: division by zero
commit;
\c regression
select result FROM run_command_on_all_nodes($$
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option
FROM pg_auth_members
WHERE member::regrole::text in
('grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7')
order by member::regrole::text, roleid::regrole::text
) t
$$);
result
---------------------------------------------------------------------
[{"member":"grant_role2pc_user3","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false}]
[{"member":"grant_role2pc_user3","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false}]
[{"member":"grant_role2pc_user3","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false}]
(3 rows)

\c grant_role2pc_db
grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user5,grant_role2pc_user6,grant_role2pc_user7 granted by grant_role2pc_user3;
\c regression
select result FROM run_command_on_all_nodes($$
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option
FROM pg_auth_members
WHERE member::regrole::text in
('grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7')
order by member::regrole::text, roleid::regrole::text
) t
$$);
result
---------------------------------------------------------------------
[{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}]
[{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}]
[{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}]
(3 rows)

\c grant_role2pc_db
revoke admin option for grant_role2pc_user1 from grant_role2pc_user5 granted by grant_role2pc_user3;
--test revoke under transactional context with multiple operations
BEGIN;
revoke grant_role2pc_user1 from grant_role2pc_user5 granted by grant_role2pc_user3 ;
revoke grant_role2pc_user1 from grant_role2pc_user4 granted by grant_role2pc_user3;
COMMIT;
\c grant_role2pc_db - - :worker_1_port
BEGIN;
revoke grant_role2pc_user1 from grant_role2pc_user6,grant_role2pc_user7 granted by grant_role2pc_user3;
revoke grant_role2pc_user1 from grant_role2pc_user3 cascade;
COMMIT;
\c regression
select result FROM run_command_on_all_nodes($$
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option
FROM pg_auth_members
WHERE member::regrole::text in
('grant_role2pc_user2','grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7')
order by member::regrole::text, roleid::regrole::text
) t
$$);
result
---------------------------------------------------------------------
[{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}]
[{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}]
[{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}]
(3 rows)

\c grant_role2pc_db - - :worker_1_port
BEGIN;
grant grant_role2pc_user1 to grant_role2pc_user5 WITH ADMIN OPTION;
grant grant_role2pc_user1 to grant_role2pc_user6;
COMMIT;
\c regression - - :master_port
select result FROM run_command_on_all_nodes($$
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option
FROM pg_auth_members
WHERE member::regrole::text in
('grant_role2pc_user5','grant_role2pc_user6')
order by member::regrole::text, roleid::regrole::text
) t
$$);
result
---------------------------------------------------------------------
[{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}]
[{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}]
[{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}]
(3 rows)

revoke grant_role2pc_user1 from grant_role2pc_user5,grant_role2pc_user6;
--clean resources
DROP SCHEMA grant_role2pc;
set citus.enable_create_database_propagation to on;
DROP DATABASE grant_role2pc_db;
drop user grant_role2pc_user2,grant_role2pc_user3,grant_role2pc_user4,grant_role2pc_user5,grant_role2pc_user6,grant_role2pc_user7;
drop user grant_role2pc_user1;
reset citus.enable_create_database_propagation;
@ -138,7 +138,7 @@ step s2-view-worker:
('%pg_prepared_xacts%'),
('%COMMIT%'),
('%dump_local_%'),
('%citus_internal_local_blocked_processes%'),
('%citus_internal.local_blocked_processes%'),
('%add_node%'),
('%csa_from_one_node%'),
('%pg_locks%'))
@ -0,0 +1,132 @@
|
|||
CREATE SCHEMA metadata_sync_2pc_schema;
|
||||
SET search_path TO metadata_sync_2pc_schema;
|
||||
set citus.enable_create_database_propagation to on;
|
||||
CREATE DATABASE metadata_sync_2pc_db;
|
||||
revoke connect,temp,temporary on database metadata_sync_2pc_db from public;
|
||||
\c metadata_sync_2pc_db
|
||||
SHOW citus.main_db;
|
||||
citus.main_db
|
||||
---------------------------------------------------------------------
|
||||
regression
|
||||
(1 row)
|
||||
|
||||
CREATE USER "grant_role2pc'_user1";
|
||||
CREATE USER "grant_role2pc'_user2";
|
||||
CREATE USER "grant_role2pc'_user3";
|
||||
CREATE USER grant_role2pc_user4;
|
||||
CREATE USER grant_role2pc_user5;
|
||||
\c regression
|
||||
select 1 from citus_remove_node('localhost', :worker_2_port);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
\c metadata_sync_2pc_db
|
||||
grant "grant_role2pc'_user1","grant_role2pc'_user2" to "grant_role2pc'_user3" WITH ADMIN OPTION;
|
||||
grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3";
|
||||
--test for grant on database
\c metadata_sync_2pc_db - - :master_port
grant create on database metadata_sync_2pc_db to "grant_role2pc'_user1";
grant connect on database metadata_sync_2pc_db to "grant_role2pc'_user2";
grant ALL on database metadata_sync_2pc_db to "grant_role2pc'_user3";
\c regression
select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']);
check_database_privileges
---------------------------------------------------------------------
(CREATE,t)
(CREATE,t)
(2 rows)

select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']);
check_database_privileges
---------------------------------------------------------------------
(CONNECT,t)
(CONNECT,t)
(2 rows)

select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']);
check_database_privileges
---------------------------------------------------------------------
(CREATE,t)
(CREATE,t)
(CONNECT,t)
(CONNECT,t)
(TEMP,t)
(TEMP,t)
(TEMPORARY,t)
(TEMPORARY,t)
(8 rows)

\c regression
set citus.enable_create_database_propagation to on;
select 1 from citus_add_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)

select result FROM run_command_on_all_nodes($$
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option
FROM pg_auth_members
WHERE member::regrole::text in
('"grant_role2pc''_user2"','"grant_role2pc''_user3"','grant_role2pc_user4','grant_role2pc_user5')
order by member::regrole::text
) t
$$);
result
---------------------------------------------------------------------
[{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false}]
[{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false}]
[{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false}]
(3 rows)

select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']);
check_database_privileges
---------------------------------------------------------------------
(CREATE,t)
(CREATE,t)
(CREATE,t)
(3 rows)

select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']);
check_database_privileges
---------------------------------------------------------------------
(CONNECT,t)
(CONNECT,t)
(CONNECT,t)
(3 rows)

select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']);
check_database_privileges
---------------------------------------------------------------------
(CREATE,t)
(CREATE,t)
(CREATE,t)
(CONNECT,t)
(CONNECT,t)
(CONNECT,t)
(TEMP,t)
(TEMP,t)
(TEMP,t)
(TEMPORARY,t)
(TEMPORARY,t)
(TEMPORARY,t)
(12 rows)

\c metadata_sync_2pc_db
revoke "grant_role2pc'_user1","grant_role2pc'_user2" from grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3";
revoke admin option for "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3";
revoke "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3";
revoke ALL on database metadata_sync_2pc_db from "grant_role2pc'_user3";
revoke CONNECT on database metadata_sync_2pc_db from "grant_role2pc'_user2";
revoke CREATE on database metadata_sync_2pc_db from "grant_role2pc'_user1";
\c regression
drop user "grant_role2pc'_user1","grant_role2pc'_user2","grant_role2pc'_user3",grant_role2pc_user4,grant_role2pc_user5;
set citus.enable_create_database_propagation to on;
drop database metadata_sync_2pc_db;
drop schema metadata_sync_2pc_schema;
reset citus.enable_create_database_propagation;
reset search_path;
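For reference, the helper behind the (PRIVILEGE,t) rows above can be sketched as below. This is an assumed reconstruction (the real definition lives earlier in the regression suite): it probes has_database_privilege() on every node through run_command_on_all_nodes(), so each privilege yields one row per node, two rows before citus_add_node and three after.

-- hypothetical sketch of the test helper; argument and column names are guesses
CREATE OR REPLACE FUNCTION check_database_privileges(role_name text, db_name text, permissions text[])
RETURNS TABLE(permission text, result text)
AS $func$
DECLARE
    perm text;
BEGIN
    FOREACH perm IN ARRAY permissions
    LOOP
        -- run the privilege check on every node; run_command_on_all_nodes
        -- returns one row per node with the command output in "result"
        RETURN QUERY EXECUTE format(
            $fmt$SELECT %s, result FROM run_command_on_all_nodes($cmd$SELECT has_database_privilege(%s, %s, %s)$cmd$)$fmt$,
            quote_literal(perm), quote_literal(role_name),
            quote_literal(db_name), quote_literal(perm));
    END LOOP;
END;
$func$ LANGUAGE plpgsql;

Because the tests call it as select check_database_privileges(...), psql prints each (permission, result) pair as a composite value, which is why the expected output reads (CREATE,t) rather than two separate columns.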
@ -12,9 +12,9 @@ RESET client_min_messages;
SET search_path TO metadata_sync_helpers;
CREATE TABLE test(col_1 int);
-- not in a distributed transaction
SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's');
SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's');
ERROR: This is an internal Citus function can only be used in a distributed transaction
SELECT citus_internal_update_relation_colocation ('test'::regclass, 1);
SELECT citus_internal.update_relation_colocation ('test'::regclass, 1);
ERROR: This is an internal Citus function can only be used in a distributed transaction
-- in a distributed transaction, but the application name is not Citus
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
@ -24,7 +24,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

(1 row)

SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's');
SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's');
ERROR: This is an internal Citus function can only be used in a distributed transaction
ROLLBACK;
-- in a distributed transaction and the application name is Citus, allowed.
@ -36,8 +36,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_internal gpid=10000000001';
SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's');
citus_internal_add_partition_metadata
SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's');
add_partition_metadata
---------------------------------------------------------------------

(1 row)
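The gating exercised by these hunks can be restated as one runnable sequence, assembled from the surrounding context lines (the transaction id arguments and the gpid value are arbitrary test constants): an internal metadata function only runs once the backend has claimed a distributed transaction id and set an application_name carrying the citus_internal prefix with a well-formed gpid.

BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
-- step 1: mark this backend as part of a distributed transaction
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
-- step 2: identify the session as internal Citus traffic with a global pid
SET application_name to 'citus_internal gpid=10000000001';
-- only now is the internal function callable (ownership checks still apply)
SELECT citus_internal.add_partition_metadata('test'::regclass, 'h', 'col_1', 0, 's');
ROLLBACK;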
@ -61,7 +61,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_internal gpid=10000000001';
SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's');
SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's');
ERROR: must be owner of table test
ROLLBACK;
-- we do not own the relation
@ -73,7 +73,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_internal gpid=10000000001';
SELECT citus_internal_update_relation_colocation ('test'::regclass, 10);
SELECT citus_internal.update_relation_colocation ('test'::regclass, 10);
ERROR: must be owner of table test
ROLLBACK;
-- finally, a user can only add its own tables to the metadata
@ -87,8 +87,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_internal gpid=10000000001';
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
citus_internal_add_partition_metadata
SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
add_partition_metadata
---------------------------------------------------------------------

(1 row)

@ -109,8 +109,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_rebalancer gpid=10000000001';
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
citus_internal_add_partition_metadata
SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
add_partition_metadata
---------------------------------------------------------------------

(1 row)

@ -125,7 +125,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_internal gpid=not a correct gpid';
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
ERROR: This is an internal Citus function can only be used in a distributed transaction
ROLLBACK;
-- also faills if done by the rebalancer
@ -137,7 +137,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_rebalancer gpid=not a correct gpid';
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
ERROR: This is an internal Citus function can only be used in a distributed transaction
ROLLBACK;
-- application_name with suffix is ok (e.g. pgbouncer might add this)
@ -149,8 +149,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_internal gpid=10000000001 - from 10.12.14.16:10370';
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
citus_internal_add_partition_metadata
SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
add_partition_metadata
---------------------------------------------------------------------

(1 row)

@ -165,7 +165,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_internal gpid=';
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
ERROR: This is an internal Citus function can only be used in a distributed transaction
ROLLBACK;
-- empty application_name
@ -177,7 +177,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to '';
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
ERROR: This is an internal Citus function can only be used in a distributed transaction
ROLLBACK;
-- application_name with incorrect prefix
@ -189,7 +189,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus gpid=10000000001';
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
ERROR: This is an internal Citus function can only be used in a distributed transaction
ROLLBACK;
-- fails because there is no X distribution method
@ -201,7 +201,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_internal gpid=10000000001';
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's');
SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's');
ERROR: Metadata syncing is only allowed for hash, reference and local tables:X
ROLLBACK;
-- fails because there is the column does not exist
@ -213,7 +213,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_internal gpid=10000000001';
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'non_existing_col', 0, 's');
SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'non_existing_col', 0, 's');
ERROR: column "non_existing_col" of relation "test_2" does not exist
ROLLBACK;
--- fails because we do not allow NULL parameters
@ -225,7 +225,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_internal gpid=10000000001';
SELECT citus_internal_add_partition_metadata (NULL, 'h', 'non_existing_col', 0, 's');
SELECT citus_internal.add_partition_metadata (NULL, 'h', 'non_existing_col', 0, 's');
ERROR: relation cannot be NULL
ROLLBACK;
-- fails because colocationId cannot be negative
@ -237,7 +237,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_internal gpid=10000000001';
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', -1, 's');
SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', -1, 's');
ERROR: Metadata syncing is only allowed for valid colocation id values.
ROLLBACK;
-- fails because there is no X replication model
@ -249,7 +249,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_internal gpid=10000000001';
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 'X');
SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 'X');
ERROR: Metadata syncing is only allowed for hash, reference and local tables:X
ROLLBACK;
-- the same table cannot be added twice, that is enforced by a primary key
@ -262,13 +262,13 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

SET application_name to 'citus_internal gpid=10000000001';
\set VERBOSITY terse
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
citus_internal_add_partition_metadata
SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
add_partition_metadata
---------------------------------------------------------------------

(1 row)

SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
ERROR: duplicate key value violates unique constraint "pg_dist_partition_logical_relid_index"
ROLLBACK;
-- the same table cannot be added twice, that is enforced by a primary key even if distribution key changes
@ -281,13 +281,13 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

SET application_name to 'citus_internal gpid=10000000001';
\set VERBOSITY terse
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
citus_internal_add_partition_metadata
SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
add_partition_metadata
---------------------------------------------------------------------

(1 row)

SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_2', 0, 's');
SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_2', 0, 's');
ERROR: duplicate key value violates unique constraint "pg_dist_partition_logical_relid_index"
ROLLBACK;
-- hash distributed table cannot have NULL distribution key
@ -300,7 +300,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

SET application_name to 'citus_internal gpid=10000000001';
\set VERBOSITY terse
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', NULL, 0, 's');
SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', NULL, 0, 's');
ERROR: Distribution column cannot be NULL for relation "test_2"
ROLLBACK;
-- even if metadata_sync_helper_role is not owner of the table test
@ -332,8 +332,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_internal gpid=10000000001';
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's');
citus_internal_add_partition_metadata
SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's');
add_partition_metadata
---------------------------------------------------------------------

(1 row)

@ -349,7 +349,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

SET application_name to 'citus_internal gpid=10000000001';
\set VERBOSITY terse
SELECT citus_internal_update_placement_metadata(1420007, 10000, 11111);
SELECT citus_internal.update_placement_metadata(1420007, 10000, 11111);
ERROR: could not find valid entry for shard xxxxx
ROLLBACK;
-- non-existing users should fail to pass the checks
@ -378,7 +378,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_internal gpid=10000000001';
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's');
SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's');
ERROR: role "non_existing_user" does not exist
ROLLBACK;
\c - postgres - :worker_1_port
@ -409,7 +409,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_internal gpid=10000000001';
SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', 'col_1', 0, 's');
SELECT citus_internal.add_partition_metadata ('test_ref'::regclass, 'n', 'col_1', 0, 's');
ERROR: Reference or local tables cannot have distribution columns
ROLLBACK;
-- non-valid replication model
@ -421,7 +421,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_internal gpid=10000000001';
SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'A');
SELECT citus_internal.add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'A');
ERROR: Metadata syncing is only allowed for known replication models.
ROLLBACK;
-- not-matching replication model for reference table
@ -433,7 +433,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_internal gpid=10000000001';
SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'c');
SELECT citus_internal.add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'c');
ERROR: Local or references tables can only have 's' or 't' as the replication model.
ROLLBACK;
-- add entry for super user table
@ -448,8 +448,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_internal gpid=10000000001';
SELECT citus_internal_add_partition_metadata ('super_user_table'::regclass, 'h', 'col_1', 0, 's');
citus_internal_add_partition_metadata
SELECT citus_internal.add_partition_metadata ('super_user_table'::regclass, 'h', 'col_1', 0, 's');
add_partition_metadata
---------------------------------------------------------------------

(1 row)

@ -470,7 +470,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('super_user_table'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text))
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
ERROR: must be owner of table super_user_table
ROLLBACK;
-- the user is only allowed to add a shard for add a table which is in pg_dist_partition
@ -485,7 +485,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text))
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
ERROR: The relation "test_2" does not have a valid entry in pg_dist_partition.
ROLLBACK;
-- ok, now add the table to the pg_dist_partition
@ -497,20 +497,20 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_internal gpid=10000000001';
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 250, 's');
citus_internal_add_partition_metadata
SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 250, 's');
add_partition_metadata
---------------------------------------------------------------------

(1 row)

SELECT citus_internal_add_partition_metadata ('test_3'::regclass, 'h', 'col_1', 251, 's');
citus_internal_add_partition_metadata
SELECT citus_internal.add_partition_metadata ('test_3'::regclass, 'h', 'col_1', 251, 's');
add_partition_metadata
---------------------------------------------------------------------

(1 row)

SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 't');
citus_internal_add_partition_metadata
SELECT citus_internal.add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 't');
add_partition_metadata
---------------------------------------------------------------------

(1 row)

@ -525,8 +525,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_internal gpid=10000000001';
SELECT citus_internal_update_relation_colocation ('test_2'::regclass, 1231231232);
citus_internal_update_relation_colocation
SELECT citus_internal.update_relation_colocation ('test_2'::regclass, 1231231232);
update_relation_colocation
---------------------------------------------------------------------

(1 row)

@ -544,7 +544,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_2'::regclass, -1, 't'::"char", '-2147483648'::text, '-1610612737'::text))
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
ERROR: Invalid shard id: -1
ROLLBACK;
-- invalid storage types are not allowed
@ -559,7 +559,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_2'::regclass, 1420000, 'X'::"char", '-2147483648'::text, '-1610612737'::text))
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
ERROR: Invalid shard storage type: X
ROLLBACK;
-- NULL shard ranges are not allowed for hash distributed tables
@ -574,7 +574,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_2'::regclass, 1420000, 't'::"char", NULL, '-1610612737'::text))
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
ERROR: Shards of has distributed table "test_2" cannot have NULL shard ranges
ROLLBACK;
-- non-integer shard ranges are not allowed
@ -589,7 +589,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", 'non-int'::text, '-1610612737'::text))
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
ERROR: invalid input syntax for type integer: "non-int"
ROLLBACK;
-- shardMinValue should be smaller than shardMaxValue
@ -604,7 +604,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-1610612737'::text, '-2147483648'::text))
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
ERROR: shardMinValue=-1610612737 is greater than shardMaxValue=-2147483648 for table "test_2", which is not allowed
ROLLBACK;
-- we do not allow overlapping shards for the same table
@ -621,7 +621,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text),
('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text),
('test_2'::regclass, 1420002::bigint, 't'::"char", '10'::text, '50'::text))
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
ERROR: Shard intervals overlap for table "test_2": 1420001 and 1420000
ROLLBACK;
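Several of the failures above hinge on what pg_dist_shard already contains. A quick way to eyeball the intervals a test has registered (catalog column names as they actually exist) is:

-- list the hash ranges recorded for test_2, lowest first
SELECT shardid, shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE logicalrelid = 'test_2'::regclass
ORDER BY shardminvalue::int;

In this ordering, the overlap check amounts to requiring each shardminvalue to be strictly greater than the previous row's shardmaxvalue, which is exactly what the (10,20)/(20,30)/(10,50) triple above violates.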
-- Now let's check valid pg_dist_object updates
@ -637,7 +637,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('non_existing_type', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ERROR: unrecognized object type "non_existing_type"
ROLLBACK;
-- check the sanity of distributionArgumentIndex and colocationId
@ -652,7 +652,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -100, 0, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ERROR: distribution_argument_index must be between 0 and 100
ROLLBACK;
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
@ -666,7 +666,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -1, -1, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ERROR: colocationId must be a positive number
ROLLBACK;
-- check with non-existing object
@ -681,10 +681,10 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('role', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ERROR: role "non_existing_user" does not exist
ROLLBACK;
-- since citus_internal_add_object_metadata is strict function returns NULL
-- since citus_internal.add_object_metadata is strict function returns NULL
-- if any parameter is NULL
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
@ -697,15 +697,15 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], 0, NULL::int, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
citus_internal_add_object_metadata
SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
add_object_metadata
---------------------------------------------------------------------

(1 row)

ROLLBACK;
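The STRICT note above is plain PostgreSQL behavior rather than anything Citus-specific: a strict function is never entered when any argument is NULL; the call simply evaluates to NULL, which is why the NULL::int colocationid yields an empty result row instead of an error. A toy illustration (the function name here is made up):

-- STRICT short-circuits on NULL input; the body never runs
CREATE FUNCTION strict_demo(x int) RETURNS int
AS $$ SELECT x * 2 $$ LANGUAGE SQL STRICT;
SELECT strict_demo(NULL); -- returns NULL, no error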
\c - postgres - :worker_1_port
-- Show that citus_internal_add_object_metadata only works for object types
-- Show that citus_internal.add_object_metadata only works for object types
-- which is known how to distribute
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
@ -724,10 +724,10 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SET ROLE metadata_sync_helper_role;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('operator', ARRAY['===']::text[], ARRAY['int','int']::text[], -1, 0, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ERROR: operator object can not be distributed by Citus
ROLLBACK;
-- Show that citus_internal_add_object_metadata checks the priviliges
-- Show that citus_internal.add_object_metadata checks the priviliges
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
assign_distributed_transaction_id
@ -744,7 +744,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SET ROLE metadata_sync_helper_role;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('function', ARRAY['distribution_test_function']::text[], ARRAY['integer']::text[], -1, 0, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ERROR: must be owner of function distribution_test_function
ROLLBACK;
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
@ -761,7 +761,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SET ROLE metadata_sync_helper_role;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('type', ARRAY['distributed_test_type']::text[], ARRAY[]::text[], -1, 0, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ERROR: must be owner of type distributed_test_type
ROLLBACK;
-- we do not allow wrong partmethod
@ -780,7 +780,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text),
('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text))
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
ERROR: Metadata syncing is only allowed for hash, reference and local tables: X
ROLLBACK;
-- we do not allow NULL shardMinMax values
@ -797,8 +797,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text))
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
citus_internal_add_shard_metadata
SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
add_shard_metadata
---------------------------------------------------------------------

(1 row)

@ -807,7 +807,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 1420000;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text))
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
ERROR: Shards of has distributed table "test_2" cannot have NULL shard ranges
ROLLBACK;
\c - metadata_sync_helper_role - :worker_1_port
@ -830,8 +830,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
('test_2'::regclass, 1420004::bigint, 't'::"char", '51'::text, '60'::text),
('test_2'::regclass, 1420005::bigint, 't'::"char", '61'::text, '70'::text),
('test_3'::regclass, 1420008::bigint, 't'::"char", '11'::text, '20'::text))
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
citus_internal_add_shard_metadata
SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
add_shard_metadata
---------------------------------------------------------------------


@ -852,7 +852,7 @@ BEGIN;
(1 row)

SET application_name to 'citus_internal gpid=10000000001';
SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251);
SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251);
ERROR: cannot colocate tables test_2 and test_3
ROLLBACK;
-- now, add few more shards for test_3 to make it colocated with test_2
@ -871,8 +871,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
('test_3'::regclass, 1420011::bigint, 't'::"char", '41'::text, '50'::text),
('test_3'::regclass, 1420012::bigint, 't'::"char", '51'::text, '60'::text),
('test_3'::regclass, 1420013::bigint, 't'::"char", '61'::text, '70'::text))
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
citus_internal_add_shard_metadata
SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
add_shard_metadata
---------------------------------------------------------------------


@ -894,7 +894,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_ref'::regclass, 1420003::bigint, 't'::"char", '-1610612737'::text, NULL))
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
ERROR: Shards of reference or local table "test_ref" should have NULL shard ranges
ROLLBACK;
-- reference tables cannot have multiple shards
@ -910,7 +910,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL),
('test_ref'::regclass, 1420007::bigint, 't'::"char", NULL, NULL))
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
ERROR: relation "test_ref" has already at least one shard, adding more is not allowed
ROLLBACK;
-- finally, add a shard for reference tables
@ -925,8 +925,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL))
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
citus_internal_add_shard_metadata
SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
add_shard_metadata
---------------------------------------------------------------------

(1 row)

@ -946,8 +946,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('super_user_table'::regclass, 1420007::bigint, 't'::"char", '11'::text, '20'::text))
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
citus_internal_add_shard_metadata
SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
add_shard_metadata
---------------------------------------------------------------------

(1 row)

@ -966,9 +966,9 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

SET application_name to 'citus_internal gpid=10000000001';
\set VERBOSITY terse
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS
(VALUES (-10, 1, 0::bigint, 1::int, 1500000::bigint))
SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS
(VALUES (-10, 0::bigint, 1::int, 1500000::bigint))
SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
ERROR: could not find valid entry for shard xxxxx
ROLLBACK;
-- invalid placementid
@ -983,7 +983,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH placement_data(shardid, shardlength, groupid, placementid) AS
(VALUES (1420000, 0::bigint, 1::int, -10))
SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
ERROR: Shard placement has invalid placement id (-10) for shard(1420000)
ROLLBACK;
-- non-existing shard
@ -998,7 +998,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH placement_data(shardid, shardlength, groupid, placementid) AS
(VALUES (1430100, 0::bigint, 1::int, 10))
SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
ERROR: could not find valid entry for shard xxxxx
ROLLBACK;
-- non-existing node with non-existing node-id 123123123
@ -1013,7 +1013,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH placement_data(shardid, shardlength, groupid, placementid) AS
(VALUES ( 1420000, 0::bigint, 123123123::int, 1500000))
SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
ERROR: Node with group id 123123123 for shard placement xxxxx does not exist
ROLLBACK;
-- create a volatile function that returns the local node id
@ -1044,7 +1044,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
WITH placement_data(shardid, shardlength, groupid, placementid) AS
(VALUES (1420000, 0::bigint, get_node_id(), 1500000),
(1420000, 0::bigint, get_node_id(), 1500001))
SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
ERROR: duplicate key value violates unique constraint "placement_shardid_groupid_unique_index"
ROLLBACK;
-- shard is not owned by us
@ -1059,7 +1059,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH placement_data(shardid, shardlength, groupid, placementid) AS
(VALUES (1420007, 0::bigint, get_node_id(), 1500000))
SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
ERROR: must be owner of table super_user_table
ROLLBACK;
-- sucessfully add placements
@ -1085,8 +1085,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1420011, 0::bigint, get_node_id(), 1500009),
(1420012, 0::bigint, get_node_id(), 1500010),
(1420013, 0::bigint, get_node_id(), 1500011))
SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
citus_internal_add_placement_metadata
SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
add_placement_metadata
---------------------------------------------------------------------

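Placements land in pg_dist_placement, so the effect of the successful batch above can be verified with an ordinary catalog query (column names as in the real catalog):

-- one row per placement: which group hosts which shard, and under what id
SELECT placementid, shardid, shardlength, groupid
FROM pg_dist_placement
WHERE shardid BETWEEN 1420000 AND 1420013
ORDER BY shardid, placementid;

Note how the signature change in this diff, dropping shardstate from citus_internal.add_placement_metadata, mirrors the first failing placement hunk above, where the old call still passed a shardstate of 1.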
@ -1112,8 +1112,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
(1 row)

SET application_name to 'citus_internal gpid=10000000001';
SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251);
citus_internal_update_relation_colocation
SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251);
update_relation_colocation
---------------------------------------------------------------------

(1 row)

@ -1130,7 +1130,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

SET application_name to 'citus_internal gpid=10000000001';
\set VERBOSITY terse
SELECT citus_internal_update_placement_metadata(1420000, get_node_id(), get_node_id()+1000);
SELECT citus_internal.update_placement_metadata(1420000, get_node_id(), get_node_id()+1000);
ERROR: Node with group id 1014 for shard placement xxxxx does not exist
COMMIT;
-- fails because the source node doesn't contain the shard
@ -1143,7 +1143,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

SET application_name to 'citus_internal gpid=10000000001';
\set VERBOSITY terse
SELECT citus_internal_update_placement_metadata(1420000, get_node_id()+10000, get_node_id());
SELECT citus_internal.update_placement_metadata(1420000, get_node_id()+10000, get_node_id());
ERROR: Active placement for shard xxxxx is not found on group:14
COMMIT;
-- fails because shard does not exist
@ -1156,7 +1156,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

SET application_name to 'citus_internal gpid=10000000001';
\set VERBOSITY terse
SELECT citus_internal_update_placement_metadata(0, get_node_id(), get_node_id()+1);
SELECT citus_internal.update_placement_metadata(0, get_node_id(), get_node_id()+1);
ERROR: Shard id does not exists: 0
COMMIT;
-- fails because none-existing shard
@ -1169,7 +1169,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

SET application_name to 'citus_internal gpid=10000000001';
\set VERBOSITY terse
SELECT citus_internal_update_placement_metadata(213123123123, get_node_id(), get_node_id()+1);
SELECT citus_internal.update_placement_metadata(213123123123, get_node_id(), get_node_id()+1);
ERROR: Shard id does not exists: 213123123123
COMMIT;
-- fails because we do not own the shard
@ -1182,7 +1182,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

SET application_name to 'citus_internal gpid=10000000001';
\set VERBOSITY terse
SELECT citus_internal_update_placement_metadata(1420007, get_node_id(), get_node_id()+1);
SELECT citus_internal.update_placement_metadata(1420007, get_node_id(), get_node_id()+1);
ERROR: must be owner of table super_user_table
COMMIT;
-- the user only allowed to delete their own shards
@ -1197,7 +1197,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH shard_data(shardid)
AS (VALUES (1420007))
SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data;
SELECT citus_internal.delete_shard_metadata(shardid) FROM shard_data;
ERROR: must be owner of table super_user_table
ROLLBACK;
-- the user cannot delete non-existing shards
@ -1212,7 +1212,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH shard_data(shardid)
AS (VALUES (1420100))
SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data;
SELECT citus_internal.delete_shard_metadata(shardid) FROM shard_data;
ERROR: Shard id does not exists: 1420100
ROLLBACK;
-- sucessfully delete shards
@ -1239,8 +1239,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
\set VERBOSITY terse
WITH shard_data(shardid)
AS (VALUES (1420000))
SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data;
citus_internal_delete_shard_metadata
SELECT citus_internal.delete_shard_metadata(shardid) FROM shard_data;
delete_shard_metadata
---------------------------------------------------------------------

(1 row)

@ -1274,7 +1274,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
-- so that making two tables colocated fails
UPDATE pg_dist_partition SET repmodel = 't'
WHERE logicalrelid = 'test_2'::regclass;
SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251);
SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251);
ERROR: cannot colocate tables test_2 and test_3
ROLLBACK;
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
@ -1298,7 +1298,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}'
WHERE logicalrelid = 'test_2'::regclass;
\endif
SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251);
SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251);
ERROR: cannot colocate tables test_2 and test_3
ROLLBACK;
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
@ -1313,7 +1313,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
-- so that making two tables colocated fails
UPDATE pg_dist_partition SET partmethod = ''
WHERE logicalrelid = 'test_2'::regclass;
SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251);
SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251);
ERROR: The relation "test_2" does not have a valid entry in pg_dist_partition.
ROLLBACK;
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
@ -1328,7 +1328,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
-- so that making two tables colocated fails
UPDATE pg_dist_partition SET partmethod = 'a'
WHERE logicalrelid = 'test_2'::regclass;
SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251);
SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251);
ERROR: Updating colocation ids are only allowed for hash and single shard distributed tables: a
ROLLBACK;
-- colocated hash distributed table should have the same dist key columns
@ -1343,13 +1343,13 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

SET application_name to 'citus_internal gpid=10000000001';
\set VERBOSITY terse
SELECT citus_internal_add_partition_metadata ('test_5'::regclass, 'h', 'int_col', 500, 's');
citus_internal_add_partition_metadata
SELECT citus_internal.add_partition_metadata ('test_5'::regclass, 'h', 'int_col', 500, 's');
add_partition_metadata
---------------------------------------------------------------------

(1 row)

SELECT citus_internal_add_partition_metadata ('test_6'::regclass, 'h', 'text_col', 500, 's');
SELECT citus_internal.add_partition_metadata ('test_6'::regclass, 'h', 'text_col', 500, 's');
ERROR: cannot colocate tables test_6 and test_5
ROLLBACK;
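The int_col/text_col failure above reflects a general rule: tables can only be colocated when their distribution columns share a type, since hash ranges are only comparable for identically hashed keys. The same constraint surfaces at the user level through create_distributed_table's colocate_with option; a sketch reusing the test's table names (the exact error wording may differ from what is shown here):

SELECT create_distributed_table('test_5', 'int_col');
SELECT create_distributed_table('test_6', 'text_col', colocate_with => 'test_5');
-- fails: distribution column types must match before tables can share a colocation group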
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
@ -1367,13 +1367,13 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

SET application_name to 'citus_internal gpid=10000000001';
\set VERBOSITY terse
SELECT citus_internal_add_partition_metadata ('test_7'::regclass, 'h', 'text_col', 500, 's');
citus_internal_add_partition_metadata
SELECT citus_internal.add_partition_metadata ('test_7'::regclass, 'h', 'text_col', 500, 's');
add_partition_metadata
---------------------------------------------------------------------

(1 row)

SELECT citus_internal_add_partition_metadata ('test_8'::regclass, 'h', 'text_col', 500, 's');
SELECT citus_internal.add_partition_metadata ('test_8'::regclass, 'h', 'text_col', 500, 's');
ERROR: cannot colocate tables test_8 and test_7
ROLLBACK;
-- we don't need the table/schema anymore