Merge branch 'main' into run-gha-on-main

pull/7292/head
Jelte Fennema-Nio 2023-11-01 12:50:59 +01:00 committed by GitHub
commit 71da78dd9f
15 changed files with 377 additions and 106 deletions

View File

@ -24,6 +24,7 @@
#include "access/sysattr.h"
#include "access/xact.h"
#include "catalog/dependency.h"
#include "catalog/index.h"
#include "catalog/indexing.h"
#include "catalog/pg_authid.h"
#include "catalog/pg_constraint.h"
@ -88,11 +89,11 @@ static uint64 * AllocateUint64(uint64 value);
static void RecordDistributedRelationDependencies(Oid distributedRelationId);
static GroupShardPlacement * TupleToGroupShardPlacement(TupleDesc tupleDesc,
HeapTuple heapTuple);
static bool DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType,
bool failOnError, uint64 *tableSize);
static bool DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId,
SizeQueryType sizeQueryType, bool failOnError,
uint64 *tableSize);
static bool DistributedRelationSize(Oid relationId, SizeQueryType sizeQueryType,
bool failOnError, uint64 *relationSize);
static bool DistributedRelationSizeOnWorker(WorkerNode *workerNode, Oid relationId,
SizeQueryType sizeQueryType, bool failOnError,
uint64 *relationSize);
static List * ShardIntervalsOnWorkerGroup(WorkerNode *workerNode, Oid relationId);
static char * GenerateShardIdNameValuesForShardList(List *shardIntervalList,
bool firstValue);
@ -282,7 +283,7 @@ citus_shard_sizes(PG_FUNCTION_ARGS)
/*
* citus_total_relation_size accepts a table name and returns a distributed table
* citus_total_relation_size accepts a distributed table name and returns a distributed table
* and its indexes' total relation size.
*/
Datum
@ -294,20 +295,20 @@ citus_total_relation_size(PG_FUNCTION_ARGS)
bool failOnError = PG_GETARG_BOOL(1);
SizeQueryType sizeQueryType = TOTAL_RELATION_SIZE;
uint64 tableSize = 0;
uint64 relationSize = 0;
if (!DistributedTableSize(relationId, sizeQueryType, failOnError, &tableSize))
if (!DistributedRelationSize(relationId, sizeQueryType, failOnError, &relationSize))
{
Assert(!failOnError);
PG_RETURN_NULL();
}
PG_RETURN_INT64(tableSize);
PG_RETURN_INT64(relationSize);
}
/*
* citus_table_size accepts a table name and returns a distributed table's total
* citus_table_size accepts a distributed table name and returns a distributed table's total
* relation size.
*/
Datum
@ -318,21 +319,24 @@ citus_table_size(PG_FUNCTION_ARGS)
Oid relationId = PG_GETARG_OID(0);
bool failOnError = true;
SizeQueryType sizeQueryType = TABLE_SIZE;
uint64 tableSize = 0;
uint64 relationSize = 0;
if (!DistributedTableSize(relationId, sizeQueryType, failOnError, &tableSize))
/* We do not check whether the relation is really a table, matching PostgreSQL's behavior. */
if (!DistributedRelationSize(relationId, sizeQueryType, failOnError, &relationSize))
{
Assert(!failOnError);
PG_RETURN_NULL();
}
PG_RETURN_INT64(tableSize);
PG_RETURN_INT64(relationSize);
}
/*
* citus_relation_size accept a table name and returns a relation's 'main'
* citus_relation_size accepts a distributed relation name and returns a relation's 'main'
* fork's size.
*
* Input relation is allowed to be an index on a distributed table too.
*/
Datum
citus_relation_size(PG_FUNCTION_ARGS)
@ -344,7 +348,7 @@ citus_relation_size(PG_FUNCTION_ARGS)
SizeQueryType sizeQueryType = RELATION_SIZE;
uint64 relationSize = 0;
if (!DistributedTableSize(relationId, sizeQueryType, failOnError, &relationSize))
if (!DistributedRelationSize(relationId, sizeQueryType, failOnError, &relationSize))
{
Assert(!failOnError);
PG_RETURN_NULL();
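
Taken together, the three UDFs above nest for any given relation: citus_relation_size (the 'main' fork only) is at most citus_table_size (all forks), which is at most citus_total_relation_size (the table plus its indexes). A short SQL sketch of those invariants, using a hypothetical distributed table dist_events (the table and index names are illustrative; the regression tests below assert the same relationships on real test tables):

-- hypothetical distributed table plus an index, for illustration only
CREATE TABLE dist_events (id bigint, payload text);
SELECT create_distributed_table('dist_events', 'id');
CREATE INDEX dist_events_id_idx ON dist_events (id);
-- main fork <= all forks <= table plus indexes
SELECT citus_relation_size('dist_events') <= citus_table_size('dist_events');
SELECT citus_table_size('dist_events') <= citus_total_relation_size('dist_events');
-- after this change, an index name is accepted as well
SELECT citus_relation_size('dist_events_id_idx') >= 0;
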
@ -506,13 +510,16 @@ ReceiveShardIdAndSizeResults(List *connectionList, Tuplestorestate *tupleStore,
/*
* DistributedTableSize is helper function for each kind of citus size functions.
* It first checks whether the table is distributed and size query can be run on
* it. Connection to each node has to be established to get the size of the table.
* DistributedRelationSize is a helper function for each kind of citus size
* function. It first checks whether the relation is a distributed table or an
* index belonging to a distributed table, and whether a size query can be run
* on it. A connection to each node has to be established to get the size of
* the relation.
* Input relation is allowed to be an index on a distributed table too.
*/
static bool
DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, bool failOnError,
uint64 *tableSize)
DistributedRelationSize(Oid relationId, SizeQueryType sizeQueryType,
bool failOnError, uint64 *relationSize)
{
int logLevel = WARNING;
@ -538,7 +545,7 @@ DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, bool failOnErr
if (relation == NULL)
{
ereport(logLevel,
(errmsg("could not compute table size: relation does not exist")));
(errmsg("could not compute relation size: relation does not exist")));
return false;
}
@ -553,8 +560,9 @@ DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, bool failOnErr
{
uint64 relationSizeOnNode = 0;
bool gotSize = DistributedTableSizeOnWorker(workerNode, relationId, sizeQueryType,
failOnError, &relationSizeOnNode);
bool gotSize = DistributedRelationSizeOnWorker(workerNode, relationId,
sizeQueryType,
failOnError, &relationSizeOnNode);
if (!gotSize)
{
return false;
@ -563,21 +571,22 @@ DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, bool failOnErr
sumOfSizes += relationSizeOnNode;
}
*tableSize = sumOfSizes;
*relationSize = sumOfSizes;
return true;
}
/*
* DistributedTableSizeOnWorker gets the workerNode and relationId to calculate
* DistributedRelationSizeOnWorker gets the workerNode and relationId to calculate
the size of that relation on the given workerNode by summing up the size of each
* shard placement.
* Input relation is allowed to be an index on a distributed table too.
*/
static bool
DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId,
SizeQueryType sizeQueryType,
bool failOnError, uint64 *tableSize)
DistributedRelationSizeOnWorker(WorkerNode *workerNode, Oid relationId,
SizeQueryType sizeQueryType,
bool failOnError, uint64 *relationSize)
{
int logLevel = WARNING;
@ -591,6 +600,17 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId,
uint32 connectionFlag = 0;
PGresult *result = NULL;
/* if the relation is an index, update relationId and define indexId */
Oid indexId = InvalidOid;
Oid relKind = get_rel_relkind(relationId);
if (relKind == RELKIND_INDEX || relKind == RELKIND_PARTITIONED_INDEX)
{
indexId = relationId;
bool missingOk = false;
relationId = IndexGetRelation(indexId, missingOk);
}
List *shardIntervalsOnNode = ShardIntervalsOnWorkerGroup(workerNode, relationId);
/*
@ -598,21 +618,22 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId,
* But citus size functions shouldn't include them, like PG.
*/
bool optimizePartitionCalculations = false;
StringInfo tableSizeQuery = GenerateSizeQueryOnMultiplePlacements(
StringInfo relationSizeQuery = GenerateSizeQueryOnMultiplePlacements(
shardIntervalsOnNode,
indexId,
sizeQueryType,
optimizePartitionCalculations);
MultiConnection *connection = GetNodeConnection(connectionFlag, workerNodeName,
workerNodePort);
int queryResult = ExecuteOptionalRemoteCommand(connection, tableSizeQuery->data,
int queryResult = ExecuteOptionalRemoteCommand(connection, relationSizeQuery->data,
&result);
if (queryResult != 0)
{
ereport(logLevel, (errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("could not connect to %s:%d to get size of "
"table \"%s\"",
"relation \"%s\"",
workerNodeName, workerNodePort,
get_rel_name(relationId))));
@ -626,19 +647,19 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId,
ClearResults(connection, failOnError);
ereport(logLevel, (errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("cannot parse size of table \"%s\" from %s:%d",
errmsg("cannot parse size of relation \"%s\" from %s:%d",
get_rel_name(relationId), workerNodeName,
workerNodePort)));
return false;
}
StringInfo tableSizeStringInfo = (StringInfo) linitial(sizeList);
char *tableSizeString = tableSizeStringInfo->data;
StringInfo relationSizeStringInfo = (StringInfo) linitial(sizeList);
char *relationSizeString = relationSizeStringInfo->data;
if (strlen(tableSizeString) > 0)
if (strlen(relationSizeString) > 0)
{
*tableSize = SafeStringToUint64(tableSizeString);
*relationSize = SafeStringToUint64(relationSizeString);
}
else
{
@ -647,7 +668,7 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId,
* being executed. In this case we get an empty string as the table size.
* We can take that as zero to prevent any unnecessary errors.
*/
*tableSize = 0;
*relationSize = 0;
}
PQclear(result);
@ -732,7 +753,7 @@ ShardIntervalsOnWorkerGroup(WorkerNode *workerNode, Oid relationId)
/*
* GenerateSizeQueryOnMultiplePlacements generates a select size query to get
* size of multiple tables. Note that, different size functions supported by PG
* size of multiple relations. Note that different size functions supported by PG
* are also supported by this function by changing the size query type given as the
* last parameter to the function. Depending on the sizeQueryType enum parameter, the
* generated query will call one of the functions: pg_relation_size,
@ -740,9 +761,13 @@ ShardIntervalsOnWorkerGroup(WorkerNode *workerNode, Oid relationId)
* This function uses UDFs named worker_partitioned_*_size for partitioned tables,
* if the parameter optimizePartitionCalculations is true. The UDF to be called is
* determined by the parameter sizeQueryType.
*
* indexId is provided if we're interested in the size of an index, not the whole
* table.
*/
StringInfo
GenerateSizeQueryOnMultiplePlacements(List *shardIntervalList,
Oid indexId,
SizeQueryType sizeQueryType,
bool optimizePartitionCalculations)
{
@ -766,16 +791,20 @@ GenerateSizeQueryOnMultiplePlacements(List *shardIntervalList,
*/
continue;
}
/* we need to build the shard relation name, whether it is an index or a table */
Oid objectId = OidIsValid(indexId) ? indexId : shardInterval->relationId;
uint64 shardId = shardInterval->shardId;
Oid schemaId = get_rel_namespace(shardInterval->relationId);
Oid schemaId = get_rel_namespace(objectId);
char *schemaName = get_namespace_name(schemaId);
char *shardName = get_rel_name(shardInterval->relationId);
char *shardName = get_rel_name(objectId);
AppendShardIdToName(&shardName, shardId);
char *shardQualifiedName = quote_qualified_identifier(schemaName, shardName);
char *quotedShardName = quote_literal_cstr(shardQualifiedName);
/* for partitoned tables, we will call worker_partitioned_... size functions */
/* for partitioned tables, we will call worker_partitioned_... size functions */
if (optimizePartitionCalculations && PartitionedTable(shardInterval->relationId))
{
partitionedShardNames = lappend(partitionedShardNames, quotedShardName);
@ -1010,7 +1039,7 @@ AppendShardIdNameValues(StringInfo selectQuery, ShardInterval *shardInterval)
/*
* ErrorIfNotSuitableToGetSize determines whether the table is suitable to find
* ErrorIfNotSuitableToGetSize determines whether the relation is suitable to find
* its size with internal functions.
*/
static void
@ -1018,11 +1047,32 @@ ErrorIfNotSuitableToGetSize(Oid relationId)
{
if (!IsCitusTable(relationId))
{
char *relationName = get_rel_name(relationId);
char *escapedQueryString = quote_literal_cstr(relationName);
ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
errmsg("cannot calculate the size because relation %s is not "
"distributed", escapedQueryString)));
Oid relKind = get_rel_relkind(relationId);
if (relKind != RELKIND_INDEX && relKind != RELKIND_PARTITIONED_INDEX)
{
char *relationName = get_rel_name(relationId);
char *escapedRelationName = quote_literal_cstr(relationName);
ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
errmsg(
"cannot calculate the size because relation %s "
"is not distributed",
escapedRelationName)));
}
bool missingOk = false;
Oid indexId = relationId;
relationId = IndexGetRelation(relationId, missingOk);
if (!IsCitusTable(relationId))
{
char *tableName = get_rel_name(relationId);
char *escapedTableName = quote_literal_cstr(tableName);
char *indexName = get_rel_name(indexId);
char *escapedIndexName = quote_literal_cstr(indexName);
ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg(
"cannot calculate the size because table %s for "
"index %s is not distributed",
escapedTableName, escapedIndexName)));
}
}
}
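
For context, the per-worker query built by GenerateSizeQueryOnMultiplePlacements sums a pg_*_size() call over the shard names placed on that node (or calls the worker_partitioned_*_size UDFs when optimizePartitionCalculations is true). A hedged sketch of its rough shape for TOTAL_RELATION_SIZE over two shards of a hypothetical table dist_events; the shard ids, quoting, and exact SELECT wrapper are illustrative, not the verbatim generated text:

-- illustrative only: approximate shape of the query sent to one worker
SELECT pg_total_relation_size('public.dist_events_102008')
     + pg_total_relation_size('public.dist_events_102009');
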

View File

@ -792,7 +792,12 @@ ShardListSizeInBytes(List *shardList, char *workerNodeName, uint32
/* we skip child tables of a partitioned table if this boolean variable is true */
bool optimizePartitionCalculations = true;
/* we're interested in the whole table, not a particular index */
Oid indexId = InvalidOid;
StringInfo tableSizeQuery = GenerateSizeQueryOnMultiplePlacements(shardList,
indexId,
TOTAL_RELATION_SIZE,
optimizePartitionCalculations);

View File

@ -90,6 +90,28 @@ activate_node_snapshot(PG_FUNCTION_ARGS)
}
/*
* IsMetadataSynced checks the workers to see if all workers with metadata are
* synced.
*/
static bool
IsMetadataSynced(void)
{
List *workerList = ActivePrimaryNonCoordinatorNodeList(NoLock);
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerList)
{
if (workerNode->hasMetadata && !workerNode->metadataSynced)
{
return false;
}
}
return true;
}
/*
* wait_until_metadata_sync waits until the maintenance daemon does a metadata
* sync, or times out.
@ -99,19 +121,10 @@ wait_until_metadata_sync(PG_FUNCTION_ARGS)
{
uint32 timeout = PG_GETARG_UINT32(0);
List *workerList = ActivePrimaryNonCoordinatorNodeList(NoLock);
bool waitNotifications = false;
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerList)
{
/* if already has metadata, no need to do it again */
if (workerNode->hasMetadata && !workerNode->metadataSynced)
{
waitNotifications = true;
break;
}
}
/* First we start listening. */
MultiConnection *connection = GetNodeConnection(FORCE_NEW_CONNECTION,
LOCAL_HOST_NAME, PostPortNumber);
ExecuteCriticalRemoteCommand(connection, "LISTEN " METADATA_SYNC_CHANNEL);
/*
* If all the metadata nodes have already been synced, we should not wait.
@ -119,15 +132,12 @@ wait_until_metadata_sync(PG_FUNCTION_ARGS)
* the notification and we'd wait unnecessarily here. Worse, the test outputs
* might be inconsistent across executions due to the warning.
*/
if (!waitNotifications)
if (IsMetadataSynced())
{
CloseConnection(connection);
PG_RETURN_VOID();
}
MultiConnection *connection = GetNodeConnection(FORCE_NEW_CONNECTION,
LOCAL_HOST_NAME, PostPortNumber);
ExecuteCriticalRemoteCommand(connection, "LISTEN " METADATA_SYNC_CHANNEL);
int waitFlags = WL_SOCKET_READABLE | WL_TIMEOUT | WL_POSTMASTER_DEATH;
int waitResult = WaitLatchOrSocket(NULL, waitFlags, PQsocket(connection->pgConn),
timeout, 0);
@ -139,7 +149,7 @@ wait_until_metadata_sync(PG_FUNCTION_ARGS)
{
ClearResults(connection, true);
}
else if (waitResult & WL_TIMEOUT)
else if (waitResult & WL_TIMEOUT && !IsMetadataSynced())
{
elog(WARNING, "waiting for metadata sync timed out");
}
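
The helper above relies on PostgreSQL's LISTEN/NOTIFY: it subscribes to METADATA_SYNC_CHANNEL first, then re-checks IsMetadataSynced() so a notification sent before the LISTEN cannot be missed, and only then blocks on the connection's socket until the notification or the timeout arrives. A minimal sketch of that handshake in plain SQL, assuming the channel macro expands to 'metadata_sync' (the actual channel name is defined elsewhere in the source):

-- session 1: subscribe before checking state, so no notification is lost
LISTEN metadata_sync;
-- session 2 (here, the maintenance daemon): signal that a sync completed
NOTIFY metadata_sync;
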

View File

@ -342,6 +342,7 @@ extern void LookupTaskPlacementHostAndPort(ShardPlacement *taskPlacement, char *
int *nodePort);
extern bool IsDummyPlacement(ShardPlacement *taskPlacement);
extern StringInfo GenerateSizeQueryOnMultiplePlacements(List *shardIntervalList,
Oid indexId,
SizeQueryType sizeQueryType,
bool optimizePartitionCalculations);
extern List * RemoveCoordinatorPlacementIfNotSingleNode(List *placementList);

View File

@ -175,6 +175,7 @@ DEPS = {
),
"grant_on_schema_propagation": TestDeps("minimal_schedule"),
"propagate_extension_commands": TestDeps("minimal_schedule"),
"multi_size_queries": TestDeps("base_schedule", ["multi_copy"]),
}

View File

@ -226,7 +226,7 @@ step s1-drop: DROP TABLE drop_hash;
step s2-table-size: SELECT citus_total_relation_size('drop_hash'); <waiting ...>
step s1-commit: COMMIT;
step s2-table-size: <... completed>
ERROR: could not compute table size: relation does not exist
ERROR: could not compute relation size: relation does not exist
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist

View File

@ -90,7 +90,7 @@ SELECT citus_disable_node('localhost', :worker_2_port);
(1 row)
SELECT public.wait_until_metadata_sync(60000);
SELECT public.wait_until_metadata_sync(20000);
wait_until_metadata_sync
---------------------------------------------------------------------
@ -812,7 +812,7 @@ SELECT citus_disable_node('localhost', 9999);
(1 row)
SELECT public.wait_until_metadata_sync(60000);
SELECT public.wait_until_metadata_sync(20000);
wait_until_metadata_sync
---------------------------------------------------------------------
@ -1258,3 +1258,9 @@ SELECT bool_and(hasmetadata) AND bool_and(metadatasynced) FROM pg_dist_node WHER
t
(1 row)
-- Grant all on public schema to public
--
-- That's the default on Postgres versions < 15 and we want to
-- keep permissions compatible across versions, in regression
-- tests.
GRANT ALL ON SCHEMA public TO PUBLIC;

View File

@ -7,19 +7,25 @@
SET citus.next_shard_id TO 1390000;
-- Tests with invalid relation IDs
SELECT citus_table_size(1);
ERROR: could not compute table size: relation does not exist
ERROR: could not compute relation size: relation does not exist
SELECT citus_relation_size(1);
ERROR: could not compute table size: relation does not exist
ERROR: could not compute relation size: relation does not exist
SELECT citus_total_relation_size(1);
ERROR: could not compute table size: relation does not exist
ERROR: could not compute relation size: relation does not exist
-- Tests with non-distributed table
CREATE TABLE non_distributed_table (x int);
CREATE TABLE non_distributed_table (x int primary key);
SELECT citus_table_size('non_distributed_table');
ERROR: cannot calculate the size because relation 'non_distributed_table' is not distributed
SELECT citus_relation_size('non_distributed_table');
ERROR: cannot calculate the size because relation 'non_distributed_table' is not distributed
SELECT citus_total_relation_size('non_distributed_table');
ERROR: cannot calculate the size because relation 'non_distributed_table' is not distributed
SELECT citus_table_size('non_distributed_table_pkey');
ERROR: cannot calculate the size because table 'non_distributed_table' for index 'non_distributed_table_pkey' is not distributed
SELECT citus_relation_size('non_distributed_table_pkey');
ERROR: cannot calculate the size because table 'non_distributed_table' for index 'non_distributed_table_pkey' is not distributed
SELECT citus_total_relation_size('non_distributed_table_pkey');
ERROR: cannot calculate the size because table 'non_distributed_table' for index 'non_distributed_table_pkey' is not distributed
DROP TABLE non_distributed_table;
-- fix broken placements via disabling the node
SET client_min_messages TO ERROR;
@ -31,24 +37,70 @@ SELECT replicate_table_shards('lineitem_hash_part', shard_replication_factor:=2,
-- Tests on distributed table with replication factor > 1
VACUUM (FULL) lineitem_hash_part;
SELECT citus_table_size('lineitem_hash_part');
citus_table_size
SELECT citus_relation_size('lineitem_hash_part') <= citus_table_size('lineitem_hash_part');
?column?
---------------------------------------------------------------------
3801088
t
(1 row)
SELECT citus_relation_size('lineitem_hash_part');
citus_relation_size
SELECT citus_table_size('lineitem_hash_part') <= citus_total_relation_size('lineitem_hash_part');
?column?
---------------------------------------------------------------------
3801088
t
(1 row)
SELECT citus_total_relation_size('lineitem_hash_part');
citus_total_relation_size
SELECT citus_relation_size('lineitem_hash_part') > 0;
?column?
---------------------------------------------------------------------
3801088
t
(1 row)
CREATE INDEX lineitem_hash_part_idx ON lineitem_hash_part(l_orderkey);
VACUUM (FULL) lineitem_hash_part;
SELECT citus_relation_size('lineitem_hash_part') <= citus_table_size('lineitem_hash_part');
?column?
---------------------------------------------------------------------
t
(1 row)
SELECT citus_table_size('lineitem_hash_part') <= citus_total_relation_size('lineitem_hash_part');
?column?
---------------------------------------------------------------------
t
(1 row)
SELECT citus_relation_size('lineitem_hash_part') > 0;
?column?
---------------------------------------------------------------------
t
(1 row)
SELECT citus_relation_size('lineitem_hash_part_idx') <= citus_table_size('lineitem_hash_part_idx');
?column?
---------------------------------------------------------------------
t
(1 row)
SELECT citus_table_size('lineitem_hash_part_idx') <= citus_total_relation_size('lineitem_hash_part_idx');
?column?
---------------------------------------------------------------------
t
(1 row)
SELECT citus_relation_size('lineitem_hash_part_idx') > 0;
?column?
---------------------------------------------------------------------
t
(1 row)
SELECT citus_total_relation_size('lineitem_hash_part') >=
citus_table_size('lineitem_hash_part') + citus_table_size('lineitem_hash_part_idx');
?column?
---------------------------------------------------------------------
t
(1 row)
DROP INDEX lineitem_hash_part_idx;
VACUUM (FULL) customer_copy_hash;
-- Tests on distributed tables with streaming replication.
SELECT citus_table_size('customer_copy_hash');
@ -72,10 +124,10 @@ SELECT citus_total_relation_size('customer_copy_hash');
-- Make sure we can get multiple sizes in a single query
SELECT citus_table_size('customer_copy_hash'),
citus_table_size('customer_copy_hash'),
citus_table_size('supplier');
citus_table_size('customer_copy_hash');
citus_table_size | citus_table_size | citus_table_size
---------------------------------------------------------------------
548864 | 548864 | 655360
548864 | 548864 | 548864
(1 row)
CREATE INDEX index_1 on customer_copy_hash(c_custkey);
@ -99,6 +151,24 @@ SELECT citus_total_relation_size('customer_copy_hash');
2646016
(1 row)
SELECT citus_table_size('index_1');
citus_table_size
---------------------------------------------------------------------
1048576
(1 row)
SELECT citus_relation_size('index_1');
citus_relation_size
---------------------------------------------------------------------
1048576
(1 row)
SELECT citus_total_relation_size('index_1');
citus_total_relation_size
---------------------------------------------------------------------
1048576
(1 row)
-- Tests on reference table
VACUUM (FULL) supplier;
SELECT citus_table_size('supplier');
@ -139,6 +209,74 @@ SELECT citus_total_relation_size('supplier');
688128
(1 row)
SELECT citus_table_size('index_2');
citus_table_size
---------------------------------------------------------------------
122880
(1 row)
SELECT citus_relation_size('index_2');
citus_relation_size
---------------------------------------------------------------------
122880
(1 row)
SELECT citus_total_relation_size('index_2');
citus_total_relation_size
---------------------------------------------------------------------
122880
(1 row)
-- Test on partitioned table
CREATE TABLE split_me (dist_col int, partition_col timestamp) PARTITION BY RANGE (partition_col);
CREATE INDEX ON split_me(dist_col);
-- create 2 partitions
CREATE TABLE m PARTITION OF split_me FOR VALUES FROM ('2018-01-01') TO ('2019-01-01');
CREATE TABLE e PARTITION OF split_me FOR VALUES FROM ('2019-01-01') TO ('2020-01-01');
INSERT INTO split_me SELECT 1, '2018-01-01'::timestamp + i * interval '1 day' FROM generate_series(1, 360) i;
INSERT INTO split_me SELECT 2, '2019-01-01'::timestamp + i * interval '1 day' FROM generate_series(1, 180) i;
-- before citus
SELECT citus_relation_size('split_me');
ERROR: cannot calculate the size because relation 'split_me' is not distributed
SELECT citus_relation_size('split_me_dist_col_idx');
ERROR: cannot calculate the size because table 'split_me' for index 'split_me_dist_col_idx' is not distributed
SELECT citus_relation_size('m');
ERROR: cannot calculate the size because relation 'm' is not distributed
SELECT citus_relation_size('m_dist_col_idx');
ERROR: cannot calculate the size because table 'm' for index 'm_dist_col_idx' is not distributed
-- distribute the table(s)
SELECT create_distributed_table('split_me', 'dist_col');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- after citus
SELECT citus_relation_size('split_me');
citus_relation_size
---------------------------------------------------------------------
0
(1 row)
SELECT citus_relation_size('split_me_dist_col_idx');
citus_relation_size
---------------------------------------------------------------------
0
(1 row)
SELECT citus_relation_size('m');
citus_relation_size
---------------------------------------------------------------------
32768
(1 row)
SELECT citus_relation_size('m_dist_col_idx');
citus_relation_size
---------------------------------------------------------------------
81920
(1 row)
DROP TABLE split_me;
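
The zero results for split_me and its index above match stock PostgreSQL behavior: a partitioned parent owns no storage of its own, so only the leaf partitions (here m and e, and their shards) report nonzero sizes. A hedged local comparison on plain PostgreSQL:

-- on vanilla PostgreSQL the partitioned parent has no relation forks
SELECT pg_relation_size('split_me');  -- 0
SELECT pg_relation_size('m');         -- > 0 once rows are inserted
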
-- Test inside the transaction
BEGIN;
ALTER TABLE supplier ALTER COLUMN s_suppkey SET NOT NULL;

View File

@ -133,12 +133,6 @@ ORDER BY 1, 2;
validatable_constraint_8000016 | t
(10 rows)
DROP TABLE constrained_table;
DROP TABLE referenced_table CASCADE;
DROP TABLE referencing_table;
SET client_min_messages TO WARNING;
DROP SCHEMA validate_constraint CASCADE;
NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to type constraint_validity
drop cascades to view constraint_validations_in_workers
drop cascades to view constraint_validations
SET search_path TO DEFAULT;

View File

@ -201,7 +201,8 @@ test: citus_copy_shard_placement
# multi_utilities cannot be run in parallel with other tests because it checks
# global locks
test: multi_utilities
test: foreign_key_to_reference_table validate_constraint
test: foreign_key_to_reference_table
test: validate_constraint
test: multi_repartition_udt multi_repartitioned_subquery_udf multi_subtransactions
test: multi_modifying_xacts
@ -297,7 +298,8 @@ test: replicate_reference_tables_to_coordinator
test: citus_local_tables
test: mixed_relkind_tests
test: multi_row_router_insert create_distributed_table_concurrently
test: multi_reference_table citus_local_tables_queries
test: multi_reference_table
test: citus_local_tables_queries
test: citus_local_table_triggers
test: coordinator_shouldhaveshards
test: local_shard_utility_command_execution

View File

@ -154,7 +154,8 @@ test: multi_outer_join
# ---
test: multi_complex_count_distinct
test: multi_upsert multi_simple_queries
test: foreign_key_to_reference_table validate_constraint
test: foreign_key_to_reference_table
test: validate_constraint
# ---------
# creates hash and range-partitioned tables and performs COPY

View File

@ -150,7 +150,9 @@ test: multi_outer_join
test: multi_create_fdw
test: multi_generate_ddl_commands multi_create_shards multi_prune_shard_list
test: multi_upsert multi_simple_queries multi_data_types
test: multi_utilities foreign_key_to_reference_table validate_constraint
test: multi_utilities
test: foreign_key_to_reference_table
test: validate_constraint
test: multi_repartition_udt multi_repartitioned_subquery_udf
# ---------

View File

@ -39,7 +39,7 @@ SELECT master_get_active_worker_nodes();
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT citus_disable_node('localhost', :worker_2_port);
SELECT public.wait_until_metadata_sync(60000);
SELECT public.wait_until_metadata_sync(20000);
SELECT master_get_active_worker_nodes();
-- add some shard placements to the cluster
@ -328,7 +328,7 @@ SELECT 1 FROM master_add_inactive_node('localhost', 9996, groupid => :worker_2_g
SELECT master_add_inactive_node('localhost', 9999, groupid => :worker_2_group, nodecluster => 'olap', noderole => 'secondary');
SELECT master_activate_node('localhost', 9999);
SELECT citus_disable_node('localhost', 9999);
SELECT public.wait_until_metadata_sync(60000);
SELECT public.wait_until_metadata_sync(20000);
SELECT master_remove_node('localhost', 9999);
-- check that you can't manually add two primaries to a group
@ -530,3 +530,10 @@ RESET citus.metadata_sync_mode;
-- verify that at the end of this file, all primary nodes have metadata synced
SELECT bool_and(hasmetadata) AND bool_and(metadatasynced) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary';
-- Grant all on public schema to public
--
-- That's the default on Postgres versions < 15 and we want to
-- keep permissions compatible across versions, in regression
-- tests.
GRANT ALL ON SCHEMA public TO PUBLIC;

View File

@ -13,10 +13,15 @@ SELECT citus_relation_size(1);
SELECT citus_total_relation_size(1);
-- Tests with non-distributed table
CREATE TABLE non_distributed_table (x int);
CREATE TABLE non_distributed_table (x int primary key);
SELECT citus_table_size('non_distributed_table');
SELECT citus_relation_size('non_distributed_table');
SELECT citus_total_relation_size('non_distributed_table');
SELECT citus_table_size('non_distributed_table_pkey');
SELECT citus_relation_size('non_distributed_table_pkey');
SELECT citus_total_relation_size('non_distributed_table_pkey');
DROP TABLE non_distributed_table;
-- fix broken placements via disabling the node
@ -26,9 +31,25 @@ SELECT replicate_table_shards('lineitem_hash_part', shard_replication_factor:=2,
-- Tests on distributed table with replication factor > 1
VACUUM (FULL) lineitem_hash_part;
SELECT citus_table_size('lineitem_hash_part');
SELECT citus_relation_size('lineitem_hash_part');
SELECT citus_total_relation_size('lineitem_hash_part');
SELECT citus_relation_size('lineitem_hash_part') <= citus_table_size('lineitem_hash_part');
SELECT citus_table_size('lineitem_hash_part') <= citus_total_relation_size('lineitem_hash_part');
SELECT citus_relation_size('lineitem_hash_part') > 0;
CREATE INDEX lineitem_hash_part_idx ON lineitem_hash_part(l_orderkey);
VACUUM (FULL) lineitem_hash_part;
SELECT citus_relation_size('lineitem_hash_part') <= citus_table_size('lineitem_hash_part');
SELECT citus_table_size('lineitem_hash_part') <= citus_total_relation_size('lineitem_hash_part');
SELECT citus_relation_size('lineitem_hash_part') > 0;
SELECT citus_relation_size('lineitem_hash_part_idx') <= citus_table_size('lineitem_hash_part_idx');
SELECT citus_table_size('lineitem_hash_part_idx') <= citus_total_relation_size('lineitem_hash_part_idx');
SELECT citus_relation_size('lineitem_hash_part_idx') > 0;
SELECT citus_total_relation_size('lineitem_hash_part') >=
citus_table_size('lineitem_hash_part') + citus_table_size('lineitem_hash_part_idx');
DROP INDEX lineitem_hash_part_idx;
VACUUM (FULL) customer_copy_hash;
@ -40,7 +61,7 @@ SELECT citus_total_relation_size('customer_copy_hash');
-- Make sure we can get multiple sizes in a single query
SELECT citus_table_size('customer_copy_hash'),
citus_table_size('customer_copy_hash'),
citus_table_size('supplier');
citus_table_size('customer_copy_hash');
CREATE INDEX index_1 on customer_copy_hash(c_custkey);
VACUUM (FULL) customer_copy_hash;
@ -50,6 +71,10 @@ SELECT citus_table_size('customer_copy_hash');
SELECT citus_relation_size('customer_copy_hash');
SELECT citus_total_relation_size('customer_copy_hash');
SELECT citus_table_size('index_1');
SELECT citus_relation_size('index_1');
SELECT citus_total_relation_size('index_1');
-- Tests on reference table
VACUUM (FULL) supplier;
@ -64,6 +89,38 @@ SELECT citus_table_size('supplier');
SELECT citus_relation_size('supplier');
SELECT citus_total_relation_size('supplier');
SELECT citus_table_size('index_2');
SELECT citus_relation_size('index_2');
SELECT citus_total_relation_size('index_2');
-- Test on partitioned table
CREATE TABLE split_me (dist_col int, partition_col timestamp) PARTITION BY RANGE (partition_col);
CREATE INDEX ON split_me(dist_col);
-- create 2 partitions
CREATE TABLE m PARTITION OF split_me FOR VALUES FROM ('2018-01-01') TO ('2019-01-01');
CREATE TABLE e PARTITION OF split_me FOR VALUES FROM ('2019-01-01') TO ('2020-01-01');
INSERT INTO split_me SELECT 1, '2018-01-01'::timestamp + i * interval '1 day' FROM generate_series(1, 360) i;
INSERT INTO split_me SELECT 2, '2019-01-01'::timestamp + i * interval '1 day' FROM generate_series(1, 180) i;
-- before citus
SELECT citus_relation_size('split_me');
SELECT citus_relation_size('split_me_dist_col_idx');
SELECT citus_relation_size('m');
SELECT citus_relation_size('m_dist_col_idx');
-- distribute the table(s)
SELECT create_distributed_table('split_me', 'dist_col');
-- after citus
SELECT citus_relation_size('split_me');
SELECT citus_relation_size('split_me_dist_col_idx');
SELECT citus_relation_size('m');
SELECT citus_relation_size('m_dist_col_idx');
DROP TABLE split_me;
-- Test inside the transaction
BEGIN;
ALTER TABLE supplier ALTER COLUMN s_suppkey SET NOT NULL;

View File

@ -116,9 +116,6 @@ SELECT *
FROM constraint_validations_in_workers
ORDER BY 1, 2;
DROP TABLE constrained_table;
DROP TABLE referenced_table CASCADE;
DROP TABLE referencing_table;
SET client_min_messages TO WARNING;
DROP SCHEMA validate_constraint CASCADE;
SET search_path TO DEFAULT;