Merge branch 'main' into create_alter_database

pull/7240/head
Gürkan İndibay 2023-11-01 13:30:09 +03:00 committed by GitHub
commit 595d078f95
19 changed files with 365 additions and 1326 deletions

(File diff suppressed because it is too large.)


@@ -27,9 +27,9 @@ jobs:
       style_checker_image_name: "citus/stylechecker"
       style_checker_tools_version: "0.8.18"
       image_suffix: "-v9d71045"
-      pg14_version: "14.9"
-      pg15_version: "15.4"
-      pg16_version: "16.0"
+      pg14_version: '{ "major": "14", "full": "14.9" }'
+      pg15_version: '{ "major": "15", "full": "15.4" }'
+      pg16_version: '{ "major": "16", "full": "16.0" }'
       upgrade_pg_versions: "14.9-15.4-16.0"
     steps:
     # Since GHA jobs needs at least one step we use a noop step here.
@@ -93,7 +93,7 @@ jobs:
         run: ci/check_migration_files.sh
   build:
     needs: params
-    name: Build for PG ${{ matrix.pg_version}}
+    name: Build for PG${{ fromJson(matrix.pg_version).major }}
     strategy:
       fail-fast: false
       matrix:
@@ -107,7 +107,7 @@ jobs:
         - ${{ needs.params.outputs.pg16_version }}
     runs-on: ubuntu-20.04
     container:
-      image: "${{ matrix.image_name }}:${{ matrix.pg_version }}${{ matrix.image_suffix }}"
+      image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ matrix.image_suffix }}"
       options: --user root
     steps:
     - uses: actions/checkout@v3.5.0
@@ -124,7 +124,7 @@ jobs:
           ./build-${{ env.PG_MAJOR }}/*
           ./install-${{ env.PG_MAJOR }}.tar
   test-citus:
-    name: PG${{ matrix.pg_version }} - ${{ matrix.make }}
+    name: PG${{ fromJson(matrix.pg_version).major }} - ${{ matrix.make }}
     strategy:
       fail-fast: false
       matrix:
@@ -211,7 +211,7 @@ jobs:
             image_name: ${{ needs.params.outputs.fail_test_image_name }}
     runs-on: ubuntu-20.04
     container:
-      image: "${{ matrix.image_name }}:${{ matrix.pg_version }}${{ needs.params.outputs.image_suffix }}"
+      image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}"
       options: --user root --dns=8.8.8.8
     # Due to Github creates a default network for each job, we need to use
     # --dns= to have similar DNS settings as our other CI systems or local
@@ -228,17 +228,17 @@ jobs:
     - uses: "./.github/actions/save_logs_and_results"
       if: always()
       with:
-        folder: ${{ matrix.pg_version }}_${{ matrix.make }}
+        folder: ${{ fromJson(matrix.pg_version).major }}_${{ matrix.make }}
     - uses: "./.github/actions/upload_coverage"
       if: always()
       with:
         flags: ${{ env.PG_MAJOR }}_${{ matrix.suite }}_${{ matrix.make }}
         codecov_token: ${{ secrets.CODECOV_TOKEN }}
   test-arbitrary-configs:
-    name: PG${{ matrix.pg_version }} - check-arbitrary-configs-${{ matrix.parallel }}
+    name: PG${{ fromJson(matrix.pg_version).major }} - check-arbitrary-configs-${{ matrix.parallel }}
     runs-on: ["self-hosted", "1ES.Pool=1es-gha-citusdata-pool"]
     container:
-      image: "${{ matrix.image_name }}:${{ matrix.pg_version }}${{ needs.params.outputs.image_suffix }}"
+      image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}"
       options: --user root
     needs:
     - params
@@ -333,10 +333,10 @@ jobs:
         flags: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade
         codecov_token: ${{ secrets.CODECOV_TOKEN }}
   test-citus-upgrade:
-    name: PG${{ needs.params.outputs.pg14_version }} - check-citus-upgrade
+    name: PG${{ fromJson(needs.params.outputs.pg14_version).major }} - check-citus-upgrade
     runs-on: ubuntu-20.04
     container:
-      image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ needs.params.outputs.pg14_version }}${{ needs.params.outputs.image_suffix }}"
+      image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ fromJson(needs.params.outputs.pg14_version).full }}${{ needs.params.outputs.image_suffix }}"
       options: --user root
     needs:
     - params
@@ -383,7 +383,7 @@ jobs:
       CC_TEST_REPORTER_ID: ${{ secrets.CC_TEST_REPORTER_ID }}
     runs-on: ubuntu-20.04
     container:
-      image: ${{ needs.params.outputs.test_image_name }}:${{ needs.params.outputs.pg16_version }}${{ needs.params.outputs.image_suffix }}
+      image: ${{ needs.params.outputs.test_image_name }}:${{ fromJson(needs.params.outputs.pg16_version).full }}${{ needs.params.outputs.image_suffix }}
     needs:
     - params
     - test-citus
@@ -478,7 +478,7 @@ jobs:
     name: Test flakyness
     runs-on: ubuntu-20.04
     container:
-      image: ${{ needs.params.outputs.fail_test_image_name }}:${{ needs.params.outputs.pg16_version }}${{ needs.params.outputs.image_suffix }}
+      image: ${{ needs.params.outputs.fail_test_image_name }}:${{ fromJson(needs.params.outputs.pg16_version).full }}${{ needs.params.outputs.image_suffix }}
       options: --user root
     env:
       runs: 8
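
The hunks above switch each pg*_version output from a plain string to a small JSON document, so a single output can carry both the major version (for job names) and the full version (for container image tags). As a hedged illustration, with plain Python's json.loads standing in for the workflow's fromJson() expression:

import json

# One of the new outputs from the params job, verbatim from the hunk above.
pg14_version = '{ "major": "14", "full": "14.9" }'

# GitHub Actions' fromJson() conceptually does what json.loads does here.
version = json.loads(pg14_version)
assert version["major"] == "14"   # feeds names like "Build for PG14"
assert version["full"] == "14.9"  # feeds tags like "<image_name>:14.9<image_suffix>"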


@@ -24,9 +24,11 @@ jobs:
       - name: Get Postgres Versions
         id: get-postgres-versions
         run: |
-          # Postgres versions are stored in .github/workflows/build_and_test.yml file in "pg[pg-version]_version"
-          # format. Below command extracts the versions and get the unique values.
-          pg_versions=$(cat .github/workflows/build_and_test.yml | grep -oE 'pg[0-9]+_version: "[0-9.]+"' | sed -E 's/pg([0-9]+)_version: "([0-9.]+)"/\1/g' | sort | uniq | tr '\n', ',')
+          set -euxo pipefail
+          # Postgres versions are stored in .github/workflows/build_and_test.yml
+          # file in json strings with major and full keys.
+          # Below command extracts the versions and get the unique values.
+          pg_versions=$(cat .github/workflows/build_and_test.yml | grep -oE '"major": "[0-9]+", "full": "[0-9.]+"' | sed -E 's/"major": "([0-9]+)", "full": "([0-9.]+)"/\1/g' | sort | uniq | tr '\n', ',')
           pg_versions_array="[ ${pg_versions} ]"
           echo "Supported PG Versions: ${pg_versions_array}"
           # Below line is needed to set the output variable to be used in the next job
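
A rough, runnable Python equivalent of the grep | sed | sort | uniq | tr pipeline above (the sample text is illustrative; the real script reads .github/workflows/build_and_test.yml):

import re

workflow_text = '''
pg14_version: '{ "major": "14", "full": "14.9" }'
pg15_version: '{ "major": "15", "full": "15.4" }'
pg16_version: '{ "major": "16", "full": "16.0" }'
'''

# Same regex as the grep -oE / sed -E pair: capture the major version of
# every JSON-style pg*_version value, then de-duplicate and sort.
majors = sorted(set(re.findall(r'"major": "([0-9]+)", "full": "[0-9.]+"', workflow_text)))
pg_versions_array = "[ " + ", ".join(majors) + " ]"
print(pg_versions_array)  # -> [ 14, 15, 16 ]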


@@ -14,8 +14,8 @@ ci_scripts=$(
     grep -v -E '^(ci_helpers.sh|fix_style.sh)$'
 )
 for script in $ci_scripts; do
-    if ! grep "\\bci/$script\\b" .circleci/config.yml > /dev/null; then
-        echo "ERROR: CI script with name \"$script\" is not actually used in .circleci/config.yml"
+    if ! grep "\\bci/$script\\b" -r .github > /dev/null; then
+        echo "ERROR: CI script with name \"$script\" is not actually used in .github folder"
         exit 1
     fi
     if ! grep "^## \`$script\`\$" ci/README.md > /dev/null; then


@@ -1,96 +0,0 @@ (entire file deleted)
#!/bin/bash
# Testing this script locally requires you to set the following environment
# variables:
# CIRCLE_BRANCH, GIT_USERNAME and GIT_TOKEN
# fail if trying to reference a variable that is not set.
set -u
# exit immediately if a command fails
set -e
# Fail on pipe failures
set -o pipefail
PR_BRANCH="${CIRCLE_BRANCH}"
ENTERPRISE_REMOTE="https://${GIT_USERNAME}:${GIT_TOKEN}@github.com/citusdata/citus-enterprise"
# shellcheck disable=SC1091
source ci/ci_helpers.sh
# List executed commands. This is done so debugging this script is easier when
# it fails. It's explicitly done after git remote add so username and password
# are not shown in CI output (even though it's also filtered out by CircleCI)
set -x
check_compile () {
echo "INFO: checking if merged code can be compiled"
./configure --without-libcurl
make -j10
}
# Clone current git repo (which should be community) to a temporary working
# directory and go there
GIT_DIR_ROOT="$(git rev-parse --show-toplevel)"
TMP_GIT_DIR="$(mktemp --directory -t citus-merge-check.XXXXXXXXX)"
git clone "$GIT_DIR_ROOT" "$TMP_GIT_DIR"
cd "$TMP_GIT_DIR"
# Fails in CI without this
git config user.email "citus-bot@microsoft.com"
git config user.name "citus bot"
# Disable "set -x" temporarily, because $ENTERPRISE_REMOTE contains passwords
{ set +x ; } 2> /dev/null
git remote add enterprise "$ENTERPRISE_REMOTE"
set -x
git remote set-url --push enterprise no-pushing
# Fetch enterprise-master
git fetch enterprise enterprise-master
git checkout "enterprise/enterprise-master"
if git merge --no-commit "origin/$PR_BRANCH"; then
echo "INFO: community PR branch could be merged into enterprise-master"
# check that we can compile after the merge
if check_compile; then
exit 0
fi
echo "WARN: Failed to compile after community PR branch was merged into enterprise"
fi
# undo partial merge
git merge --abort
# If we have a conflict on enterprise merge on the master branch, we have a problem.
# Provide an error message to indicate that enterprise merge is needed to fix this check.
if [[ $PR_BRANCH = master ]]; then
echo "ERROR: Master branch has merge conflicts with enterprise-master."
echo "Try re-running this CI job after merging your changes into enterprise-master."
exit 1
fi
if ! git fetch enterprise "$PR_BRANCH" ; then
echo "ERROR: enterprise/$PR_BRANCH was not found and community PR branch could not be merged into enterprise-master"
exit 1
fi
# Show the top commit of the enterprise PR branch to make debugging easier
git log -n 1 "enterprise/$PR_BRANCH"
# Check that this branch contains the top commit of the current community PR
# branch. If it does not it means it's not up to date with the current PR, so
# the enterprise branch should be updated.
if ! git merge-base --is-ancestor "origin/$PR_BRANCH" "enterprise/$PR_BRANCH" ; then
echo "ERROR: enterprise/$PR_BRANCH is not up to date with community PR branch"
exit 1
fi
# Now check if we can merge the enterprise PR into enterprise-master without
# issues.
git merge --no-commit "enterprise/$PR_BRANCH"
# check that we can compile after the merge
check_compile


@@ -24,6 +24,7 @@
 #include "access/sysattr.h"
 #include "access/xact.h"
 #include "catalog/dependency.h"
+#include "catalog/index.h"
 #include "catalog/indexing.h"
 #include "catalog/pg_authid.h"
 #include "catalog/pg_constraint.h"
@@ -88,11 +89,11 @@ static uint64 * AllocateUint64(uint64 value);
 static void RecordDistributedRelationDependencies(Oid distributedRelationId);
 static GroupShardPlacement * TupleToGroupShardPlacement(TupleDesc tupleDesc,
                                                         HeapTuple heapTuple);
-static bool DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType,
-                                 bool failOnError, uint64 *tableSize);
-static bool DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId,
-                                         SizeQueryType sizeQueryType, bool failOnError,
-                                         uint64 *tableSize);
+static bool DistributedRelationSize(Oid relationId, SizeQueryType sizeQueryType,
+                                    bool failOnError, uint64 *relationSize);
+static bool DistributedRelationSizeOnWorker(WorkerNode *workerNode, Oid relationId,
+                                            SizeQueryType sizeQueryType, bool failOnError,
+                                            uint64 *relationSize);
 static List * ShardIntervalsOnWorkerGroup(WorkerNode *workerNode, Oid relationId);
 static char * GenerateShardIdNameValuesForShardList(List *shardIntervalList,
                                                     bool firstValue);
@@ -282,7 +283,7 @@ citus_shard_sizes(PG_FUNCTION_ARGS)
 /*
- * citus_total_relation_size accepts a table name and returns a distributed table
+ * citus_total_relation_size accepts a distributed table name and returns a distributed table
  * and its indexes' total relation size.
  */
 Datum
@@ -294,20 +295,20 @@ citus_total_relation_size(PG_FUNCTION_ARGS)
     bool failOnError = PG_GETARG_BOOL(1);
     SizeQueryType sizeQueryType = TOTAL_RELATION_SIZE;
 
-    uint64 tableSize = 0;
-    if (!DistributedTableSize(relationId, sizeQueryType, failOnError, &tableSize))
+    uint64 relationSize = 0;
+    if (!DistributedRelationSize(relationId, sizeQueryType, failOnError, &relationSize))
     {
         Assert(!failOnError);
         PG_RETURN_NULL();
     }
 
-    PG_RETURN_INT64(tableSize);
+    PG_RETURN_INT64(relationSize);
 }
 
 /*
- * citus_table_size accepts a table name and returns a distributed table's total
+ * citus_table_size accepts a distributed table name and returns a distributed table's total
  * relation size.
  */
 Datum
@@ -318,21 +319,24 @@ citus_table_size(PG_FUNCTION_ARGS)
     Oid relationId = PG_GETARG_OID(0);
     bool failOnError = true;
     SizeQueryType sizeQueryType = TABLE_SIZE;
 
-    uint64 tableSize = 0;
-    if (!DistributedTableSize(relationId, sizeQueryType, failOnError, &tableSize))
+    uint64 relationSize = 0;
+
+    /* We do not check if relation is really a table, like PostgreSQL is doing. */
+    if (!DistributedRelationSize(relationId, sizeQueryType, failOnError, &relationSize))
     {
         Assert(!failOnError);
         PG_RETURN_NULL();
     }
 
-    PG_RETURN_INT64(tableSize);
+    PG_RETURN_INT64(relationSize);
 }
 
 /*
- * citus_relation_size accept a table name and returns a relation's 'main'
+ * citus_relation_size accept a distributed relation name and returns a relation's 'main'
  * fork's size.
+ *
+ * Input relation is allowed to be an index on a distributed table too.
  */
 Datum
 citus_relation_size(PG_FUNCTION_ARGS)
@@ -344,7 +348,7 @@ citus_relation_size(PG_FUNCTION_ARGS)
     SizeQueryType sizeQueryType = RELATION_SIZE;
     uint64 relationSize = 0;
 
-    if (!DistributedTableSize(relationId, sizeQueryType, failOnError, &relationSize))
+    if (!DistributedRelationSize(relationId, sizeQueryType, failOnError, &relationSize))
     {
         Assert(!failOnError);
         PG_RETURN_NULL();
@@ -506,13 +510,16 @@ ReceiveShardIdAndSizeResults(List *connectionList, Tuplestorestate *tupleStore,
 /*
- * DistributedTableSize is helper function for each kind of citus size functions.
- * It first checks whether the table is distributed and size query can be run on
- * it. Connection to each node has to be established to get the size of the table.
+ * DistributedRelationSize is helper function for each kind of citus size
+ * functions. It first checks whether the relation is a distributed table or an
+ * index belonging to a distributed table and size query can be run on it.
+ * Connection to each node has to be established to get the size of the
+ * relation.
+ * Input relation is allowed to be an index on a distributed table too.
  */
 static bool
-DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, bool failOnError,
-                     uint64 *tableSize)
+DistributedRelationSize(Oid relationId, SizeQueryType sizeQueryType,
+                        bool failOnError, uint64 *relationSize)
 {
     int logLevel = WARNING;
@@ -538,7 +545,7 @@ DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, bool failOnErr
     if (relation == NULL)
     {
         ereport(logLevel,
-                (errmsg("could not compute table size: relation does not exist")));
+                (errmsg("could not compute relation size: relation does not exist")));
 
         return false;
     }
@@ -553,8 +560,9 @@ DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, bool failOnErr
     {
         uint64 relationSizeOnNode = 0;
 
-        bool gotSize = DistributedTableSizeOnWorker(workerNode, relationId, sizeQueryType,
-                                                    failOnError, &relationSizeOnNode);
+        bool gotSize = DistributedRelationSizeOnWorker(workerNode, relationId,
+                                                       sizeQueryType,
+                                                       failOnError, &relationSizeOnNode);
         if (!gotSize)
         {
             return false;
@@ -563,21 +571,22 @@ DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, bool failOnErr
         sumOfSizes += relationSizeOnNode;
     }
 
-    *tableSize = sumOfSizes;
+    *relationSize = sumOfSizes;
 
     return true;
 }
 
 /*
- * DistributedTableSizeOnWorker gets the workerNode and relationId to calculate
+ * DistributedRelationSizeOnWorker gets the workerNode and relationId to calculate
  * size of that relation on the given workerNode by summing up the size of each
  * shard placement.
+ * Input relation is allowed to be an index on a distributed table too.
  */
 static bool
-DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId,
-                             SizeQueryType sizeQueryType,
-                             bool failOnError, uint64 *tableSize)
+DistributedRelationSizeOnWorker(WorkerNode *workerNode, Oid relationId,
+                                SizeQueryType sizeQueryType,
+                                bool failOnError, uint64 *relationSize)
 {
     int logLevel = WARNING;
@@ -591,6 +600,17 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId,
     uint32 connectionFlag = 0;
     PGresult *result = NULL;
 
+    /* if the relation is an index, update relationId and define indexId */
+    Oid indexId = InvalidOid;
+    Oid relKind = get_rel_relkind(relationId);
+    if (relKind == RELKIND_INDEX || relKind == RELKIND_PARTITIONED_INDEX)
+    {
+        indexId = relationId;
+
+        bool missingOk = false;
+        relationId = IndexGetRelation(indexId, missingOk);
+    }
+
     List *shardIntervalsOnNode = ShardIntervalsOnWorkerGroup(workerNode, relationId);
 
     /*
@@ -598,21 +618,22 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId,
      * But citus size functions shouldn't include them, like PG.
      */
     bool optimizePartitionCalculations = false;
-    StringInfo tableSizeQuery = GenerateSizeQueryOnMultiplePlacements(
+    StringInfo relationSizeQuery = GenerateSizeQueryOnMultiplePlacements(
         shardIntervalsOnNode,
+        indexId,
         sizeQueryType,
         optimizePartitionCalculations);
 
     MultiConnection *connection = GetNodeConnection(connectionFlag, workerNodeName,
                                                     workerNodePort);
-    int queryResult = ExecuteOptionalRemoteCommand(connection, tableSizeQuery->data,
+    int queryResult = ExecuteOptionalRemoteCommand(connection, relationSizeQuery->data,
                                                    &result);
 
     if (queryResult != 0)
     {
         ereport(logLevel, (errcode(ERRCODE_CONNECTION_FAILURE),
                            errmsg("could not connect to %s:%d to get size of "
-                                  "table \"%s\"",
+                                  "relation \"%s\"",
                                   workerNodeName, workerNodePort,
                                   get_rel_name(relationId))));
@@ -626,19 +647,19 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId,
         ClearResults(connection, failOnError);
         ereport(logLevel, (errcode(ERRCODE_CONNECTION_FAILURE),
-                           errmsg("cannot parse size of table \"%s\" from %s:%d",
+                           errmsg("cannot parse size of relation \"%s\" from %s:%d",
                                   get_rel_name(relationId), workerNodeName,
                                   workerNodePort)));
 
         return false;
     }
 
-    StringInfo tableSizeStringInfo = (StringInfo) linitial(sizeList);
-    char *tableSizeString = tableSizeStringInfo->data;
+    StringInfo relationSizeStringInfo = (StringInfo) linitial(sizeList);
+    char *relationSizeString = relationSizeStringInfo->data;
 
-    if (strlen(tableSizeString) > 0)
+    if (strlen(relationSizeString) > 0)
     {
-        *tableSize = SafeStringToUint64(tableSizeString);
+        *relationSize = SafeStringToUint64(relationSizeString);
     }
     else
     {
@@ -647,7 +668,7 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId,
          * being executed. For this case we get an empty string as table size.
          * We can take that as zero to prevent any unnecessary errors.
          */
-        *tableSize = 0;
+        *relationSize = 0;
     }
 
     PQclear(result);
@@ -732,7 +753,7 @@ ShardIntervalsOnWorkerGroup(WorkerNode *workerNode, Oid relationId)
 /*
  * GenerateSizeQueryOnMultiplePlacements generates a select size query to get
- * size of multiple tables. Note that, different size functions supported by PG
+ * size of multiple relations. Note that, different size functions supported by PG
  * are also supported by this function changing the size query type given as the
  * last parameter to function. Depending on the sizeQueryType enum parameter, the
  * generated query will call one of the functions: pg_relation_size,
@@ -740,9 +761,13 @@ ShardIntervalsOnWorkerGroup(WorkerNode *workerNode, Oid relationId)
  * This function uses UDFs named worker_partitioned_*_size for partitioned tables,
  * if the parameter optimizePartitionCalculations is true. The UDF to be called is
  * determined by the parameter sizeQueryType.
+ *
+ * indexId is provided if we're interested in the size of an index, not the whole
+ * table.
  */
 StringInfo
 GenerateSizeQueryOnMultiplePlacements(List *shardIntervalList,
+                                      Oid indexId,
                                       SizeQueryType sizeQueryType,
                                       bool optimizePartitionCalculations)
 {
@@ -766,16 +791,20 @@ GenerateSizeQueryOnMultiplePlacements(List *shardIntervalList,
              */
             continue;
         }
 
+        /* we need to build the shard relation name, being an index or table */
+        Oid objectId = OidIsValid(indexId) ? indexId : shardInterval->relationId;
+
         uint64 shardId = shardInterval->shardId;
-        Oid schemaId = get_rel_namespace(shardInterval->relationId);
+        Oid schemaId = get_rel_namespace(objectId);
         char *schemaName = get_namespace_name(schemaId);
-        char *shardName = get_rel_name(shardInterval->relationId);
+        char *shardName = get_rel_name(objectId);
         AppendShardIdToName(&shardName, shardId);
 
         char *shardQualifiedName = quote_qualified_identifier(schemaName, shardName);
         char *quotedShardName = quote_literal_cstr(shardQualifiedName);
 
-        /* for partitoned tables, we will call worker_partitioned_... size functions */
+        /* for partitioned tables, we will call worker_partitioned_... size functions */
         if (optimizePartitionCalculations && PartitionedTable(shardInterval->relationId))
         {
             partitionedShardNames = lappend(partitionedShardNames, quotedShardName);
@@ -1010,7 +1039,7 @@ AppendShardIdNameValues(StringInfo selectQuery, ShardInterval *shardInterval)
 /*
- * ErrorIfNotSuitableToGetSize determines whether the table is suitable to find
+ * ErrorIfNotSuitableToGetSize determines whether the relation is suitable to find
  * its' size with internal functions.
  */
 static void
@@ -1018,11 +1047,32 @@ ErrorIfNotSuitableToGetSize(Oid relationId)
 {
     if (!IsCitusTable(relationId))
     {
-        char *relationName = get_rel_name(relationId);
-        char *escapedQueryString = quote_literal_cstr(relationName);
-        ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
-                        errmsg("cannot calculate the size because relation %s is not "
-                               "distributed", escapedQueryString)));
+        Oid relKind = get_rel_relkind(relationId);
+        if (relKind != RELKIND_INDEX && relKind != RELKIND_PARTITIONED_INDEX)
+        {
+            char *relationName = get_rel_name(relationId);
+            char *escapedRelationName = quote_literal_cstr(relationName);
+            ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
+                            errmsg(
+                                "cannot calculate the size because relation %s "
+                                "is not distributed",
+                                escapedRelationName)));
+        }
+
+        bool missingOk = false;
+        Oid indexId = relationId;
+        relationId = IndexGetRelation(relationId, missingOk);
+        if (!IsCitusTable(relationId))
+        {
+            char *tableName = get_rel_name(relationId);
+            char *escapedTableName = quote_literal_cstr(tableName);
+            char *indexName = get_rel_name(indexId);
+            char *escapedIndexName = quote_literal_cstr(indexName);
+            ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+                            errmsg(
+                                "cannot calculate the size because table %s for "
+                                "index %s is not distributed",
+                                escapedTableName, escapedIndexName)));
+        }
     }
 }
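
Taken together, these C hunks teach the size UDFs about indexes: an index OID is first resolved to its owning table (whose shard placements drive the per-node queries), while the index OID is kept so the generated size query targets the shard-id-suffixed index name. A hedged, runnable Python sketch of that control flow, with the catalog lookups (get_rel_relkind, IndexGetRelation, IsCitusTable) mocked by a dict and all names hypothetical:

# Mock catalog standing in for PostgreSQL's syscache lookups.
CATALOG = {
    "lineitem_hash_part":     {"relkind": "table", "distributed": True},
    "lineitem_hash_part_idx": {"relkind": "index", "table": "lineitem_hash_part"},
}

def resolve_size_target(name):
    """Mirror the new index handling: if given an index, remember it and
    size against the owning table's shard placements."""
    entry = CATALOG[name]
    index = None
    if entry["relkind"] in ("index", "partitioned_index"):
        index = name                  # like indexId = relationId
        name = entry["table"]         # like IndexGetRelation(indexId, false)
        entry = CATALOG[name]
    if not entry.get("distributed"):  # like ErrorIfNotSuitableToGetSize
        raise ValueError(f"cannot calculate the size because relation "
                         f"'{name}' is not distributed")
    # GenerateSizeQueryOnMultiplePlacements then builds per-shard names from
    # the index when one was given, else from the table itself.
    return name, index

print(resolve_size_target("lineitem_hash_part_idx"))
# -> ('lineitem_hash_part', 'lineitem_hash_part_idx')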


@@ -792,7 +792,12 @@ ShardListSizeInBytes(List *shardList, char *workerNodeName, uint32
     /* we skip child tables of a partitioned table if this boolean variable is true */
     bool optimizePartitionCalculations = true;
+
+    /* we're interested in whole table, not a particular index */
+    Oid indexId = InvalidOid;
+
     StringInfo tableSizeQuery = GenerateSizeQueryOnMultiplePlacements(shardList,
+                                                                      indexId,
                                                                       TOTAL_RELATION_SIZE,
                                                                       optimizePartitionCalculations);


@@ -342,6 +342,7 @@ extern void LookupTaskPlacementHostAndPort(ShardPlacement *taskPlacement, char *
                                            int *nodePort);
 extern bool IsDummyPlacement(ShardPlacement *taskPlacement);
 extern StringInfo GenerateSizeQueryOnMultiplePlacements(List *shardIntervalList,
+                                                        Oid indexId,
                                                         SizeQueryType sizeQueryType,
                                                         bool optimizePartitionCalculations);
 extern List * RemoveCoordinatorPlacementIfNotSingleNode(List *placementList);


@@ -177,6 +177,7 @@ DEPS = {
     ),
     "grant_on_schema_propagation": TestDeps("minimal_schedule"),
     "propagate_extension_commands": TestDeps("minimal_schedule"),
+    "multi_size_queries": TestDeps("base_schedule", ["multi_copy"]),
 }


@@ -226,7 +226,7 @@ step s1-drop: DROP TABLE drop_hash;
 step s2-table-size: SELECT citus_total_relation_size('drop_hash'); <waiting ...>
 step s1-commit: COMMIT;
 step s2-table-size: <... completed>
-ERROR: could not compute table size: relation does not exist
+ERROR: could not compute relation size: relation does not exist
 step s2-commit: COMMIT;
 step s1-select-count: SELECT COUNT(*) FROM drop_hash;
 ERROR: relation "drop_hash" does not exist


@@ -1258,3 +1258,9 @@ SELECT bool_and(hasmetadata) AND bool_and(metadatasynced) FROM pg_dist_node WHER
  t
 (1 row)
 
+-- Grant all on public schema to public
+--
+-- That's the default on Postgres versions < 15 and we want to
+-- keep permissions compatible accross versions, in regression
+-- tests.
+GRANT ALL ON SCHEMA public TO PUBLIC;


@@ -7,19 +7,25 @@
 SET citus.next_shard_id TO 1390000;
 -- Tests with invalid relation IDs
 SELECT citus_table_size(1);
-ERROR: could not compute table size: relation does not exist
+ERROR: could not compute relation size: relation does not exist
 SELECT citus_relation_size(1);
-ERROR: could not compute table size: relation does not exist
+ERROR: could not compute relation size: relation does not exist
 SELECT citus_total_relation_size(1);
-ERROR: could not compute table size: relation does not exist
+ERROR: could not compute relation size: relation does not exist
 -- Tests with non-distributed table
-CREATE TABLE non_distributed_table (x int);
+CREATE TABLE non_distributed_table (x int primary key);
 SELECT citus_table_size('non_distributed_table');
 ERROR: cannot calculate the size because relation 'non_distributed_table' is not distributed
 SELECT citus_relation_size('non_distributed_table');
 ERROR: cannot calculate the size because relation 'non_distributed_table' is not distributed
 SELECT citus_total_relation_size('non_distributed_table');
 ERROR: cannot calculate the size because relation 'non_distributed_table' is not distributed
+SELECT citus_table_size('non_distributed_table_pkey');
+ERROR: cannot calculate the size because table 'non_distributed_table' for index 'non_distributed_table_pkey' is not distributed
+SELECT citus_relation_size('non_distributed_table_pkey');
+ERROR: cannot calculate the size because table 'non_distributed_table' for index 'non_distributed_table_pkey' is not distributed
+SELECT citus_total_relation_size('non_distributed_table_pkey');
+ERROR: cannot calculate the size because table 'non_distributed_table' for index 'non_distributed_table_pkey' is not distributed
 DROP TABLE non_distributed_table;
 -- fix broken placements via disabling the node
 SET client_min_messages TO ERROR;
@@ -31,24 +37,70 @@ SELECT replicate_table_shards('lineitem_hash_part', shard_replication_factor:=2,
 -- Tests on distributed table with replication factor > 1
 VACUUM (FULL) lineitem_hash_part;
-SELECT citus_table_size('lineitem_hash_part');
- citus_table_size
----------------------------------------------------------------------
-          3801088
-(1 row)
-
-SELECT citus_relation_size('lineitem_hash_part');
- citus_relation_size
----------------------------------------------------------------------
-             3801088
-(1 row)
-
-SELECT citus_total_relation_size('lineitem_hash_part');
- citus_total_relation_size
----------------------------------------------------------------------
-                   3801088
-(1 row)
-
+SELECT citus_relation_size('lineitem_hash_part') <= citus_table_size('lineitem_hash_part');
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT citus_table_size('lineitem_hash_part') <= citus_total_relation_size('lineitem_hash_part');
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT citus_relation_size('lineitem_hash_part') > 0;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+CREATE INDEX lineitem_hash_part_idx ON lineitem_hash_part(l_orderkey);
+VACUUM (FULL) lineitem_hash_part;
+SELECT citus_relation_size('lineitem_hash_part') <= citus_table_size('lineitem_hash_part');
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT citus_table_size('lineitem_hash_part') <= citus_total_relation_size('lineitem_hash_part');
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT citus_relation_size('lineitem_hash_part') > 0;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT citus_relation_size('lineitem_hash_part_idx') <= citus_table_size('lineitem_hash_part_idx');
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT citus_table_size('lineitem_hash_part_idx') <= citus_total_relation_size('lineitem_hash_part_idx');
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT citus_relation_size('lineitem_hash_part_idx') > 0;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT citus_total_relation_size('lineitem_hash_part') >=
+       citus_table_size('lineitem_hash_part') + citus_table_size('lineitem_hash_part_idx');
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+DROP INDEX lineitem_hash_part_idx;
 VACUUM (FULL) customer_copy_hash;
 -- Tests on distributed tables with streaming replication.
 SELECT citus_table_size('customer_copy_hash');
@@ -72,10 +124,10 @@ SELECT citus_total_relation_size('customer_copy_hash');
 -- Make sure we can get multiple sizes in a single query
 SELECT citus_table_size('customer_copy_hash'),
        citus_table_size('customer_copy_hash'),
-       citus_table_size('supplier');
+       citus_table_size('customer_copy_hash');
  citus_table_size | citus_table_size | citus_table_size
 ---------------------------------------------------------------------
-           548864 |           548864 |           655360
+           548864 |           548864 |           548864
 (1 row)
 
 CREATE INDEX index_1 on customer_copy_hash(c_custkey);
@@ -99,6 +151,24 @@ SELECT citus_total_relation_size('customer_copy_hash');
           2646016
 (1 row)
 
+SELECT citus_table_size('index_1');
+ citus_table_size
+---------------------------------------------------------------------
+          1048576
+(1 row)
+
+SELECT citus_relation_size('index_1');
+ citus_relation_size
+---------------------------------------------------------------------
+             1048576
+(1 row)
+
+SELECT citus_total_relation_size('index_1');
+ citus_total_relation_size
+---------------------------------------------------------------------
+                   1048576
+(1 row)
+
 -- Tests on reference table
 VACUUM (FULL) supplier;
 SELECT citus_table_size('supplier');
@@ -139,6 +209,74 @@ SELECT citus_total_relation_size('supplier');
            688128
 (1 row)
 
+SELECT citus_table_size('index_2');
+ citus_table_size
+---------------------------------------------------------------------
+           122880
+(1 row)
+
+SELECT citus_relation_size('index_2');
+ citus_relation_size
+---------------------------------------------------------------------
+              122880
+(1 row)
+
+SELECT citus_total_relation_size('index_2');
+ citus_total_relation_size
+---------------------------------------------------------------------
+                    122880
+(1 row)
+
+-- Test on partitioned table
+CREATE TABLE split_me (dist_col int, partition_col timestamp) PARTITION BY RANGE (partition_col);
+CREATE INDEX ON split_me(dist_col);
+-- create 2 partitions
+CREATE TABLE m PARTITION OF split_me FOR VALUES FROM ('2018-01-01') TO ('2019-01-01');
+CREATE TABLE e PARTITION OF split_me FOR VALUES FROM ('2019-01-01') TO ('2020-01-01');
+INSERT INTO split_me SELECT 1, '2018-01-01'::timestamp + i * interval '1 day' FROM generate_series(1, 360) i;
+INSERT INTO split_me SELECT 2, '2019-01-01'::timestamp + i * interval '1 day' FROM generate_series(1, 180) i;
+-- before citus
+SELECT citus_relation_size('split_me');
+ERROR: cannot calculate the size because relation 'split_me' is not distributed
+SELECT citus_relation_size('split_me_dist_col_idx');
+ERROR: cannot calculate the size because table 'split_me' for index 'split_me_dist_col_idx' is not distributed
+SELECT citus_relation_size('m');
+ERROR: cannot calculate the size because relation 'm' is not distributed
+SELECT citus_relation_size('m_dist_col_idx');
+ERROR: cannot calculate the size because table 'm' for index 'm_dist_col_idx' is not distributed
+-- distribute the table(s)
+SELECT create_distributed_table('split_me', 'dist_col');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- after citus
+SELECT citus_relation_size('split_me');
+ citus_relation_size
+---------------------------------------------------------------------
+                   0
+(1 row)
+
+SELECT citus_relation_size('split_me_dist_col_idx');
+ citus_relation_size
+---------------------------------------------------------------------
+                   0
+(1 row)
+
+SELECT citus_relation_size('m');
+ citus_relation_size
+---------------------------------------------------------------------
+               32768
+(1 row)
+
+SELECT citus_relation_size('m_dist_col_idx');
+ citus_relation_size
+---------------------------------------------------------------------
+               81920
+(1 row)
+
+DROP TABLE split_me;
 -- Test inside the transaction
 BEGIN;
 ALTER TABLE supplier ALTER COLUMN s_suppkey SET NOT NULL;


@@ -133,12 +133,6 @@ ORDER BY 1, 2;
  validatable_constraint_8000016 | t
 (10 rows)
 
-DROP TABLE constrained_table;
-DROP TABLE referenced_table CASCADE;
-DROP TABLE referencing_table;
+SET client_min_messages TO WARNING;
 DROP SCHEMA validate_constraint CASCADE;
-NOTICE: drop cascades to 3 other objects
-DETAIL: drop cascades to type constraint_validity
-drop cascades to view constraint_validations_in_workers
-drop cascades to view constraint_validations
 SET search_path TO DEFAULT;


@@ -204,7 +204,8 @@ test: citus_copy_shard_placement
 # multi_utilities cannot be run in parallel with other tests because it checks
 # global locks
 test: multi_utilities
-test: foreign_key_to_reference_table validate_constraint
+test: foreign_key_to_reference_table
+test: validate_constraint
 test: multi_repartition_udt multi_repartitioned_subquery_udf multi_subtransactions
 test: multi_modifying_xacts
@@ -300,7 +301,8 @@ test: replicate_reference_tables_to_coordinator
 test: citus_local_tables
 test: mixed_relkind_tests
 test: multi_row_router_insert create_distributed_table_concurrently
-test: multi_reference_table citus_local_tables_queries
+test: multi_reference_table
+test: citus_local_tables_queries
 test: citus_local_table_triggers
 test: coordinator_shouldhaveshards
 test: local_shard_utility_command_execution


@@ -154,7 +154,8 @@ test: multi_outer_join
 # ---
 test: multi_complex_count_distinct
 test: multi_upsert multi_simple_queries
-test: foreign_key_to_reference_table validate_constraint
+test: foreign_key_to_reference_table
+test: validate_constraint
 
 # ---------
 # creates hash and range-partitioned tables and performs COPY


@@ -150,7 +150,9 @@ test: multi_outer_join
 test: multi_create_fdw
 test: multi_generate_ddl_commands multi_create_shards multi_prune_shard_list
 test: multi_upsert multi_simple_queries multi_data_types
-test: multi_utilities foreign_key_to_reference_table validate_constraint
+test: multi_utilities
+test: foreign_key_to_reference_table
+test: validate_constraint
 test: multi_repartition_udt multi_repartitioned_subquery_udf
 
 # ---------


@@ -530,3 +530,10 @@ RESET citus.metadata_sync_mode;
 -- verify that at the end of this file, all primary nodes have metadata synced
 SELECT bool_and(hasmetadata) AND bool_and(metadatasynced) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary';
 
+-- Grant all on public schema to public
+--
+-- That's the default on Postgres versions < 15 and we want to
+-- keep permissions compatible accross versions, in regression
+-- tests.
+GRANT ALL ON SCHEMA public TO PUBLIC;


@@ -13,10 +13,15 @@ SELECT citus_relation_size(1);
 SELECT citus_total_relation_size(1);
 
 -- Tests with non-distributed table
-CREATE TABLE non_distributed_table (x int);
+CREATE TABLE non_distributed_table (x int primary key);
 
 SELECT citus_table_size('non_distributed_table');
 SELECT citus_relation_size('non_distributed_table');
 SELECT citus_total_relation_size('non_distributed_table');
+SELECT citus_table_size('non_distributed_table_pkey');
+SELECT citus_relation_size('non_distributed_table_pkey');
+SELECT citus_total_relation_size('non_distributed_table_pkey');
 DROP TABLE non_distributed_table;
 
 -- fix broken placements via disabling the node
@@ -26,9 +31,25 @@ SELECT replicate_table_shards('lineitem_hash_part', shard_replication_factor:=2,
 -- Tests on distributed table with replication factor > 1
 VACUUM (FULL) lineitem_hash_part;
-SELECT citus_table_size('lineitem_hash_part');
-SELECT citus_relation_size('lineitem_hash_part');
-SELECT citus_total_relation_size('lineitem_hash_part');
+SELECT citus_relation_size('lineitem_hash_part') <= citus_table_size('lineitem_hash_part');
+SELECT citus_table_size('lineitem_hash_part') <= citus_total_relation_size('lineitem_hash_part');
+SELECT citus_relation_size('lineitem_hash_part') > 0;
+
+CREATE INDEX lineitem_hash_part_idx ON lineitem_hash_part(l_orderkey);
+VACUUM (FULL) lineitem_hash_part;
+
+SELECT citus_relation_size('lineitem_hash_part') <= citus_table_size('lineitem_hash_part');
+SELECT citus_table_size('lineitem_hash_part') <= citus_total_relation_size('lineitem_hash_part');
+SELECT citus_relation_size('lineitem_hash_part') > 0;
+
+SELECT citus_relation_size('lineitem_hash_part_idx') <= citus_table_size('lineitem_hash_part_idx');
+SELECT citus_table_size('lineitem_hash_part_idx') <= citus_total_relation_size('lineitem_hash_part_idx');
+SELECT citus_relation_size('lineitem_hash_part_idx') > 0;
+
+SELECT citus_total_relation_size('lineitem_hash_part') >=
+       citus_table_size('lineitem_hash_part') + citus_table_size('lineitem_hash_part_idx');
+
+DROP INDEX lineitem_hash_part_idx;
 
 VACUUM (FULL) customer_copy_hash;
@@ -40,7 +61,7 @@ SELECT citus_total_relation_size('customer_copy_hash');
 -- Make sure we can get multiple sizes in a single query
 SELECT citus_table_size('customer_copy_hash'),
        citus_table_size('customer_copy_hash'),
-       citus_table_size('supplier');
+       citus_table_size('customer_copy_hash');
 
 CREATE INDEX index_1 on customer_copy_hash(c_custkey);
 VACUUM (FULL) customer_copy_hash;
@@ -50,6 +71,10 @@ SELECT citus_table_size('customer_copy_hash');
 SELECT citus_relation_size('customer_copy_hash');
 SELECT citus_total_relation_size('customer_copy_hash');
 
+SELECT citus_table_size('index_1');
+SELECT citus_relation_size('index_1');
+SELECT citus_total_relation_size('index_1');
+
 -- Tests on reference table
 VACUUM (FULL) supplier;
@@ -64,6 +89,38 @@ SELECT citus_table_size('supplier');
 SELECT citus_relation_size('supplier');
 SELECT citus_total_relation_size('supplier');
 
+SELECT citus_table_size('index_2');
+SELECT citus_relation_size('index_2');
+SELECT citus_total_relation_size('index_2');
+
+-- Test on partitioned table
+CREATE TABLE split_me (dist_col int, partition_col timestamp) PARTITION BY RANGE (partition_col);
+CREATE INDEX ON split_me(dist_col);
+
+-- create 2 partitions
+CREATE TABLE m PARTITION OF split_me FOR VALUES FROM ('2018-01-01') TO ('2019-01-01');
+CREATE TABLE e PARTITION OF split_me FOR VALUES FROM ('2019-01-01') TO ('2020-01-01');
+INSERT INTO split_me SELECT 1, '2018-01-01'::timestamp + i * interval '1 day' FROM generate_series(1, 360) i;
+INSERT INTO split_me SELECT 2, '2019-01-01'::timestamp + i * interval '1 day' FROM generate_series(1, 180) i;
+
+-- before citus
+SELECT citus_relation_size('split_me');
+SELECT citus_relation_size('split_me_dist_col_idx');
+SELECT citus_relation_size('m');
+SELECT citus_relation_size('m_dist_col_idx');
+
+-- distribute the table(s)
+SELECT create_distributed_table('split_me', 'dist_col');
+
+-- after citus
+SELECT citus_relation_size('split_me');
+SELECT citus_relation_size('split_me_dist_col_idx');
+SELECT citus_relation_size('m');
+SELECT citus_relation_size('m_dist_col_idx');
+
+DROP TABLE split_me;
+
 -- Test inside the transaction
 BEGIN;
 ALTER TABLE supplier ALTER COLUMN s_suppkey SET NOT NULL;


@@ -116,9 +116,6 @@ SELECT *
 FROM constraint_validations_in_workers
 ORDER BY 1, 2;
 
-DROP TABLE constrained_table;
-DROP TABLE referenced_table CASCADE;
-DROP TABLE referencing_table;
+SET client_min_messages TO WARNING;
 DROP SCHEMA validate_constraint CASCADE;
 SET search_path TO DEFAULT;
SET search_path TO DEFAULT; SET search_path TO DEFAULT;