mirror of https://github.com/citusdata/citus.git
Merge branch 'main' into fix-deparse-stmt-qualify
commit 15e65dbb11
.circleci/config.yml: 1128 changed lines (file diff suppressed because it is too large)
@@ -27,9 +27,9 @@ jobs:
       style_checker_image_name: "citus/stylechecker"
       style_checker_tools_version: "0.8.18"
       image_suffix: "-v9d71045"
-      pg14_version: "14.9"
-      pg15_version: "15.4"
-      pg16_version: "16.0"
+      pg14_version: '{ "major": "14", "full": "14.9" }'
+      pg15_version: '{ "major": "15", "full": "15.4" }'
+      pg16_version: '{ "major": "16", "full": "16.0" }'
       upgrade_pg_versions: "14.9-15.4-16.0"
     steps:
       # Since GHA jobs needs at least one step we use a noop step here.

@@ -93,7 +93,7 @@ jobs:
         run: ci/check_migration_files.sh
   build:
     needs: params
-    name: Build for PG ${{ matrix.pg_version}}
+    name: Build for PG${{ fromJson(matrix.pg_version).major }}
     strategy:
       fail-fast: false
       matrix:

@@ -107,7 +107,7 @@ jobs:
         - ${{ needs.params.outputs.pg16_version }}
     runs-on: ubuntu-20.04
     container:
-      image: "${{ matrix.image_name }}:${{ matrix.pg_version }}${{ matrix.image_suffix }}"
+      image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ matrix.image_suffix }}"
       options: --user root
     steps:
     - uses: actions/checkout@v3.5.0

@@ -124,7 +124,7 @@ jobs:
           ./build-${{ env.PG_MAJOR }}/*
           ./install-${{ env.PG_MAJOR }}.tar
   test-citus:
-    name: PG${{ matrix.pg_version }} - ${{ matrix.make }}
+    name: PG${{ fromJson(matrix.pg_version).major }} - ${{ matrix.make }}
     strategy:
       fail-fast: false
       matrix:

@@ -211,7 +211,7 @@ jobs:
            image_name: ${{ needs.params.outputs.fail_test_image_name }}
     runs-on: ubuntu-20.04
     container:
-      image: "${{ matrix.image_name }}:${{ matrix.pg_version }}${{ needs.params.outputs.image_suffix }}"
+      image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}"
       options: --user root --dns=8.8.8.8
       # Due to Github creates a default network for each job, we need to use
       # --dns= to have similar DNS settings as our other CI systems or local

@@ -228,17 +228,17 @@ jobs:
       - uses: "./.github/actions/save_logs_and_results"
         if: always()
         with:
-          folder: ${{ matrix.pg_version }}_${{ matrix.make }}
+          folder: ${{ fromJson(matrix.pg_version).major }}_${{ matrix.make }}
       - uses: "./.github/actions/upload_coverage"
         if: always()
         with:
           flags: ${{ env.PG_MAJOR }}_${{ matrix.suite }}_${{ matrix.make }}
           codecov_token: ${{ secrets.CODECOV_TOKEN }}
   test-arbitrary-configs:
-    name: PG${{ matrix.pg_version }} - check-arbitrary-configs-${{ matrix.parallel }}
+    name: PG${{ fromJson(matrix.pg_version).major }} - check-arbitrary-configs-${{ matrix.parallel }}
     runs-on: ["self-hosted", "1ES.Pool=1es-gha-citusdata-pool"]
     container:
-      image: "${{ matrix.image_name }}:${{ matrix.pg_version }}${{ needs.params.outputs.image_suffix }}"
+      image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}"
       options: --user root
     needs:
       - params

@@ -333,10 +333,10 @@ jobs:
           flags: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade
           codecov_token: ${{ secrets.CODECOV_TOKEN }}
   test-citus-upgrade:
-    name: PG${{ needs.params.outputs.pg14_version }} - check-citus-upgrade
+    name: PG${{ fromJson(needs.params.outputs.pg14_version).major }} - check-citus-upgrade
     runs-on: ubuntu-20.04
     container:
-      image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ needs.params.outputs.pg14_version }}${{ needs.params.outputs.image_suffix }}"
+      image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ fromJson(needs.params.outputs.pg14_version).full }}${{ needs.params.outputs.image_suffix }}"
       options: --user root
     needs:
       - params

@@ -383,7 +383,7 @@ jobs:
       CC_TEST_REPORTER_ID: ${{ secrets.CC_TEST_REPORTER_ID }}
     runs-on: ubuntu-20.04
     container:
-      image: ${{ needs.params.outputs.test_image_name }}:${{ needs.params.outputs.pg16_version }}${{ needs.params.outputs.image_suffix }}
+      image: ${{ needs.params.outputs.test_image_name }}:${{ fromJson(needs.params.outputs.pg16_version).full }}${{ needs.params.outputs.image_suffix }}
     needs:
       - params
       - test-citus

@@ -478,7 +478,7 @@ jobs:
     name: Test flakyness
     runs-on: ubuntu-20.04
     container:
-      image: ${{ needs.params.outputs.fail_test_image_name }}:${{ needs.params.outputs.pg16_version }}${{ needs.params.outputs.image_suffix }}
+      image: ${{ needs.params.outputs.fail_test_image_name }}:${{ fromJson(needs.params.outputs.pg16_version).full }}${{ needs.params.outputs.image_suffix }}
       options: --user root
     env:
       runs: 8
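
Note on the hunks above: each pgXX_version parameter is now a one-line JSON document instead of a bare version string, so a single job output can feed both the docker image tag (full) and the job/folder names (major) through GitHub Actions' fromJson(). A rough Python model of what those expressions evaluate to (illustration only, not code from this commit):

import json

pg16_version = '{ "major": "16", "full": "16.0" }'  # the new params output
parsed = json.loads(pg16_version)
assert parsed["major"] == "16"   # fromJson(...).major, used in job names and folders
assert parsed["full"] == "16.0"  # fromJson(...).full, used in the image tag
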
@@ -24,9 +24,11 @@ jobs:
       - name: Get Postgres Versions
         id: get-postgres-versions
         run: |
-          # Postgres versions are stored in .github/workflows/build_and_test.yml file in "pg[pg-version]_version"
-          # format. Below command extracts the versions and get the unique values.
-          pg_versions=$(cat .github/workflows/build_and_test.yml | grep -oE 'pg[0-9]+_version: "[0-9.]+"' | sed -E 's/pg([0-9]+)_version: "([0-9.]+)"/\1/g' | sort | uniq | tr '\n', ',')
+          set -euxo pipefail
+          # Postgres versions are stored in .github/workflows/build_and_test.yml
+          # file in json strings with major and full keys.
+          # Below command extracts the versions and get the unique values.
+          pg_versions=$(cat .github/workflows/build_and_test.yml | grep -oE '"major": "[0-9]+", "full": "[0-9.]+"' | sed -E 's/"major": "([0-9]+)", "full": "([0-9.]+)"/\1/g' | sort | uniq | tr '\n', ',')
           pg_versions_array="[ ${pg_versions} ]"
           echo "Supported PG Versions: ${pg_versions_array}"
           # Below line is needed to set the output variable to be used in the next job
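
The grep/sed pipeline now matches the JSON-style version strings rather than the old pg[0-9]+_version: "x.y" form. A rough Python equivalent of that extraction (illustration only, assuming the same file layout):

import re

with open(".github/workflows/build_and_test.yml") as f:
    content = f.read()

# Mirrors grep -oE ... | sed -E ... | sort | uniq: unique major versions only.
majors = sorted(set(re.findall(r'"major": "([0-9]+)", "full": "[0-9.]+"', content)))
print("Supported PG Versions: [ " + ",".join(majors) + " ]")  # e.g. [ 14,15,16 ]
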
@@ -14,8 +14,8 @@ ci_scripts=$(
     grep -v -E '^(ci_helpers.sh|fix_style.sh)$'
 )
 for script in $ci_scripts; do
-    if ! grep "\\bci/$script\\b" .circleci/config.yml > /dev/null; then
-        echo "ERROR: CI script with name \"$script\" is not actually used in .circleci/config.yml"
+    if ! grep "\\bci/$script\\b" -r .github > /dev/null; then
+        echo "ERROR: CI script with name \"$script\" is not actually used in .github folder"
         exit 1
     fi
     if ! grep "^## \`$script\`\$" ci/README.md > /dev/null; then
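
The check now requires every ci/ script to be referenced somewhere under the .github folder instead of in .circleci/config.yml. A rough Python model of the same rule (illustration only; the real check is the shell loop above):

from pathlib import Path

def script_is_used(script: str) -> bool:
    needle = f"ci/{script}"
    return any(
        needle in path.read_text(errors="ignore")
        for path in Path(".github").rglob("*")
        if path.is_file()
    )
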
@@ -1,96 +0,0 @@
-#!/bin/bash
-
-# Testing this script locally requires you to set the following environment
-# variables:
-# CIRCLE_BRANCH, GIT_USERNAME and GIT_TOKEN
-
-# fail if trying to reference a variable that is not set.
-set -u
-# exit immediately if a command fails
-set -e
-# Fail on pipe failures
-set -o pipefail
-
-PR_BRANCH="${CIRCLE_BRANCH}"
-ENTERPRISE_REMOTE="https://${GIT_USERNAME}:${GIT_TOKEN}@github.com/citusdata/citus-enterprise"
-
-# shellcheck disable=SC1091
-source ci/ci_helpers.sh
-
-# List executed commands. This is done so debugging this script is easier when
-# it fails. It's explicitly done after git remote add so username and password
-# are not shown in CI output (even though it's also filtered out by CircleCI)
-set -x
-
-check_compile () {
-    echo "INFO: checking if merged code can be compiled"
-    ./configure --without-libcurl
-    make -j10
-}
-
-# Clone current git repo (which should be community) to a temporary working
-# directory and go there
-GIT_DIR_ROOT="$(git rev-parse --show-toplevel)"
-TMP_GIT_DIR="$(mktemp --directory -t citus-merge-check.XXXXXXXXX)"
-git clone "$GIT_DIR_ROOT" "$TMP_GIT_DIR"
-cd "$TMP_GIT_DIR"
-
-# Fails in CI without this
-git config user.email "citus-bot@microsoft.com"
-git config user.name "citus bot"
-
-# Disable "set -x" temporarily, because $ENTERPRISE_REMOTE contains passwords
-{ set +x ; } 2> /dev/null
-git remote add enterprise "$ENTERPRISE_REMOTE"
-set -x
-
-git remote set-url --push enterprise no-pushing
-
-# Fetch enterprise-master
-git fetch enterprise enterprise-master
-
-
-git checkout "enterprise/enterprise-master"
-
-if git merge --no-commit "origin/$PR_BRANCH"; then
-    echo "INFO: community PR branch could be merged into enterprise-master"
-    # check that we can compile after the merge
-    if check_compile; then
-        exit 0
-    fi
-
-    echo "WARN: Failed to compile after community PR branch was merged into enterprise"
-fi
-
-# undo partial merge
-git merge --abort
-
-# If we have a conflict on enterprise merge on the master branch, we have a problem.
-# Provide an error message to indicate that enterprise merge is needed to fix this check.
-if [[ $PR_BRANCH = master ]]; then
-    echo "ERROR: Master branch has merge conflicts with enterprise-master."
-    echo "Try re-running this CI job after merging your changes into enterprise-master."
-    exit 1
-fi
-
-if ! git fetch enterprise "$PR_BRANCH" ; then
-    echo "ERROR: enterprise/$PR_BRANCH was not found and community PR branch could not be merged into enterprise-master"
-    exit 1
-fi
-
-# Show the top commit of the enterprise PR branch to make debugging easier
-git log -n 1 "enterprise/$PR_BRANCH"
-
-# Check that this branch contains the top commit of the current community PR
-# branch. If it does not it means it's not up to date with the current PR, so
-# the enterprise branch should be updated.
-if ! git merge-base --is-ancestor "origin/$PR_BRANCH" "enterprise/$PR_BRANCH" ; then
-    echo "ERROR: enterprise/$PR_BRANCH is not up to date with community PR branch"
-    exit 1
-fi
-
-# Now check if we can merge the enterprise PR into enterprise-master without
-# issues.
-git merge --no-commit "enterprise/$PR_BRANCH"
-# check that we can compile after the merge
-check_compile
@@ -481,6 +481,7 @@ _PG_init(void)
 #endif
 
     InitializeMaintenanceDaemon();
+    InitializeMaintenanceDaemonForMainDb();
 
    /* initialize coordinated transaction management */
    InitializeTransactionManagement();

@@ -1820,6 +1821,16 @@ RegisterCitusConfigVariables(void)
        GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE | GUC_UNIT_MS,
        NULL, NULL, NULL);
 
+    DefineCustomStringVariable(
+        "citus.main_db",
+        gettext_noop("Which database is designated as the main_db"),
+        NULL,
+        &MainDb,
+        "",
+        PGC_POSTMASTER,
+        GUC_STANDARD,
+        NULL, NULL, NULL);
+
     DefineCustomIntVariable(
        "citus.max_adaptive_executor_pool_size",
        gettext_noop("Sets the maximum number of connections per worker node used by "
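
citus.main_db is registered as a PGC_POSTMASTER string GUC with an empty default, so it can only be set at server start, and an empty value means no designated main database. A short usage sketch in the style of this commit's own Python test harness (cluster is assumed to come from cluster_factory; see the new test further below):

cluster.coordinator.configure("citus.main_db='mymaindb'")
cluster.coordinator.restart()  # PGC_POSTMASTER: a reload is not enough
assert cluster.coordinator.sql_value("SHOW citus.main_db;") == "mymaindb"
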
@@ -99,6 +99,7 @@ int Recover2PCInterval = 60000;
 int DeferShardDeleteInterval = 15000;
 int BackgroundTaskQueueCheckInterval = 5000;
 int MaxBackgroundTaskExecutors = 4;
+char *MainDb = "";
 
 /* config variables for metadata sync timeout */
 int MetadataSyncInterval = 60000;

@@ -112,7 +113,7 @@ static MaintenanceDaemonControlData *MaintenanceDaemonControl = NULL;
  * activated.
  */
 static HTAB *MaintenanceDaemonDBHash;
+static ErrorContextCallback errorCallback = { 0 };
 static volatile sig_atomic_t got_SIGHUP = false;
 static volatile sig_atomic_t got_SIGTERM = false;
 

@@ -125,6 +126,8 @@ static void MaintenanceDaemonShmemExit(int code, Datum arg);
 static void MaintenanceDaemonErrorContext(void *arg);
 static bool MetadataSyncTriggeredCheckAndReset(MaintenanceDaemonDBData *dbData);
 static void WarnMaintenanceDaemonNotStarted(void);
+static MaintenanceDaemonDBData * GetMaintenanceDaemonDBHashEntry(Oid databaseId,
+                                                                 bool *found);
 
 /*
  * InitializeMaintenanceDaemon, called at server start, is responsible for

@@ -139,6 +142,82 @@ InitializeMaintenanceDaemon(void)
 }
 
 
+/*
+ * GetMaintenanceDaemonDBHashEntry searches the MaintenanceDaemonDBHash for the
+ * databaseId. It returns the entry if found or creates a new entry and initializes
+ * the value with zeroes.
+ */
+MaintenanceDaemonDBData *
+GetMaintenanceDaemonDBHashEntry(Oid databaseId, bool *found)
+{
+    MaintenanceDaemonDBData *dbData = (MaintenanceDaemonDBData *) hash_search(
+        MaintenanceDaemonDBHash,
+        &MyDatabaseId,
+        HASH_ENTER_NULL,
+        found);
+
+    if (!dbData)
+    {
+        elog(LOG,
+             "cannot create or find the maintenance deamon hash entry for database %u",
+             databaseId);
+        return NULL;
+    }
+
+    if (!*found)
+    {
+        /* ensure the values in MaintenanceDaemonDBData are zero */
+        memset(((char *) dbData) + sizeof(Oid), 0,
+               sizeof(MaintenanceDaemonDBData) - sizeof(Oid));
+    }
+
+    return dbData;
+}
+
+
+/*
+ * InitializeMaintenanceDaemonForMainDb is called in _PG_Init
+ * at which stage we are not in a transaction or have databaseOid
+ */
+void
+InitializeMaintenanceDaemonForMainDb(void)
+{
+    if (strcmp(MainDb, "") == 0)
+    {
+        elog(LOG, "There is no designated Main database.");
+        return;
+    }
+
+    BackgroundWorker worker;
+
+    memset(&worker, 0, sizeof(worker));
+
+
+    strcpy_s(worker.bgw_name, sizeof(worker.bgw_name),
+             "Citus Maintenance Daemon for Main DB");
+
+    /* request ability to connect to target database */
+    worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION;
+
+    /*
+     * No point in getting started before able to run query, but we do
+     * want to get started on Hot-Standby.
+     */
+    worker.bgw_start_time = BgWorkerStart_ConsistentState;
+
+    /* Restart after a bit after errors, but don't bog the system. */
+    worker.bgw_restart_time = 5;
+    strcpy_s(worker.bgw_library_name,
+             sizeof(worker.bgw_library_name), "citus");
+    strcpy_s(worker.bgw_function_name, sizeof(worker.bgw_library_name),
+             "CitusMaintenanceDaemonMain");
+
+    worker.bgw_main_arg = (Datum) 0;
+
+    RegisterBackgroundWorker(&worker);
+}
+
+
 /*
  * InitializeMaintenanceDaemonBackend, called at backend start and
  * configuration changes, is responsible for starting a per-database

@@ -148,31 +227,20 @@ void
 InitializeMaintenanceDaemonBackend(void)
 {
     Oid extensionOwner = CitusExtensionOwner();
-    bool found;
+    bool found = false;
 
     LWLockAcquire(&MaintenanceDaemonControl->lock, LW_EXCLUSIVE);
 
-    MaintenanceDaemonDBData *dbData = (MaintenanceDaemonDBData *) hash_search(
-        MaintenanceDaemonDBHash,
-        &MyDatabaseId,
-        HASH_ENTER_NULL,
+    MaintenanceDaemonDBData *dbData = GetMaintenanceDaemonDBHashEntry(MyDatabaseId,
         &found);
 
     if (dbData == NULL)
     {
        WarnMaintenanceDaemonNotStarted();
        LWLockRelease(&MaintenanceDaemonControl->lock);
 
        return;
     }
 
-    if (!found)
-    {
-        /* ensure the values in MaintenanceDaemonDBData are zero */
-        memset(((char *) dbData) + sizeof(Oid), 0,
-               sizeof(MaintenanceDaemonDBData) - sizeof(Oid));
-    }
-
     if (IsMaintenanceDaemon)
     {
        /*

@@ -271,41 +339,71 @@ WarnMaintenanceDaemonNotStarted(void)
 
 
 /*
- * CitusMaintenanceDaemonMain is the maintenance daemon's main routine, it'll
- * be started by the background worker infrastructure. If it errors out,
- * it'll be restarted after a few seconds.
+ * ConnectToDatabase connects to the database for the given databaseOid.
+ * if databaseOid is 0, connects to MainDb and then creates a hash entry.
+ * If a hash entry cannot be created for MainDb it exits the process requesting a restart.
+ * However for regular databases, it exits without requesting a restart since another
+ * subsequent backend is expected to start the Maintenance Daemon.
+ * If the found hash entry has a valid workerPid, it exits
+ * without requesting a restart since there is already a daemon running.
  */
-void
-CitusMaintenanceDaemonMain(Datum main_arg)
+static MaintenanceDaemonDBData *
+ConnectToDatabase(Oid databaseOid)
 {
-    Oid databaseOid = DatumGetObjectId(main_arg);
-    TimestampTz nextStatsCollectionTime USED_WITH_LIBCURL_ONLY =
-        TimestampTzPlusMilliseconds(GetCurrentTimestamp(), 60 * 1000);
-    bool retryStatsCollection USED_WITH_LIBCURL_ONLY = false;
-    TimestampTz lastRecoveryTime = 0;
-    TimestampTz lastShardCleanTime = 0;
-    TimestampTz lastStatStatementsPurgeTime = 0;
-    TimestampTz nextMetadataSyncTime = 0;
-
-    /* state kept for the background tasks queue monitor */
-    TimestampTz lastBackgroundTaskQueueCheck = GetCurrentTimestamp();
-    BackgroundWorkerHandle *backgroundTasksQueueBgwHandle = NULL;
-    bool backgroundTasksQueueWarnedForLock = false;
-
-    /*
-     * We do metadata sync in a separate background worker. We need its
-     * handle to be able to check its status.
-     */
-    BackgroundWorkerHandle *metadataSyncBgwHandle = NULL;
-
-    /*
-     * Look up this worker's configuration.
-     */
+    MaintenanceDaemonDBData *myDbData = NULL;
+
+    bool isMainDb = false;
+
     LWLockAcquire(&MaintenanceDaemonControl->lock, LW_EXCLUSIVE);
 
-    MaintenanceDaemonDBData *myDbData = (MaintenanceDaemonDBData *)
+    if (databaseOid == 0)
+    {
+        char *databaseName = MainDb;
+
+        /*
+         * Since we cannot query databaseOid without initializing Postgres
+         * first, connect to the database by name.
+         */
+        BackgroundWorkerInitializeConnection(databaseName, NULL, 0);
+
+        /*
+         * Now we have a valid MyDatabaseId.
+         * Insert the hash entry for the database to the Maintenance Deamon Hash.
+         */
+        bool found = false;
+
+        myDbData = GetMaintenanceDaemonDBHashEntry(MyDatabaseId, &found);
+
+        if (!myDbData)
+        {
+            /*
+             * If an entry cannot be created,
+             * return code of 1 requests worker restart
+             * Since BackgroundWorker for the MainDb is only registered
+             * once during server startup, we need to retry.
+             */
+            proc_exit(1);
+        }
+
+        if (found && myDbData->workerPid != 0)
+        {
+            /* Another maintenance daemon is running.*/
+
+            proc_exit(0);
+        }
+
+        databaseOid = MyDatabaseId;
+        myDbData->userOid = GetSessionUserId();
+        isMainDb = true;
+    }
+    else
+    {
+        myDbData = (MaintenanceDaemonDBData *)
            hash_search(MaintenanceDaemonDBHash, &databaseOid,
                        HASH_FIND, NULL);
 
        if (!myDbData)
        {
            /*

@@ -329,8 +427,9 @@ CitusMaintenanceDaemonMain(Datum main_arg)
 
            proc_exit(0);
        }
+    }
 
-    before_shmem_exit(MaintenanceDaemonShmemExit, main_arg);
+    before_shmem_exit(MaintenanceDaemonShmemExit, ObjectIdGetDatum(databaseOid));
 
    /*
     * Signal that I am the maintenance daemon now.

@@ -356,25 +455,55 @@ CitusMaintenanceDaemonMain(Datum main_arg)
 
     LWLockRelease(&MaintenanceDaemonControl->lock);
 
-    /*
-     * Setup error context so log messages can be properly attributed. Some of
-     * them otherwise sound like they might be from a normal user connection.
-     * Do so before setting up signals etc, so we never exit without the
-     * context setup.
-     */
-    ErrorContextCallback errorCallback = { 0 };
     memset(&errorCallback, 0, sizeof(errorCallback));
     errorCallback.callback = MaintenanceDaemonErrorContext;
     errorCallback.arg = (void *) myDbData;
     errorCallback.previous = error_context_stack;
     error_context_stack = &errorCallback;
 
 
     elog(LOG, "starting maintenance daemon on database %u user %u",
         databaseOid, myDbData->userOid);
 
+    if (!isMainDb)
+    {
        /* connect to database, after that we can actually access catalogs */
        BackgroundWorkerInitializeConnectionByOid(databaseOid, myDbData->userOid, 0);
+    }
+
+    return myDbData;
+}
+
+
+/*
+ * CitusMaintenanceDaemonMain is the maintenance daemon's main routine, it'll
+ * be started by the background worker infrastructure. If it errors out,
+ * it'll be restarted after a few seconds.
+ */
+void
+CitusMaintenanceDaemonMain(Datum main_arg)
+{
+    Oid databaseOid = DatumGetObjectId(main_arg);
+    TimestampTz nextStatsCollectionTime USED_WITH_LIBCURL_ONLY =
+        TimestampTzPlusMilliseconds(GetCurrentTimestamp(), 60 * 1000);
+    bool retryStatsCollection USED_WITH_LIBCURL_ONLY = false;
+    TimestampTz lastRecoveryTime = 0;
+    TimestampTz lastShardCleanTime = 0;
+    TimestampTz lastStatStatementsPurgeTime = 0;
+    TimestampTz nextMetadataSyncTime = 0;
+
+    /* state kept for the background tasks queue monitor */
+    TimestampTz lastBackgroundTaskQueueCheck = GetCurrentTimestamp();
+    BackgroundWorkerHandle *backgroundTasksQueueBgwHandle = NULL;
+    bool backgroundTasksQueueWarnedForLock = false;
+
+
+    /*
+     * We do metadata sync in a separate background worker. We need its
+     * handle to be able to check its status.
+     */
+    BackgroundWorkerHandle *metadataSyncBgwHandle = NULL;
+
+    MaintenanceDaemonDBData *myDbData = ConnectToDatabase(databaseOid);
 
    /* make worker recognizable in pg_stat_activity */
    pgstat_report_appname("Citus Maintenance Daemon");

@@ -383,7 +512,7 @@ CitusMaintenanceDaemonMain(Datum main_arg)
     * Terminate orphaned metadata sync daemons spawned from previously terminated
     * or crashed maintenanced instances.
     */
-    SignalMetadataSyncDaemon(databaseOid, SIGTERM);
+    SignalMetadataSyncDaemon(MyDatabaseId, SIGTERM);
 
    /* enter main loop */
    while (!got_SIGTERM)

@@ -945,7 +1074,7 @@ MaintenanceDaemonShmemExit(int code, Datum arg)
 }
 
 
-/* MaintenanceDaemonSigTermHandler calls proc_exit(0) */
+/* MaintenanceDaemonSigTermHandler sets the got_SIGTERM flag.*/
 static void
 MaintenanceDaemonSigTermHandler(SIGNAL_ARGS)
 {
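
The subtle part of the refactoring above is the new ConnectToDatabase() entry path: a databaseOid of 0 is the statically registered MainDb worker, which connects by name, registers its own hash entry, and chooses its exit code so the postmaster either retries it (proc_exit(1)) or defers to an already running daemon (proc_exit(0)); a nonzero oid is the usual per-database path that only looks up an existing entry. A compact Python model of that branching (illustration only, not the C implementation):

def connect_to_database(database_oid, main_db_entry_for, lookup_entry):
    if database_oid == 0:
        entry, found = main_db_entry_for()  # GetMaintenanceDaemonDBHashEntry
        if entry is None:
            raise SystemExit(1)             # restart: MainDb worker is registered once
        if found and entry.worker_pid != 0:
            raise SystemExit(0)             # a daemon is already running
        return entry
    return lookup_entry(database_oid)       # HASH_FIND path for regular databases
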
@@ -20,6 +20,7 @@
 
 /* config variable for */
 extern double DistributedDeadlockDetectionTimeoutFactor;
+extern char *MainDb;
 
 extern void StopMaintenanceDaemon(Oid databaseId);
 extern void TriggerNodeMetadataSync(Oid databaseId);

@@ -27,6 +28,7 @@ extern void InitializeMaintenanceDaemon(void);
 extern size_t MaintenanceDaemonShmemSize(void);
 extern void MaintenanceDaemonShmemInit(void);
 extern void InitializeMaintenanceDaemonBackend(void);
+extern void InitializeMaintenanceDaemonForMainDb(void);
 extern bool LockCitusExtension(void);
 
 extern PGDLLEXPORT void CitusMaintenanceDaemonMain(Datum main_arg);
@@ -453,6 +453,9 @@ def cleanup_test_leftovers(nodes):
     for node in nodes:
         node.cleanup_schemas()
 
+    for node in nodes:
+        node.cleanup_databases()
+
     for node in nodes:
         node.cleanup_users()
 

@@ -753,6 +756,7 @@ class Postgres(QueryRunner):
         self.subscriptions = set()
         self.publications = set()
         self.replication_slots = set()
+        self.databases = set()
         self.schemas = set()
         self.users = set()
 

@@ -993,6 +997,10 @@ class Postgres(QueryRunner):
             args = sql.SQL("")
         self.sql(sql.SQL("CREATE USER {} {}").format(sql.Identifier(name), args))
 
+    def create_database(self, name):
+        self.databases.add(name)
+        self.sql(sql.SQL("CREATE DATABASE {}").format(sql.Identifier(name)))
+
     def create_schema(self, name):
         self.schemas.add(name)
         self.sql(sql.SQL("CREATE SCHEMA {}").format(sql.Identifier(name)))

@@ -1020,6 +1028,12 @@ class Postgres(QueryRunner):
         for user in self.users:
             self.sql(sql.SQL("DROP USER IF EXISTS {}").format(sql.Identifier(user)))
 
+    def cleanup_databases(self):
+        for database in self.databases:
+            self.sql(
+                sql.SQL("DROP DATABASE IF EXISTS {}").format(sql.Identifier(database))
+            )
+
     def cleanup_schemas(self):
         for schema in self.schemas:
             self.sql(
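
create_database()/cleanup_databases() follow the same tracked-resource idiom the harness already uses for schemas and users: record the name in a set at creation time, then drop everything in the set from cleanup_test_leftovers(). Usage sketch (illustration only, assuming a Postgres node from this harness):

node.create_database("mymaindb")  # name recorded in node.databases
# ... test body ...
node.cleanup_databases()          # DROP DATABASE IF EXISTS for each tracked name
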
@@ -0,0 +1,74 @@
+# This test checks that once citus.main_db is set and the
+# server is restarted. A Citus Maintenance Daemon for the main_db
+# is launched. This should happen even if there is no query run
+# in main_db yet.
+import time
+
+
+def wait_until_maintenance_deamons_start(deamoncount, cluster):
+    i = 0
+    n = 0
+
+    while i < 10:
+        i += 1
+        n = cluster.coordinator.sql_value(
+            "SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon';"
+        )
+
+        if n == deamoncount:
+            break
+
+        time.sleep(0.1)
+
+    assert n == deamoncount
+
+
+def test_set_maindb(cluster_factory):
+    cluster = cluster_factory(0)
+
+    # Test that once citus.main_db is set to a database name
+    # there are two maintenance deamons running upon restart.
+    # One maintenance deamon for the database of the current connection
+    # and one for the citus.main_db.
+    cluster.coordinator.create_database("mymaindb")
+    cluster.coordinator.configure("citus.main_db='mymaindb'")
+    cluster.coordinator.restart()
+
+    assert cluster.coordinator.sql_value("SHOW citus.main_db;") == "mymaindb"
+
+    wait_until_maintenance_deamons_start(2, cluster)
+
+    assert (
+        cluster.coordinator.sql_value(
+            "SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon' AND datname='mymaindb';"
+        )
+        == 1
+    )
+
+    # Test that once citus.main_db is set to empty string
+    # there is only one maintenance deamon for the database
+    # of the current connection.
+    cluster.coordinator.configure("citus.main_db=''")
+    cluster.coordinator.restart()
+    assert cluster.coordinator.sql_value("SHOW citus.main_db;") == ""
+
+    wait_until_maintenance_deamons_start(1, cluster)
+
+    # Test that after citus.main_db is dropped. The maintenance
+    # deamon for this database is terminated.
+    cluster.coordinator.configure("citus.main_db='mymaindb'")
+    cluster.coordinator.restart()
+    assert cluster.coordinator.sql_value("SHOW citus.main_db;") == "mymaindb"
+
+    wait_until_maintenance_deamons_start(2, cluster)
+
+    cluster.coordinator.sql("DROP DATABASE mymaindb;")
+
+    wait_until_maintenance_deamons_start(1, cluster)
+
+    assert (
+        cluster.coordinator.sql_value(
+            "SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon' AND datname='mymaindb';"
+        )
+        == 0
+    )
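
Note that wait_until_maintenance_deamons_start() polls pg_stat_activity at most 10 times with 0.1 s sleeps, i.e. roughly a one-second budget, before asserting on the daemon count. A possible variant with an explicit deadline (illustration only, not part of this commit):

import time

def wait_for_daemon_count(cluster, expected, timeout=1.0, interval=0.1):
    deadline = time.monotonic() + timeout
    n = None
    while time.monotonic() < deadline:
        n = cluster.coordinator.sql_value(
            "SELECT count(*) FROM pg_stat_activity"
            " WHERE application_name = 'Citus Maintenance Daemon';"
        )
        if n == expected:
            return
        time.sleep(interval)
    assert n == expected
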
@@ -133,12 +133,6 @@ ORDER BY 1, 2;
  validatable_constraint_8000016 | t
 (10 rows)
 
-DROP TABLE constrained_table;
-DROP TABLE referenced_table CASCADE;
-DROP TABLE referencing_table;
+SET client_min_messages TO WARNING;
 DROP SCHEMA validate_constraint CASCADE;
-NOTICE: drop cascades to 3 other objects
-DETAIL: drop cascades to type constraint_validity
-drop cascades to view constraint_validations_in_workers
-drop cascades to view constraint_validations
 SET search_path TO DEFAULT;
@@ -201,7 +201,8 @@ test: citus_copy_shard_placement
 # multi_utilities cannot be run in parallel with other tests because it checks
 # global locks
 test: multi_utilities
-test: foreign_key_to_reference_table validate_constraint
+test: foreign_key_to_reference_table
+test: validate_constraint
 test: multi_repartition_udt multi_repartitioned_subquery_udf multi_subtransactions
 
 test: multi_modifying_xacts

@@ -154,7 +154,8 @@ test: multi_outer_join
 # ---
 test: multi_complex_count_distinct
 test: multi_upsert multi_simple_queries
-test: foreign_key_to_reference_table validate_constraint
+test: foreign_key_to_reference_table
+test: validate_constraint
 
 # ---------
 # creates hash and range-partitioned tables and performs COPY

@@ -150,7 +150,9 @@ test: multi_outer_join
 test: multi_create_fdw
 test: multi_generate_ddl_commands multi_create_shards multi_prune_shard_list
 test: multi_upsert multi_simple_queries multi_data_types
-test: multi_utilities foreign_key_to_reference_table validate_constraint
+test: multi_utilities
+test: foreign_key_to_reference_table
+test: validate_constraint
 test: multi_repartition_udt multi_repartitioned_subquery_udf
 
 # ---------
@@ -116,9 +116,6 @@ SELECT *
   FROM constraint_validations_in_workers
   ORDER BY 1, 2;
 
-DROP TABLE constrained_table;
-DROP TABLE referenced_table CASCADE;
-DROP TABLE referencing_table;
-
+SET client_min_messages TO WARNING;
 DROP SCHEMA validate_constraint CASCADE;
 SET search_path TO DEFAULT;