mirror of https://github.com/citusdata/citus.git
Merge branch 'citusdata:main' into 7244/multi_db_connection_improvements
commit bcd1bb3805
@@ -61,7 +61,7 @@ jobs:
     - name: Check Snapshots
       run: |
         git config --global --add safe.directory ${GITHUB_WORKSPACE}
-    - uses: actions/checkout@v3.5.0
+    - uses: actions/checkout@v4
       with:
         fetch-depth: 0
     - name: Check C Style
@@ -117,7 +117,7 @@ jobs:
       image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ matrix.image_suffix }}"
       options: --user root
     steps:
-    - uses: actions/checkout@v3.5.0
+    - uses: actions/checkout@v4
     - name: Expose $PG_MAJOR to Github Env
       run: echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
       shell: bash
@@ -227,7 +227,7 @@ jobs:
     - params
     - build
     steps:
-    - uses: actions/checkout@v3.5.0
+    - uses: actions/checkout@v4
     - uses: "./.github/actions/setup_extension"
     - name: Run Test
      run: gosu circleci make -C src/test/${{ matrix.suite }} ${{ matrix.make }}
@@ -261,7 +261,7 @@ jobs:
        - ${{ needs.params.outputs.pg16_version }}
       parallel: [0,1,2,3,4,5] # workaround for running 6 parallel jobs
     steps:
-    - uses: actions/checkout@v3.5.0
+    - uses: actions/checkout@v4
     - uses: "./.github/actions/setup_extension"
     - name: Test arbitrary configs
       run: |-
@@ -311,7 +311,7 @@ jobs:
       old_pg_major: ${{ matrix.old_pg_major }}
       new_pg_major: ${{ matrix.new_pg_major }}
     steps:
-    - uses: actions/checkout@v3.5.0
+    - uses: actions/checkout@v4
     - uses: "./.github/actions/setup_extension"
       with:
         pg_major: "${{ env.old_pg_major }}"
@@ -349,7 +349,7 @@ jobs:
     - params
     - build
     steps:
-    - uses: actions/checkout@v3.5.0
+    - uses: actions/checkout@v4
     - uses: "./.github/actions/setup_extension"
       with:
         skip_installation: true
@@ -413,7 +413,7 @@ jobs:
     needs:
     - build
     steps:
-    - uses: actions/checkout@v3.5.0
+    - uses: actions/checkout@v4
     - uses: azure/login@v1
       with:
         creds: ${{ secrets.AZURE_CREDENTIALS }}
@@ -431,7 +431,7 @@ jobs:
     needs:
     - build
     steps:
-    - uses: actions/checkout@v3.5.0
+    - uses: actions/checkout@v4
     - uses: azure/login@v1
       with:
         creds: ${{ secrets.AZURE_CREDENTIALS }}
@@ -450,7 +450,7 @@ jobs:
     outputs:
       json: ${{ steps.parallelization.outputs.json }}
     steps:
-    - uses: actions/checkout@v3.5.0
+    - uses: actions/checkout@v4
     - uses: "./.github/actions/parallelization"
       id: parallelization
       with:
@@ -463,7 +463,7 @@ jobs:
     outputs:
       tests: ${{ steps.detect-regression-tests.outputs.tests }}
     steps:
-    - uses: actions/checkout@v3.5.0
+    - uses: actions/checkout@v4
       with:
         fetch-depth: 0
     - name: Detect regression tests need to be ran
@@ -514,7 +514,7 @@ jobs:
       fail-fast: false
       matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }}
     steps:
-    - uses: actions/checkout@v3.5.0
+    - uses: actions/checkout@v4
     - uses: "./.github/actions/setup_extension"
     - name: Run minimal tests
       run: |-

@@ -21,7 +21,7 @@ jobs:

     steps:
     - name: Checkout repository
-      uses: actions/checkout@v3
+      uses: actions/checkout@v4

     - name: Initialize CodeQL
       uses: github/codeql-action/init@v2

@@ -28,7 +28,7 @@ jobs:
       image: ${{ vars.build_image_name }}:${{ vars.pg15_version }}${{ vars.image_suffix }}
       options: --user root
     steps:
-    - uses: actions/checkout@v3.5.0
+    - uses: actions/checkout@v4
     - name: Configure, Build, and Install
       run: |
         echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
@@ -46,7 +46,7 @@ jobs:
     outputs:
       json: ${{ steps.parallelization.outputs.json }}
     steps:
-    - uses: actions/checkout@v3.5.0
+    - uses: actions/checkout@v4
     - uses: "./.github/actions/parallelization"
       id: parallelization
       with:
@@ -67,7 +67,7 @@ jobs:
       fail-fast: false
       matrix: ${{ fromJson(needs.prepare_parallelization_matrix.outputs.json) }}
     steps:
-    - uses: actions/checkout@v3.5.0
+    - uses: actions/checkout@v4
     - uses: "./.github/actions/setup_extension"
     - name: Run minimal tests
       run: |-

@@ -395,7 +395,7 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto
         bool showCurrentBackendDetails = showAllBackends;
         BackendData *currentBackend =
             &backendManagementShmemData->backends[backendIndex];
-        PGPROC *currentProc = &ProcGlobal->allProcs[backendIndex];
+        PGPROC *currentProc = GetPGProcByNumber(backendIndex);

         /* to work on data after releasing g spinlock to protect against errors */
         uint64 transactionNumber = 0;
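Note on the change above and the analogous hunks that follow: PostgreSQL's src/include/storage/proc.h provides GetPGProcByNumber as the supported accessor for the shared PGPROC array, and as far as I know it expands to the very indexing expression being replaced, so the rewrite is behavior-preserving while no longer hard-coding the array layout. A sketch, to be checked against the PostgreSQL headers you build with:

    /* approximate definition from PostgreSQL's storage/proc.h */
    #define GetPGProcByNumber(n) (&ProcGlobal->allProcs[(n)])

    /* equivalent today; only the macro form survives a layout change */
    PGPROC *direct = &ProcGlobal->allProcs[backendIndex];
    PGPROC *viaMacro = GetPGProcByNumber(backendIndex);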
@@ -420,7 +420,7 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto
         }

         Oid databaseId = currentBackend->databaseId;
-        int backendPid = ProcGlobal->allProcs[backendIndex].pid;
+        int backendPid = GetPGProcByNumber(backendIndex)->pid;

         /*
          * We prefer to use worker_query instead of distributedCommandOriginator in
@@ -1279,7 +1279,7 @@ ActiveDistributedTransactionNumbers(void)
     /* build list of starting procs */
     for (int curBackend = 0; curBackend < MaxBackends; curBackend++)
     {
-        PGPROC *currentProc = &ProcGlobal->allProcs[curBackend];
+        PGPROC *currentProc = GetPGProcByNumber(curBackend);
         BackendData currentBackendData;

         if (currentProc->pid == 0)
@@ -375,7 +375,7 @@ AssociateDistributedTransactionWithBackendProc(TransactionNode *transactionNode)

     for (int backendIndex = 0; backendIndex < MaxBackends; ++backendIndex)
     {
-        PGPROC *currentProc = &ProcGlobal->allProcs[backendIndex];
+        PGPROC *currentProc = GetPGProcByNumber(backendIndex);
         BackendData currentBackendData;

         /* we're not interested in processes that are not active or waiting on a lock */
@@ -559,7 +559,7 @@ BuildLocalWaitGraph(bool onlyDistributedTx)
     /* build list of starting procs */
     for (int curBackend = 0; curBackend < totalProcs; curBackend++)
     {
-        PGPROC *currentProc = &ProcGlobal->allProcs[curBackend];
+        PGPROC *currentProc = GetPGProcByNumber(curBackend);
         BackendData currentBackendData;

         if (currentProc->pid == 0)
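These proc-array scans all share one shape, visible in the context lines: iterate the array via the accessor and skip unused slots, which report pid == 0. A condensed sketch of that pattern (the bound varies between MaxBackends and totalProcs):

    for (int backendIndex = 0; backendIndex < MaxBackends; backendIndex++)
    {
        PGPROC *currentProc = GetPGProcByNumber(backendIndex);

        if (currentProc->pid == 0)
        {
            continue;           /* unused slot, nothing to inspect */
        }

        /* ... read per-backend state ... */
    }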
@@ -504,6 +504,11 @@ FilterShardsFromPgclass(Node *node, void *context)
 static bool
 HasRangeTableRef(Node *node, int *varno)
 {
+    if (node == NULL)
+    {
+        return false;
+    }
+
     if (IsA(node, RangeTblRef))
     {
         RangeTblRef *rangeTblRef = (RangeTblRef *) node;
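The NULL guard added above follows PostgreSQL's standard node-walker template (documented in src/backend/nodes/nodeFuncs.c): a walker can be handed a NULL sub-node, so it must return false before calling IsA, which dereferences the pointer; that missing guard was the null-pointer crash this change fixes. A minimal sketch of the template, with ExampleWalker as an illustrative name rather than code from this PR:

    static bool
    ExampleWalker(Node *node, void *context)
    {
        if (node == NULL)
        {
            return false;       /* nothing to inspect; this is the crash guard */
        }
        if (IsA(node, RangeTblRef))
        {
            return true;        /* found the node kind of interest */
        }
        return expression_tree_walker(node, ExampleWalker, context);
    }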
@@ -0,0 +1,33 @@
+-- The following query retrieves the foreign key constraints of the table "pg_dist_background_job"
+-- along with their details. This modification includes a fix for a null pointer exception that occurred
+-- in the "HasRangeTableRef" method of "worker_shard_visibility". The issue was resolved with PR #7604.
+select
+    ct.conname as constraint_name,
+    a.attname as column_name,
+    fc.relname as foreign_table_name,
+    fns.nspname as foreign_table_schema
+from
+    (SELECT ct.conname, ct.conrelid, ct.confrelid, ct.conkey, ct.contype,
+        ct.confkey, generate_subscripts(ct.conkey, 1) AS s
+       FROM pg_constraint ct
+    ) AS ct
+    inner join pg_class c on c.oid=ct.conrelid
+    inner join pg_namespace ns on c.relnamespace=ns.oid
+    inner join pg_attribute a on a.attrelid=ct.conrelid and a.attnum =
+        ct.conkey[ct.s]
+    left join pg_class fc on fc.oid=ct.confrelid
+    left join pg_namespace fns on fc.relnamespace=fns.oid
+    left join pg_attribute fa on fa.attrelid=ct.confrelid and fa.attnum =
+        ct.confkey[ct.s]
+where
+    ct.contype='f'
+    and fc.relname='pg_dist_background_job'
+    and ns.nspname='pg_catalog'
+order by
+    fns.nspname, fc.relname, a.attnum;
+               constraint_name               | column_name |   foreign_table_name   | foreign_table_schema
+---------------------------------------------------------------------
+ pg_dist_background_task_job_id_fkey        | job_id      | pg_dist_background_job | pg_catalog
+ pg_dist_background_task_depend_job_id_fkey | job_id      | pg_dist_background_job | pg_catalog
+(2 rows)

@@ -79,7 +79,7 @@ test: multi_basic_queries cross_join multi_complex_expressions multi_subquery mu
 test: multi_subquery_complex_reference_clause multi_subquery_window_functions multi_view multi_sql_function multi_prepare_sql
 test: sql_procedure multi_function_in_join row_types materialized_view
 test: multi_subquery_in_where_reference_clause adaptive_executor propagate_set_commands geqo
-test: forcedelegation_functions
+test: forcedelegation_functions system_queries
 # this should be run alone as it gets too many clients
 test: join_pushdown
 test: multi_subquery_union multi_subquery_in_where_clause multi_subquery_misc statement_cancel_error_message

@@ -0,0 +1,27 @@
+-- The following query retrieves the foreign key constraints of the table "pg_dist_background_job"
+-- along with their details. This modification includes a fix for a null pointer exception that occurred
+-- in the "HasRangeTableRef" method of "worker_shard_visibility". The issue was resolved with PR #7604.
+select
+    ct.conname as constraint_name,
+    a.attname as column_name,
+    fc.relname as foreign_table_name,
+    fns.nspname as foreign_table_schema
+from
+    (SELECT ct.conname, ct.conrelid, ct.confrelid, ct.conkey, ct.contype,
+        ct.confkey, generate_subscripts(ct.conkey, 1) AS s
+       FROM pg_constraint ct
+    ) AS ct
+    inner join pg_class c on c.oid=ct.conrelid
+    inner join pg_namespace ns on c.relnamespace=ns.oid
+    inner join pg_attribute a on a.attrelid=ct.conrelid and a.attnum =
+        ct.conkey[ct.s]
+    left join pg_class fc on fc.oid=ct.confrelid
+    left join pg_namespace fns on fc.relnamespace=fns.oid
+    left join pg_attribute fa on fa.attrelid=ct.confrelid and fa.attnum =
+        ct.confkey[ct.s]
+where
+    ct.contype='f'
+    and fc.relname='pg_dist_background_job'
+    and ns.nspname='pg_catalog'
+order by
+    fns.nspname, fc.relname, a.attnum;