From fcc72d8a23da8e0f7ef91bf77077abba9796867d Mon Sep 17 00:00:00 2001
From: Evgeny Nechayev
Date: Tue, 28 May 2024 03:39:13 +0300
Subject: [PATCH 1/3] =?UTF-8?q?Use=20macro=20wrapper=20to=20access=20PGPRO?=
 =?UTF-8?q?C=20data,=20which=20allow=20to=20improve=20compa=E2=80=A6=20(#7?=
 =?UTF-8?q?607)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

DESCRIPTION: Use macro wrapper to access PGPROC data, to improve compatibility with PostgreSQL forks.
---
 src/backend/distributed/transaction/backend_data.c | 6 +++---
 .../transaction/distributed_deadlock_detection.c   | 2 +-
 src/backend/distributed/transaction/lock_graph.c   | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/backend/distributed/transaction/backend_data.c b/src/backend/distributed/transaction/backend_data.c
index 5f868f548..67acadd29 100644
--- a/src/backend/distributed/transaction/backend_data.c
+++ b/src/backend/distributed/transaction/backend_data.c
@@ -395,7 +395,7 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto
 		bool showCurrentBackendDetails = showAllBackends;
 		BackendData *currentBackend =
 			&backendManagementShmemData->backends[backendIndex];
-		PGPROC *currentProc = &ProcGlobal->allProcs[backendIndex];
+		PGPROC *currentProc = GetPGProcByNumber(backendIndex);
 
 		/* to work on data after releasing g spinlock to protect against errors */
 		uint64 transactionNumber = 0;
@@ -420,7 +420,7 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto
 		}
 
 		Oid databaseId = currentBackend->databaseId;
-		int backendPid = ProcGlobal->allProcs[backendIndex].pid;
+		int backendPid = GetPGProcByNumber(backendIndex)->pid;
 
 		/*
 		 * We prefer to use worker_query instead of distributedCommandOriginator in
@@ -1279,7 +1279,7 @@ ActiveDistributedTransactionNumbers(void)
 	/* build list of starting procs */
 	for (int curBackend = 0; curBackend < MaxBackends; curBackend++)
 	{
-		PGPROC *currentProc = &ProcGlobal->allProcs[curBackend];
+		PGPROC *currentProc = GetPGProcByNumber(curBackend);
 		BackendData currentBackendData;
 
 		if (currentProc->pid == 0)
diff --git a/src/backend/distributed/transaction/distributed_deadlock_detection.c b/src/backend/distributed/transaction/distributed_deadlock_detection.c
index 27bb48ee3..5e8060a4f 100644
--- a/src/backend/distributed/transaction/distributed_deadlock_detection.c
+++ b/src/backend/distributed/transaction/distributed_deadlock_detection.c
@@ -375,7 +375,7 @@ AssociateDistributedTransactionWithBackendProc(TransactionNode *transactionNode)
 
 	for (int backendIndex = 0; backendIndex < MaxBackends; ++backendIndex)
 	{
-		PGPROC *currentProc = &ProcGlobal->allProcs[backendIndex];
+		PGPROC *currentProc = GetPGProcByNumber(backendIndex);
 		BackendData currentBackendData;
 
 		/* we're not interested in processes that are not active or waiting on a lock */
diff --git a/src/backend/distributed/transaction/lock_graph.c b/src/backend/distributed/transaction/lock_graph.c
index b55a72843..695df2bf4 100644
--- a/src/backend/distributed/transaction/lock_graph.c
+++ b/src/backend/distributed/transaction/lock_graph.c
@@ -559,7 +559,7 @@ BuildLocalWaitGraph(bool onlyDistributedTx)
 	/* build list of starting procs */
 	for (int curBackend = 0; curBackend < totalProcs; curBackend++)
 	{
-		PGPROC *currentProc = &ProcGlobal->allProcs[curBackend];
+		PGPROC *currentProc = GetPGProcByNumber(curBackend);
 		BackendData currentBackendData;
 
 		if (currentProc->pid == 0)

From 0ab42e7a802113fac1427802cf886870a1eeea87 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?G=C3=BCrkan=20=C4=B0ndibay?=
Date: Tue, 28 May 2024 11:03:38 +0300
Subject: [PATCH 2/3] Adds null check for node in HasRangeTableRef (#7609)

DESCRIPTION: Adds null check for node in HasRangeTableRef to prevent errors
---
 .../worker/worker_shard_visibility.c          |  5 +++
 src/test/regress/expected/system_queries.out | 33 +++++++++++++++++++
 src/test/regress/multi_schedule              |  2 +-
 src/test/regress/sql/system_queries.sql      | 27 +++++++++++++++
 4 files changed, 66 insertions(+), 1 deletion(-)
 create mode 100644 src/test/regress/expected/system_queries.out
 create mode 100644 src/test/regress/sql/system_queries.sql

diff --git a/src/backend/distributed/worker/worker_shard_visibility.c b/src/backend/distributed/worker/worker_shard_visibility.c
index ccd1a897c..3725800c3 100644
--- a/src/backend/distributed/worker/worker_shard_visibility.c
+++ b/src/backend/distributed/worker/worker_shard_visibility.c
@@ -504,6 +504,11 @@ FilterShardsFromPgclass(Node *node, void *context)
 static bool
 HasRangeTableRef(Node *node, int *varno)
 {
+	if (node == NULL)
+	{
+		return false;
+	}
+
 	if (IsA(node, RangeTblRef))
 	{
 		RangeTblRef *rangeTblRef = (RangeTblRef *) node;
diff --git a/src/test/regress/expected/system_queries.out b/src/test/regress/expected/system_queries.out
new file mode 100644
index 000000000..cd2aef4d2
--- /dev/null
+++ b/src/test/regress/expected/system_queries.out
@@ -0,0 +1,33 @@
+-- The following query retrieves the foreign key constraints of the table "pg_dist_background_job"
+-- along with their details. This modification includes a fix for a null pointer exception that occurred
+-- in the "HasRangeTableRef" method of "worker_shard_visibility". The issue was resolved with PR #7604.
+select
+    ct.conname as constraint_name,
+    a.attname as column_name,
+    fc.relname as foreign_table_name,
+    fns.nspname as foreign_table_schema
+from
+    (SELECT ct.conname, ct.conrelid, ct.confrelid, ct.conkey, ct.contype,
+ct.confkey, generate_subscripts(ct.conkey, 1) AS s
+       FROM pg_constraint ct
+    ) AS ct
+    inner join pg_class c on c.oid=ct.conrelid
+    inner join pg_namespace ns on c.relnamespace=ns.oid
+    inner join pg_attribute a on a.attrelid=ct.conrelid and a.attnum =
+ct.conkey[ct.s]
+    left join pg_class fc on fc.oid=ct.confrelid
+    left join pg_namespace fns on fc.relnamespace=fns.oid
+    left join pg_attribute fa on fa.attrelid=ct.confrelid and fa.attnum =
+ct.confkey[ct.s]
+where
+    ct.contype='f'
+    and fc.relname='pg_dist_background_job'
+    and ns.nspname='pg_catalog'
+order by
+    fns.nspname, fc.relname, a.attnum;
+ constraint_name | column_name | foreign_table_name | foreign_table_schema
+---------------------------------------------------------------------
+ pg_dist_background_task_job_id_fkey | job_id | pg_dist_background_job | pg_catalog
+ pg_dist_background_task_depend_job_id_fkey | job_id | pg_dist_background_job | pg_catalog
+(2 rows)
+
diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule
index af5921e60..fca36f5ab 100644
--- a/src/test/regress/multi_schedule
+++ b/src/test/regress/multi_schedule
@@ -79,7 +79,7 @@ test: multi_basic_queries cross_join multi_complex_expressions multi_subquery mu
 test: multi_subquery_complex_reference_clause multi_subquery_window_functions multi_view multi_sql_function multi_prepare_sql
 test: sql_procedure multi_function_in_join row_types materialized_view
 test: multi_subquery_in_where_reference_clause adaptive_executor propagate_set_commands geqo
-test: forcedelegation_functions
+test: forcedelegation_functions system_queries
 # this should be run alone as it gets too many clients
 test: join_pushdown
 test: multi_subquery_union multi_subquery_in_where_clause multi_subquery_misc statement_cancel_error_message
diff --git a/src/test/regress/sql/system_queries.sql b/src/test/regress/sql/system_queries.sql
new file mode 100644
index 000000000..1e1d86876
--- /dev/null
+++ b/src/test/regress/sql/system_queries.sql
@@ -0,0 +1,27 @@
+-- The following query retrieves the foreign key constraints of the table "pg_dist_background_job"
+-- along with their details. This modification includes a fix for a null pointer exception that occurred
+-- in the "HasRangeTableRef" method of "worker_shard_visibility". The issue was resolved with PR #7604.
+select
+    ct.conname as constraint_name,
+    a.attname as column_name,
+    fc.relname as foreign_table_name,
+    fns.nspname as foreign_table_schema
+from
+    (SELECT ct.conname, ct.conrelid, ct.confrelid, ct.conkey, ct.contype,
+ct.confkey, generate_subscripts(ct.conkey, 1) AS s
+       FROM pg_constraint ct
+    ) AS ct
+    inner join pg_class c on c.oid=ct.conrelid
+    inner join pg_namespace ns on c.relnamespace=ns.oid
+    inner join pg_attribute a on a.attrelid=ct.conrelid and a.attnum =
+ct.conkey[ct.s]
+    left join pg_class fc on fc.oid=ct.confrelid
+    left join pg_namespace fns on fc.relnamespace=fns.oid
+    left join pg_attribute fa on fa.attrelid=ct.confrelid and fa.attnum =
+ct.confkey[ct.s]
+where
+    ct.contype='f'
+    and fc.relname='pg_dist_background_job'
+    and ns.nspname='pg_catalog'
+order by
+    fns.nspname, fc.relname, a.attnum;

From 2874d7af4691377217e65e06694e85d0411db688 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?G=C3=BCrkan=20=C4=B0ndibay?=
Date: Fri, 31 May 2024 20:52:17 +0300
Subject: [PATCH 3/3] Updates github checkout actions to v4 (#7611)

Updates the checkout plugin for GitHub Actions to v4.

Cannot update the version for check-sql-snapshots, since the new plugin
causes the error below in the docker image this step is using.
Please refer to: https://github.com/citusdata/citus/actions/runs/9286197994/job/25552373953

Error:
```
/__e/node20/bin/node: /lib/x86_64-linux-gnu/libm.so.6: version `GLIBC_2.27' not found (required by /__e/node20/bin/node)
/__e/node20/bin/node: /lib/x86_64-linux-gnu/libc.so.6: version `GLIBC_2.28' not found (required by /__e/node20/bin/node)
/__e/node20/bin/node: /lib/x86_64-linux-gnu/libc.so.6: version `GLIBC_2.25' not found (required by /__e/node20/bin/node)
```
---
 .github/workflows/build_and_test.yml       | 22 +++++++++++-----------
 .github/workflows/codeql.yml               |  2 +-
 .github/workflows/flaky_test_debugging.yml |  6 +++---
 3 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml
index 2541296cd..cd4995e20 100644
--- a/.github/workflows/build_and_test.yml
+++ b/.github/workflows/build_and_test.yml
@@ -61,7 +61,7 @@ jobs:
       - name: Check Snapshots
         run: |
           git config --global --add safe.directory ${GITHUB_WORKSPACE}
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
       - name: Check C Style
@@ -117,7 +117,7 @@ jobs:
       image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ matrix.image_suffix }}"
       options: --user root
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - name: Expose $PG_MAJOR to Github Env
         run: echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
         shell: bash
@@ -227,7 +227,7 @@ jobs:
       - params
       - build
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
      - uses: "./.github/actions/setup_extension"
      - name: Run Test
        run: gosu circleci make -C src/test/${{ matrix.suite }} ${{ matrix.make }}
@@ -261,7 +261,7 @@ jobs:
         - ${{ needs.params.outputs.pg16_version }}
         parallel: [0,1,2,3,4,5] # workaround for running 6 parallel jobs
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - uses: "./.github/actions/setup_extension"
       - name: Test arbitrary configs
         run: |-
@@ -311,7 +311,7 @@ jobs:
       old_pg_major: ${{ matrix.old_pg_major }}
       new_pg_major: ${{ matrix.new_pg_major }}
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - uses: "./.github/actions/setup_extension"
         with:
           pg_major: "${{ env.old_pg_major }}"
@@ -349,7 +349,7 @@ jobs:
       - params
       - build
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - uses: "./.github/actions/setup_extension"
         with:
           skip_installation: true
@@ -413,7 +413,7 @@ jobs:
     needs:
       - build
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - uses: azure/login@v1
         with:
          creds: ${{ secrets.AZURE_CREDENTIALS }}
@@ -431,7 +431,7 @@ jobs:
     needs:
       - build
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - uses: azure/login@v1
         with:
          creds: ${{ secrets.AZURE_CREDENTIALS }}
@@ -450,7 +450,7 @@ jobs:
     outputs:
       json: ${{ steps.parallelization.outputs.json }}
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - uses: "./.github/actions/parallelization"
         id: parallelization
         with:
@@ -463,7 +463,7 @@ jobs:
     outputs:
       tests: ${{ steps.detect-regression-tests.outputs.tests }}
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
       - name: Detect regression tests need to be ran
@@ -514,7 +514,7 @@ jobs:
       fail-fast: false
       matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }}
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - uses: "./.github/actions/setup_extension"
       - name: Run minimal tests
         run: |-
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index 6478abf4b..027f5a048 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -21,7 +21,7 @@ jobs:
 
     steps:
     - name: Checkout repository
-      uses: actions/checkout@v3
+      uses: actions/checkout@v4
 
     - name: Initialize CodeQL
       uses: github/codeql-action/init@v2
diff --git a/.github/workflows/flaky_test_debugging.yml b/.github/workflows/flaky_test_debugging.yml
index a744edc3b..7135f99fa 100644
--- a/.github/workflows/flaky_test_debugging.yml
+++ b/.github/workflows/flaky_test_debugging.yml
@@ -28,7 +28,7 @@ jobs:
       image: ${{ vars.build_image_name }}:${{ vars.pg15_version }}${{ vars.image_suffix }}
       options: --user root
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - name: Configure, Build, and Install
         run: |
           echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
@@ -46,7 +46,7 @@ jobs:
     outputs:
       json: ${{ steps.parallelization.outputs.json }}
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - uses: "./.github/actions/parallelization"
         id: parallelization
         with:
@@ -67,7 +67,7 @@ jobs:
       fail-fast: false
      matrix: ${{ fromJson(needs.prepare_parallelization_matrix.outputs.json) }}
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - uses: "./.github/actions/setup_extension"
       - name: Run minimal tests
         run: |-
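
For context on the first patch in this series: GetPGProcByNumber is PostgreSQL's own wrapper around the shared PGPROC array, defined in src/include/storage/proc.h. The sketch below is illustration only and not part of any patch above; CountActiveBackendSlots is a hypothetical helper, and the macro body is shown only roughly, as recent upstream PostgreSQL defines it. Because every caller goes through the macro, a fork that stores its PGPROC entries differently only needs to override GetPGProcByNumber instead of patching each Citus call site that used to index ProcGlobal->allProcs directly.

```
#include "postgres.h"

#include "miscadmin.h"      /* MaxBackends */
#include "storage/proc.h"   /* PGPROC, ProcGlobal, GetPGProcByNumber */

/*
 * storage/proc.h defines the wrapper roughly as
 *     #define GetPGProcByNumber(n) (&ProcGlobal->allProcs[(n)])
 * so callers no longer hard-code the layout of ProcGlobal->allProcs.
 */

/* Hypothetical helper mirroring the loops touched by the first patch. */
static int
CountActiveBackendSlots(void)
{
	int activeCount = 0;

	for (int backendIndex = 0; backendIndex < MaxBackends; backendIndex++)
	{
		PGPROC *currentProc = GetPGProcByNumber(backendIndex);

		/* pid == 0 means this PGPROC slot is not attached to a live backend */
		if (currentProc->pid != 0)
		{
			activeCount++;
		}
	}

	return activeCount;
}
```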