From e4ac3e6d9a423b88c023a277dea017986bc5a519 Mon Sep 17 00:00:00 2001 From: Naisila Puka <37271756+naisila@users.noreply.github.com> Date: Mon, 13 Nov 2023 15:05:38 +0300 Subject: [PATCH 01/11] Bump PG versions to latest minors 14.10, 15.5, 16.1 (#7336) Postgres got minor updates on Nov 9; this starts using the images with the latest versions for our tests, namely 14.10, 15.5 and 16.1. These minor updates were compatible with Citus. Sister PR: https://github.com/citusdata/the-process/pull/152 --- .devcontainer/Dockerfile | 8 ++++---- .github/workflows/build_and_test.yml | 10 +++++----- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 1c1a2f083..c2cab0272 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -68,7 +68,7 @@ USER citus # build postgres versions separately for effective parrallelism and caching of already built versions when changing only certain versions FROM base AS pg14 -RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.9 +RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.10 RUN rm .pgenv/src/*.tar* RUN make -C .pgenv/src/postgresql-*/ clean RUN make -C .pgenv/src/postgresql-*/src/include install @@ -80,7 +80,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/ RUN rm .pgenv-staging/config/default.conf FROM base AS pg15 -RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.4 +RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.5 RUN rm .pgenv/src/*.tar* RUN make -C .pgenv/src/postgresql-*/ clean RUN make -C .pgenv/src/postgresql-*/src/include install @@ -92,7 +92,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/ RUN rm .pgenv-staging/config/default.conf FROM base AS pg16 -RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.0 +RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.1 RUN rm .pgenv/src/*.tar* RUN make -C .pgenv/src/postgresql-*/ clean RUN make -C .pgenv/src/postgresql-*/src/include install @@ -210,7 +210,7 @@ COPY --chown=citus:citus .psqlrc . RUN sudo chown --from=root:root citus:citus -R ~ # sets default pg version -RUN pgenv switch 16.0 +RUN pgenv switch 16.1 # make connecting to the coordinator easy ENV PGPORT=9700 diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index e938e3904..6b33c658f 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -31,11 +31,11 @@ jobs: pgupgrade_image_name: "citus/pgupgradetester" style_checker_image_name: "citus/stylechecker" style_checker_tools_version: "0.8.18" - image_suffix: "-v9d71045" - pg14_version: '{ "major": "14", "full": "14.9" }' - pg15_version: '{ "major": "15", "full": "15.4" }' - pg16_version: '{ "major": "16", "full": "16.0" }' - upgrade_pg_versions: "14.9-15.4-16.0" + image_suffix: "-vbd8441d" + pg14_version: '{ "major": "14", "full": "14.10" }' + pg15_version: '{ "major": "15", "full": "15.5" }' + pg16_version: '{ "major": "16", "full": "16.1" }' + upgrade_pg_versions: "14.10-15.5-16.1" steps: # Since GHA jobs needs at least one step we use a noop step here.
- name: Set up parameters From cdef2d522413a87d055dabfa2f4e46d570a08900 Mon Sep 17 00:00:00 2001 From: Naisila Puka <37271756+naisila@users.noreply.github.com> Date: Tue, 14 Nov 2023 12:49:15 +0300 Subject: [PATCH 02/11] Random tests refactoring (#7342) While investigating replication slots leftovers in PR https://github.com/citusdata/citus/pull/7338, I ran into the following refactoring/cleanup that can be done in our test suite: - Add separate test to remove non default nodes - Remove coordinator removal from `add_coordinator` test; use `remove_coordinator_from_metadata` test where needed - Don't print nodeids in `multi_multiuser_auth` and `multi_poolinfo_usage` tests - Use `startswith` when checking for isolation or failure tests - Add some dependencies accordingly in `run_test.py` for running flaky test schedules --- src/test/regress/citus_tests/run_test.py | 70 ++++++++----------- src/test/regress/expected/add_coordinator.out | 7 -- .../regress/expected/multi_multiuser_auth.out | 14 +--- .../regress/expected/multi_poolinfo_usage.out | 14 +--- .../multi_tenant_isolation_nonblocking.out | 6 -- .../expected/remove_non_default_nodes.out | 13 ++++ .../worker_split_binary_copy_test.out | 37 ---------- src/test/regress/multi_1_schedule | 1 + src/test/regress/split_schedule | 1 + src/test/regress/sql/add_coordinator.sql | 2 - src/test/regress/sql/multi_multiuser_auth.sql | 4 +- src/test/regress/sql/multi_poolinfo_usage.sql | 4 +- .../multi_tenant_isolation_nonblocking.sql | 3 - .../regress/sql/remove_non_default_nodes.sql | 8 +++ .../sql/worker_split_binary_copy_test.sql | 8 --- 15 files changed, 60 insertions(+), 132 deletions(-) create mode 100644 src/test/regress/expected/remove_non_default_nodes.out create mode 100644 src/test/regress/sql/remove_non_default_nodes.sql diff --git a/src/test/regress/citus_tests/run_test.py b/src/test/regress/citus_tests/run_test.py index b902a7998..be3529c19 100755 --- a/src/test/regress/citus_tests/run_test.py +++ b/src/test/regress/citus_tests/run_test.py @@ -135,20 +135,10 @@ DEPS = { ), "alter_role_propagation": TestDeps("minimal_schedule"), "background_rebalance": TestDeps( - None, - [ - "multi_test_helpers", - "multi_cluster_management", - ], - worker_count=3, + None, ["multi_test_helpers", "multi_cluster_management"], worker_count=3 ), "background_rebalance_parallel": TestDeps( - None, - [ - "multi_test_helpers", - "multi_cluster_management", - ], - worker_count=6, + None, ["multi_test_helpers", "multi_cluster_management"], worker_count=6 ), "function_propagation": TestDeps("minimal_schedule"), "citus_shards": TestDeps("minimal_schedule"), @@ -165,30 +155,17 @@ DEPS = { ), "schema_based_sharding": TestDeps("minimal_schedule"), "multi_sequence_default": TestDeps( - None, - [ - "multi_test_helpers", - "multi_cluster_management", - "multi_table_ddl", - ], + None, ["multi_test_helpers", "multi_cluster_management", "multi_table_ddl"] ), "grant_on_schema_propagation": TestDeps("minimal_schedule"), "propagate_extension_commands": TestDeps("minimal_schedule"), "multi_size_queries": TestDeps("base_schedule", ["multi_copy"]), "multi_mx_node_metadata": TestDeps( - None, - [ - "multi_extension", - "multi_test_helpers", - "multi_test_helpers_superuser", - ], + None, ["multi_extension", "multi_test_helpers", "multi_test_helpers_superuser"] ), "multi_mx_function_table_reference": TestDeps( None, - [ - "multi_cluster_management", - "remove_coordinator_from_metadata", - ], + ["multi_cluster_management", "remove_coordinator_from_metadata"], # because it queries node
group id and it changes as we add / remove nodes repeatable=False, ), @@ -201,15 +178,25 @@ DEPS = { ], ), "metadata_sync_helpers": TestDeps( - None, - [ - "multi_mx_node_metadata", - "multi_cluster_management", - ], + None, ["multi_mx_node_metadata", "multi_cluster_management"] ), - "multi_utilities": TestDeps( + "multi_utilities": TestDeps("minimal_schedule", ["multi_data_types"]), + "multi_tenant_isolation_nonblocking": TestDeps( + "minimal_schedule", ["multi_data_types", "remove_coordinator_from_metadata"] + ), + "remove_non_default_nodes": TestDeps( + None, ["multi_mx_node_metadata", "multi_cluster_management"], repeatable=False + ), + "citus_split_shard_columnar_partitioned": TestDeps( + "minimal_schedule", ["remove_coordinator_from_metadata"] + ), + "add_coordinator": TestDeps( + "minimal_schedule", ["remove_coordinator_from_metadata"], repeatable=False + ), + "multi_multiuser_auth": TestDeps( "minimal_schedule", - ["multi_data_types"], + ["multi_create_table", "multi_create_users", "multi_multiuser_load_data"], + repeatable=False, ), } @@ -303,9 +290,13 @@ def run_schedule_with_multiregress(test_name, schedule, dependencies, args): worker_count = needed_worker_count(test_name, dependencies) # find suitable make recipe - if dependencies.schedule == "base_isolation_schedule" or "isolation" in test_name: + if dependencies.schedule == "base_isolation_schedule" or test_name.startswith( + "isolation" + ): make_recipe = "check-isolation-custom-schedule" - elif dependencies.schedule == "failure_base_schedule" or "failure" in test_name: + elif dependencies.schedule == "failure_base_schedule" or test_name.startswith( + "failure" + ): make_recipe = "check-failure-custom-schedule" else: make_recipe = "check-custom-schedule" @@ -418,10 +409,7 @@ def test_dependencies(test_name, test_schedule, schedule_line, args): if "upgrade_columnar_before" not in before_tests: before_tests.append("upgrade_columnar_before") - return TestDeps( - default_base_schedule(test_schedule, args), - before_tests, - ) + return TestDeps(default_base_schedule(test_schedule, args), before_tests) # before_ tests leave stuff around on purpose for the after tests. So they # are not repeatable by definition. diff --git a/src/test/regress/expected/add_coordinator.out b/src/test/regress/expected/add_coordinator.out index 499669385..01f3a682d 100644 --- a/src/test/regress/expected/add_coordinator.out +++ b/src/test/regress/expected/add_coordinator.out @@ -2,13 +2,6 @@ -- ADD_COORDINATOR -- -- node trying to add itself without specifying groupid => 0 should error out --- first remove the coordinator to for testing master_add_node for coordinator -SELECT master_remove_node('localhost', :master_port); - master_remove_node ---------------------------------------------------------------------- - -(1 row) - SELECT master_add_node('localhost', :master_port); ERROR: Node cannot add itself as a worker. 
HINT: Add the node as a coordinator by using: SELECT citus_set_coordinator_host('localhost', 57636); diff --git a/src/test/regress/expected/multi_multiuser_auth.out b/src/test/regress/expected/multi_multiuser_auth.out index 8dd9b8ba7..7a72eeba1 100644 --- a/src/test/regress/expected/multi_multiuser_auth.out +++ b/src/test/regress/expected/multi_multiuser_auth.out @@ -12,19 +12,9 @@ \set bob_worker_1_pw triplex-royalty-warranty-stand-cheek \set bob_worker_2_pw omnibus-plectrum-comet-sneezy-ensile \set bob_fallback_pw :bob_worker_1_pw -SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port; - worker_1_id ---------------------------------------------------------------------- - 17 -(1 row) - +SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port \gset -SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port; - worker_2_id ---------------------------------------------------------------------- - 35 -(1 row) - +SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port \gset -- alice is a superuser so she can update own password CREATE USER alice PASSWORD :'alice_master_pw' SUPERUSER; diff --git a/src/test/regress/expected/multi_poolinfo_usage.out b/src/test/regress/expected/multi_poolinfo_usage.out index ee98f0df7..53dfca24e 100644 --- a/src/test/regress/expected/multi_poolinfo_usage.out +++ b/src/test/regress/expected/multi_poolinfo_usage.out @@ -6,19 +6,9 @@ -- Test of ability to override host/port for a node SET citus.shard_replication_factor TO 1; SET citus.next_shard_id TO 20000000; -SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port; - worker_1_id ---------------------------------------------------------------------- - 17 -(1 row) - +SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port \gset -SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port; - worker_2_id ---------------------------------------------------------------------- - 35 -(1 row) - +SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port \gset CREATE TABLE lotsa_connections (id integer, name text); SELECT create_distributed_table('lotsa_connections', 'id'); diff --git a/src/test/regress/expected/multi_tenant_isolation_nonblocking.out b/src/test/regress/expected/multi_tenant_isolation_nonblocking.out index 3ec16e6ee..dbd15b056 100644 --- a/src/test/regress/expected/multi_tenant_isolation_nonblocking.out +++ b/src/test/regress/expected/multi_tenant_isolation_nonblocking.out @@ -1275,9 +1275,3 @@ SELECT count(*) FROM pg_catalog.pg_dist_partition WHERE colocationid > 0; TRUNCATE TABLE pg_catalog.pg_dist_colocation; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id; -SELECT citus_set_coordinator_host('localhost'); - citus_set_coordinator_host ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/remove_non_default_nodes.out b/src/test/regress/expected/remove_non_default_nodes.out new file mode 100644 index 000000000..7645af708 --- /dev/null +++ b/src/test/regress/expected/remove_non_default_nodes.out @@ -0,0 +1,13 @@ +-- The default nodes for the citus test 
suite are coordinator and 2 worker nodes +-- Which we identify with master_port, worker_1_port, worker_2_port. +-- When needed in some tests, GetLocalNodeId() does not behave correctly, +-- So we remove the non default nodes. This test expects the non default nodes +-- to not have any active placements. +SELECT any_value(citus_remove_node('localhost', nodeport)) +FROM pg_dist_node +WHERE nodeport NOT IN (:master_port, :worker_1_port, :worker_2_port); + any_value +--------------------------------------------------------------------- + +(1 row) + diff --git a/src/test/regress/expected/worker_split_binary_copy_test.out b/src/test/regress/expected/worker_split_binary_copy_test.out index f23dc2043..e161b7f67 100644 --- a/src/test/regress/expected/worker_split_binary_copy_test.out +++ b/src/test/regress/expected/worker_split_binary_copy_test.out @@ -3,43 +3,6 @@ SET search_path TO worker_split_binary_copy_test; SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 1; SET citus.next_shard_id TO 81060000; --- Remove extra nodes added, otherwise GetLocalNodeId() does not bahave correctly. -SELECT citus_remove_node('localhost', 8887); - citus_remove_node ---------------------------------------------------------------------- - -(1 row) - -SELECT citus_remove_node('localhost', 9995); - citus_remove_node ---------------------------------------------------------------------- - -(1 row) - -SELECT citus_remove_node('localhost', 9992); - citus_remove_node ---------------------------------------------------------------------- - -(1 row) - -SELECT citus_remove_node('localhost', 9998); - citus_remove_node ---------------------------------------------------------------------- - -(1 row) - -SELECT citus_remove_node('localhost', 9997); - citus_remove_node ---------------------------------------------------------------------- - -(1 row) - -SELECT citus_remove_node('localhost', 8888); - citus_remove_node ---------------------------------------------------------------------- - -(1 row) - -- BEGIN: Create distributed table and insert data. CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy ( l_orderkey bigint not null, diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index 5b93c9e8b..287f4557a 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -295,6 +295,7 @@ test: multi_foreign_key_relation_graph # Replicating reference tables to coordinator. Add coordinator to pg_dist_node # and rerun some of the tests. # -------- +test: remove_coordinator_from_metadata test: add_coordinator test: replicate_reference_tables_to_coordinator test: citus_local_tables diff --git a/src/test/regress/split_schedule b/src/test/regress/split_schedule index b47acd828..53c422eab 100644 --- a/src/test/regress/split_schedule +++ b/src/test/regress/split_schedule @@ -10,6 +10,7 @@ test: foreign_key_to_reference_table # Split tests go here.
test: split_shard test: worker_split_copy_test +test: remove_non_default_nodes test: worker_split_binary_copy_test test: worker_split_text_copy_test test: citus_split_shard_by_split_points_negative diff --git a/src/test/regress/sql/add_coordinator.sql b/src/test/regress/sql/add_coordinator.sql index 81b77bfcd..2dba78064 100644 --- a/src/test/regress/sql/add_coordinator.sql +++ b/src/test/regress/sql/add_coordinator.sql @@ -3,8 +3,6 @@ -- -- node trying to add itself without specifying groupid => 0 should error out --- first remove the coordinator to for testing master_add_node for coordinator -SELECT master_remove_node('localhost', :master_port); SELECT master_add_node('localhost', :master_port); SELECT master_add_node('localhost', :master_port, groupid => 0) AS master_nodeid \gset diff --git a/src/test/regress/sql/multi_multiuser_auth.sql b/src/test/regress/sql/multi_multiuser_auth.sql index 43cb3c11f..1cd566b50 100644 --- a/src/test/regress/sql/multi_multiuser_auth.sql +++ b/src/test/regress/sql/multi_multiuser_auth.sql @@ -16,9 +16,9 @@ \set bob_worker_2_pw omnibus-plectrum-comet-sneezy-ensile \set bob_fallback_pw :bob_worker_1_pw -SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port; +SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port \gset -SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port; +SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port \gset -- alice is a superuser so she can update own password diff --git a/src/test/regress/sql/multi_poolinfo_usage.sql b/src/test/regress/sql/multi_poolinfo_usage.sql index da039cfca..2fbaed2ed 100644 --- a/src/test/regress/sql/multi_poolinfo_usage.sql +++ b/src/test/regress/sql/multi_poolinfo_usage.sql @@ -7,9 +7,9 @@ SET citus.shard_replication_factor TO 1; SET citus.next_shard_id TO 20000000; -SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port; +SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port \gset -SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port; +SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port \gset CREATE TABLE lotsa_connections (id integer, name text); diff --git a/src/test/regress/sql/multi_tenant_isolation_nonblocking.sql b/src/test/regress/sql/multi_tenant_isolation_nonblocking.sql index 1299c9282..f74835108 100644 --- a/src/test/regress/sql/multi_tenant_isolation_nonblocking.sql +++ b/src/test/regress/sql/multi_tenant_isolation_nonblocking.sql @@ -607,6 +607,3 @@ TRUNCATE TABLE pg_catalog.pg_dist_colocation; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id; - -SELECT citus_set_coordinator_host('localhost'); - diff --git a/src/test/regress/sql/remove_non_default_nodes.sql b/src/test/regress/sql/remove_non_default_nodes.sql new file mode 100644 index 000000000..4175e87dc --- /dev/null +++ b/src/test/regress/sql/remove_non_default_nodes.sql @@ -0,0 +1,8 @@ +-- The default nodes for the citus test suite are coordinator and 2 worker nodes +-- Which we identify with master_port, worker_1_port, worker_2_port. 
+-- When needed in some tests, GetLocalNodeId() does not behave correctly, +-- So we remove the non default nodes. This test expects the non default nodes +-- to not have any active placements. +SELECT any_value(citus_remove_node('localhost', nodeport)) +FROM pg_dist_node +WHERE nodeport NOT IN (:master_port, :worker_1_port, :worker_2_port); diff --git a/src/test/regress/sql/worker_split_binary_copy_test.sql b/src/test/regress/sql/worker_split_binary_copy_test.sql index 489ff9dc4..d6ca3c9df 100644 --- a/src/test/regress/sql/worker_split_binary_copy_test.sql +++ b/src/test/regress/sql/worker_split_binary_copy_test.sql @@ -4,14 +4,6 @@ SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 1; SET citus.next_shard_id TO 81060000; --- Remove extra nodes added, otherwise GetLocalNodeId() does not bahave correctly. -SELECT citus_remove_node('localhost', 8887); -SELECT citus_remove_node('localhost', 9995); -SELECT citus_remove_node('localhost', 9992); -SELECT citus_remove_node('localhost', 9998); -SELECT citus_remove_node('localhost', 9997); -SELECT citus_remove_node('localhost', 8888); - -- BEGIN: Create distributed table and insert data. CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy ( l_orderkey bigint not null, From a960799dfbdbbb4d66686bfada1997ea0cfc4e88 Mon Sep 17 00:00:00 2001 From: Naisila Puka <37271756+naisila@users.noreply.github.com> Date: Tue, 14 Nov 2023 18:50:54 +0300 Subject: [PATCH 03/11] Clean up leftover replication slots in tests (#7338) This commit fixes the flakiness in `logical_replication` and `citus_non_blocking_split_shard_cleanup` tests. The flakiness was related to leftover replication slots. Below is a flaky example for each test: logical_replication https://github.com/citusdata/citus/actions/runs/6721324131/attempts/1#summary-18267030604 citus_non_blocking_split_shard_cleanup https://github.com/citusdata/citus/actions/runs/6721324131/attempts/1#summary-18267006967 ```diff -- Replication slots should be cleaned up SELECT slot_name FROM pg_replication_slots; slot_name --------------------------------- -(0 rows) + citus_shard_split_slot_19_10_17 +(1 row) ``` The tests by themselves are not flaky: 32 flaky test schedules, each with 20 runs, ran successfully. https://github.com/citusdata/citus/actions/runs/6822020127?pr=7338 The conclusion is that: 1. `multi_tenant_isolation_nonblocking` is the problematic test running before `logical_replication` in the `enterprise_schedule`, so I added a cleanup at the end of `multi_tenant_isolation_nonblocking`. https://github.com/citusdata/citus/actions/runs/6824334614/attempts/1#summary-18560127461 2. `citus_split_shard_by_split_points_negative` is the problematic test running before `citus_non_blocking_split_shard_cleanup` in the split schedule. Also added a cleanup line there.
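In both cases the cleanup is the same single statement, appended at the end of the offending test (a minimal illustration; the full context is in the diffs below):

```sql
-- make sure we don't have any replication objects leftover on the nodes
SELECT public.wait_for_resource_cleanup();
```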
For details on the investigation of leftover replication slots, please check the PR https://github.com/citusdata/citus/pull/7338 --- .../citus_split_shard_by_split_points_negative.out | 6 ++++++ .../expected/multi_tenant_isolation_nonblocking.out | 7 +++++++ .../sql/citus_split_shard_by_split_points_negative.sql | 1 + .../regress/sql/multi_tenant_isolation_nonblocking.sql | 3 +++ 4 files changed, 17 insertions(+) diff --git a/src/test/regress/expected/citus_split_shard_by_split_points_negative.out b/src/test/regress/expected/citus_split_shard_by_split_points_negative.out index 85b1fc3ee..6a4265f81 100644 --- a/src/test/regress/expected/citus_split_shard_by_split_points_negative.out +++ b/src/test/regress/expected/citus_split_shard_by_split_points_negative.out @@ -135,4 +135,10 @@ NOTICE: drop cascades to 3 other objects DETAIL: drop cascades to table citus_split_shard_by_split_points_negative.range_paritioned_table_to_split drop cascades to table citus_split_shard_by_split_points_negative.table_to_split drop cascades to table citus_split_shard_by_split_points_negative.table_to_split_replication_factor_2 +SELECT public.wait_for_resource_cleanup(); + wait_for_resource_cleanup +--------------------------------------------------------------------- + +(1 row) + --END : Cleanup diff --git a/src/test/regress/expected/multi_tenant_isolation_nonblocking.out b/src/test/regress/expected/multi_tenant_isolation_nonblocking.out index dbd15b056..3daac7dac 100644 --- a/src/test/regress/expected/multi_tenant_isolation_nonblocking.out +++ b/src/test/regress/expected/multi_tenant_isolation_nonblocking.out @@ -1275,3 +1275,10 @@ SELECT count(*) FROM pg_catalog.pg_dist_partition WHERE colocationid > 0; TRUNCATE TABLE pg_catalog.pg_dist_colocation; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id; +-- make sure we don't have any replication objects leftover on the nodes +SELECT public.wait_for_resource_cleanup(); + wait_for_resource_cleanup +--------------------------------------------------------------------- + +(1 row) + diff --git a/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql b/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql index fe37777c7..4c180052f 100644 --- a/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql +++ b/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql @@ -113,4 +113,5 @@ SELECT citus_split_shard_by_split_points( --BEGIN : Cleanup \c - postgres - :master_port DROP SCHEMA "citus_split_shard_by_split_points_negative" CASCADE; +SELECT public.wait_for_resource_cleanup(); --END : Cleanup diff --git a/src/test/regress/sql/multi_tenant_isolation_nonblocking.sql b/src/test/regress/sql/multi_tenant_isolation_nonblocking.sql index f74835108..994f29f0a 100644 --- a/src/test/regress/sql/multi_tenant_isolation_nonblocking.sql +++ b/src/test/regress/sql/multi_tenant_isolation_nonblocking.sql @@ -607,3 +607,6 @@ TRUNCATE TABLE pg_catalog.pg_dist_colocation; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id; + +-- make sure we don't have any replication objects leftover on the nodes +SELECT public.wait_for_resource_cleanup(); From c6fbb72c0269f9debf65e36b9aafbcfaff3fe23a Mon Sep 17 00:00:00 2001 From: Naisila Puka <37271756+naisila@users.noreply.github.com> Date: Wed, 15 Nov 2023 13:28:43 +0300 Subject: [PATCH 04/11] Fix flaky 
multi_prepare_plsql (#7346) Simple need of an `ORDER BY` clause. Ran into this twice this week already! https://github.com/citusdata/citus/actions/runs/6849701315/attempts/1#summary-18622563506 https://github.com/citusdata/citus/actions/runs/6875051160/attempts/1#summary-18698009952 ```diff SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup'; nspname | typname -------------+------------------------- - public | prepare_ddl_type_backup otherschema | prepare_ddl_type_backup + public | prepare_ddl_type_backup (2 rows) ``` --- src/test/regress/citus_tests/run_test.py | 1 + src/test/regress/expected/multi_prepare_plsql.out | 5 +++-- src/test/regress/multi_schedule | 3 ++- src/test/regress/sql/multi_prepare_plsql.sql | 3 ++- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/src/test/regress/citus_tests/run_test.py b/src/test/regress/citus_tests/run_test.py index be3529c19..158a44ef6 100755 --- a/src/test/regress/citus_tests/run_test.py +++ b/src/test/regress/citus_tests/run_test.py @@ -198,6 +198,7 @@ DEPS = { ["multi_create_table", "multi_create_users", "multi_multiuser_load_data"], repeatable=False, ), + "multi_prepare_plsql": TestDeps("base_schedule"), } diff --git a/src/test/regress/expected/multi_prepare_plsql.out b/src/test/regress/expected/multi_prepare_plsql.out index 74c9835ff..a87b47a34 100644 --- a/src/test/regress/expected/multi_prepare_plsql.out +++ b/src/test/regress/expected/multi_prepare_plsql.out @@ -1317,11 +1317,11 @@ SELECT type_ddl_plpgsql(); (1 row) -- find all renamed types to verify the schema name didn't leak, nor a crash happened -SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup'; +SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup' ORDER BY 1; nspname | typname --------------------------------------------------------------------- - public | prepare_ddl_type_backup otherschema | prepare_ddl_type_backup + public | prepare_ddl_type_backup (2 rows) DROP TYPE prepare_ddl_type_backup; @@ -1332,6 +1332,7 @@ DROP FUNCTION ddl_in_plpgsql(); DROP FUNCTION copy_in_plpgsql(); DROP TABLE prepare_ddl; DROP TABLE local_ddl; +DROP TABLE plpgsql_table; DROP SCHEMA otherschema; -- clean-up functions DROP FUNCTION plpgsql_test_1(); diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index 65a272566..2c8f7b085 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -83,7 +83,8 @@ test: forcedelegation_functions # this should be run alone as it gets too many clients test: join_pushdown test: multi_subquery_union multi_subquery_in_where_clause multi_subquery_misc statement_cancel_error_message -test: multi_agg_distinct multi_limit_clause_approximate multi_outer_join_reference multi_single_relation_subquery multi_prepare_plsql set_role_in_transaction +test: multi_agg_distinct +test: multi_limit_clause_approximate multi_outer_join_reference multi_single_relation_subquery multi_prepare_plsql set_role_in_transaction test: multi_reference_table multi_select_for_update relation_access_tracking pg13_with_ties test: custom_aggregate_support aggregate_support tdigest_aggregate_support test: multi_average_expression multi_working_columns multi_having_pushdown having_subquery diff --git a/src/test/regress/sql/multi_prepare_plsql.sql
b/src/test/regress/sql/multi_prepare_plsql.sql index 8589e5b5a..e71e2818e 100644 --- a/src/test/regress/sql/multi_prepare_plsql.sql +++ b/src/test/regress/sql/multi_prepare_plsql.sql @@ -624,7 +624,7 @@ CREATE TYPE prepare_ddl_type AS (x int, y int); SELECT type_ddl_plpgsql(); -- find all renamed types to verify the schema name didn't leak, nor a crash happened -SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup'; +SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup' ORDER BY 1; DROP TYPE prepare_ddl_type_backup; RESET search_path; @@ -635,6 +635,7 @@ DROP FUNCTION ddl_in_plpgsql(); DROP FUNCTION copy_in_plpgsql(); DROP TABLE prepare_ddl; DROP TABLE local_ddl; +DROP TABLE plpgsql_table; DROP SCHEMA otherschema; -- clean-up functions From 0d1f18862be68350a2f784bcbe98344ddcad8a6d Mon Sep 17 00:00:00 2001 From: Naisila Puka <37271756+naisila@users.noreply.github.com> Date: Thu, 16 Nov 2023 13:12:30 +0300 Subject: [PATCH 05/11] Propagates SECURITY LABEL ON ROLE stmt (#7304) We propagate `SECURITY LABEL [for provider] ON ROLE rolename IS labelname` to the worker nodes. We also make sure to run the relevant `SecLabelStmt` commands on a newly added node by looking at roles found in `pg_shseclabel`. See the official docs for an explanation of how this command works: https://www.postgresql.org/docs/current/sql-security-label.html This command stores the role label in the `pg_shseclabel` catalog table. This commit also fixes the regex string in the `check_gucs_are_alphabetically_sorted.sh` script such that it escapes the dot. Previously it was looking for all strings starting with "citus" instead of "citus." as it should. To test this feature, I currently make use of a special GUC to control label provider registration in PG_init when creating the Citus extension.
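As a concrete example, once that dummy label provider is registered, commands of the following shape (taken from the new `seclabel` test added below) are deparsed and propagated to the worker nodes:

```sql
SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified';
SECURITY LABEL ON ROLE user1 IS NULL;
```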
--- ci/check_gucs_are_alphabetically_sorted.sh | 2 +- gucs.out | 133 ++++++++++++++ .../commands/distribute_object_ops.c | 14 ++ src/backend/distributed/commands/role.c | 71 ++++++- src/backend/distributed/commands/seclabel.c | 125 +++++++++++++ .../deparser/deparse_seclabel_stmts.c | 78 ++++++++ .../distributed/operations/shard_rebalancer.c | 2 +- .../replication/multi_logical_replication.c | 4 +- src/backend/distributed/shared_library_init.c | 18 +- src/include/distributed/commands.h | 5 + src/include/distributed/deparser.h | 3 + src/include/distributed/shard_rebalancer.h | 2 +- .../regress/expected/multi_test_helpers.out | 30 +++ src/test/regress/expected/seclabel.out | 173 ++++++++++++++++++ src/test/regress/multi_1_schedule | 1 + src/test/regress/pg_regress_multi.pl | 7 +- src/test/regress/sql/multi_test_helpers.sql | 31 ++++ src/test/regress/sql/seclabel.sql | 87 +++++++++ 18 files changed, 774 insertions(+), 12 deletions(-) create mode 100644 gucs.out create mode 100644 src/backend/distributed/commands/seclabel.c create mode 100644 src/backend/distributed/deparser/deparse_seclabel_stmts.c create mode 100644 src/test/regress/expected/seclabel.out create mode 100644 src/test/regress/sql/seclabel.sql diff --git a/ci/check_gucs_are_alphabetically_sorted.sh b/ci/check_gucs_are_alphabetically_sorted.sh index a769ae4fb..763b5305f 100755 --- a/ci/check_gucs_are_alphabetically_sorted.sh +++ b/ci/check_gucs_are_alphabetically_sorted.sh @@ -5,6 +5,6 @@ set -euo pipefail source ci/ci_helpers.sh # extract citus gucs in the form of "citus.X" -grep -o -E "(\.*\"citus.\w+\")," src/backend/distributed/shared_library_init.c > gucs.out +grep -o -E "(\.*\"citus\.\w+\")," src/backend/distributed/shared_library_init.c > gucs.out sort -c gucs.out rm gucs.out diff --git a/gucs.out b/gucs.out new file mode 100644 index 000000000..8501e6c1f --- /dev/null +++ b/gucs.out @@ -0,0 +1,133 @@ +"citus.all_modifications_commutative", +"citus.allow_modifications_from_workers_to_replicated_tables", +"citus.allow_nested_distributed_execution", +"citus.allow_unsafe_constraints", +"citus.allow_unsafe_locks_from_workers", +"citus.background_task_queue_interval", +"citus.check_available_space_before_move", +"citus.cluster_name", +"citus.coordinator_aggregation_strategy", +"citus.copy_switchover_threshold", +"citus.count_distinct_error_rate", +"citus.cpu_priority", +"citus.cpu_priority_for_logical_replication_senders", +"citus.create_object_propagation", +"citus.defer_drop_after_shard_move", +"citus.defer_drop_after_shard_split", +"citus.defer_shard_delete_interval", +"citus.desired_percent_disk_available_after_move", +"citus.distributed_deadlock_detection_factor", +"citus.enable_alter_database_owner", +"citus.enable_alter_role_propagation", +"citus.enable_alter_role_set_propagation", +"citus.enable_binary_protocol", +"citus.enable_change_data_capture", +"citus.enable_cluster_clock", +"citus.enable_cost_based_connection_establishment", +"citus.enable_create_role_propagation", +"citus.enable_create_type_propagation", +"citus.enable_ddl_propagation", +"citus.enable_deadlock_prevention", +"citus.enable_fast_path_router_planner", +"citus.enable_local_execution", +"citus.enable_local_reference_table_foreign_keys", +"citus.enable_manual_changes_to_shards", +"citus.enable_manual_metadata_changes_for_user", +"citus.enable_metadata_sync", +"citus.enable_non_colocated_router_query_pushdown", +"citus.enable_repartition_joins", +"citus.enable_repartitioned_insert_select", +"citus.enable_router_execution", 
+"citus.enable_schema_based_sharding", +"citus.enable_single_hash_repartition_joins", +"citus.enable_statistics_collection", +"citus.enable_unique_job_ids", +"citus.enable_unsafe_triggers", +"citus.enable_unsupported_feature_messages", +"citus.enable_version_checks", +"citus.enforce_foreign_key_restrictions", +"citus.enforce_object_restrictions_for_local_objects", +"citus.executor_slow_start_interval", +"citus.explain_all_tasks", +"citus.explain_analyze_sort_method", +"citus.explain_distributed_queries", +"citus.force_max_query_parallelization", +"citus.function_opens_transaction_block", +"citus.grep_remote_commands", +"citus.hide_citus_dependent_objects", +"citus.hide_shards_from_app_name_prefixes", +"citus.isolation_test_session_process_id", +"citus.isolation_test_session_remote_process_id", +"citus.limit_clause_row_fetch_count", +"citus.local_copy_flush_threshold", +"citus.local_hostname", +"citus.local_shared_pool_size", +"citus.local_table_join_policy", +"citus.log_distributed_deadlock_detection", +"citus.log_intermediate_results", +"citus.log_local_commands", +"citus.log_multi_join_order", +"citus.log_remote_commands", +"citus.logical_replication_timeout", +"citus.main_db", +"citus.max_adaptive_executor_pool_size", +"citus.max_background_task_executors", +"citus.max_background_task_executors_per_node", +"citus.max_cached_connection_lifetime", +"citus.max_cached_conns_per_worker", +"citus.max_client_connections", +"citus.max_high_priority_background_processes", +"citus.max_intermediate_result_size", +"citus.max_matview_size_to_auto_recreate", +"citus.max_rebalancer_logged_ignored_moves", +"citus.max_shared_pool_size", +"citus.max_worker_nodes_tracked", +"citus.metadata_sync_interval", +"citus.metadata_sync_mode", +"citus.metadata_sync_retry_interval", +"citus.mitmfifo", +"citus.multi_shard_modify_mode", +"citus.multi_task_query_log_level", +"citus.next_cleanup_record_id", +"citus.next_operation_id", +"citus.next_placement_id", +"citus.next_shard_id", +"citus.node_connection_timeout", +"citus.node_conninfo", +"citus.override_table_visibility", +"citus.prevent_incomplete_connection_establishment", +"citus.propagate_session_settings_for_loopback_connection", +"citus.propagate_set_commands", +"citus.rebalancer_by_disk_size_base_cost", +"citus.recover_2pc_interval", +"citus.remote_copy_flush_threshold", +"citus.remote_task_check_interval", +"citus.repartition_join_bucket_count_per_node", +"citus.replicate_reference_tables_on_activate", +"citus.replication_model", +"citus.running_under_citus_test_suite", +"citus.select_opens_transaction_block", +"citus.shard_count", +"citus.shard_replication_factor", +"citus.show_shards_for_app_name_prefixes", +"citus.skip_advisory_lock_permission_checks", +"citus.skip_constraint_validation", +"citus.skip_jsonb_validation_in_copy", +"citus.sort_returning", +"citus.stat_statements_max", +"citus.stat_statements_purge_interval", +"citus.stat_statements_track", +"citus.stat_tenants_limit", +"citus.stat_tenants_log_level", +"citus.stat_tenants_period", +"citus.stat_tenants_track", +"citus.stat_tenants_untracked_sample_rate", +"citus.subquery_pushdown", +"citus.task_assignment_policy", +"citus.task_executor_type", +"citus.use_citus_managed_tables", +"citus.use_secondary_nodes", +"citus.values_materialization_threshold", +"citus.version", +"citus.worker_min_messages", +"citus.writable_standby_coordinator", diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index f27e4cca6..72ea5beb4 
100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -374,6 +374,15 @@ static DistributeObjectOps Any_Rename = { .address = NULL, .markDistributed = false, }; +static DistributeObjectOps Any_SecLabel = { + .deparse = DeparseSecLabelStmt, + .qualify = NULL, + .preprocess = NULL, + .postprocess = PostprocessSecLabelStmt, + .operationType = DIST_OPS_ALTER, + .address = SecLabelStmtObjectAddress, + .markDistributed = false, +}; static DistributeObjectOps Attribute_Rename = { .deparse = DeparseRenameAttributeStmt, .qualify = QualifyRenameAttributeStmt, @@ -2020,6 +2029,11 @@ GetDistributeObjectOps(Node *node) return &Vacuum_Analyze; } + case T_SecLabelStmt: + { + return &Any_SecLabel; + } + case T_RenameStmt: { RenameStmt *stmt = castNode(RenameStmt, node); diff --git a/src/backend/distributed/commands/role.c b/src/backend/distributed/commands/role.c index a2da3bf81..3177c73a0 100644 --- a/src/backend/distributed/commands/role.c +++ b/src/backend/distributed/commands/role.c @@ -23,6 +23,7 @@ #include "catalog/pg_auth_members.h" #include "catalog/pg_authid.h" #include "catalog/pg_db_role_setting.h" +#include "catalog/pg_shseclabel.h" #include "catalog/pg_type.h" #include "catalog/objectaddress.h" #include "commands/dbcommands.h" @@ -65,6 +66,7 @@ static DefElem * makeDefElemBool(char *name, bool value); static List * GenerateRoleOptionsList(HeapTuple tuple); static List * GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options); static List * GenerateGrantRoleStmtsOfRole(Oid roleid); +static List * GenerateSecLabelOnRoleStmts(Oid roleid, char *rolename); static void EnsureSequentialModeForRoleDDL(void); static char * GetRoleNameFromDbRoleSetting(HeapTuple tuple, @@ -515,13 +517,14 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid) { HeapTuple roleTuple = SearchSysCache1(AUTHOID, ObjectIdGetDatum(roleOid)); Form_pg_authid role = ((Form_pg_authid) GETSTRUCT(roleTuple)); + char *rolename = pstrdup(NameStr(role->rolname)); CreateRoleStmt *createRoleStmt = NULL; if (EnableCreateRolePropagation) { createRoleStmt = makeNode(CreateRoleStmt); createRoleStmt->stmt_type = ROLESTMT_ROLE; - createRoleStmt->role = pstrdup(NameStr(role->rolname)); + createRoleStmt->role = rolename; createRoleStmt->options = GenerateRoleOptionsList(roleTuple); } @@ -532,7 +535,7 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid) alterRoleStmt->role = makeNode(RoleSpec); alterRoleStmt->role->roletype = ROLESPEC_CSTRING; alterRoleStmt->role->location = -1; - alterRoleStmt->role->rolename = pstrdup(NameStr(role->rolname)); + alterRoleStmt->role->rolename = rolename; alterRoleStmt->action = 1; alterRoleStmt->options = GenerateRoleOptionsList(roleTuple); } @@ -544,7 +547,7 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid) { /* add a worker_create_or_alter_role command if any of them are set */ char *createOrAlterRoleQuery = CreateCreateOrAlterRoleCommand( - pstrdup(NameStr(role->rolname)), + rolename, createRoleStmt, alterRoleStmt); @@ -566,6 +569,20 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid) { completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt)); } + + /* + * append SECURITY LABEL ON ROLE commands for this specific user + * When we propagate user creation, we also want to make sure that we propagate + * all the security labels it has been given. For this, we check pg_shseclabel + * for the ROLE entry corresponding to roleOid, and generate the relevant + * SecLabel stmts to be run in the new node. 
+ */ + List *secLabelOnRoleStmts = GenerateSecLabelOnRoleStmts(roleOid, rolename); + stmt = NULL; + foreach_ptr(stmt, secLabelOnRoleStmts) + { + completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt)); + } } return completeRoleList; @@ -895,6 +912,54 @@ GenerateGrantRoleStmtsOfRole(Oid roleid) } +/* + * GenerateSecLabelOnRoleStmts generates the SecLabelStmts for the role + * whose oid is roleid. + */ +static List * +GenerateSecLabelOnRoleStmts(Oid roleid, char *rolename) +{ + List *secLabelStmts = NIL; + + /* + * Note that roles are shared database objects, therefore their + * security labels are stored in pg_shseclabel instead of pg_seclabel. + */ + Relation pg_shseclabel = table_open(SharedSecLabelRelationId, AccessShareLock); + ScanKeyData skey[1]; + ScanKeyInit(&skey[0], Anum_pg_shseclabel_objoid, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(roleid)); + SysScanDesc scan = systable_beginscan(pg_shseclabel, SharedSecLabelObjectIndexId, + true, NULL, 1, &skey[0]); + + HeapTuple tuple = NULL; + while (HeapTupleIsValid(tuple = systable_getnext(scan))) + { + SecLabelStmt *secLabelStmt = makeNode(SecLabelStmt); + secLabelStmt->objtype = OBJECT_ROLE; + secLabelStmt->object = (Node *) makeString(pstrdup(rolename)); + + Datum datumArray[Natts_pg_shseclabel]; + bool isNullArray[Natts_pg_shseclabel]; + + heap_deform_tuple(tuple, RelationGetDescr(pg_shseclabel), datumArray, + isNullArray); + + secLabelStmt->provider = TextDatumGetCString( + datumArray[Anum_pg_shseclabel_provider - 1]); + secLabelStmt->label = TextDatumGetCString( + datumArray[Anum_pg_shseclabel_label - 1]); + + secLabelStmts = lappend(secLabelStmts, secLabelStmt); + } + + systable_endscan(scan); + table_close(pg_shseclabel, AccessShareLock); + + return secLabelStmts; +} + + /* * PreprocessCreateRoleStmt creates a worker_create_or_alter_role query for the * role that is being created. With that query we can create the role in the diff --git a/src/backend/distributed/commands/seclabel.c b/src/backend/distributed/commands/seclabel.c new file mode 100644 index 000000000..208d186c7 --- /dev/null +++ b/src/backend/distributed/commands/seclabel.c @@ -0,0 +1,125 @@ +/*------------------------------------------------------------------------- + * + * seclabel.c + * + * This file contains the logic of SECURITY LABEL statement propagation. + * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "distributed/commands.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/coordinator_protocol.h" +#include "distributed/deparser.h" +#include "distributed/log_utils.h" +#include "distributed/metadata_sync.h" +#include "distributed/metadata/distobject.h" + + +/* + * PostprocessSecLabelStmt prepares the commands that need to be run on all workers to assign + * security labels on distributed objects, currently supporting just Role objects. + * It also ensures that all object dependencies exist on all + * nodes for the object in the SecLabelStmt. 
+ */ +List * +PostprocessSecLabelStmt(Node *node, const char *queryString) +{ + if (!ShouldPropagate()) + { + return NIL; + } + + SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node); + + List *objectAddresses = GetObjectAddressListFromParseTree(node, false, true); + if (!IsAnyObjectDistributed(objectAddresses)) + { + return NIL; + } + + if (secLabelStmt->objtype != OBJECT_ROLE) + { + /* + * If we are not in the coordinator, we don't want to interrupt the security + * label command with notices, the user expects that from the worker node + * the command will not be propagated + */ + if (EnableUnsupportedFeatureMessages && IsCoordinator()) + { + ereport(NOTICE, (errmsg("not propagating SECURITY LABEL commands whose " + "object type is not role"), + errhint("Connect to worker nodes directly to manually " + "run the same SECURITY LABEL command."))); + } + return NIL; + } + + if (!EnableCreateRolePropagation) + { + return NIL; + } + + EnsureCoordinator(); + EnsureAllObjectDependenciesExistOnAllNodes(objectAddresses); + + const char *sql = DeparseTreeNode((Node *) secLabelStmt); + + List *commandList = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commandList); +} + + +/* + * SecLabelStmtObjectAddress returns the object address of the object on + * which this statement operates (secLabelStmt->object). Note that it has no limitation + * on the object type being OBJECT_ROLE. This is intentionally implemented like this + * since it is fairly simple to implement and we might extend SECURITY LABEL propagation + * in the future to include more object types. + */ +List * +SecLabelStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) +{ + SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node); + + Relation rel = NULL; + ObjectAddress address = get_object_address(secLabelStmt->objtype, + secLabelStmt->object, &rel, + AccessShareLock, missing_ok); + if (rel != NULL) + { + relation_close(rel, AccessShareLock); + } + + ObjectAddress *addressPtr = palloc0(sizeof(ObjectAddress)); + *addressPtr = address; + return list_make1(addressPtr); +} + + +/* + * citus_test_object_relabel is a dummy function for check_object_relabel_type hook. + * It is meant to be used in tests combined with citus_test_register_label_provider + */ +void +citus_test_object_relabel(const ObjectAddress *object, const char *seclabel) +{ + if (seclabel == NULL || + strcmp(seclabel, "citus_unclassified") == 0 || + strcmp(seclabel, "citus_classified") == 0 || + strcmp(seclabel, "citus '!unclassified") == 0) + { + return; + } + + ereport(ERROR, + (errcode(ERRCODE_INVALID_NAME), + errmsg("'%s' is not a valid security label for Citus tests.", seclabel))); +} diff --git a/src/backend/distributed/deparser/deparse_seclabel_stmts.c b/src/backend/distributed/deparser/deparse_seclabel_stmts.c new file mode 100644 index 000000000..a1aa047cc --- /dev/null +++ b/src/backend/distributed/deparser/deparse_seclabel_stmts.c @@ -0,0 +1,78 @@ +/*------------------------------------------------------------------------- + * + * deparse_seclabel_stmts.c + * All routines to deparse SECURITY LABEL statements. + * + * Copyright (c), Citus Data, Inc. 
+ * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "distributed/deparser.h" +#include "nodes/parsenodes.h" +#include "utils/builtins.h" + +static void AppendSecLabelStmt(StringInfo buf, SecLabelStmt *stmt); + +/* + * DeparseSecLabelStmt builds and returns a string representation of the + * SecLabelStmt for application on a remote server. + */ +char * +DeparseSecLabelStmt(Node *node) +{ + SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node); + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + AppendSecLabelStmt(&buf, secLabelStmt); + + return buf.data; +} + + +/* + * AppendSecLabelStmt generates the string representation of the + * SecLabelStmt and appends it to the buffer. + */ +static void +AppendSecLabelStmt(StringInfo buf, SecLabelStmt *stmt) +{ + appendStringInfoString(buf, "SECURITY LABEL "); + + if (stmt->provider != NULL) + { + appendStringInfo(buf, "FOR %s ", quote_identifier(stmt->provider)); + } + + appendStringInfoString(buf, "ON "); + + switch (stmt->objtype) + { + case OBJECT_ROLE: + { + appendStringInfo(buf, "ROLE %s ", quote_identifier(strVal(stmt->object))); + break; + } + + /* normally, we shouldn't reach this */ + default: + { + ereport(ERROR, (errmsg("unsupported security label statement for" + " deparsing"))); + } + } + + appendStringInfoString(buf, "IS "); + + if (stmt->label != NULL) + { + appendStringInfo(buf, "%s", quote_literal_cstr(stmt->label)); + } + else + { + appendStringInfoString(buf, "NULL"); + } +} diff --git a/src/backend/distributed/operations/shard_rebalancer.c b/src/backend/distributed/operations/shard_rebalancer.c index d339ac56a..213d135e6 100644 --- a/src/backend/distributed/operations/shard_rebalancer.c +++ b/src/backend/distributed/operations/shard_rebalancer.c @@ -317,7 +317,7 @@ PG_FUNCTION_INFO_V1(citus_rebalance_start); PG_FUNCTION_INFO_V1(citus_rebalance_stop); PG_FUNCTION_INFO_V1(citus_rebalance_wait); -bool RunningUnderIsolationTest = false; +bool RunningUnderCitusTestSuite = false; int MaxRebalancerLoggedIgnoredMoves = 5; int RebalancerByDiskSizeBaseCost = 100 * 1024 * 1024; bool PropagateSessionSettingsForLoopbackConnection = false; diff --git a/src/backend/distributed/replication/multi_logical_replication.c b/src/backend/distributed/replication/multi_logical_replication.c index 97f6fdb3d..48571d7c4 100644 --- a/src/backend/distributed/replication/multi_logical_replication.c +++ b/src/backend/distributed/replication/multi_logical_replication.c @@ -1143,7 +1143,7 @@ ConflictWithIsolationTestingBeforeCopy(void) const bool sessionLock = false; const bool dontWait = false; - if (RunningUnderIsolationTest) + if (RunningUnderCitusTestSuite) { SET_LOCKTAG_ADVISORY(tag, MyDatabaseId, SHARD_MOVE_ADVISORY_LOCK_SECOND_KEY, @@ -1177,7 +1177,7 @@ ConflictWithIsolationTestingAfterCopy(void) const bool sessionLock = false; const bool dontWait = false; - if (RunningUnderIsolationTest) + if (RunningUnderCitusTestSuite) { SET_LOCKTAG_ADVISORY(tag, MyDatabaseId, SHARD_MOVE_ADVISORY_LOCK_FIRST_KEY, diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 9b5768ee7..22037c82b 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -29,6 +29,7 @@ #include "citus_version.h" #include "commands/explain.h" #include "commands/extension.h" +#include "commands/seclabel.h" #include "common/string.h" #include "executor/executor.h" #include "distributed/backend_data.h" @@ -574,6
+575,16 @@ _PG_init(void) INIT_COLUMNAR_SYMBOL(PGFunction, columnar_storage_info); INIT_COLUMNAR_SYMBOL(PGFunction, columnar_store_memory_stats); INIT_COLUMNAR_SYMBOL(PGFunction, test_columnar_storage_write_new_page); + + /* + * This part is only for SECURITY LABEL tests + * mimicking what an actual security label provider would do + */ + if (RunningUnderCitusTestSuite) + { + register_label_provider("citus '!tests_label_provider", + citus_test_object_relabel); + } } @@ -2305,13 +2316,14 @@ RegisterCitusConfigVariables(void) WarnIfReplicationModelIsSet, NULL, NULL); DefineCustomBoolVariable( - "citus.running_under_isolation_test", + "citus.running_under_citus_test_suite", gettext_noop( "Only useful for testing purposes, when set to true, Citus does some " - "tricks to implement useful isolation tests with rebalancing. Should " + "tricks to implement useful isolation tests with rebalancing. It also " + "registers a dummy label provider for SECURITY LABEL tests. Should " "never be set to true on production systems "), gettext_noop("for details of the tricks implemented, refer to the source code"), - &RunningUnderIsolationTest, + &RunningUnderCitusTestSuite, false, PGC_SUSET, GUC_SUPERUSER_ONLY | GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE, diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index 43429278f..df98e66a5 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -521,6 +521,11 @@ extern List * AlterSchemaOwnerStmtObjectAddress(Node *node, bool missing_ok, extern List * AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess); +/* seclabel.c - forward declarations*/ +extern List * PostprocessSecLabelStmt(Node *node, const char *queryString); +extern List * SecLabelStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess); +extern void citus_test_object_relabel(const ObjectAddress *object, const char *seclabel); + /* sequence.c - forward declarations */ extern List * PreprocessAlterSequenceStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); diff --git a/src/include/distributed/deparser.h b/src/include/distributed/deparser.h index 95d948bc9..aae526df4 100644 --- a/src/include/distributed/deparser.h +++ b/src/include/distributed/deparser.h @@ -260,6 +260,9 @@ extern void QualifyRenameTextSearchDictionaryStmt(Node *node); extern void QualifyTextSearchConfigurationCommentStmt(Node *node); extern void QualifyTextSearchDictionaryCommentStmt(Node *node); +/* forward declarations for deparse_seclabel_stmts.c */ +extern char * DeparseSecLabelStmt(Node *node); + /* forward declarations for deparse_sequence_stmts.c */ extern char * DeparseDropSequenceStmt(Node *node); extern char * DeparseRenameSequenceStmt(Node *node); diff --git a/src/include/distributed/shard_rebalancer.h b/src/include/distributed/shard_rebalancer.h index 38ce4f485..345748ced 100644 --- a/src/include/distributed/shard_rebalancer.h +++ b/src/include/distributed/shard_rebalancer.h @@ -189,7 +189,7 @@ typedef struct RebalancePlanFunctions extern char *VariablesToBePassedToNewConnections; extern int MaxRebalancerLoggedIgnoredMoves; extern int RebalancerByDiskSizeBaseCost; -extern bool RunningUnderIsolationTest; +extern bool RunningUnderCitusTestSuite; extern bool PropagateSessionSettingsForLoopbackConnection; extern int MaxBackgroundTaskExecutorsPerNode; diff --git a/src/test/regress/expected/multi_test_helpers.out b/src/test/regress/expected/multi_test_helpers.out index b8758e561..4b621b968 100644 
--- a/src/test/regress/expected/multi_test_helpers.out +++ b/src/test/regress/expected/multi_test_helpers.out @@ -526,3 +526,33 @@ BEGIN RETURN result; END; $func$ LANGUAGE plpgsql; +-- Returns pg_seclabels entries from all nodes in the cluster for which +-- the object name is the input. +CREATE OR REPLACE FUNCTION get_citus_tests_label_provider_labels(object_name text, + master_port INTEGER DEFAULT 57636, + worker_1_port INTEGER DEFAULT 57637, + worker_2_port INTEGER DEFAULT 57638) +RETURNS TABLE ( + node_type text, + result text +) +AS $func$ +DECLARE + pg_seclabels_cmd TEXT := 'SELECT to_jsonb(q.*) FROM (' || + 'SELECT provider, objtype, label FROM pg_seclabels ' || + 'WHERE objname = ''' || object_name || ''') q'; +BEGIN + RETURN QUERY + SELECT + CASE + WHEN nodeport = master_port THEN 'coordinator' + WHEN nodeport = worker_1_port THEN 'worker_1' + WHEN nodeport = worker_2_port THEN 'worker_2' + ELSE 'unexpected_node' + END AS node_type, + a.result + FROM run_command_on_all_nodes(pg_seclabels_cmd) a + JOIN pg_dist_node USING (nodeid) + ORDER BY node_type; +END; +$func$ LANGUAGE plpgsql; diff --git a/src/test/regress/expected/seclabel.out b/src/test/regress/expected/seclabel.out new file mode 100644 index 000000000..f826de44b --- /dev/null +++ b/src/test/regress/expected/seclabel.out @@ -0,0 +1,173 @@ +-- +-- SECLABEL +-- +-- Test suite for SECURITY LABEL ON ROLE statements +-- +-- first we remove one of the worker nodes to be able to test +-- citus_add_node later +SELECT citus_remove_node('localhost', :worker_2_port); + citus_remove_node +--------------------------------------------------------------------- + +(1 row) + +-- create two roles, one with characters that need escaping +CREATE ROLE user1; +CREATE ROLE "user 2"; +-- check an invalid label for our current dummy hook citus_test_object_relabel +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'invalid_label'; +ERROR: 'invalid_label' is not a valid security label for Citus tests. +-- if we disable metadata_sync, the command will not be propagated +SET citus.enable_metadata_sync TO off; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified'; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | +(2 rows) + +RESET citus.enable_metadata_sync; +-- check that we only support propagating for roles +SET citus.shard_replication_factor to 1; +-- distributed table +CREATE TABLE a (a int); +SELECT create_distributed_table('a', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- distributed view +CREATE VIEW v_dist AS SELECT * FROM a; +-- distributed function +CREATE FUNCTION notice(text) RETURNS void LANGUAGE plpgsql AS $$ + BEGIN RAISE NOTICE '%', $1; END; $$; +SECURITY LABEL ON TABLE a IS 'citus_classified'; +NOTICE: not propagating SECURITY LABEL commands whose object type is not role +HINT: Connect to worker nodes directly to manually run the same SECURITY LABEL command. +SECURITY LABEL ON FUNCTION notice IS 'citus_unclassified'; +NOTICE: not propagating SECURITY LABEL commands whose object type is not role +HINT: Connect to worker nodes directly to manually run the same SECURITY LABEL command. 
+SECURITY LABEL ON VIEW v_dist IS 'citus_classified'; +NOTICE: not propagating SECURITY LABEL commands whose object type is not role +HINT: Connect to worker nodes directly to manually run the same SECURITY LABEL command. +SELECT node_type, result FROM get_citus_tests_label_provider_labels('a') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "table", "provider": "citus '!tests_label_provider"} + worker_1 | +(2 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('notice(text)') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "function", "provider": "citus '!tests_label_provider"} + worker_1 | +(2 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('v_dist') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "view", "provider": "citus '!tests_label_provider"} + worker_1 | +(2 rows) + +\c - - - :worker_1_port +SECURITY LABEL ON TABLE a IS 'citus_classified'; +SECURITY LABEL ON FUNCTION notice IS 'citus_unclassified'; +SECURITY LABEL ON VIEW v_dist IS 'citus_classified'; +\c - - - :master_port +SELECT node_type, result FROM get_citus_tests_label_provider_labels('a') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "table", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "table", "provider": "citus '!tests_label_provider"} +(2 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('notice(text)') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "function", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "function", "provider": "citus '!tests_label_provider"} +(2 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('v_dist') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "view", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "view", "provider": "citus '!tests_label_provider"} +(2 rows) + +DROP TABLE a CASCADE; +NOTICE: drop cascades to view v_dist +DROP FUNCTION notice; +-- test that SECURITY LABEL statement is actually propagated for ROLES +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%SECURITY LABEL%'; +-- we have exactly one provider loaded, so we may not include the provider in the command +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'; +NOTICE: issuing SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SECURITY LABEL ON ROLE user1 IS NULL; +NOTICE: issuing SECURITY LABEL ON ROLE user1 IS NULL +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SECURITY LABEL ON ROLE user1 IS 'citus_unclassified'; +NOTICE: issuing SECURITY LABEL ON ROLE user1 IS 
'citus_unclassified' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified'; +NOTICE: issuing SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +\c - - - :worker_1_port +-- command not allowed from worker node +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus ''!unclassified'; +ERROR: operation is not allowed on this node +HINT: Connect to the coordinator and run it again. +\c - - - :master_port +RESET citus.log_remote_commands; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + +-- add a new node and check that it also propagates the SECURITY LABEL statement to the new node +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%SECURITY LABEL%'; +SELECT 1 FROM citus_add_node('localhost', :worker_2_port); +NOTICE: issuing SELECT worker_create_or_alter_role('user1', 'CREATE ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''', 'ALTER ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''');SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing SELECT worker_create_or_alter_role('user 2', 'CREATE ROLE "user 2" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''', 'ALTER ROLE "user 2" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''');SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +-- cleanup +RESET citus.log_remote_commands; +DROP ROLE user1, "user 2"; diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index 287f4557a..aed751aa0 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -32,6 +32,7 @@ test: propagate_extension_commands test: escape_extension_name test: ref_citus_local_fkeys test: alter_database_owner +test: seclabel test: distributed_triggers test: create_single_shard_table # don't parallelize single_shard_table_udfs to make sure colocation ids are sequential diff --git a/src/test/regress/pg_regress_multi.pl b/src/test/regress/pg_regress_multi.pl index 4cc022198..fbadcfc62 100755 --- a/src/test/regress/pg_regress_multi.pl +++ b/src/test/regress/pg_regress_multi.pl @@ -510,6 +510,12 @@ if($vanillatest) # we disable some restrictions for local objects like local views to not break postgres vanilla test behaviour. push(@pgOptions, "citus.enforce_object_restrictions_for_local_objects=false"); } +else +{ + # We currently need this config for isolation tests and security label tests + # this option loads a security label provider, which we don't want in vanilla tests + push(@pgOptions, "citus.running_under_citus_test_suite=true"); +} if ($useMitmproxy) { @@ -560,7 +566,6 @@ if($isolationtester) push(@pgOptions, "citus.metadata_sync_interval=1000"); push(@pgOptions, "citus.metadata_sync_retry_interval=100"); push(@pgOptions, "client_min_messages='warning'"); # pg12 introduced notice showing during isolation tests - push(@pgOptions, "citus.running_under_isolation_test=true"); # Disable all features of the maintenance daemon. Otherwise queries might # randomly show temporarily as "waiting..." because they are waiting for the diff --git a/src/test/regress/sql/multi_test_helpers.sql b/src/test/regress/sql/multi_test_helpers.sql index f7c97f1b2..7f0346d14 100644 --- a/src/test/regress/sql/multi_test_helpers.sql +++ b/src/test/regress/sql/multi_test_helpers.sql @@ -550,3 +550,34 @@ BEGIN RETURN result; END; $func$ LANGUAGE plpgsql; + +-- Returns pg_seclabels entries from all nodes in the cluster for which +-- the object name is the input. 
+CREATE OR REPLACE FUNCTION get_citus_tests_label_provider_labels(object_name text, + master_port INTEGER DEFAULT 57636, + worker_1_port INTEGER DEFAULT 57637, + worker_2_port INTEGER DEFAULT 57638) +RETURNS TABLE ( + node_type text, + result text +) +AS $func$ +DECLARE + pg_seclabels_cmd TEXT := 'SELECT to_jsonb(q.*) FROM (' || + 'SELECT provider, objtype, label FROM pg_seclabels ' || + 'WHERE objname = ''' || object_name || ''') q'; +BEGIN + RETURN QUERY + SELECT + CASE + WHEN nodeport = master_port THEN 'coordinator' + WHEN nodeport = worker_1_port THEN 'worker_1' + WHEN nodeport = worker_2_port THEN 'worker_2' + ELSE 'unexpected_node' + END AS node_type, + a.result + FROM run_command_on_all_nodes(pg_seclabels_cmd) a + JOIN pg_dist_node USING (nodeid) + ORDER BY node_type; +END; +$func$ LANGUAGE plpgsql; diff --git a/src/test/regress/sql/seclabel.sql b/src/test/regress/sql/seclabel.sql new file mode 100644 index 000000000..e523fc1da --- /dev/null +++ b/src/test/regress/sql/seclabel.sql @@ -0,0 +1,87 @@ +-- +-- SECLABEL +-- +-- Test suite for SECURITY LABEL ON ROLE statements +-- + +-- first we remove one of the worker nodes to be able to test +-- citus_add_node later +SELECT citus_remove_node('localhost', :worker_2_port); + +-- create two roles, one with characters that need escaping +CREATE ROLE user1; +CREATE ROLE "user 2"; + +-- check an invalid label for our current dummy hook citus_test_object_relabel +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'invalid_label'; + +-- if we disable metadata_sync, the command will not be propagated +SET citus.enable_metadata_sync TO off; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified'; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + +RESET citus.enable_metadata_sync; + +-- check that we only support propagating for roles +SET citus.shard_replication_factor to 1; +-- distributed table +CREATE TABLE a (a int); +SELECT create_distributed_table('a', 'a'); +-- distributed view +CREATE VIEW v_dist AS SELECT * FROM a; +-- distributed function +CREATE FUNCTION notice(text) RETURNS void LANGUAGE plpgsql AS $$ + BEGIN RAISE NOTICE '%', $1; END; $$; + +SECURITY LABEL ON TABLE a IS 'citus_classified'; +SECURITY LABEL ON FUNCTION notice IS 'citus_unclassified'; +SECURITY LABEL ON VIEW v_dist IS 'citus_classified'; + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('a') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('notice(text)') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('v_dist') ORDER BY node_type; + +\c - - - :worker_1_port +SECURITY LABEL ON TABLE a IS 'citus_classified'; +SECURITY LABEL ON FUNCTION notice IS 'citus_unclassified'; +SECURITY LABEL ON VIEW v_dist IS 'citus_classified'; + +\c - - - :master_port +SELECT node_type, result FROM get_citus_tests_label_provider_labels('a') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('notice(text)') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('v_dist') ORDER BY node_type; + +DROP TABLE a CASCADE; +DROP FUNCTION notice; + +-- test that SECURITY LABEL statement is actually propagated for ROLES +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%SECURITY LABEL%'; + +-- we have exactly one provider loaded, so we may not include the provider in the command +SECURITY LABEL for "citus 
'!tests_label_provider" ON ROLE user1 IS 'citus_classified';
+SECURITY LABEL ON ROLE user1 IS NULL;
+SECURITY LABEL ON ROLE user1 IS 'citus_unclassified';
+SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified';
+
+\c - - - :worker_1_port
+-- command not allowed from worker node
+SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus ''!unclassified';
+
+\c - - - :master_port
+RESET citus.log_remote_commands;
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type;
+
+-- add a new node and check that it also propagates the SECURITY LABEL statement to the new node
+SET citus.log_remote_commands TO on;
+SET citus.grep_remote_commands = '%SECURITY LABEL%';
+SELECT 1 FROM citus_add_node('localhost', :worker_2_port);
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type;
+
+-- cleanup
+RESET citus.log_remote_commands;
+DROP ROLE user1, "user 2";

From 5efd3f181aa9a88de6d29d4bc74f4adedfe93d0e Mon Sep 17 00:00:00 2001
From: Hanefi Onaldi
Date: Thu, 16 Nov 2023 14:12:17 +0300
Subject: [PATCH 06/11] Fix wrong PR links in changelog (#7350)

When preparing the changelog for the 12.1.1 release, I accidentally
swapped the PR numbers for the two commits. This commit fixes the
changelog to point to the correct PRs.
---
 CHANGELOG.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 686e78dd1..8d979c104 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,10 +1,10 @@
 ### citus v12.1.1 (November 9, 2023) ###

 * Fixes leaking of memory and memory contexts in Citus foreign key cache
-  (#7219)
+  (#7236)

 * Makes sure to disallow creating a replicated distributed table concurrently
-  (#7236)
+  (#7219)

 ### citus v12.1.0 (September 12, 2023) ###

From 55d500de8d43b1e2bb9086f3ea1f424cacf3fd1c Mon Sep 17 00:00:00 2001
From: Naisila Puka <37271756+naisila@users.noreply.github.com>
Date: Thu, 16 Nov 2023 14:51:31 +0300
Subject: [PATCH 07/11] Remove accidentally added gucs.out (#7349)

---
 .gitignore |   3 ++
 gucs.out   | 133 -----------------------------------------------------
 2 files changed, 3 insertions(+), 133 deletions(-)
 delete mode 100644 gucs.out

diff --git a/.gitignore b/.gitignore
index df447746a..e636392ac 100644
--- a/.gitignore
+++ b/.gitignore
@@ -55,3 +55,6 @@ lib*.pc
 # style related temporary outputs
 *.uncrustify
 .venv
+
+# added output when modifying check_gucs_are_alphabetically_sorted.sh
+gucs.out
diff --git a/gucs.out b/gucs.out
deleted file mode 100644
index 8501e6c1f..000000000
--- a/gucs.out
+++ /dev/null
@@ -1,133 +0,0 @@
-"citus.all_modifications_commutative",
-"citus.allow_modifications_from_workers_to_replicated_tables",
-"citus.allow_nested_distributed_execution",
-"citus.allow_unsafe_constraints",
-"citus.allow_unsafe_locks_from_workers",
-"citus.background_task_queue_interval",
-"citus.check_available_space_before_move",
-"citus.cluster_name",
-"citus.coordinator_aggregation_strategy",
-"citus.copy_switchover_threshold",
-"citus.count_distinct_error_rate",
-"citus.cpu_priority",
-"citus.cpu_priority_for_logical_replication_senders",
-"citus.create_object_propagation",
-"citus.defer_drop_after_shard_move",
-"citus.defer_drop_after_shard_split",
-"citus.defer_shard_delete_interval",
-"citus.desired_percent_disk_available_after_move", -"citus.distributed_deadlock_detection_factor", -"citus.enable_alter_database_owner", -"citus.enable_alter_role_propagation", -"citus.enable_alter_role_set_propagation", -"citus.enable_binary_protocol", -"citus.enable_change_data_capture", -"citus.enable_cluster_clock", -"citus.enable_cost_based_connection_establishment", -"citus.enable_create_role_propagation", -"citus.enable_create_type_propagation", -"citus.enable_ddl_propagation", -"citus.enable_deadlock_prevention", -"citus.enable_fast_path_router_planner", -"citus.enable_local_execution", -"citus.enable_local_reference_table_foreign_keys", -"citus.enable_manual_changes_to_shards", -"citus.enable_manual_metadata_changes_for_user", -"citus.enable_metadata_sync", -"citus.enable_non_colocated_router_query_pushdown", -"citus.enable_repartition_joins", -"citus.enable_repartitioned_insert_select", -"citus.enable_router_execution", -"citus.enable_schema_based_sharding", -"citus.enable_single_hash_repartition_joins", -"citus.enable_statistics_collection", -"citus.enable_unique_job_ids", -"citus.enable_unsafe_triggers", -"citus.enable_unsupported_feature_messages", -"citus.enable_version_checks", -"citus.enforce_foreign_key_restrictions", -"citus.enforce_object_restrictions_for_local_objects", -"citus.executor_slow_start_interval", -"citus.explain_all_tasks", -"citus.explain_analyze_sort_method", -"citus.explain_distributed_queries", -"citus.force_max_query_parallelization", -"citus.function_opens_transaction_block", -"citus.grep_remote_commands", -"citus.hide_citus_dependent_objects", -"citus.hide_shards_from_app_name_prefixes", -"citus.isolation_test_session_process_id", -"citus.isolation_test_session_remote_process_id", -"citus.limit_clause_row_fetch_count", -"citus.local_copy_flush_threshold", -"citus.local_hostname", -"citus.local_shared_pool_size", -"citus.local_table_join_policy", -"citus.log_distributed_deadlock_detection", -"citus.log_intermediate_results", -"citus.log_local_commands", -"citus.log_multi_join_order", -"citus.log_remote_commands", -"citus.logical_replication_timeout", -"citus.main_db", -"citus.max_adaptive_executor_pool_size", -"citus.max_background_task_executors", -"citus.max_background_task_executors_per_node", -"citus.max_cached_connection_lifetime", -"citus.max_cached_conns_per_worker", -"citus.max_client_connections", -"citus.max_high_priority_background_processes", -"citus.max_intermediate_result_size", -"citus.max_matview_size_to_auto_recreate", -"citus.max_rebalancer_logged_ignored_moves", -"citus.max_shared_pool_size", -"citus.max_worker_nodes_tracked", -"citus.metadata_sync_interval", -"citus.metadata_sync_mode", -"citus.metadata_sync_retry_interval", -"citus.mitmfifo", -"citus.multi_shard_modify_mode", -"citus.multi_task_query_log_level", -"citus.next_cleanup_record_id", -"citus.next_operation_id", -"citus.next_placement_id", -"citus.next_shard_id", -"citus.node_connection_timeout", -"citus.node_conninfo", -"citus.override_table_visibility", -"citus.prevent_incomplete_connection_establishment", -"citus.propagate_session_settings_for_loopback_connection", -"citus.propagate_set_commands", -"citus.rebalancer_by_disk_size_base_cost", -"citus.recover_2pc_interval", -"citus.remote_copy_flush_threshold", -"citus.remote_task_check_interval", -"citus.repartition_join_bucket_count_per_node", -"citus.replicate_reference_tables_on_activate", -"citus.replication_model", -"citus.running_under_citus_test_suite", -"citus.select_opens_transaction_block", 
-"citus.shard_count", -"citus.shard_replication_factor", -"citus.show_shards_for_app_name_prefixes", -"citus.skip_advisory_lock_permission_checks", -"citus.skip_constraint_validation", -"citus.skip_jsonb_validation_in_copy", -"citus.sort_returning", -"citus.stat_statements_max", -"citus.stat_statements_purge_interval", -"citus.stat_statements_track", -"citus.stat_tenants_limit", -"citus.stat_tenants_log_level", -"citus.stat_tenants_period", -"citus.stat_tenants_track", -"citus.stat_tenants_untracked_sample_rate", -"citus.subquery_pushdown", -"citus.task_assignment_policy", -"citus.task_executor_type", -"citus.use_citus_managed_tables", -"citus.use_secondary_nodes", -"citus.values_materialization_threshold", -"citus.version", -"citus.worker_min_messages", -"citus.writable_standby_coordinator", From 32b0fc23f5b5f439f62defbd80cb09b5294e97ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?G=C3=BCrkan=20=C4=B0ndibay?= Date: Fri, 17 Nov 2023 08:51:56 +0300 Subject: [PATCH 08/11] Removes unnecessary package installations in packaging pipelines (#7341) With the recent changes in packaging images, linux package installations to execute validate_output is unnecessary now. In this PR, I removed them to make the pipeline more effective. - [x] Remove the test warning before merge --- .github/packaging/validate_build_output.sh | 5 ++++- .github/workflows/packaging-test-pipelines.yml | 10 ---------- 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/.github/packaging/validate_build_output.sh b/.github/packaging/validate_build_output.sh index 64098811e..dab301aa5 100755 --- a/.github/packaging/validate_build_output.sh +++ b/.github/packaging/validate_build_output.sh @@ -32,7 +32,10 @@ python3 -m pip install -r tools/packaging_automation/requirements.txt echo "Package type: ${package_type}" echo "OS version: $(get_rpm_os_version)" - # if os version is centos 7 or oracle linux 7, then remove urllib3 with pip uninstall and install urllib3<2.0.0 with pip install + # For RHEL 7, we need to install urllib3<2 due to below execution error + # ImportError: urllib3 v2.0 only supports OpenSSL 1.1.1+, currently the 'ssl' + # module is compiled with 'OpenSSL 1.0.2k-fips 26 Jan 2017'. 
+ # See: https://github.com/urllib3/urllib3/issues/2168
 if [[ ${package_type} == "rpm" && $(get_rpm_os_version) == 7* ]]; then
 	python3 -m pip uninstall -y urllib3
 	python3 -m pip install 'urllib3<2'
diff --git a/.github/workflows/packaging-test-pipelines.yml b/.github/workflows/packaging-test-pipelines.yml
index 51bd82503..4ae741a91 100644
--- a/.github/workflows/packaging-test-pipelines.yml
+++ b/.github/workflows/packaging-test-pipelines.yml
@@ -112,11 +112,6 @@ jobs:
         PACKAGING_DOCKER_IMAGE: ${{ matrix.packaging_docker_image }}
       run: |
         echo "Postgres version: ${POSTGRES_VERSION}"
-
-        ## Install required packages to execute packaging tools for rpm based distros
-        yum install python3-pip python3-devel postgresql-devel -y
-        python3 -m pip install wheel
-
         ./.github/packaging/validate_build_output.sh "rpm"

   deb_build_tests:
@@ -192,9 +187,4 @@ jobs:
         PACKAGING_DOCKER_IMAGE: ${{ matrix.packaging_docker_image }}
       run: |
         echo "Postgres version: ${POSTGRES_VERSION}"
-
-        apt-get update -y
-        ## Install required packages to execute packaging tools for deb based distros
-        apt-get install python3-dev python3-pip -y
-        apt-get purge -y python3-yaml
         ./.github/packaging/validate_build_output.sh "deb"

From e14e8667ccecc5d9e1e5a349f8acad7644b56703 Mon Sep 17 00:00:00 2001
From: Japin Li
Date: Fri, 17 Nov 2023 18:01:23 +0800
Subject: [PATCH 09/11] Fix redundant variable declaration (#7353)

The `$workerCount` variable is declared twice in
src/test/regress/pg_regress_multi.pl.
---
 src/test/regress/pg_regress_multi.pl | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/test/regress/pg_regress_multi.pl b/src/test/regress/pg_regress_multi.pl
index fbadcfc62..3acde4c3c 100755
--- a/src/test/regress/pg_regress_multi.pl
+++ b/src/test/regress/pg_regress_multi.pl
@@ -90,7 +90,6 @@ my $workerCount = 2;
 my $serversAreShutdown = "TRUE";
 my $usingWindows = 0;
 my $mitmPid = 0;
-my $workerCount = 2;

 if ($Config{osname} eq "MSWin32")
 {

From c88bf5ff1ce175c24664eabae33fc49f0aac5969 Mon Sep 17 00:00:00 2001
From: Naisila Puka <37271756+naisila@users.noreply.github.com>
Date: Fri, 17 Nov 2023 15:11:38 +0300
Subject: [PATCH 10/11] Cleanup leftover replication slots in publication test (#7354)

---
 src/test/regress/expected/publication.out   | 7 +++++++
 src/test/regress/expected/publication_0.out | 6 ++++++
 src/test/regress/sql/publication.sql        | 3 +++
 3 files changed, 16 insertions(+)

diff --git a/src/test/regress/expected/publication.out b/src/test/regress/expected/publication.out
index c761efb3e..2df4e59d3 100644
--- a/src/test/regress/expected/publication.out
+++ b/src/test/regress/expected/publication.out
@@ -267,6 +267,7 @@ SET client_min_messages TO ERROR;
 DROP SCHEMA publication CASCADE;
 DROP SCHEMA "publication-1" CASCADE;
 DROP SCHEMA citus_schema_1 CASCADE;
+SELECT public.wait_for_resource_cleanup();
 \q
 \endif
 -- recreate a mixed publication
@@ -544,3 +545,9 @@ DROP SCHEMA publication CASCADE;
 DROP SCHEMA "publication-1" CASCADE;
 DROP SCHEMA citus_schema_1 CASCADE;
 DROP SCHEMA publication2 CASCADE;
+SELECT public.wait_for_resource_cleanup();
+ wait_for_resource_cleanup
+---------------------------------------------------------------------
+
+(1 row)
+
diff --git a/src/test/regress/expected/publication_0.out b/src/test/regress/expected/publication_0.out
index 14fa94d17..e768a1d41 100644
--- a/src/test/regress/expected/publication_0.out
+++ b/src/test/regress/expected/publication_0.out
@@ -267,4 +267,10 @@ SET client_min_messages TO ERROR;
 DROP SCHEMA publication CASCADE;
 DROP SCHEMA "publication-1" CASCADE;
 DROP SCHEMA citus_schema_1 CASCADE;
+SELECT public.wait_for_resource_cleanup();
+ wait_for_resource_cleanup
+---------------------------------------------------------------------
+
+(1 row)
+
 \q
diff --git a/src/test/regress/sql/publication.sql b/src/test/regress/sql/publication.sql
index 06bdc39fe..70baf6726 100644
--- a/src/test/regress/sql/publication.sql
+++ b/src/test/regress/sql/publication.sql
@@ -195,6 +195,7 @@ SET client_min_messages TO ERROR;
 DROP SCHEMA publication CASCADE;
 DROP SCHEMA "publication-1" CASCADE;
 DROP SCHEMA citus_schema_1 CASCADE;
+SELECT public.wait_for_resource_cleanup();
 \q
 \endif
@@ -391,3 +392,5 @@ DROP SCHEMA publication CASCADE;
 DROP SCHEMA "publication-1" CASCADE;
 DROP SCHEMA citus_schema_1 CASCADE;
 DROP SCHEMA publication2 CASCADE;
+
+SELECT public.wait_for_resource_cleanup();

From cedcc220bff9fce22e27142b95ece754b54b1009 Mon Sep 17 00:00:00 2001
From: Naisila Puka <37271756+naisila@users.noreply.github.com>
Date: Fri, 17 Nov 2023 18:58:06 +0300
Subject: [PATCH 11/11] Fixes flaky VACUUM (freeze, process toast true) result (#7348)

https://app.circleci.com/pipelines/github/citusdata/citus/34550/workflows/5b802f66-2666-4623-a209-6d7799f7ee5f/jobs/1229153

```diff
VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table;
SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class
WHERE oid=:reltoastrelid::regclass;
 frozen_performed
------------------
- t
+ f
(1 row)
```

The PROCESS_TOAST option for VACUUM was introduced in PG14. The failing
test was supposed to be part of `multi_utilities.sql`, but it was placed
in `pg14.sql` to avoid an alternative output file for PG13. See
https://github.com/citusdata/citus/commit/ba62c0a1488b0dd160b4c64a45f01fe9bfd76cce#diff-ed03478f693155e2fe092e9ad356bf884dc097f554e8d75eff562d52bbcf7a75L255-L272
for reference.

However, now that we don't support PG13 anymore, we can move this test
to `multi_utilities.sql`. Moving the test, and inserting data before
running VACUUM FREEZE so that the freeze actually has rows to process,
fixes the flakiness of the test.
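The before/after comparison the moved test relies on is visible in the hunks
below: capture the toast relation's relfrozenxid into a psql variable with
\gset, run the VACUUM, then compare. As a standalone illustration of the
positive case, here is a minimal sketch; the `freeze_check` table name and
row count are illustrative, not part of the patch:

```sql
-- a table with a text column always gets a TOAST relation
CREATE TABLE freeze_check (name text);
INSERT INTO freeze_check SELECT i::text FROM generate_series(1, 10000) i;

-- capture the toast relation's oid and its current freeze horizon
SELECT reltoastrelid FROM pg_class WHERE relname = 'freeze_check'
\gset
SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid = :reltoastrelid::regclass
\gset

-- with PROCESS_TOAST true (the default), FREEZE must advance that horizon
VACUUM (FREEZE, PROCESS_TOAST true) freeze_check;
SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed
FROM pg_class WHERE oid = :reltoastrelid::regclass;
```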
--- src/test/regress/expected/multi_utilities.out | 28 +++++++++++++++++++ src/test/regress/expected/pg14.out | 27 +----------------- src/test/regress/sql/multi_utilities.sql | 21 ++++++++++++++ src/test/regress/sql/pg14.sql | 20 +------------ 4 files changed, 51 insertions(+), 45 deletions(-) diff --git a/src/test/regress/expected/multi_utilities.out b/src/test/regress/expected/multi_utilities.out index d2b0940ed..5faab87d7 100644 --- a/src/test/regress/expected/multi_utilities.out +++ b/src/test/regress/expected/multi_utilities.out @@ -424,6 +424,34 @@ FROM pg_total_relation_size('local_vacuum_table') s ; 35000000 (1 row) +-- vacuum (process_toast true) should be vacuuming toast tables (default is true) +select reltoastrelid from pg_class where relname='local_vacuum_table' +\gset +SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass +\gset +insert into local_vacuum_table select i from generate_series(1,10000) i; +VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table; +SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class +WHERE oid=:reltoastrelid::regclass; + frozen_performed +--------------------------------------------------------------------- + t +(1 row) + +delete from local_vacuum_table; +-- vacuum (process_toast false) should not be vacuuming toast tables (default is true) +SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass +\gset +insert into local_vacuum_table select i from generate_series(1,10000) i; +VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table; +SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class +WHERE oid=:reltoastrelid::regclass; + frozen_not_performed +--------------------------------------------------------------------- + t +(1 row) + +delete from local_vacuum_table; -- vacuum (truncate false) should not attempt to truncate off any empty pages at the end of the table (default is true) insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; diff --git a/src/test/regress/expected/pg14.out b/src/test/regress/expected/pg14.out index 1b1d80df2..badd23240 100644 --- a/src/test/regress/expected/pg14.out +++ b/src/test/regress/expected/pg14.out @@ -71,32 +71,6 @@ NOTICE: issuing VACUUM (FULL,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980000 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing VACUUM (FULL,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980001 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx --- vacuum (process_toast true) should be vacuuming toast tables (default is true) -CREATE TABLE local_vacuum_table(name text); -select reltoastrelid from pg_class where relname='local_vacuum_table' -\gset -SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass -\gset -VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table; -SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class -WHERE oid=:reltoastrelid::regclass; - frozen_performed ---------------------------------------------------------------------- - t -(1 row) - --- vacuum (process_toast false) should not be vacuuming toast tables (default is true) -SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass -\gset -VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table; -SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class -WHERE oid=:reltoastrelid::regclass; - frozen_not_performed 
---------------------------------------------------------------------- - t -(1 row) - -DROP TABLE local_vacuum_table; SET citus.log_remote_commands TO OFF; create table dist(a int, b int); select create_distributed_table('dist','a'); @@ -1492,4 +1466,5 @@ DROP TABLE compression_and_defaults, compression_and_generated_col; set client_min_messages to error; drop extension postgres_fdw cascade; drop schema pg14 cascade; +DROP ROLE role_1, r1; reset client_min_messages; diff --git a/src/test/regress/sql/multi_utilities.sql b/src/test/regress/sql/multi_utilities.sql index 1124b9890..668e1f32f 100644 --- a/src/test/regress/sql/multi_utilities.sql +++ b/src/test/regress/sql/multi_utilities.sql @@ -272,6 +272,27 @@ VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table; SELECT CASE WHEN s BETWEEN 20000000 AND 49999999 THEN 35000000 ELSE s END size FROM pg_total_relation_size('local_vacuum_table') s ; +-- vacuum (process_toast true) should be vacuuming toast tables (default is true) +select reltoastrelid from pg_class where relname='local_vacuum_table' +\gset + +SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass +\gset +insert into local_vacuum_table select i from generate_series(1,10000) i; +VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table; +SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class +WHERE oid=:reltoastrelid::regclass; +delete from local_vacuum_table; + +-- vacuum (process_toast false) should not be vacuuming toast tables (default is true) +SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass +\gset +insert into local_vacuum_table select i from generate_series(1,10000) i; +VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table; +SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class +WHERE oid=:reltoastrelid::regclass; +delete from local_vacuum_table; + -- vacuum (truncate false) should not attempt to truncate off any empty pages at the end of the table (default is true) insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; diff --git a/src/test/regress/sql/pg14.sql b/src/test/regress/sql/pg14.sql index 8d3f430ce..47eb67930 100644 --- a/src/test/regress/sql/pg14.sql +++ b/src/test/regress/sql/pg14.sql @@ -22,25 +22,6 @@ VACUUM (INDEX_CLEANUP "AUTOX") t1; VACUUM (FULL, FREEZE, VERBOSE false, ANALYZE, SKIP_LOCKED, INDEX_CLEANUP, PROCESS_TOAST, TRUNCATE) t1; VACUUM (FULL, FREEZE false, VERBOSE false, ANALYZE false, SKIP_LOCKED false, INDEX_CLEANUP "Auto", PROCESS_TOAST true, TRUNCATE false) t1; --- vacuum (process_toast true) should be vacuuming toast tables (default is true) -CREATE TABLE local_vacuum_table(name text); -select reltoastrelid from pg_class where relname='local_vacuum_table' -\gset - -SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass -\gset -VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table; -SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class -WHERE oid=:reltoastrelid::regclass; - --- vacuum (process_toast false) should not be vacuuming toast tables (default is true) -SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass -\gset -VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table; -SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class -WHERE oid=:reltoastrelid::regclass; - -DROP TABLE local_vacuum_table; SET citus.log_remote_commands TO OFF; create table dist(a int, 
b int); @@ -777,4 +758,5 @@ DROP TABLE compression_and_defaults, compression_and_generated_col; set client_min_messages to error; drop extension postgres_fdw cascade; drop schema pg14 cascade; +DROP ROLE role_1, r1; reset client_min_messages;
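As a closing note on the moved test, the negative case can be sketched the
same way: with PROCESS_TOAST false, the toast relation's freeze horizon
should stay put. This continues the illustrative `freeze_check` table from
the sketch above and is not part of the patch itself:

```sql
-- re-capture the horizon, add fresh rows, then vacuum with toast processing off
SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid = :reltoastrelid::regclass
\gset
INSERT INTO freeze_check SELECT i::text FROM generate_series(1, 10000) i;
VACUUM (FREEZE, PROCESS_TOAST false) freeze_check;

-- an unchanged horizon shows the toast table was skipped, which is what
-- the moved test asserts as frozen_not_performed
SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed
FROM pg_class WHERE oid = :reltoastrelid::regclass;

DROP TABLE freeze_check;
```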