From 4a97664fd7d3d57193c0e0e9fc763c12c73f1312 Mon Sep 17 00:00:00 2001 From: Onur Tirtir Date: Wed, 10 Nov 2021 20:13:04 +0300 Subject: [PATCH 1/3] Store tmp_upgrade/newData/*.log as an artifact --- .circleci/config.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 2daf08b4c..0184d40a1 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -163,6 +163,14 @@ jobs: cp core.* /tmp/core_dumps fi when: on_fail + - run: + name: 'Copy pg_upgrade logs for newData dir' + command: | + mkdir -p /tmp/pg_upgrade_newData_logs + if ls src/test/regress/tmp_upgrade/newData/*.log 1> /dev/null 2>&1; then + cp src/test/regress/tmp_upgrade/newData/*.log /tmp/pg_upgrade_newData_logs + fi + when: on_fail - store_artifacts: name: 'Save regressions' path: src/test/regress/regression.diffs @@ -171,6 +179,9 @@ jobs: name: 'Save core dumps' path: /tmp/core_dumps when: on_fail + - store_artifacts: + name: 'Save pg_upgrade logs for newData dir' + path: /tmp/pg_upgrade_newData_logs - codecov/upload: flags: 'test_<< parameters.old_pg_major >>_<< parameters.new_pg_major >>,upgrade' From ef2ca03f24d4d1d7549c4e6c0af1284a34cb7f88 Mon Sep 17 00:00:00 2001 From: Onur Tirtir Date: Wed, 10 Nov 2021 15:15:54 +0300 Subject: [PATCH 2/3] Reproduce bug via test suite --- src/test/regress/before_pg_upgrade_schedule | 5 +++- .../expected/upgrade_autoconverted_after.out | 10 +++---- .../expected/upgrade_columnar_before.out | 27 +++++++++++++++++++ .../expected/upgrade_list_citus_objects.out | 2 +- .../sql/upgrade_autoconverted_after.sql | 4 +-- .../regress/sql/upgrade_columnar_before.sql | 26 ++++++++++++++++++ 6 files changed, 65 insertions(+), 9 deletions(-) diff --git a/src/test/regress/before_pg_upgrade_schedule b/src/test/regress/before_pg_upgrade_schedule index be691bed1..3af949a47 100644 --- a/src/test/regress/before_pg_upgrade_schedule +++ b/src/test/regress/before_pg_upgrade_schedule @@ -3,8 +3,11 @@ test: turn_mx_off test: multi_test_helpers multi_test_helpers_superuser test: multi_test_catalog_views test: upgrade_basic_before -test: upgrade_columnar_before test: upgrade_ref2ref_before test: upgrade_type_before test: upgrade_distributed_function_before upgrade_rebalance_strategy_before test: upgrade_autoconverted_before + +# upgrade_columnar_before renames public schema to citus_schema, so let's +# run this test as the last one. 
+test: upgrade_columnar_before
diff --git a/src/test/regress/expected/upgrade_autoconverted_after.out b/src/test/regress/expected/upgrade_autoconverted_after.out
index 06c93c173..61968a3e3 100644
--- a/src/test/regress/expected/upgrade_autoconverted_after.out
+++ b/src/test/regress/expected/upgrade_autoconverted_after.out
@@ -1,9 +1,9 @@
 select logicalrelid, autoconverted from pg_dist_partition
- where logicalrelid IN ('citus_local_autoconverted'::regclass,
- 'citus_local_not_autoconverted'::regclass);
- logicalrelid | autoconverted
+ where logicalrelid IN ('citus_schema.citus_local_autoconverted'::regclass,
+ 'citus_schema.citus_local_not_autoconverted'::regclass);
+ logicalrelid | autoconverted
---------------------------------------------------------------------
- citus_local_autoconverted | t
- citus_local_not_autoconverted | f
+ citus_schema.citus_local_autoconverted | t
+ citus_schema.citus_local_not_autoconverted | f
 (2 rows)

diff --git a/src/test/regress/expected/upgrade_columnar_before.out b/src/test/regress/expected/upgrade_columnar_before.out
index 4b730148c..8e8494d31 100644
--- a/src/test/regress/expected/upgrade_columnar_before.out
+++ b/src/test/regress/expected/upgrade_columnar_before.out
@@ -5,6 +5,33 @@ SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_ele
 \else
 \q
 \endif
+-- Test if relying on topological sort of the objects, not their names, works
+-- fine when re-creating objects during pg_upgrade.
+ALTER SCHEMA public RENAME TO citus_schema;
+SET search_path TO citus_schema;
+-- As mentioned in https://github.com/citusdata/citus/issues/5447, it
+-- is essential to already have a public schema to be able to use
+-- citus_prepare_pg_upgrade. Until that bug is fixed, let's create the
+-- public schema here on all nodes.
+CREATE SCHEMA IF NOT EXISTS public;
+-- Do "SELECT 1" to hide port numbers
+SELECT 1 FROM run_command_on_workers($$CREATE SCHEMA IF NOT EXISTS public$$);
+ ?column?
+---------------------------------------------------------------------
+ 1
+ 1
+(2 rows)
+
+-- create a columnar table within citus_schema
+CREATE TABLE new_columnar_table (
+ col_1 character varying(100),
+ col_2 date,
+ col_3 character varying(100),
+ col_4 date
+) USING columnar;
+INSERT INTO new_columnar_table
+SELECT '1', '1999-01-01'::timestamp, '1', '1999-01-01'::timestamp
+FROM generate_series(1, 1000);
 CREATE SCHEMA upgrade_columnar;
 SET search_path TO upgrade_columnar, public;
 CREATE TYPE compfoo AS (f1 int, f2 text);
diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out
index 8c3a100f0..c02bab863 100644
--- a/src/test/regress/expected/upgrade_list_citus_objects.out
+++ b/src/test/regress/expected/upgrade_list_citus_objects.out
@@ -250,11 +250,11 @@ ORDER BY 1;
 type noderole
 view citus_dist_stat_activity
 view citus_lock_waits
+ view citus_schema.citus_tables
 view citus_shard_indexes_on_worker
 view citus_shards
 view citus_shards_on_worker
 view citus_stat_statements
- view citus_tables
 view citus_worker_stat_activity
 view pg_dist_shard_placement
 view time_partitions
diff --git a/src/test/regress/sql/upgrade_autoconverted_after.sql b/src/test/regress/sql/upgrade_autoconverted_after.sql
index 8c5d31cb4..5541ee36e 100644
--- a/src/test/regress/sql/upgrade_autoconverted_after.sql
+++ b/src/test/regress/sql/upgrade_autoconverted_after.sql
@@ -1,3 +1,3 @@
 select logicalrelid, autoconverted from pg_dist_partition
- where logicalrelid IN ('citus_local_autoconverted'::regclass,
- 'citus_local_not_autoconverted'::regclass);
+ where logicalrelid IN ('citus_schema.citus_local_autoconverted'::regclass,
+ 'citus_schema.citus_local_not_autoconverted'::regclass);
diff --git a/src/test/regress/sql/upgrade_columnar_before.sql b/src/test/regress/sql/upgrade_columnar_before.sql
index c6a4371f7..1f83a4d5a 100644
--- a/src/test/regress/sql/upgrade_columnar_before.sql
+++ b/src/test/regress/sql/upgrade_columnar_before.sql
@@ -6,6 +6,32 @@
 \q
 \endif
+-- Test if relying on topological sort of the objects, not their names, works
+-- fine when re-creating objects during pg_upgrade.
+ALTER SCHEMA public RENAME TO citus_schema;
+SET search_path TO citus_schema;
+
+-- As mentioned in https://github.com/citusdata/citus/issues/5447, it
+-- is essential to already have a public schema to be able to use
+-- citus_prepare_pg_upgrade. Until that bug is fixed, let's create the
+-- public schema here on all nodes.
+CREATE SCHEMA IF NOT EXISTS public;
+
+-- Do "SELECT 1" to hide port numbers
+SELECT 1 FROM run_command_on_workers($$CREATE SCHEMA IF NOT EXISTS public$$);
+
+-- create a columnar table within citus_schema
+CREATE TABLE new_columnar_table (
+ col_1 character varying(100),
+ col_2 date,
+ col_3 character varying(100),
+ col_4 date
+) USING columnar;
+
+INSERT INTO new_columnar_table
+SELECT '1', '1999-01-01'::timestamp, '1', '1999-01-01'::timestamp
+FROM generate_series(1, 1000);
+
 CREATE SCHEMA upgrade_columnar;
 SET search_path TO upgrade_columnar, public;

From 73f06323d89e778c3591c94e8e40f6e2b6365cbb Mon Sep 17 00:00:00 2001
From: Onur Tirtir
Date: Wed, 17 Nov 2021 16:32:13 +0300
Subject: [PATCH 3/3] Introduce dependencies from columnarAM to columnar metadata objects

During pg upgrades, we have seen that it is not guaranteed that a
columnar table will be created after the metadata objects it depends on
have been created.
Prior to the changes done in this commit, we had the following dependency
relationships in `pg_depend`:

```
columnar_table ----> columnarAM ----> citus extension
                                        ^    ^
                                        |    |
columnar.storageid_seq -----------------     |
                                             |
columnar.stripe ------------------------------
```

Since `pg_upgrade` only follows the topological sort of the objects when
creating the database dump, the above dependency graph doesn't guarantee
that metadata objects such as `columnar.storageid_seq` and
`columnar.stripe` are created before `columnar_table`.

For this reason, with this commit we add new records to `pg_depend` to
make columnarAM depend on all rel objects living in the `columnar`
schema. That way, `pg_upgrade` will know it needs to create those before
creating `columnarAM`, and similarly, before creating any tables that
use `columnarAM`.

Note that in addition to inserting those records via the installation
script, we also do the same in `citus_finish_pg_upgrade()`. This is
because `pg_upgrade` rebuilds the catalog tables in the new cluster,
which means we must insert those records in the new cluster too.
---
 .../columnar/sql/columnar--10.2-3--10.2-4.sql | 5 +
 .../downgrades/columnar--10.2-4--10.2-3.sql | 6 +
 .../10.2-4.sql | 40 +++++
 .../latest.sql | 40 +++++
 .../distributed/sql/citus--10.2-3--10.2-4.sql | 3 +
 .../sql/downgrades/citus--10.2-4--10.2-3.sql | 7 +
 .../udfs/citus_finish_pg_upgrade/10.2-4.sql | 144 ++++++++++++++++++
 .../udfs/citus_finish_pg_upgrade/latest.sql | 3 +
 src/test/regress/expected/multi_extension.out | 49 +++++-
 .../expected/upgrade_columnar_after.out | 99 +++++++++++-
 .../expected/upgrade_list_citus_objects.out | 3 +-
 src/test/regress/sql/multi_extension.sql | 38 +++++
 .../regress/sql/upgrade_columnar_after.sql | 75 ++++++++-
 13 files changed, 508 insertions(+), 4 deletions(-)
 create mode 100644 src/backend/columnar/sql/columnar--10.2-3--10.2-4.sql
 create mode 100644 src/backend/columnar/sql/downgrades/columnar--10.2-4--10.2-3.sql
 create mode 100644 src/backend/columnar/sql/udfs/columnar_ensure_am_depends_catalog/10.2-4.sql
 create mode 100644 src/backend/columnar/sql/udfs/columnar_ensure_am_depends_catalog/latest.sql
 create mode 100644 src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/10.2-4.sql

diff --git a/src/backend/columnar/sql/columnar--10.2-3--10.2-4.sql b/src/backend/columnar/sql/columnar--10.2-3--10.2-4.sql
new file mode 100644
index 000000000..b4600a4bf
--- /dev/null
+++ b/src/backend/columnar/sql/columnar--10.2-3--10.2-4.sql
@@ -0,0 +1,5 @@
+-- columnar--10.2-3--10.2-4.sql
+
+#include "udfs/columnar_ensure_am_depends_catalog/10.2-4.sql"
+
+SELECT citus_internal.columnar_ensure_am_depends_catalog();
diff --git a/src/backend/columnar/sql/downgrades/columnar--10.2-4--10.2-3.sql b/src/backend/columnar/sql/downgrades/columnar--10.2-4--10.2-3.sql
new file mode 100644
index 000000000..32b2ebaa8
--- /dev/null
+++ b/src/backend/columnar/sql/downgrades/columnar--10.2-4--10.2-3.sql
@@ -0,0 +1,6 @@
+-- columnar--10.2-4--10.2-3.sql
+
+DROP FUNCTION citus_internal.columnar_ensure_am_depends_catalog();
+
+-- Note that we intentionally do not delete the pg_depend records that we
+-- inserted via columnar--10.2-3--10.2-4.sql (by using columnar_ensure_am_depends_catalog).
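For reviewers: the query below is a sketch for manually inspecting the edges
these scripts manage (it is not part of the patch, and it assumes a database
where the 10.2-4 upgrade script has already run). It filters pg_depend the
same way the regression tests below do:

```sql
-- Sketch for manual inspection, not part of the patch: list the 'n'
-- (normal) dependency edges that columnar_ensure_am_depends_catalog()
-- inserts, i.e. the rel objects in the columnar schema that the
-- columnar access method now depends on. Expected: 10 rows.
SELECT refobjid::regclass AS columnar_catalog_object
FROM pg_depend
WHERE classid = 'pg_am'::regclass
  AND objid = (SELECT oid FROM pg_am WHERE amname = 'columnar')
  AND refclassid = 'pg_class'::regclass
  AND deptype = 'n'
ORDER BY 1;
```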
diff --git a/src/backend/columnar/sql/udfs/columnar_ensure_am_depends_catalog/10.2-4.sql b/src/backend/columnar/sql/udfs/columnar_ensure_am_depends_catalog/10.2-4.sql
new file mode 100644
index 000000000..754e03fb1
--- /dev/null
+++ b/src/backend/columnar/sql/udfs/columnar_ensure_am_depends_catalog/10.2-4.sql
@@ -0,0 +1,40 @@
+CREATE OR REPLACE FUNCTION citus_internal.columnar_ensure_am_depends_catalog()
+ RETURNS void
+ LANGUAGE plpgsql
+ SET search_path = pg_catalog
+AS $func$
+BEGIN
+ INSERT INTO pg_depend
+ SELECT -- Define a dependency edge from "columnar table access method" ..
+ 'pg_am'::regclass::oid as classid,
+ (select oid from pg_am where amname = 'columnar') as objid,
+ 0 as objsubid,
+ -- ... to each object that is registered in pg_class and that lives
+ -- in "columnar" schema. That covers the catalog tables, the indexes
+ -- created on them, and the sequences created in "columnar" schema.
+ --
+ -- Given that users might have created their own objects in the
+ -- columnar schema, we explicitly specify the list of objects that
+ -- we are interested in.
+ 'pg_class'::regclass::oid as refclassid,
+ columnar_schema_members.relname::regclass::oid as refobjid,
+ 0 as refobjsubid,
+ 'n' as deptype
+ FROM (VALUES ('columnar.chunk'),
+ ('columnar.chunk_group'),
+ ('columnar.chunk_group_pkey'),
+ ('columnar.chunk_pkey'),
+ ('columnar.options'),
+ ('columnar.options_pkey'),
+ ('columnar.storageid_seq'),
+ ('columnar.stripe'),
+ ('columnar.stripe_first_row_number_idx'),
+ ('columnar.stripe_pkey')
+ ) columnar_schema_members(relname)
+ -- Avoid inserting duplicate entries into pg_depend.
+ EXCEPT TABLE pg_depend;
+END;
+$func$;
+COMMENT ON FUNCTION citus_internal.columnar_ensure_am_depends_catalog()
+ IS 'internal function responsible for creating dependencies from columnar '
+ 'table access method to the rel objects in columnar schema';
diff --git a/src/backend/columnar/sql/udfs/columnar_ensure_am_depends_catalog/latest.sql b/src/backend/columnar/sql/udfs/columnar_ensure_am_depends_catalog/latest.sql
new file mode 100644
index 000000000..754e03fb1
--- /dev/null
+++ b/src/backend/columnar/sql/udfs/columnar_ensure_am_depends_catalog/latest.sql
@@ -0,0 +1,40 @@
+CREATE OR REPLACE FUNCTION citus_internal.columnar_ensure_am_depends_catalog()
+ RETURNS void
+ LANGUAGE plpgsql
+ SET search_path = pg_catalog
+AS $func$
+BEGIN
+ INSERT INTO pg_depend
+ SELECT -- Define a dependency edge from "columnar table access method" ..
+ 'pg_am'::regclass::oid as classid,
+ (select oid from pg_am where amname = 'columnar') as objid,
+ 0 as objsubid,
+ -- ... to each object that is registered in pg_class and that lives
+ -- in "columnar" schema. That covers the catalog tables, the indexes
+ -- created on them, and the sequences created in "columnar" schema.
+ --
+ -- Given that users might have created their own objects in the
+ -- columnar schema, we explicitly specify the list of objects that
+ -- we are interested in.
+ 'pg_class'::regclass::oid as refclassid,
+ columnar_schema_members.relname::regclass::oid as refobjid,
+ 0 as refobjsubid,
+ 'n' as deptype
+ FROM (VALUES ('columnar.chunk'),
+ ('columnar.chunk_group'),
+ ('columnar.chunk_group_pkey'),
+ ('columnar.chunk_pkey'),
+ ('columnar.options'),
+ ('columnar.options_pkey'),
+ ('columnar.storageid_seq'),
+ ('columnar.stripe'),
+ ('columnar.stripe_first_row_number_idx'),
+ ('columnar.stripe_pkey')
+ ) columnar_schema_members(relname)
+ -- Avoid inserting duplicate entries into pg_depend.
+ EXCEPT TABLE pg_depend;
+END;
+$func$;
+COMMENT ON FUNCTION citus_internal.columnar_ensure_am_depends_catalog()
+ IS 'internal function responsible for creating dependencies from columnar '
+ 'table access method to the rel objects in columnar schema';
diff --git a/src/backend/distributed/sql/citus--10.2-3--10.2-4.sql b/src/backend/distributed/sql/citus--10.2-3--10.2-4.sql
index 762934edb..b32b12066 100644
--- a/src/backend/distributed/sql/citus--10.2-3--10.2-4.sql
+++ b/src/backend/distributed/sql/citus--10.2-3--10.2-4.sql
@@ -2,6 +2,9 @@
 -- bump version to 10.2-4
+#include "../../columnar/sql/columnar--10.2-3--10.2-4.sql"
+
 #include "udfs/fix_partition_shard_index_names/10.2-4.sql"
 #include "udfs/fix_all_partition_shard_index_names/10.2-4.sql"
 #include "udfs/worker_fix_partition_shard_index_names/10.2-4.sql"
+#include "udfs/citus_finish_pg_upgrade/10.2-4.sql"
diff --git a/src/backend/distributed/sql/downgrades/citus--10.2-4--10.2-3.sql b/src/backend/distributed/sql/downgrades/citus--10.2-4--10.2-3.sql
index 6280755bf..b4dd8f401 100644
--- a/src/backend/distributed/sql/downgrades/citus--10.2-4--10.2-3.sql
+++ b/src/backend/distributed/sql/downgrades/citus--10.2-4--10.2-3.sql
@@ -3,3 +3,10 @@
 DROP FUNCTION pg_catalog.fix_all_partition_shard_index_names();
 DROP FUNCTION pg_catalog.fix_partition_shard_index_names(regclass);
 DROP FUNCTION pg_catalog.worker_fix_partition_shard_index_names(regclass, text, text);
+
+#include "../udfs/citus_finish_pg_upgrade/10.2-1.sql"
+
+-- This needs to be done after downgrading citus_finish_pg_upgrade. This is
+-- because citus_finish_pg_upgrade/10.2-4 depends on columnar_ensure_am_depends_catalog,
+-- which is dropped by columnar--10.2-4--10.2-3.sql
+#include "../../../columnar/sql/downgrades/columnar--10.2-4--10.2-3.sql"
diff --git a/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/10.2-4.sql b/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/10.2-4.sql
new file mode 100644
index 000000000..2921de962
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/10.2-4.sql
@@ -0,0 +1,144 @@
+CREATE OR REPLACE FUNCTION pg_catalog.citus_finish_pg_upgrade()
+ RETURNS void
+ LANGUAGE plpgsql
+ SET search_path = pg_catalog
+ AS $cppu$
+DECLARE
+ table_name regclass;
+ command text;
+ trigger_name text;
+BEGIN
+
+
+ IF substring(current_setting('server_version'), '\d+')::int >= 14 THEN
+ EXECUTE $cmd$
+ CREATE AGGREGATE array_cat_agg(anycompatiblearray) (SFUNC = array_cat, STYPE = anycompatiblearray);
+ COMMENT ON AGGREGATE array_cat_agg(anycompatiblearray)
+ IS 'concatenate input arrays into a single array';
+ $cmd$;
+ ELSE
+ EXECUTE $cmd$
+ CREATE AGGREGATE array_cat_agg(anyarray) (SFUNC = array_cat, STYPE = anyarray);
+ COMMENT ON AGGREGATE array_cat_agg(anyarray)
+ IS 'concatenate input arrays into a single array';
+ $cmd$;
+ END IF;
+
+ --
+ -- Citus creates the array_cat_agg aggregate, but because of a
+ -- compatibility issue between pg13 and pg14, we drop and recreate it
+ -- during the upgrade. And since Citus creates it, there needs to be a
+ -- dependency on the Citus extension, so we create that dependency here.
+ -- We are not using:
+ -- ALTER EXTENSION citus DROP/CREATE AGGREGATE array_cat_agg
+ -- because we don't have an easy way to check if the aggregate
+ -- exists with anyarray type or anycompatiblearray type.
+ + INSERT INTO pg_depend + SELECT + 'pg_proc'::regclass::oid as classid, + (SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') as objid, + 0 as objsubid, + 'pg_extension'::regclass::oid as refclassid, + (select oid from pg_extension where extname = 'citus') as refobjid, + 0 as refobjsubid , + 'e' as deptype; + + -- + -- restore citus catalog tables + -- + INSERT INTO pg_catalog.pg_dist_partition SELECT * FROM public.pg_dist_partition; + INSERT INTO pg_catalog.pg_dist_shard SELECT * FROM public.pg_dist_shard; + INSERT INTO pg_catalog.pg_dist_placement SELECT * FROM public.pg_dist_placement; + INSERT INTO pg_catalog.pg_dist_node_metadata SELECT * FROM public.pg_dist_node_metadata; + INSERT INTO pg_catalog.pg_dist_node SELECT * FROM public.pg_dist_node; + INSERT INTO pg_catalog.pg_dist_local_group SELECT * FROM public.pg_dist_local_group; + INSERT INTO pg_catalog.pg_dist_transaction SELECT * FROM public.pg_dist_transaction; + INSERT INTO pg_catalog.pg_dist_colocation SELECT * FROM public.pg_dist_colocation; + -- enterprise catalog tables + INSERT INTO pg_catalog.pg_dist_authinfo SELECT * FROM public.pg_dist_authinfo; + INSERT INTO pg_catalog.pg_dist_poolinfo SELECT * FROM public.pg_dist_poolinfo; + + INSERT INTO pg_catalog.pg_dist_rebalance_strategy SELECT + name, + default_strategy, + shard_cost_function::regprocedure::regproc, + node_capacity_function::regprocedure::regproc, + shard_allowed_on_node_function::regprocedure::regproc, + default_threshold, + minimum_threshold, + improvement_threshold + FROM public.pg_dist_rebalance_strategy; + + -- + -- drop backup tables + -- + DROP TABLE public.pg_dist_authinfo; + DROP TABLE public.pg_dist_colocation; + DROP TABLE public.pg_dist_local_group; + DROP TABLE public.pg_dist_node; + DROP TABLE public.pg_dist_node_metadata; + DROP TABLE public.pg_dist_partition; + DROP TABLE public.pg_dist_placement; + DROP TABLE public.pg_dist_poolinfo; + DROP TABLE public.pg_dist_shard; + DROP TABLE public.pg_dist_transaction; + DROP TABLE public.pg_dist_rebalance_strategy; + + -- + -- reset sequences + -- + PERFORM setval('pg_catalog.pg_dist_shardid_seq', (SELECT MAX(shardid)+1 AS max_shard_id FROM pg_dist_shard), false); + PERFORM setval('pg_catalog.pg_dist_placement_placementid_seq', (SELECT MAX(placementid)+1 AS max_placement_id FROM pg_dist_placement), false); + PERFORM setval('pg_catalog.pg_dist_groupid_seq', (SELECT MAX(groupid)+1 AS max_group_id FROM pg_dist_node), false); + PERFORM setval('pg_catalog.pg_dist_node_nodeid_seq', (SELECT MAX(nodeid)+1 AS max_node_id FROM pg_dist_node), false); + PERFORM setval('pg_catalog.pg_dist_colocationid_seq', (SELECT MAX(colocationid)+1 AS max_colocation_id FROM pg_dist_colocation), false); + + -- + -- register triggers + -- + FOR table_name IN SELECT logicalrelid FROM pg_catalog.pg_dist_partition + LOOP + trigger_name := 'truncate_trigger_' || table_name::oid; + command := 'create trigger ' || trigger_name || ' after truncate on ' || table_name || ' execute procedure pg_catalog.citus_truncate_trigger()'; + EXECUTE command; + command := 'update pg_trigger set tgisinternal = true where tgname = ' || quote_literal(trigger_name); + EXECUTE command; + END LOOP; + + -- + -- set dependencies + -- + INSERT INTO pg_depend + SELECT + 'pg_class'::regclass::oid as classid, + p.logicalrelid::regclass::oid as objid, + 0 as objsubid, + 'pg_extension'::regclass::oid as refclassid, + (select oid from pg_extension where extname = 'citus') as refobjid, + 0 as refobjsubid , + 'n' as deptype + FROM pg_catalog.pg_dist_partition p; + + 
-- set dependencies for columnar table access method + PERFORM citus_internal.columnar_ensure_am_depends_catalog(); + + -- restore pg_dist_object from the stable identifiers + TRUNCATE citus.pg_dist_object; + INSERT INTO citus.pg_dist_object (classid, objid, objsubid, distribution_argument_index, colocationid) + SELECT + address.classid, + address.objid, + address.objsubid, + naming.distribution_argument_index, + naming.colocationid + FROM + public.pg_dist_object naming, + pg_catalog.pg_get_object_address(naming.type, naming.object_names, naming.object_args) address; + + DROP TABLE public.pg_dist_object; +END; +$cppu$; + +COMMENT ON FUNCTION pg_catalog.citus_finish_pg_upgrade() + IS 'perform tasks to restore citus settings from a location that has been prepared before pg_upgrade'; diff --git a/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/latest.sql b/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/latest.sql index b66ee76cc..2921de962 100644 --- a/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/latest.sql @@ -120,6 +120,9 @@ BEGIN 'n' as deptype FROM pg_catalog.pg_dist_partition p; + -- set dependencies for columnar table access method + PERFORM citus_internal.columnar_ensure_am_depends_catalog(); + -- restore pg_dist_object from the stable identifiers TRUNCATE citus.pg_dist_object; INSERT INTO citus.pg_dist_object (classid, objid, objsubid, distribution_argument_index, colocationid) diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index 960371340..c4302d7c1 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -883,6 +883,21 @@ SELECT * FROM multi_extension.print_extension_changes(); -- Test downgrade to 10.2-3 from 10.2-4 ALTER EXTENSION citus UPDATE TO '10.2-4'; ALTER EXTENSION citus UPDATE TO '10.2-3'; +-- Make sure that we don't delete pg_depend entries added in +-- columnar--10.2-3--10.2-4.sql when downgrading to 10.2-3. +SELECT COUNT(*)=10 +FROM pg_depend +WHERE classid = 'pg_am'::regclass::oid AND + objid = (select oid from pg_am where amname = 'columnar') AND + objsubid = 0 AND + refclassid = 'pg_class'::regclass::oid AND + refobjsubid = 0 AND + deptype = 'n'; + ?column? +--------------------------------------------------------------------- + t +(1 row) + -- Should be empty result since upgrade+downgrade should be a no-op SELECT * FROM multi_extension.print_extension_changes(); previous_object | current_object @@ -894,11 +909,43 @@ ALTER EXTENSION citus UPDATE TO '10.2-4'; SELECT * FROM multi_extension.print_extension_changes(); previous_object | current_object --------------------------------------------------------------------- + | function citus_internal.columnar_ensure_am_depends_catalog() void | function fix_all_partition_shard_index_names() SETOF regclass | function fix_partition_shard_index_names(regclass) void | function worker_fix_partition_shard_index_names(regclass,text,text) void -(3 rows) +(4 rows) +-- Make sure that we defined dependencies from all rel objects (tables, +-- indexes, sequences ..) to columnar table access method ... 
+SELECT pg_class.oid INTO columnar_schema_members +FROM pg_class, pg_namespace +WHERE pg_namespace.oid=pg_class.relnamespace AND + pg_namespace.nspname='columnar'; +SELECT refobjid INTO columnar_schema_members_pg_depend +FROM pg_depend +WHERE classid = 'pg_am'::regclass::oid AND + objid = (select oid from pg_am where amname = 'columnar') AND + objsubid = 0 AND + refclassid = 'pg_class'::regclass::oid AND + refobjsubid = 0 AND + deptype = 'n'; +-- ... , so this should be empty, +(TABLE columnar_schema_members EXCEPT TABLE columnar_schema_members_pg_depend) +UNION +(TABLE columnar_schema_members_pg_depend EXCEPT TABLE columnar_schema_members); + oid +--------------------------------------------------------------------- +(0 rows) + +-- ... , and both columnar_schema_members_pg_depend & columnar_schema_members +-- should have 10 entries. +SELECT COUNT(*)=10 FROM columnar_schema_members_pg_depend; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +DROP TABLE columnar_schema_members, columnar_schema_members_pg_depend; -- Use a synthetic pg_dist_shard record to show that upgrade fails -- when there are cstore_fdw tables INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage) VALUES ('pg_dist_shard', 1, 'c'); diff --git a/src/test/regress/expected/upgrade_columnar_after.out b/src/test/regress/expected/upgrade_columnar_after.out index 58ea3fd3d..196b3c3c7 100644 --- a/src/test/regress/expected/upgrade_columnar_after.out +++ b/src/test/regress/expected/upgrade_columnar_after.out @@ -257,5 +257,102 @@ BEGIN; -- -- I somehow reproduced this bug easily when upgrading pg, that is why -- adding the test to this file. - DROP EXTENSION citus CASCADE; + -- + -- TODO: Need to uncomment following line after fixing + -- https://github.com/citusdata/citus/issues/5483. + -- DROP EXTENSION citus CASCADE; ROLLBACK; +-- Make sure that we define dependencies from all rel objects (tables, +-- indexes, sequences ..) to columnar table access method. +-- +-- Given that this test file is run both before and after pg upgrade, the +-- first run will test that for columnar--10.2-3--10.2-4.sql script, and the +-- second run will test the same for citus_finish_pg_upgrade(), for the post +-- pg-upgrade scenario. +SELECT pg_class.oid INTO columnar_schema_members +FROM pg_class, pg_namespace +WHERE pg_namespace.oid=pg_class.relnamespace AND + pg_namespace.nspname='columnar'; +SELECT refobjid INTO columnar_schema_members_pg_depend +FROM pg_depend +WHERE classid = 'pg_am'::regclass::oid AND + objid = (select oid from pg_am where amname = 'columnar') AND + objsubid = 0 AND + refclassid = 'pg_class'::regclass::oid AND + refobjsubid = 0 AND + deptype = 'n'; +-- ... , so this should be empty, +(TABLE columnar_schema_members EXCEPT TABLE columnar_schema_members_pg_depend) +UNION +(TABLE columnar_schema_members_pg_depend EXCEPT TABLE columnar_schema_members); + oid +--------------------------------------------------------------------- +(0 rows) + +-- ... , and both columnar_schema_members_pg_depend & columnar_schema_members +-- should have 10 entries. +SELECT COUNT(*)=10 FROM columnar_schema_members_pg_depend; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +DROP TABLE columnar_schema_members, columnar_schema_members_pg_depend; +-- Check the same for workers too. 
+SELECT run_command_on_workers( +$$ +SELECT pg_class.oid INTO columnar_schema_members +FROM pg_class, pg_namespace +WHERE pg_namespace.oid=pg_class.relnamespace AND + pg_namespace.nspname='columnar'; +SELECT refobjid INTO columnar_schema_members_pg_depend +FROM pg_depend +WHERE classid = 'pg_am'::regclass::oid AND + objid = (select oid from pg_am where amname = 'columnar') AND + objsubid = 0 AND + refclassid = 'pg_class'::regclass::oid AND + refobjsubid = 0 AND + deptype = 'n'; +$$ +); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,10201,t,"SELECT 10") + (localhost,10202,t,"SELECT 10") +(2 rows) + +SELECT run_command_on_workers( +$$ +(TABLE columnar_schema_members EXCEPT TABLE columnar_schema_members_pg_depend) +UNION +(TABLE columnar_schema_members_pg_depend EXCEPT TABLE columnar_schema_members); +$$ +); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,10201,t,"") + (localhost,10202,t,"") +(2 rows) + +SELECT run_command_on_workers( +$$ +SELECT COUNT(*)=10 FROM columnar_schema_members_pg_depend; +$$ +); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,10201,t,t) + (localhost,10202,t,t) +(2 rows) + +SELECT run_command_on_workers( +$$ +DROP TABLE columnar_schema_members, columnar_schema_members_pg_depend; +$$ +); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,10201,t,"DROP TABLE") + (localhost,10202,t,"DROP TABLE") +(2 rows) + diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out index c02bab863..d9421e3ef 100644 --- a/src/test/regress/expected/upgrade_list_citus_objects.out +++ b/src/test/regress/expected/upgrade_list_citus_objects.out @@ -57,6 +57,7 @@ ORDER BY 1; function citus_extradata_container(internal) function citus_finish_pg_upgrade() function citus_get_active_worker_nodes() + function citus_internal.columnar_ensure_am_depends_catalog() function citus_internal.downgrade_columnar_storage(regclass) function citus_internal.find_groupid_for_node(text,integer) function citus_internal.pg_dist_node_trigger_func() @@ -258,5 +259,5 @@ ORDER BY 1; view citus_worker_stat_activity view pg_dist_shard_placement view time_partitions -(242 rows) +(243 rows) diff --git a/src/test/regress/sql/multi_extension.sql b/src/test/regress/sql/multi_extension.sql index 03a4d7d84..01702c793 100644 --- a/src/test/regress/sql/multi_extension.sql +++ b/src/test/regress/sql/multi_extension.sql @@ -369,6 +369,18 @@ SELECT * FROM multi_extension.print_extension_changes(); -- Test downgrade to 10.2-3 from 10.2-4 ALTER EXTENSION citus UPDATE TO '10.2-4'; ALTER EXTENSION citus UPDATE TO '10.2-3'; + +-- Make sure that we don't delete pg_depend entries added in +-- columnar--10.2-3--10.2-4.sql when downgrading to 10.2-3. 
+SELECT COUNT(*)=10 +FROM pg_depend +WHERE classid = 'pg_am'::regclass::oid AND + objid = (select oid from pg_am where amname = 'columnar') AND + objsubid = 0 AND + refclassid = 'pg_class'::regclass::oid AND + refobjsubid = 0 AND + deptype = 'n'; + -- Should be empty result since upgrade+downgrade should be a no-op SELECT * FROM multi_extension.print_extension_changes(); @@ -376,6 +388,32 @@ SELECT * FROM multi_extension.print_extension_changes(); ALTER EXTENSION citus UPDATE TO '10.2-4'; SELECT * FROM multi_extension.print_extension_changes(); +-- Make sure that we defined dependencies from all rel objects (tables, +-- indexes, sequences ..) to columnar table access method ... +SELECT pg_class.oid INTO columnar_schema_members +FROM pg_class, pg_namespace +WHERE pg_namespace.oid=pg_class.relnamespace AND + pg_namespace.nspname='columnar'; +SELECT refobjid INTO columnar_schema_members_pg_depend +FROM pg_depend +WHERE classid = 'pg_am'::regclass::oid AND + objid = (select oid from pg_am where amname = 'columnar') AND + objsubid = 0 AND + refclassid = 'pg_class'::regclass::oid AND + refobjsubid = 0 AND + deptype = 'n'; + +-- ... , so this should be empty, +(TABLE columnar_schema_members EXCEPT TABLE columnar_schema_members_pg_depend) +UNION +(TABLE columnar_schema_members_pg_depend EXCEPT TABLE columnar_schema_members); + +-- ... , and both columnar_schema_members_pg_depend & columnar_schema_members +-- should have 10 entries. +SELECT COUNT(*)=10 FROM columnar_schema_members_pg_depend; + +DROP TABLE columnar_schema_members, columnar_schema_members_pg_depend; + -- Use a synthetic pg_dist_shard record to show that upgrade fails -- when there are cstore_fdw tables INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage) VALUES ('pg_dist_shard', 1, 'c'); diff --git a/src/test/regress/sql/upgrade_columnar_after.sql b/src/test/regress/sql/upgrade_columnar_after.sql index 0faa88d24..4776a1576 100644 --- a/src/test/regress/sql/upgrade_columnar_after.sql +++ b/src/test/regress/sql/upgrade_columnar_after.sql @@ -129,5 +129,78 @@ BEGIN; -- -- I somehow reproduced this bug easily when upgrading pg, that is why -- adding the test to this file. - DROP EXTENSION citus CASCADE; + -- + -- TODO: Need to uncomment following line after fixing + -- https://github.com/citusdata/citus/issues/5483. + -- DROP EXTENSION citus CASCADE; ROLLBACK; + +-- Make sure that we define dependencies from all rel objects (tables, +-- indexes, sequences ..) to columnar table access method. +-- +-- Given that this test file is run both before and after pg upgrade, the +-- first run will test that for columnar--10.2-3--10.2-4.sql script, and the +-- second run will test the same for citus_finish_pg_upgrade(), for the post +-- pg-upgrade scenario. +SELECT pg_class.oid INTO columnar_schema_members +FROM pg_class, pg_namespace +WHERE pg_namespace.oid=pg_class.relnamespace AND + pg_namespace.nspname='columnar'; +SELECT refobjid INTO columnar_schema_members_pg_depend +FROM pg_depend +WHERE classid = 'pg_am'::regclass::oid AND + objid = (select oid from pg_am where amname = 'columnar') AND + objsubid = 0 AND + refclassid = 'pg_class'::regclass::oid AND + refobjsubid = 0 AND + deptype = 'n'; + +-- ... , so this should be empty, +(TABLE columnar_schema_members EXCEPT TABLE columnar_schema_members_pg_depend) +UNION +(TABLE columnar_schema_members_pg_depend EXCEPT TABLE columnar_schema_members); + +-- ... , and both columnar_schema_members_pg_depend & columnar_schema_members +-- should have 10 entries. 
+SELECT COUNT(*)=10 FROM columnar_schema_members_pg_depend; + +DROP TABLE columnar_schema_members, columnar_schema_members_pg_depend; + +-- Check the same for workers too. + +SELECT run_command_on_workers( +$$ +SELECT pg_class.oid INTO columnar_schema_members +FROM pg_class, pg_namespace +WHERE pg_namespace.oid=pg_class.relnamespace AND + pg_namespace.nspname='columnar'; +SELECT refobjid INTO columnar_schema_members_pg_depend +FROM pg_depend +WHERE classid = 'pg_am'::regclass::oid AND + objid = (select oid from pg_am where amname = 'columnar') AND + objsubid = 0 AND + refclassid = 'pg_class'::regclass::oid AND + refobjsubid = 0 AND + deptype = 'n'; +$$ +); + +SELECT run_command_on_workers( +$$ +(TABLE columnar_schema_members EXCEPT TABLE columnar_schema_members_pg_depend) +UNION +(TABLE columnar_schema_members_pg_depend EXCEPT TABLE columnar_schema_members); +$$ +); + +SELECT run_command_on_workers( +$$ +SELECT COUNT(*)=10 FROM columnar_schema_members_pg_depend; +$$ +); + +SELECT run_command_on_workers( +$$ +DROP TABLE columnar_schema_members, columnar_schema_members_pg_depend; +$$ +);
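
Closing note for reviewers: pg_depend rows read as "objid depends on
refobjid", so each edge counted above tells pg_dump/pg_upgrade to create the
referenced columnar catalog object before the columnar access method, and
therefore before any table using it. The sketch below (not part of the patch;
it assumes a database where Citus 10.2-4 is installed) renders those edges in
human-readable form:

```sql
-- Sketch for manual inspection, not part of the patch: display the
-- dependency edges verified by the tests above, using the built-in
-- pg_describe_object() to turn (classid, objid, objsubid) triples
-- into readable object descriptions.
SELECT pg_describe_object(classid, objid, objsubid) AS dependent_object,
       pg_describe_object(refclassid, refobjid, refobjsubid) AS referenced_object
FROM pg_depend
WHERE classid = 'pg_am'::regclass
  AND objid = (SELECT oid FROM pg_am WHERE amname = 'columnar')
  AND deptype = 'n';
```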