Port recent CI fixes and 13.0.1 changelog entry to main (#7882)

Although we will re-create the main branch from release-13.0 soon, let's
get the CI on main up and running in the meantime.
Onur Tirtir 2025-02-04 17:15:47 +03:00 committed by GitHub
commit cee0f31ddb
12 changed files with 84 additions and 13 deletions

@@ -6,7 +6,7 @@ inputs:
 runs:
   using: composite
   steps:
-    - uses: actions/upload-artifact@v3.1.1
+    - uses: actions/upload-artifact@v4.6.0
       name: Upload logs
       with:
         name: ${{ inputs.folder }}

@@ -17,7 +17,7 @@ runs:
           echo "PG_MAJOR=${{ inputs.pg_major }}" >> $GITHUB_ENV
         fi
       shell: bash
-    - uses: actions/download-artifact@v3.0.1
+    - uses: actions/download-artifact@v4.1.8
       with:
         name: build-${{ env.PG_MAJOR }}
     - name: Install Extension

@@ -21,7 +21,7 @@ runs:
         mkdir -p /tmp/codeclimate
         cc-test-reporter format-coverage -t lcov -o /tmp/codeclimate/${{ inputs.flags }}.json lcov.info
       shell: bash
-    - uses: actions/upload-artifact@v3.1.1
+    - uses: actions/upload-artifact@v4.6.0
       with:
         path: "/tmp/codeclimate/*.json"
-        name: codeclimate
+        name: codeclimate-${{ inputs.flags }}
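The last change above is not just a version bump: with actions/upload-artifact@v4, artifacts are immutable, so parallel jobs can no longer all upload into one shared artifact named codeclimate the way v3 allowed. Giving each upload a unique name (codeclimate-${{ inputs.flags }}) avoids the conflict; the matching download-side change appears in the workflow below.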

@@ -48,7 +48,7 @@ jobs:
       image: ${{ needs.params.outputs.build_image_name }}:${{ needs.params.outputs.sql_snapshot_pg_version }}${{ needs.params.outputs.image_suffix }}
       options: --user root
     steps:
-    - uses: actions/checkout@v3.5.0
+    - uses: actions/checkout@v4
     - name: Check Snapshots
       run: |
         git config --global --add safe.directory ${GITHUB_WORKSPACE}
@@ -125,7 +125,7 @@ jobs:
     - name: Build
       run: "./ci/build-citus.sh"
       shell: bash
-    - uses: actions/upload-artifact@v3.1.1
+    - uses: actions/upload-artifact@v4.6.0
      with:
        name: build-${{ env.PG_MAJOR }}
        path: |-
@@ -284,10 +284,12 @@ jobs:
           check-arbitrary-configs parallel=4 CONFIGS=$TESTS
     - uses: "./.github/actions/save_logs_and_results"
       if: always()
+      with:
+        folder: ${{ env.PG_MAJOR }}_arbitrary_configs_${{ matrix.parallel }}
     - uses: "./.github/actions/upload_coverage"
       if: always()
       with:
-        flags: ${{ env.pg_major }}_upgrade
+        flags: ${{ env.PG_MAJOR }}_arbitrary_configs_${{ matrix.parallel }}
         codecov_token: ${{ secrets.CODECOV_TOKEN }}
   test-pg-upgrade:
     name: PG${{ matrix.old_pg_major }}-PG${{ matrix.new_pg_major }} - check-pg-upgrade
@@ -335,6 +337,8 @@ jobs:
       if: failure()
     - uses: "./.github/actions/save_logs_and_results"
       if: always()
+      with:
+        folder: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade
     - uses: "./.github/actions/upload_coverage"
       if: always()
       with:
@@ -380,10 +384,12 @@ jobs:
         done;
     - uses: "./.github/actions/save_logs_and_results"
       if: always()
+      with:
+        folder: ${{ env.PG_MAJOR }}_citus_upgrade
     - uses: "./.github/actions/upload_coverage"
       if: always()
       with:
-        flags: ${{ env.pg_major }}_upgrade
+        flags: ${{ env.PG_MAJOR }}_citus_upgrade
         codecov_token: ${{ secrets.CODECOV_TOKEN }}
   upload-coverage:
     if: always()
@@ -399,10 +405,11 @@ jobs:
     - test-citus-upgrade
     - test-pg-upgrade
     steps:
-    - uses: actions/download-artifact@v3.0.1
+    - uses: actions/download-artifact@v4.1.8
       with:
-        name: "codeclimate"
-        path: "codeclimate"
+        pattern: codeclimate*
+        path: codeclimate
+        merge-multiple: true
     - name: Upload coverage results to Code Climate
       run: |-
         cc-test-reporter sum-coverage codeclimate/*.json -o total.json
@@ -516,6 +523,7 @@ jobs:
       matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }}
     steps:
     - uses: actions/checkout@v4
+    - uses: actions/download-artifact@v4.1.8
     - uses: "./.github/actions/setup_extension"
     - name: Run minimal tests
       run: |-
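Two of the download changes above follow from the same v4 behavior: actions/download-artifact@v4 no longer sees one mergeable codeclimate artifact, so the upload-coverage job now matches the per-flags artifacts with pattern: codeclimate* and recombines them into a single codeclimate directory via merge-multiple: true, which is the layout the cc-test-reporter sum-coverage step expects. The bare download-artifact@v4.1.8 step added to the last job specifies no name or pattern, which in v4 downloads every artifact of the run into directories named after each artifact.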

@@ -34,7 +34,7 @@ jobs:
         echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
         ./ci/build-citus.sh
       shell: bash
-    - uses: actions/upload-artifact@v3.1.1
+    - uses: actions/upload-artifact@v4.6.0
      with:
        name: build-${{ env.PG_MAJOR }}
        path: |-

@@ -129,7 +129,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Set pg_config path and python parameters for deb based distros
         run: |

@@ -1,3 +1,7 @@
+### citus v13.0.1 (February 4th, 2025) ###
+
+* Drops support for PostgreSQL 14 (#7753)
+
 ### citus v13.0.0 (January 17, 2025) ###
 
 * Adds support for PostgreSQL 17 (#7699, #7661)

@@ -62,10 +62,16 @@ def run_citus_upgrade_tests(config, before_upgrade_schedule, after_upgrade_schedule):
     install_citus(config.post_tar_path)
 
+    # disable 2pc recovery for all nodes to work around https://github.com/citusdata/citus/issues/7875
+    disable_2pc_recovery_for_all_nodes(config.bindir, config)
+
     restart_databases(config.bindir, config.datadir, config.mixed_mode, config)
     run_alter_citus(config.bindir, config.mixed_mode, config)
     verify_upgrade(config, config.mixed_mode, config.node_name_to_ports.values())
 
+    # re-enable 2pc recovery for all nodes
+    enable_2pc_recovery_for_all_nodes(config.bindir, config)
+
     run_test_on_coordinator(config, after_upgrade_schedule)
     remove_citus(config.post_tar_path)
@@ -146,6 +152,18 @@ def restart_database(pg_path, abs_data_path, node_name, node_ports, logfile_prefix):
     subprocess.run(command, check=True)
 
 
+def disable_2pc_recovery_for_all_nodes(pg_path, config):
+    for port in config.node_name_to_ports.values():
+        utils.psql(pg_path, port, "ALTER SYSTEM SET citus.recover_2pc_interval TO -1;")
+        utils.psql(pg_path, port, "SELECT pg_reload_conf();")
+
+
+def enable_2pc_recovery_for_all_nodes(pg_path, config):
+    for port in config.node_name_to_ports.values():
+        utils.psql(pg_path, port, "ALTER SYSTEM RESET citus.recover_2pc_interval;")
+        utils.psql(pg_path, port, "SELECT pg_reload_conf();")
+
+
 def run_alter_citus(pg_path, mixed_mode, config):
     for port in config.node_name_to_ports.values():
         if mixed_mode and port in (
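For context on the workaround: citus.recover_2pc_interval controls how often the Citus maintenance daemon attempts recovery of prepared (two-phase) transactions, and setting it to -1 disables those scheduled attempts. ALTER SYSTEM writes the value to postgresql.auto.conf and pg_reload_conf() applies it without a restart, so the later RESET cleanly restores the default; disabling it for the duration of the upgrade presumably keeps the daemon's periodic recovery from interfering with the upgrade checks (see issue #7875). A minimal standalone sketch of the pattern the new helpers rely on, assuming utils.psql shells out to psql roughly as below (the real harness helper's signature and flags may differ):

    import subprocess

    def psql(pg_path, port, command):
        # Run one SQL command against the node listening on `port`.
        # ON_ERROR_STOP makes psql exit non-zero on SQL errors, and
        # check=True turns that into a Python exception.
        subprocess.run(
            [f"{pg_path}/psql", "-p", str(port), "-d", "postgres",
             "-v", "ON_ERROR_STOP=1", "-c", command],
            check=True,
        )

    def disable_2pc_recovery_for_all_nodes(pg_path, ports):
        # Mirrors the helper added above: turn off scheduled 2PC
        # recovery on every node, then reload so it takes effect now.
        for port in ports:
            psql(pg_path, port, "ALTER SYSTEM SET citus.recover_2pc_interval TO -1;")
            psql(pg_path, port, "SELECT pg_reload_conf();")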

@@ -28,3 +28,12 @@ SELECT * FROM pg_dist_cleanup;
 CALL citus_cleanup_orphaned_resources();
 NOTICE: cleaned up 1 orphaned resources
 DROP TABLE table_with_orphaned_shards;
+-- Re-enable automatic shard cleanup by maintenance daemon as
+-- we have disabled it in upgrade_pg_dist_cleanup_before.sql
+ALTER SYSTEM RESET citus.defer_shard_delete_interval;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+

@@ -30,6 +30,23 @@ SELECT COUNT(*) FROM pg_dist_placement WHERE shardstate = 1 AND shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='table_with_orphaned_shards'::regclass);
 (1 row)
 
 -- create an orphaned placement based on an existing one
+--
+-- But before doing that, first disable automatic shard cleanup
+-- by maintenance daemon so that we can reliably test the cleanup
+-- in upgrade_pg_dist_cleanup_after.sql.
+ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT pg_sleep(0.1);
+ pg_sleep
+---------------------------------------------------------------------
+
+(1 row)
+
 INSERT INTO pg_dist_placement(placementid, shardid, shardstate, shardlength, groupid)
 SELECT nextval('pg_dist_placement_placementid_seq'::regclass), shardid, 4, shardlength, 3-groupid
 FROM pg_dist_placement
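This before/after pair works like the 2PC change above: citus.defer_shard_delete_interval governs how often the maintenance daemon drops shards deferred for deletion, and -1 turns that background cleanup off, so the orphaned placement created here survives until the after-upgrade test exercises CALL citus_cleanup_orphaned_resources() explicitly. The pg_sleep(0.1) presumably gives the just-reloaded setting a moment to reach the maintenance daemon before the orphaned row is inserted. The two .sql source files follow; the .out files above are their expected outputs.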

@@ -13,3 +13,8 @@ SELECT COUNT(*) FROM pg_dist_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='table_with_orphaned_shards'::regclass);
 SELECT * FROM pg_dist_cleanup;
 CALL citus_cleanup_orphaned_resources();
 DROP TABLE table_with_orphaned_shards;
+-- Re-enable automatic shard cleanup by maintenance daemon as
+-- we have disabled it in upgrade_pg_dist_cleanup_before.sql
+ALTER SYSTEM RESET citus.defer_shard_delete_interval;
+SELECT pg_reload_conf();

@@ -16,6 +16,16 @@ SELECT create_distributed_table('table_with_orphaned_shards', 'a');
 -- show all 32 placements are active
 SELECT COUNT(*) FROM pg_dist_placement WHERE shardstate = 1 AND shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='table_with_orphaned_shards'::regclass);
 -- create an orphaned placement based on an existing one
+--
+-- But before doing that, first disable automatic shard cleanup
+-- by maintenance daemon so that we can reliably test the cleanup
+-- in upgrade_pg_dist_cleanup_after.sql.
+ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1;
+SELECT pg_reload_conf();
+SELECT pg_sleep(0.1);
 INSERT INTO pg_dist_placement(placementid, shardid, shardstate, shardlength, groupid)
 SELECT nextval('pg_dist_placement_placementid_seq'::regclass), shardid, 4, shardlength, 3-groupid
 FROM pg_dist_placement