mirror of https://github.com/citusdata/citus.git
Merge branch 'main' into contributing-dev
commit fe1a05cd84
@@ -6,7 +6,7 @@ inputs:
 runs:
   using: composite
   steps:
-    - uses: actions/upload-artifact@v3.1.1
+    - uses: actions/upload-artifact@v4.6.0
       name: Upload logs
       with:
         name: ${{ inputs.folder }}

@@ -17,7 +17,7 @@ runs:
           echo "PG_MAJOR=${{ inputs.pg_major }}" >> $GITHUB_ENV
         fi
       shell: bash
-    - uses: actions/download-artifact@v3.0.1
+    - uses: actions/download-artifact@v4.1.8
       with:
         name: build-${{ env.PG_MAJOR }}
     - name: Install Extension

@@ -21,7 +21,7 @@ runs:
         mkdir -p /tmp/codeclimate
         cc-test-reporter format-coverage -t lcov -o /tmp/codeclimate/${{ inputs.flags }}.json lcov.info
       shell: bash
-    - uses: actions/upload-artifact@v3.1.1
+    - uses: actions/upload-artifact@v4.6.0
       with:
         path: "/tmp/codeclimate/*.json"
-        name: codeclimate
+        name: codeclimate-${{ inputs.flags }}

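A note on the version bumps above: in actions/upload-artifact v4, artifacts are immutable, so parallel jobs can no longer write to a shared artifact name. That is why the coverage artifact gains a per-job suffix here (`codeclimate-${{ inputs.flags }}`) and, further down, the aggregating job switches to a `pattern` plus `merge-multiple: true` download. A minimal sketch of the v4 pairing (step layout is illustrative; the names come from this diff):

```yaml
# producer (runs once per matrix job): unique artifact name per job
- uses: actions/upload-artifact@v4.6.0
  with:
    name: codeclimate-${{ inputs.flags }}
    path: "/tmp/codeclimate/*.json"

# aggregator: collect all matching per-job artifacts into one directory
- uses: actions/download-artifact@v4.1.8
  with:
    pattern: codeclimate*
    path: codeclimate
    merge-multiple: true
```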
@@ -48,7 +48,7 @@ jobs:
       image: ${{ needs.params.outputs.build_image_name }}:${{ needs.params.outputs.sql_snapshot_pg_version }}${{ needs.params.outputs.image_suffix }}
       options: --user root
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - name: Check Snapshots
         run: |
           git config --global --add safe.directory ${GITHUB_WORKSPACE}
@@ -125,7 +125,7 @@ jobs:
       - name: Build
         run: "./ci/build-citus.sh"
         shell: bash
-      - uses: actions/upload-artifact@v3.1.1
+      - uses: actions/upload-artifact@v4.6.0
         with:
           name: build-${{ env.PG_MAJOR }}
           path: |-
@@ -284,10 +284,12 @@ jobs:
           check-arbitrary-configs parallel=4 CONFIGS=$TESTS
       - uses: "./.github/actions/save_logs_and_results"
         if: always()
+        with:
+          folder: ${{ env.PG_MAJOR }}_arbitrary_configs_${{ matrix.parallel }}
       - uses: "./.github/actions/upload_coverage"
         if: always()
         with:
-          flags: ${{ env.pg_major }}_upgrade
+          flags: ${{ env.PG_MAJOR }}_arbitrary_configs_${{ matrix.parallel }}
           codecov_token: ${{ secrets.CODECOV_TOKEN }}
   test-pg-upgrade:
     name: PG${{ matrix.old_pg_major }}-PG${{ matrix.new_pg_major }} - check-pg-upgrade
@@ -335,6 +337,8 @@ jobs:
         if: failure()
       - uses: "./.github/actions/save_logs_and_results"
         if: always()
+        with:
+          folder: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade
       - uses: "./.github/actions/upload_coverage"
         if: always()
         with:
@@ -380,10 +384,12 @@ jobs:
           done;
       - uses: "./.github/actions/save_logs_and_results"
         if: always()
+        with:
+          folder: ${{ env.PG_MAJOR }}_citus_upgrade
       - uses: "./.github/actions/upload_coverage"
         if: always()
         with:
-          flags: ${{ env.pg_major }}_upgrade
+          flags: ${{ env.PG_MAJOR }}_citus_upgrade
           codecov_token: ${{ secrets.CODECOV_TOKEN }}
   upload-coverage:
     if: always()
@@ -399,10 +405,11 @@ jobs:
       - test-citus-upgrade
       - test-pg-upgrade
     steps:
-      - uses: actions/download-artifact@v3.0.1
+      - uses: actions/download-artifact@v4.1.8
         with:
-          name: "codeclimate"
-          path: "codeclimate"
+          pattern: codeclimate*
+          path: codeclimate
+          merge-multiple: true
       - name: Upload coverage results to Code Climate
         run: |-
           cc-test-reporter sum-coverage codeclimate/*.json -o total.json
@@ -516,6 +523,7 @@ jobs:
       matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }}
     steps:
       - uses: actions/checkout@v4
+      - uses: actions/download-artifact@v4.1.8
       - uses: "./.github/actions/setup_extension"
       - name: Run minimal tests
         run: |-

@@ -34,7 +34,7 @@ jobs:
           echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
           ./ci/build-citus.sh
         shell: bash
-      - uses: actions/upload-artifact@v3.1.1
+      - uses: actions/upload-artifact@v4.6.0
        with:
           name: build-${{ env.PG_MAJOR }}
           path: |-

@@ -129,7 +129,7 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Set pg_config path and python parameters for deb based distros
         run: |

@@ -1,3 +1,7 @@
+### citus v13.0.1 (February 4th, 2025) ###
+
+* Drops support for PostgreSQL 14 (#7753)
+
 ### citus v13.0.0 (January 17, 2025) ###
 
 * Adds support for PostgreSQL 17 (#7699, #7661)

@@ -1,4 +1,4 @@
-| **<br/>The Citus database is 100% open source.<br/><img width=1000/><br/>Learn what's new in the [Citus 12.1 release blog](https://www.citusdata.com/blog/2023/09/22/adding-postgres-16-support-to-citus-12-1/) and the [Citus Updates page](https://www.citusdata.com/updates/).<br/><br/>**|
+| **<br/>The Citus database is 100% open source.<br/><img width=1000/><br/>Learn what's new in the [Citus 13.0 release blog](https://www.citusdata.com/blog/2025/02/06/distribute-postgresql-17-with-citus-13/) and the [Citus Updates page](https://www.citusdata.com/updates/).<br/><br/>**|
 |---|
 <br/>
 
@@ -95,14 +95,14 @@ Install packages on Ubuntu / Debian:
 ```bash
 curl https://install.citusdata.com/community/deb.sh > add-citus-repo.sh
 sudo bash add-citus-repo.sh
-sudo apt-get -y install postgresql-16-citus-12.1
+sudo apt-get -y install postgresql-17-citus-13.0
 ```
 
-Install packages on CentOS / Red Hat:
+Install packages on Red Hat:
 ```bash
 curl https://install.citusdata.com/community/rpm.sh > add-citus-repo.sh
 sudo bash add-citus-repo.sh
-sudo yum install -y citus121_16
+sudo yum install -y citus130_17
 ```
 
 To add Citus to your local PostgreSQL database, add the following to `postgresql.conf`:

@@ -4688,7 +4688,7 @@ void
 SendOrCollectCommandListToMetadataNodes(MetadataSyncContext *context, List *commands)
 {
 	/*
-	 * do not send any command to workers if we collcet commands.
+	 * do not send any command to workers if we collect commands.
 	 * Collect commands into metadataSyncContext's collected command
 	 * list.
 	 */

@@ -62,10 +62,16 @@ def run_citus_upgrade_tests(config, before_upgrade_schedule, after_upgrade_sched
 
     install_citus(config.post_tar_path)
 
+    # disable 2pc recovery for all nodes to work around https://github.com/citusdata/citus/issues/7875
+    disable_2pc_recovery_for_all_nodes(config.bindir, config)
+
     restart_databases(config.bindir, config.datadir, config.mixed_mode, config)
     run_alter_citus(config.bindir, config.mixed_mode, config)
     verify_upgrade(config, config.mixed_mode, config.node_name_to_ports.values())
 
+    # re-enable 2pc recovery for all nodes
+    enable_2pc_recovery_for_all_nodes(config.bindir, config)
+
     run_test_on_coordinator(config, after_upgrade_schedule)
     remove_citus(config.post_tar_path)
 
@@ -146,6 +152,18 @@ def restart_database(pg_path, abs_data_path, node_name, node_ports, logfile_pref
     subprocess.run(command, check=True)
 
 
+def disable_2pc_recovery_for_all_nodes(pg_path, config):
+    for port in config.node_name_to_ports.values():
+        utils.psql(pg_path, port, "ALTER SYSTEM SET citus.recover_2pc_interval TO -1;")
+        utils.psql(pg_path, port, "SELECT pg_reload_conf();")
+
+
+def enable_2pc_recovery_for_all_nodes(pg_path, config):
+    for port in config.node_name_to_ports.values():
+        utils.psql(pg_path, port, "ALTER SYSTEM RESET citus.recover_2pc_interval;")
+        utils.psql(pg_path, port, "SELECT pg_reload_conf();")
+
+
 def run_alter_citus(pg_path, mixed_mode, config):
     for port in config.node_name_to_ports.values():
         if mixed_mode and port in (

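For context on the two helpers above: `citus.recover_2pc_interval` controls how often the Citus maintenance daemon runs two-phase-commit recovery, and setting it to -1 turns the periodic check off. Because `ALTER SYSTEM` only writes `postgresql.auto.conf`, each helper follows it with `pg_reload_conf()` so the change takes effect without a restart. The equivalent session, sketched directly in SQL (the `SHOW` is an illustrative sanity check, not part of this diff):

```sql
-- turn 2PC recovery off while the cluster is being upgraded
ALTER SYSTEM SET citus.recover_2pc_interval TO -1;
SELECT pg_reload_conf();
SHOW citus.recover_2pc_interval;  -- expect -1

-- ... run the upgrade steps ...

-- restore the configured default afterwards
ALTER SYSTEM RESET citus.recover_2pc_interval;
SELECT pg_reload_conf();
```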
@@ -1,3 +1,5 @@
+BEGIN;
+SET LOCAL citus.show_shards_for_app_name_prefixes = '';
 -- The following query retrieves the foreign key constraints of the table "pg_dist_background_job"
 -- along with their details. This modification includes a fix for a null pointer exception that occurred
 -- in the "HasRangeTableRef" method of "worker_shard_visibility". The issue was resolved with PR #7604.
@@ -31,3 +33,4 @@ order by
 pg_dist_background_task_depend_job_id_fkey | job_id | pg_dist_background_job | pg_catalog
 (2 rows)
 
+END;

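The BEGIN/END wrapper added in this file (and in the matching .sql input further down) leans on `SET LOCAL`, which scopes a setting to the enclosing transaction, so `citus.show_shards_for_app_name_prefixes` reverts on COMMIT without an explicit RESET. A minimal sketch of the pattern outside this test (the catalog query is a made-up stand-in):

```sql
BEGIN;
-- SET LOCAL lasts only until COMMIT/ROLLBACK, so the GUC snaps back
-- automatically once the query below has run.
SET LOCAL citus.show_shards_for_app_name_prefixes = '';
SELECT relname FROM pg_class WHERE relname LIKE 'pg_dist_background%';
END;
```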
@@ -28,3 +28,12 @@ SELECT * FROM pg_dist_cleanup;
 CALL citus_cleanup_orphaned_resources();
 NOTICE: cleaned up 1 orphaned resources
 DROP TABLE table_with_orphaned_shards;
+-- Re-enable automatic shard cleanup by maintenance daemon as
+-- we have disabled it in upgrade_pg_dist_cleanup_before.sql
+ALTER SYSTEM RESET citus.defer_shard_delete_interval;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+

@@ -30,6 +30,23 @@ SELECT COUNT(*) FROM pg_dist_placement WHERE shardstate = 1 AND shardid IN (SELE
 (1 row)
 
 -- create an orphaned placement based on an existing one
+--
+-- But before doing that, first disable automatic shard cleanup
+-- by maintenance daemon so that we can reliably test the cleanup
+-- in upgrade_pg_dist_cleanup_after.sql.
+ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT pg_sleep(0.1);
+ pg_sleep
+---------------------------------------------------------------------
+
+(1 row)
+
 INSERT INTO pg_dist_placement(placementid, shardid, shardstate, shardlength, groupid)
 SELECT nextval('pg_dist_placement_placementid_seq'::regclass), shardid, 4, shardlength, 3-groupid
 FROM pg_dist_placement

@@ -1,3 +1,5 @@
+BEGIN;
+SET LOCAL citus.show_shards_for_app_name_prefixes = '';
 -- The following query retrieves the foreign key constraints of the table "pg_dist_background_job"
 -- along with their details. This modification includes a fix for a null pointer exception that occurred
 -- in the "HasRangeTableRef" method of "worker_shard_visibility". The issue was resolved with PR #7604.
@@ -25,3 +27,4 @@ where
 and ns.nspname='pg_catalog'
 order by
 fns.nspname, fc.relname, a.attnum;
+END;

@@ -13,3 +13,8 @@ SELECT COUNT(*) FROM pg_dist_placement WHERE shardid IN (SELECT shardid FROM pg_
 SELECT * FROM pg_dist_cleanup;
 CALL citus_cleanup_orphaned_resources();
 DROP TABLE table_with_orphaned_shards;
+
+-- Re-enable automatic shard cleanup by maintenance daemon as
+-- we have disabled it in upgrade_pg_dist_cleanup_before.sql
+ALTER SYSTEM RESET citus.defer_shard_delete_interval;
+SELECT pg_reload_conf();

@@ -16,6 +16,16 @@ SELECT create_distributed_table('table_with_orphaned_shards', 'a');
 -- show all 32 placements are active
 SELECT COUNT(*) FROM pg_dist_placement WHERE shardstate = 1 AND shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='table_with_orphaned_shards'::regclass);
 -- create an orphaned placement based on an existing one
+--
+-- But before doing that, first disable automatic shard cleanup
+-- by maintenance daemon so that we can reliably test the cleanup
+-- in upgrade_pg_dist_cleanup_after.sql.
+
+ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1;
+SELECT pg_reload_conf();
+
+SELECT pg_sleep(0.1);
+
 INSERT INTO pg_dist_placement(placementid, shardid, shardstate, shardlength, groupid)
 SELECT nextval('pg_dist_placement_placementid_seq'::regclass), shardid, 4, shardlength, 3-groupid
 FROM pg_dist_placement
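Taken together, the pg_dist_cleanup hunks make the orphaned-shard test deterministic: the *before* script disables the maintenance daemon's deferred shard deletion so the orphaned placement survives until the *after* script checks it, and the *after* script restores the default. The shape of the pattern, condensed (GUC and procedure names from this diff; the -1 sentinel disables the periodic cleanup):

```sql
-- before the upgrade: keep the maintenance daemon from deleting
-- the orphaned placement we are about to create
ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1;
SELECT pg_reload_conf();

-- ... create the orphaned placement, run the upgrade ...

-- after the upgrade: verify cleanup manually, then restore the default
CALL citus_cleanup_orphaned_resources();
ALTER SYSTEM RESET citus.defer_shard_delete_interval;
SELECT pg_reload_conf();
```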