Merge branch 'main' into reassign_owned_prop

reassign_owned_prop_onur
Gürkan İndibay 2023-11-02 11:17:08 +03:00 committed by GitHub
commit aff3f3f4a7
9 changed files with 111 additions and 23 deletions

View File

@@ -10,6 +10,10 @@ on:
required: false
default: false
type: boolean
+push:
+  branches:
+    - "main"
+    - "release-*"
pull_request:
types: [opened, reopened,synchronize]
jobs:
@@ -501,7 +505,7 @@ jobs:
for test in "${tests_array[@]}"
do
test_name=$(echo "$test" | sed -r "s/.+\/(.+)\..+/\1/")
-gosu circleci src/test/regress/citus_tests/run_test.py $test_name --repeat ${{ env.runs }} --use-base-schedule --use-whole-schedule-line
+gosu circleci src/test/regress/citus_tests/run_test.py $test_name --repeat ${{ env.runs }} --use-whole-schedule-line
done
shell: bash
- uses: "./.github/actions/save_logs_and_results"

View File

@@ -71,7 +71,7 @@ jobs:
- uses: "./.github/actions/setup_extension"
- name: Run minimal tests
run: |-
-gosu circleci src/test/regress/citus_tests/run_test.py ${{ env.test }} --repeat ${{ env.runs }} --use-base-schedule --use-whole-schedule-line
+gosu circleci src/test/regress/citus_tests/run_test.py ${{ env.test }} --repeat ${{ env.runs }} --use-whole-schedule-line
shell: bash
- uses: "./.github/actions/save_logs_and_results"
if: always()

View File

@@ -107,6 +107,12 @@ SELECT pg_catalog.citus_split_shard_by_split_points(
(1 row)
+SELECT public.wait_for_resource_cleanup();
+ wait_for_resource_cleanup
+---------------------------------------------------------------------
+(1 row)
\c - - - :worker_2_port
SET search_path TO "citus_split_test_schema";
-- Replication slots should be cleaned up
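Reviewer note: the "Replication slots should be cleaned up" comment is normally followed by a check against pg_replication_slots on the worker. A minimal sketch of that kind of check (hedged; the actual test's query and output formatting may differ):

-- Expect no leftover replication slots on the worker after cleanup.
SELECT slot_name FROM pg_replication_slots;
 slot_name
---------------------------------------------------------------------
(0 rows)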

View File

@@ -277,12 +277,12 @@ CONTEXT: while executing command on localhost:xxxxx
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id;
operation_id | object_type | object_name | node_group_id | policy_type
---------------------------------------------------------------------
777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981000 | 1 | 0
-777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981002 | 2 | 0
777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981002 | 1 | 1
+777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981002 | 2 | 0
777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981003 | 2 | 1
777 | 4 | citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx | 2 | 0
777 | 4 | citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx | 2 | 0
@@ -336,7 +336,7 @@ CONTEXT: while executing command on localhost:xxxxx
(1 row)
SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id;
operation_id | object_type | object_name | node_group_id | policy_type
---------------------------------------------------------------------
(0 rows)
@@ -388,7 +388,7 @@ CONTEXT: while executing command on localhost:xxxxx
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id;
operation_id | object_type | object_name | node_group_id | policy_type
---------------------------------------------------------------------
777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981000 | 1 | 0
@@ -455,7 +455,7 @@ CONTEXT: while executing command on localhost:xxxxx
(1 row)
SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id;
operation_id | object_type | object_name | node_group_id | policy_type
---------------------------------------------------------------------
(0 rows)
@@ -507,7 +507,7 @@ CONTEXT: while executing command on localhost:xxxxx
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id;
operation_id | object_type | object_name | node_group_id | policy_type
---------------------------------------------------------------------
777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981000 | 1 | 0
@@ -574,7 +574,7 @@ CONTEXT: while executing command on localhost:xxxxx
(1 row)
SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id;
operation_id | object_type | object_name | node_group_id | policy_type
---------------------------------------------------------------------
(0 rows)
@@ -634,7 +634,7 @@ WARNING: connection to the remote node localhost:xxxxx failed with the followin
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id;
operation_id | object_type | object_name | node_group_id | policy_type
---------------------------------------------------------------------
777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981002 | 1 | 1
@@ -701,7 +701,7 @@ CONTEXT: while executing command on localhost:xxxxx
(1 row)
SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id;
operation_id | object_type | object_name | node_group_id | policy_type
---------------------------------------------------------------------
(0 rows)
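Reviewer note on the ORDER BY change repeated throughout this file: object_name is not unique in pg_dist_cleanup (table_to_split_8981002 appears once per node group), so ordering by object_name alone left the tied rows in an arbitrary order and the expected output could flap between runs; adding node_group_id as a tiebreaker makes the output deterministic. A minimal sketch with a stand-in table (hypothetical names, not the regression schema):

-- Stand-in for pg_dist_cleanup; two rows tie on object_name.
CREATE TABLE cleanup_records (operation_id bigint, object_name text, node_group_id int);
INSERT INTO cleanup_records VALUES
    (777, 'table_to_split_8981002', 2),
    (777, 'table_to_split_8981002', 1);
-- Tied rows may come back in either order:
SELECT object_name, node_group_id FROM cleanup_records ORDER BY object_name;
-- The tiebreaker pins the order down:
SELECT object_name, node_group_id FROM cleanup_records ORDER BY object_name, node_group_id;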

View File

@@ -9,9 +9,14 @@ SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
RESET client_min_messages;
-- Kill maintenance daemon so it gets restarted and gets a gpid containing our
-- nodeid
-SELECT pg_terminate_backend(pid)
+SELECT COUNT(pg_terminate_backend(pid)) >= 0
FROM pg_stat_activity
-WHERE application_name = 'Citus Maintenance Daemon' \gset
+WHERE application_name = 'Citus Maintenance Daemon';
+?column?
+---------------------------------------------------------------------
+t
+(1 row)
-- reconnect to make sure we get a session with the gpid containing our nodeid
\c - - - -
CREATE SCHEMA global_cancel;
@@ -77,6 +82,7 @@ ERROR: must be a superuser to terminate superuser process
SELECT pg_cancel_backend(citus_backend_gpid());
ERROR: canceling statement due to user request
\c - postgres - :master_port
+DROP USER global_cancel_user;
SET client_min_messages TO DEBUG;
-- 10000000000 is the node id multiplier for global pid
SELECT pg_cancel_backend(10000000000 * citus_coordinator_nodeid() + 0);
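Two reviewer notes on this file. First, psql's \gset raises an error unless the query returns exactly one row, so the old SELECT pg_terminate_backend(pid) ... \gset form broke whenever zero (or more than one) maintenance daemon backends matched; COUNT(pg_terminate_backend(pid)) >= 0 always yields exactly one row, t, so the expected output is stable. Second, the global pid encoding named in the comment above (node id multiplier 10000000000) can be made concrete; a sketch with made-up values:

-- Compose and decompose a global pid; nodeid 3 and pid 12345 are made up.
SELECT 10000000000 * 3 + 12345 AS gpid,
       (10000000000 * 3 + 12345) / 10000000000 AS nodeid,
       (10000000000 * 3 + 12345) % 10000000000 AS pid;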

View File

@@ -0,0 +1,68 @@
Parsed test spec with 2 sessions
starting permutation: s1-begin s1-insert s2-begin s2-update-node-1 s1-abort s2-abort
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-begin: BEGIN;
step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100);
step s2-begin: BEGIN;
step s2-update-node-1:
-- update a specific node by address
SELECT master_update_node(nodeid, 'localhost', nodeport + 10)
FROM pg_dist_node
WHERE nodename = 'localhost'
AND nodeport = 57637;
<waiting ...>
step s1-abort: ABORT;
step s2-update-node-1: <... completed>
master_update_node
---------------------------------------------------------------------
(1 row)
step s2-abort: ABORT;
master_remove_node
---------------------------------------------------------------------
(2 rows)
starting permutation: s1-begin s1-insert s2-begin s2-update-node-1-force s2-abort s1-abort
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-begin: BEGIN;
step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100);
step s2-begin: BEGIN;
step s2-update-node-1-force:
-- update a specific node by address (force)
SELECT master_update_node(nodeid, 'localhost', nodeport + 10, force => true, lock_cooldown => 100)
FROM pg_dist_node
WHERE nodename = 'localhost'
AND nodeport = 57637;
<waiting ...>
step s2-update-node-1-force: <... completed>
master_update_node
---------------------------------------------------------------------
(1 row)
step s2-abort: ABORT;
step s1-abort: ABORT;
FATAL: terminating connection due to administrator command
FATAL: terminating connection due to administrator command
SSL connection has been closed unexpectedly
server closed the connection unexpectedly
master_remove_node
---------------------------------------------------------------------
(2 rows)
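Reviewer note on the forced permutation: with force => true, master_update_node terminates backends holding conflicting locks once lock_cooldown milliseconds have elapsed, which is why s2 completes without waiting for s1's ABORT and why the FATAL "terminating connection due to administrator command" lines appear in the output. A standalone sketch of the call (assumes a Citus cluster with a worker at localhost:57637, as in the spec):

-- Sketch only; worker address and ports are the test's values.
SELECT master_update_node(nodeid, 'localhost', nodeport + 10,
                          force => true,         -- terminate conflicting backends
                          lock_cooldown => 100)  -- after waiting 100 ms for the lock
FROM pg_dist_node
WHERE nodename = 'localhost' AND nodeport = 57637;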

View File

@@ -79,6 +79,8 @@ SELECT pg_catalog.citus_split_shard_by_split_points(
ARRAY[:worker_2_node, :worker_2_node, :worker_2_node],
'force_logical');
+SELECT public.wait_for_resource_cleanup();
\c - - - :worker_2_port
SET search_path TO "citus_split_test_schema";
-- Replication slots should be cleaned up

View File

@@ -136,7 +136,7 @@ SELECT create_distributed_table('table_to_split', 'id');
ARRAY[:worker_1_node, :worker_2_node],
'force_logical');
SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id;
-- we need to allow connection so that we can connect to proxy
SELECT citus.mitmproxy('conn.allow()');
@@ -155,7 +155,7 @@ SELECT create_distributed_table('table_to_split', 'id');
\c - postgres - :master_port
SELECT public.wait_for_resource_cleanup();
SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id;
\c - - - :worker_2_proxy_port
SET search_path TO "citus_failure_split_cleanup_schema", public, pg_catalog;
@@ -182,7 +182,7 @@ SELECT create_distributed_table('table_to_split', 'id');
ARRAY[:worker_1_node, :worker_2_node],
'force_logical');
SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id;
-- we need to allow connection so that we can connect to proxy
SELECT citus.mitmproxy('conn.allow()');
@@ -201,7 +201,7 @@ SELECT create_distributed_table('table_to_split', 'id');
\c - postgres - :master_port
SELECT public.wait_for_resource_cleanup();
SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id;
\c - - - :worker_2_proxy_port
SET search_path TO "citus_failure_split_cleanup_schema", public, pg_catalog;
@@ -228,7 +228,7 @@ SELECT create_distributed_table('table_to_split', 'id');
ARRAY[:worker_1_node, :worker_2_node],
'force_logical');
SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id;
-- we need to allow connection so that we can connect to proxy
SELECT citus.mitmproxy('conn.allow()');
@@ -247,7 +247,7 @@ SELECT create_distributed_table('table_to_split', 'id');
\c - postgres - :master_port
SELECT public.wait_for_resource_cleanup();
SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id;
\c - - - :worker_2_proxy_port
SET search_path TO "citus_failure_split_cleanup_schema", public, pg_catalog;
@@ -275,7 +275,7 @@ SELECT create_distributed_table('table_to_split', 'id');
'force_logical');
SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id;
SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind = 'r' order by relname;
-- we need to allow connection so that we can connect to proxy
SELECT citus.mitmproxy('conn.allow()');
@@ -295,7 +295,7 @@ SELECT create_distributed_table('table_to_split', 'id');
\c - postgres - :master_port
SELECT public.wait_for_resource_cleanup();
SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id;
\c - - - :worker_2_proxy_port
SET search_path TO "citus_failure_split_cleanup_schema", public, pg_catalog;

View File

@@ -5,9 +5,9 @@ RESET client_min_messages;
-- Kill maintenance daemon so it gets restarted and gets a gpid containing our
-- nodeid
-SELECT pg_terminate_backend(pid)
+SELECT COUNT(pg_terminate_backend(pid)) >= 0
FROM pg_stat_activity
-WHERE application_name = 'Citus Maintenance Daemon' \gset
+WHERE application_name = 'Citus Maintenance Daemon';
-- reconnect to make sure we get a session with the gpid containing our nodeid
\c - - - -
@@ -58,6 +58,8 @@ SELECT pg_cancel_backend(citus_backend_gpid());
\c - postgres - :master_port
+DROP USER global_cancel_user;
SET client_min_messages TO DEBUG;
-- 10000000000 is the node id multiplier for global pid