From 8c5c0dd74c0e7b68523bc537ae06f8d4f50f34e4 Mon Sep 17 00:00:00 2001
From: Jelte Fennema
Date: Fri, 3 Jan 2020 11:40:50 +0100
Subject: [PATCH] Normalize tests: s/localhost:[0-9]+/localhost:xxxxx/g

Test output embeds the port of the node a message came from, so the
expected files hardcode both the regular worker ports (e.g. 57636, 57637)
and the mitmproxy port (9060) used by the failure tests. Normalize every
"localhost:<port>" to "localhost:xxxxx" and apply the same substitution to
the existing expected files, so they no longer depend on which port a
command happened to run against. This change also comments out all other
rules in normalize.sed.
---
 src/test/regress/bin/normalize.sed            | 142 ++++++------
 src/test/regress/expected/add_coordinator.out |   2 +-
 .../regress/expected/aggregate_support.out    |   2 +-
 .../expected/disable_object_propagation.out   |   4 +-
 .../expected/failure_1pc_copy_append.out      |  30 +--
 .../expected/failure_1pc_copy_hash.out        |  38 ++--
 .../expected/failure_add_disable_node.out     |  26 +--
 .../failure_connection_establishment.out      |   6 +-
 .../regress/expected/failure_copy_on_hash.out |  22 +-
 .../expected/failure_copy_to_reference.out    |  16 +-
 ...ure_create_distributed_table_non_empty.out |  34 +--
 .../failure_create_reference_table.out        |  16 +-
 .../regress/expected/failure_create_table.out |  28 +--
 .../regress/expected/failure_cte_subquery.out |  12 +-
 src/test/regress/expected/failure_ddl.out     |  44 ++--
 .../failure_insert_select_pushdown.out        |   4 +-
 .../failure_insert_select_via_coordinator.out |  12 +-
 .../regress/expected/failure_multi_dml.out    |  14 +-
 .../expected/failure_multi_row_insert.out     |  10 +-
 .../failure_multi_shard_update_delete.out     |  34 +--
 .../expected/failure_mx_metadata_sync.out     |  10 +-
 .../regress/expected/failure_ref_tables.out   |   6 +-
 .../regress/expected/failure_savepoints.out   |  48 ++--
 .../regress/expected/failure_single_mod.out   |   6 +-
 .../expected/failure_single_select.out        |  14 +-
 .../regress/expected/failure_truncate.out     |  40 ++--
 src/test/regress/expected/failure_vacuum.out  |   6 +-
 .../regress/expected/failure_vacuum_1.out     |   4 +-
 .../foreign_key_restriction_enforcement.out   |   2 +-
 .../foreign_key_to_reference_table.out        |  10 +-
 .../expected/intermediate_result_pruning.out  | 210 +++++++++---------
 .../regress/expected/intermediate_results.out |   2 +-
 .../expected/isolation_add_remove_node.out    |   2 +-
 .../expected/isolation_shouldhaveshards.out   |   2 +-
 .../regress/expected/multi_703_upgrade.out    |   2 +-
 .../multi_alter_table_add_constraints.out     |  30 +--
 .../regress/expected/multi_citus_tools.out    |   4 +-
 .../expected/multi_cluster_management.out     |   4 +-
 .../multi_create_table_constraints.out        |  22 +-
 .../expected/multi_distribution_metadata.out  |   6 +-
 .../regress/expected/multi_foreign_key.out    |  36 +--
 .../expected/multi_insert_select_conflict.out |   2 +-
 .../regress/expected/multi_metadata_sync.out  |  22 +-
 .../expected/multi_modifying_xacts.out        |  40 ++--
 src/test/regress/expected/multi_multiuser.out |  10 +-
 .../multi_mx_function_call_delegation.out     |   4 +-
 .../expected/multi_mx_modifying_xacts.out     |   8 +-
 .../expected/multi_mx_node_metadata.out       |  16 +-
 .../regress/expected/multi_partitioning.out   |  16 +-
 .../multi_remove_node_reference_table.out     |  22 +-
 .../multi_repartition_join_planning.out       | 102 ++++-----
 ...multi_repartition_join_task_assignment.out |  38 ++--
 .../multi_replicate_reference_table.out       |  18 +-
 .../regress/expected/multi_router_planner.out |   4 +-
 .../expected/multi_shard_update_delete.out    |   2 +-
 .../regress/expected/multi_sql_function.out   |   2 +-
 .../expected/multi_task_assignment_policy.out |  24 +-
 .../multi_transactional_drop_shards.out       |   2 +-
 src/test/regress/expected/pg12.out            |   2 +-
 .../expected/propagate_extension_commands.out |   4 +-
 .../set_operation_and_local_tables.out        |   2 +-
 src/test/regress/expected/sql_procedure.out   |   4 +-
 src/test/regress/expected/with_basics.out     |   2 +-
 63 files changed, 654 insertions(+), 654 deletions(-)

diff --git a/src/test/regress/bin/normalize.sed
b/src/test/regress/bin/normalize.sed index cfa68cf6a..e091ceeb8 100644 --- a/src/test/regress/bin/normalize.sed +++ b/src/test/regress/bin/normalize.sed @@ -8,74 +8,74 @@ # In all tests, normalize worker ports, placement ids, and shard ids s/localhost:[0-9]+/localhost:xxxxx/g -s/ port=[0-9]+ / port=xxxxx /g -s/placement [0-9]+/placement xxxxx/g -s/shard [0-9]+/shard xxxxx/g -s/assigned task [0-9]+ to node/assigned task to node/ -s/node group [12] (but|does)/node group \1/ - -# Differing names can have differing table column widths -s/(-+\|)+-+/---/g -s/.*-------------.*/---------------------------------------------------------------------/g - -# In foreign_key_to_reference_table, normalize shard table names, etc in -# the generated plan -s/"(foreign_key_2_|fkey_ref_to_dist_|fkey_ref_)[0-9]+"/"\1xxxxxxx"/g -s/"(referenced_table_|referencing_table_|referencing_table2_)[0-9]+"/"\1xxxxxxx"/g -s/"(referencing_table_0_|referenced_table2_)[0-9]+"/"\1xxxxxxx"/g -s/\(id\)=\([0-9]+\)/(id)=(X)/g -s/\(ref_id\)=\([0-9]+\)/(ref_id)=(X)/g - -# shard table names for multi_subtransactions -s/"t2_[0-9]+"/"t2_xxxxxxx"/g - -# shard table names for custom_aggregate_support -s/ daily_uniques_[0-9]+ / daily_uniques_xxxxxxx /g - -# In foreign_key_restriction_enforcement, normalize shard names -s/"(on_update_fkey_table_|fkey_)[0-9]+"/"\1xxxxxxx"/g - -# In multi_insert_select_conflict, normalize shard name and constraints -s/"(target_table_|target_table_|test_ref_table_)[0-9]+"/"\1xxxxxxx"/g -s/\(col_1\)=\([0-9]+\)/(col_1)=(X)/g - -# In multi_name_lengths, normalize shard names -s/name_len_12345678901234567890123456789012345678_fcd8ab6f_[0-9]+/name_len_12345678901234567890123456789012345678_fcd8ab6f_xxxxx/g - -# normalize pkey constraints in multi_insert_select.sql -s/"(raw_events_second_user_id_value_1_key_|agg_events_user_id_value_1_agg_key_)[0-9]+"/"\1xxxxxxx"/g - -# normalize failed task ids -s/ERROR: failed to execute task [0-9]+/ERROR: failed to execute task X/g - -# ignore could not consume warnings -/WARNING: could not consume data from worker node/d - -# ignore WAL warnings -/DEBUG: .+creating and filling new WAL file/d - -# normalize file names for partitioned files -s/(task_[0-9]+\.)[0-9]+/\1xxxx/g -s/(job_[0-9]+\/task_[0-9]+\/p_[0-9]+\.)[0-9]+/\1xxxx/g - -# isolation_ref2ref_foreign_keys -s/"(ref_table_[0-9]_|ref_table_[0-9]_value_fkey_)[0-9]+"/"\1xxxxxxx"/g - -# Line info varies between versions -/^LINE [0-9]+:.*$/d -/^ *\^$/d - -# Remove trailing whitespace -s/ *$//g - -# pg12 changes -s/Partitioned table "/Table "/g -s/\) TABLESPACE pg_default$/\)/g -s/invalid input syntax for type /invalid input syntax for /g -s/_id_ref_id_fkey/_id_fkey/g -s/_ref_id_id_fkey_/_ref_id_fkey_/g -s/fk_test_2_col1_col2_fkey/fk_test_2_col1_fkey/g -s/_id_other_column_ref_fkey/_id_fkey/g - -# intermediate_results -s/(ERROR.*)pgsql_job_cache\/([0-9]+_[0-9]+_[0-9]+)\/(.*).data/\1pgsql_job_cache\/xx_x_xxx\/\3.data/g +#s/ port=[0-9]+ / port=xxxxx /g +#s/placement [0-9]+/placement xxxxx/g +#s/shard [0-9]+/shard xxxxx/g +#s/assigned task [0-9]+ to node/assigned task to node/ +#s/node group [12] (but|does)/node group \1/ +# +## Differing names can have differing table column widths +#s/(-+\|)+-+/---/g +#s/.*-------------.*/---------------------------------------------------------------------/g +# +## In foreign_key_to_reference_table, normalize shard table names, etc in +## the generated plan +#s/"(foreign_key_2_|fkey_ref_to_dist_|fkey_ref_)[0-9]+"/"\1xxxxxxx"/g 
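+## Illustrative example of the one rule left active above (assumes the
+## script is applied with extended regexes, e.g. `sed -Ef normalize.sed`,
+## since the rules use unescaped `+` and `(...)`):
+##   in:  CONTEXT: while executing command on localhost:57637
+##   out: CONTEXT: while executing command on localhost:xxxxx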
+#s/"(referenced_table_|referencing_table_|referencing_table2_)[0-9]+"/"\1xxxxxxx"/g +#s/"(referencing_table_0_|referenced_table2_)[0-9]+"/"\1xxxxxxx"/g +#s/\(id\)=\([0-9]+\)/(id)=(X)/g +#s/\(ref_id\)=\([0-9]+\)/(ref_id)=(X)/g +# +## shard table names for multi_subtransactions +#s/"t2_[0-9]+"/"t2_xxxxxxx"/g +# +## shard table names for custom_aggregate_support +#s/ daily_uniques_[0-9]+ / daily_uniques_xxxxxxx /g +# +## In foreign_key_restriction_enforcement, normalize shard names +#s/"(on_update_fkey_table_|fkey_)[0-9]+"/"\1xxxxxxx"/g +# +## In multi_insert_select_conflict, normalize shard name and constraints +#s/"(target_table_|target_table_|test_ref_table_)[0-9]+"/"\1xxxxxxx"/g +#s/\(col_1\)=\([0-9]+\)/(col_1)=(X)/g +# +## In multi_name_lengths, normalize shard names +#s/name_len_12345678901234567890123456789012345678_fcd8ab6f_[0-9]+/name_len_12345678901234567890123456789012345678_fcd8ab6f_xxxxx/g +# +## normalize pkey constraints in multi_insert_select.sql +#s/"(raw_events_second_user_id_value_1_key_|agg_events_user_id_value_1_agg_key_)[0-9]+"/"\1xxxxxxx"/g +# +## normalize failed task ids +#s/ERROR: failed to execute task [0-9]+/ERROR: failed to execute task X/g +# +## ignore could not consume warnings +#/WARNING: could not consume data from worker node/d +# +## ignore WAL warnings +#/DEBUG: .+creating and filling new WAL file/d +# +## normalize file names for partitioned files +#s/(task_[0-9]+\.)[0-9]+/\1xxxx/g +#s/(job_[0-9]+\/task_[0-9]+\/p_[0-9]+\.)[0-9]+/\1xxxx/g +# +## isolation_ref2ref_foreign_keys +#s/"(ref_table_[0-9]_|ref_table_[0-9]_value_fkey_)[0-9]+"/"\1xxxxxxx"/g +# +## Line info varies between versions +#/^LINE [0-9]+:.*$/d +#/^ *\^$/d +# +## Remove trailing whitespace +#s/ *$//g +# +## pg12 changes +#s/Partitioned table "/Table "/g +#s/\) TABLESPACE pg_default$/\)/g +#s/invalid input syntax for type /invalid input syntax for /g +#s/_id_ref_id_fkey/_id_fkey/g +#s/_ref_id_id_fkey_/_ref_id_fkey_/g +#s/fk_test_2_col1_col2_fkey/fk_test_2_col1_fkey/g +#s/_id_other_column_ref_fkey/_id_fkey/g +# +## intermediate_results +#s/(ERROR.*)pgsql_job_cache\/([0-9]+_[0-9]+_[0-9]+)\/(.*).data/\1pgsql_job_cache\/xx_x_xxx\/\3.data/g diff --git a/src/test/regress/expected/add_coordinator.out b/src/test/regress/expected/add_coordinator.out index 41d89db06..8681c8b8a 100644 --- a/src/test/regress/expected/add_coordinator.out +++ b/src/test/regress/expected/add_coordinator.out @@ -14,7 +14,7 @@ SELECT master_add_node('localhost', 12345, groupid => 0) = :master_nodeid; ERROR: group 0 already has a primary node -- start_metadata_sync_to_node() for coordinator should raise a notice SELECT start_metadata_sync_to_node('localhost', :master_port); -NOTICE: localhost:57636 is the coordinator and already contains metadata, skipping syncing the metadata +NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata start_metadata_sync_to_node ----------------------------- diff --git a/src/test/regress/expected/aggregate_support.out b/src/test/regress/expected/aggregate_support.out index e5b8eeec9..c69950335 100644 --- a/src/test/regress/expected/aggregate_support.out +++ b/src/test/regress/expected/aggregate_support.out @@ -187,7 +187,7 @@ create aggregate sumstring(text) ( ); select sumstring(valf::text) from aggdata where valf is not null; ERROR: function "aggregate_support.sumstring(text)" does not exist -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx select 
create_distributed_function('sumstring(text)'); create_distributed_function ----------------------------- diff --git a/src/test/regress/expected/disable_object_propagation.out b/src/test/regress/expected/disable_object_propagation.out index 08186ca11..224666408 100644 --- a/src/test/regress/expected/disable_object_propagation.out +++ b/src/test/regress/expected/disable_object_propagation.out @@ -19,7 +19,7 @@ CREATE TYPE tt1 AS (a int , b int); CREATE TABLE t2 (a int PRIMARY KEY, b tt1); SELECT create_distributed_table('t2', 'a'); ERROR: type "disabled_object_propagation.tt1" does not exist -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx SELECT 1 FROM run_command_on_workers($$ BEGIN; SET LOCAL citus.enable_ddl_propagation TO off; @@ -43,7 +43,7 @@ CREATE TYPE tt2 AS ENUM ('a', 'b'); CREATE TABLE t3 (a int PRIMARY KEY, b tt2); SELECT create_distributed_table('t3', 'a'); ERROR: type "disabled_object_propagation.tt2" does not exist -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx SELECT 1 FROM run_command_on_workers($$ BEGIN; SET LOCAL citus.enable_ddl_propagation TO off; diff --git a/src/test/regress/expected/failure_1pc_copy_append.out b/src/test/regress/expected/failure_1pc_copy_append.out index 44d80cc52..7a30efdfe 100644 --- a/src/test/regress/expected/failure_1pc_copy_append.out +++ b/src/test/regress/expected/failure_1pc_copy_append.out @@ -74,7 +74,7 @@ COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' W ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; @@ -99,8 +99,8 @@ SELECT citus.mitmproxy('conn.onQuery(query="assign_distributed_transaction_id"). COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -ERROR: failure on connection marked as essential: localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +ERROR: failure on connection marked as essential: localhost:xxxxx SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; @@ -127,7 +127,7 @@ COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' W ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; @@ -151,7 +151,7 @@ SELECT citus.mitmproxy('conn.onCopyData().kill()'); (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; -ERROR: failed to COPY to shard 100404 on localhost:9060 +ERROR: failed to COPY to shard 100404 on localhost:xxxxx SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; @@ -168,7 +168,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT|COPY").kill()'); (1 row) SELECT count(1) FROM copy_test; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -208,10 +208,10 @@ COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' W WARNING: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -ERROR: failure on connection marked as essential: localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +ERROR: failure on connection marked as essential: localhost:xxxxx SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; @@ -238,10 +238,10 @@ COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' W WARNING: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -ERROR: failure on connection marked as essential: localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +ERROR: failure on connection marked as essential: localhost:xxxxx SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; @@ -266,10 +266,10 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: failed to commit transaction on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; diff --git a/src/test/regress/expected/failure_1pc_copy_hash.out b/src/test/regress/expected/failure_1pc_copy_hash.out index 0e4d97ec2..b03c9043b 100644 --- a/src/test/regress/expected/failure_1pc_copy_hash.out +++ b/src/test/regress/expected/failure_1pc_copy_hash.out @@ -61,9 +61,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="assign_distributed_transaction").kil COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx COPY copy_test, line 1: "0, 0" -ERROR: failure on connection marked as essential: localhost:9060 +ERROR: failure on connection marked as essential: localhost:xxxxx CONTEXT: COPY copy_test, line 1: "0, 0" -- ==== kill the connection when we try to start the COPY ==== -- the query should abort @@ -77,7 +77,7 @@ COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' W ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx COPY copy_test, line 1: "0, 0" -- ==== kill the connection when we first start sending data ==== -- the query should abort @@ -88,7 +88,7 @@ SELECT citus.mitmproxy('conn.onCopyData().killall()'); -- raw rows from the clie (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; -ERROR: failed to COPY to shard 100400 on localhost:9060 +ERROR: failed to COPY to shard 100400 on localhost:xxxxx -- ==== kill the connection when the worker confirms it's received the data ==== -- the query should abort SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").killall()'); @@ -98,7 +98,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").killall()'); (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; -ERROR: failed to COPY to shard 100400 on localhost:9060 +ERROR: failed to COPY to shard 100400 on localhost:xxxxx -- ==== kill the connection when we try to send COMMIT ==== -- the query should succeed, and the placement should be marked inactive SELECT citus.mitmproxy('conn.allow()'); @@ -129,10 +129,10 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT$").killall()'); COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: failed to commit transaction on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- the shard is marked invalid SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -219,7 +219,7 @@ SELECT citus.mitmproxy('conn.killall()'); (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -250,9 +250,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="assign_distributed_transaction_id"). COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx COPY copy_test, line 1: "0, 0" -ERROR: failure on connection marked as essential: localhost:9060 +ERROR: failure on connection marked as essential: localhost:xxxxx CONTEXT: COPY copy_test, line 1: "0, 0" SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -280,7 +280,7 @@ COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' W ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx COPY copy_test, line 1: "0, 0" SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -305,7 +305,7 @@ SELECT citus.mitmproxy('conn.onCopyData().killall()'); (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; -ERROR: failed to COPY to shard 100400 on localhost:9060 +ERROR: failed to COPY to shard 100400 on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ----------- @@ -330,10 +330,10 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT$").killall()'); COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: failed to commit transaction on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: could not commit transaction for shard 100400 on any active node ERROR: could not commit transaction on any active node SELECT citus.mitmproxy('conn.allow()'); @@ -370,10 +370,10 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT").killall()'); COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: failed to commit transaction on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: could not commit transaction for shard 100400 on any active node ERROR: could not commit transaction on any active node SELECT citus.mitmproxy('conn.allow()'); diff --git a/src/test/regress/expected/failure_add_disable_node.out b/src/test/regress/expected/failure_add_disable_node.out index d64542f6e..c19df311c 100644 --- a/src/test/regress/expected/failure_add_disable_node.out +++ b/src/test/regress/expected/failure_add_disable_node.out @@ -53,7 +53,7 @@ ORDER BY placementid; (2 rows) SELECT master_disable_node('localhost', :worker_2_proxy_port); -NOTICE: Node localhost:9060 has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 9060) to activate this node back. +NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 9060) to activate this node back. master_disable_node --------------------- @@ -83,11 +83,11 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); (1 row) SELECT master_activate_node('localhost', :worker_2_proxy_port); -NOTICE: Replicating reference table "user_table" to the node localhost:9060 +NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ----------- @@ -122,7 +122,7 @@ SELECT master_activate_node('localhost', :worker_2_proxy_port); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- verify node is not activated SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; @@ -148,7 +148,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backen (1 row) SELECT master_activate_node('localhost', :worker_2_proxy_port); -NOTICE: Replicating reference table "user_table" to the node localhost:9060 +NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx ERROR: canceling statement due to user request -- verify node is not activated SELECT * FROM master_get_active_worker_nodes() @@ -234,11 +234,11 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); (1 row) SELECT master_add_node('localhost', :worker_2_proxy_port); -NOTICE: Replicating reference table "user_table" to the node localhost:9060 +NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- verify node is not added SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; @@ -263,7 +263,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backen (1 row) SELECT master_add_node('localhost', :worker_2_proxy_port); -NOTICE: Replicating reference table "user_table" to the node localhost:9060 +NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx ERROR: canceling statement due to user request -- verify node is not added SELECT * FROM master_get_active_worker_nodes() @@ -290,7 +290,7 @@ SELECT citus.mitmproxy('conn.allow()'); (1 row) SELECT master_add_node('localhost', :worker_2_proxy_port); -NOTICE: Replicating reference table "user_table" to the node localhost:9060 +NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx master_add_node ----------------- 6 @@ -329,9 +329,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="COPY").kill()'); (1 row) SELECT master_add_node('localhost', :worker_1_port); -NOTICE: Replicating reference table "user_table" to the node localhost:57637 -ERROR: could not copy table "user_table_200000" from "localhost:9060" -CONTEXT: while executing command on localhost:57637 +NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx +ERROR: could not copy table "user_table_200000" from "localhost:xxxxx" +CONTEXT: while executing command on localhost:xxxxx -- verify node is not added SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; @@ -347,7 +347,7 @@ SELECT citus.mitmproxy('conn.allow()'); (1 row) SELECT master_add_node('localhost', :worker_1_port); -NOTICE: Replicating reference table "user_table" to the node localhost:57637 +NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx master_add_node ----------------- 8 diff --git a/src/test/regress/expected/failure_connection_establishment.out b/src/test/regress/expected/failure_connection_establishment.out 
index bbb430e22..75d6d13c2 100644 --- a/src/test/regress/expected/failure_connection_establishment.out +++ b/src/test/regress/expected/failure_connection_establishment.out @@ -44,7 +44,7 @@ SELECT citus.mitmproxy('conn.delay(500)'); (1 row) ALTER TABLE products ADD CONSTRAINT p_key PRIMARY KEY(product_no); -ERROR: could not establish any connections to the node localhost:9060 after 400 ms +ERROR: could not establish any connections to the node localhost:xxxxx after 400 ms SELECT citus.mitmproxy('conn.allow()'); mitmproxy ----------- @@ -169,7 +169,7 @@ SELECT citus.mitmproxy('conn.delay(500)'); (1 row) SELECT count(*) FROM single_replicatated; -ERROR: could not establish any connections to the node localhost:9060 after 400 ms +ERROR: could not establish any connections to the node localhost:xxxxx after 400 ms SET citus.force_max_query_parallelization TO OFF; -- one similar test, but this time on modification queries -- to see that connection establishement failures could @@ -224,7 +224,7 @@ RESET client_min_messages; -- verify get_global_active_transactions works when a timeout happens on a connection SELECT get_global_active_transactions(); WARNING: could not establish connection after 400 ms -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx get_global_active_transactions -------------------------------- (0 rows) diff --git a/src/test/regress/expected/failure_copy_on_hash.out b/src/test/regress/expected/failure_copy_on_hash.out index 9f413641b..827ea3547 100644 --- a/src/test/regress/expected/failure_copy_on_hash.out +++ b/src/test/regress/expected/failure_copy_on_hash.out @@ -36,7 +36,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) \COPY test_table FROM stdin delimiter ','; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -69,7 +69,7 @@ SELECT citus.mitmproxy('conn.onCopyData().kill()'); (1 row) \COPY test_table FROM stdin delimiter ','; -ERROR: failed to COPY to shard 1710000 on localhost:9060 +ERROR: failed to COPY to shard 1710000 on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ----------- @@ -124,7 +124,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 1").kill()'); (1 row) \COPY test_table FROM stdin delimiter ','; -ERROR: failed to COPY to shard 1710002 on localhost:9060 +ERROR: failed to COPY to shard 1710002 on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ----------- @@ -179,7 +179,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()'); \COPY test_table FROM stdin delimiter ','; ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ----------- @@ -239,7 +239,7 @@ BEGIN; \COPY test_table FROM stdin delimiter ','; ROLLBACK; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ----------- @@ -276,22 +276,22 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) \COPY test_table_2 FROM stdin delimiter ','; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
CONTEXT: COPY test_table_2, line 1: "1,2" -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. CONTEXT: COPY test_table_2, line 2: "3,4" -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. CONTEXT: COPY test_table_2, line 3: "6,7" -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -341,7 +341,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx COPY test_table_2, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -386,7 +386,7 @@ SELECT citus.mitmproxy('conn.onCopyData().kill()'); (1 row) \COPY test_table_2 FROM stdin delimiter ','; -ERROR: failed to COPY to shard 1710012 on localhost:9060 +ERROR: failed to COPY to shard 1710012 on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ----------- diff --git a/src/test/regress/expected/failure_copy_to_reference.out b/src/test/regress/expected/failure_copy_to_reference.out index 77f854912..ce65f33a5 100644 --- a/src/test/regress/expected/failure_copy_to_reference.out +++ b/src/test/regress/expected/failure_copy_to_reference.out @@ -35,7 +35,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) \copy test_table FROM STDIN DELIMITER ',' -ERROR: failure on connection marked as essential: localhost:9060 +ERROR: failure on connection marked as essential: localhost:xxxxx CONTEXT: COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -63,7 +63,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) \copy test_table FROM STDIN DELIMITER ',' -ERROR: failure on connection marked as essential: localhost:9060 +ERROR: failure on connection marked as essential: localhost:xxxxx CONTEXT: COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -122,7 +122,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -178,7 +178,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="^COPY 3").kill()'); (1 row) \copy test_table FROM STDIN DELIMITER ',' -ERROR: failed to COPY to shard 130000 on localhost:9060 +ERROR: failed to COPY to shard 130000 on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ----------- @@ -236,7 +236,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -265,7 +265,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").kill()'); \copy test_table FROM STDIN DELIMITER ',' ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ----------- @@ -396,7 +396,7 @@ SET LOCAL client_min_messages TO WARNING; \copy test_table FROM STDIN DELIMITER ',' ROLLBACK; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ----------- @@ -429,7 +429,7 @@ SET LOCAL client_min_messages TO WARNING; \copy test_table FROM STDIN DELIMITER ',' ROLLBACK; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ----------- diff --git a/src/test/regress/expected/failure_create_distributed_table_non_empty.out b/src/test/regress/expected/failure_create_distributed_table_non_empty.out index 8b5acb091..94a60fdaa 100644 --- a/src/test/regress/expected/failure_create_distributed_table_non_empty.out +++ b/src/test/regress/expected/failure_create_distributed_table_non_empty.out @@ -26,7 +26,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -62,7 +62,7 @@ SELECT create_distributed_table('test_table', 'id'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count ------- @@ -125,7 +125,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -190,7 +190,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -211,7 +211,7 @@ SELECT create_distributed_table('test_table', 'id'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count ------- @@ -227,7 +227,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").kill()'); SELECT create_distributed_table('test_table', 'id'); NOTICE: Copying data from local table... -ERROR: failed to COPY to shard 11000016 on localhost:9060 +ERROR: failed to COPY to shard 11000016 on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count ------- @@ -278,7 +278,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()'); SELECT create_distributed_table('test_table', 'id'); ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count ------- @@ -467,7 +467,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -514,7 +514,7 @@ SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_ ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count ------- @@ -555,7 +555,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_apply_shard_ddl_comma (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -593,7 +593,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -661,7 +661,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -726,7 +726,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
@@ -747,7 +747,7 @@ SELECT create_distributed_table('test_table', 'id'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count ------- @@ -762,7 +762,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: failed to COPY to shard 11000096 on localhost:9060 +ERROR: failed to COPY to shard 11000096 on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count ------- @@ -940,7 +940,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -980,7 +980,7 @@ SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_ ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count ------- diff --git a/src/test/regress/expected/failure_create_reference_table.out b/src/test/regress/expected/failure_create_reference_table.out index 9cd6b4a97..e46e5cca4 100644 --- a/src/test/regress/expected/failure_create_reference_table.out +++ b/src/test/regress/expected/failure_create_reference_table.out @@ -25,7 +25,7 @@ SELECT citus.mitmproxy('conn.onQuery().kill()'); (1 row) SELECT create_reference_table('ref_table'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -43,7 +43,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="BEGIN").kill()'); (1 row) SELECT create_reference_table('ref_table'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -76,7 +76,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="SELECT 1").kill()'); (1 row) SELECT create_reference_table('ref_table'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -110,7 +110,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 3").kill()'); SELECT create_reference_table('ref_table'); NOTICE: Copying data from local table... 
-ERROR: failed to COPY to shard 10000005 on localhost:9060 +ERROR: failed to COPY to shard 10000005 on localhost:xxxxx SELECT count(*) FROM pg_dist_shard_placement; count ------- @@ -145,7 +145,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki SELECT create_reference_table('ref_table'); ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard_placement; count ------- @@ -202,8 +202,8 @@ SELECT create_reference_table('ref_table'); WARNING: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 -ERROR: failure on connection marked as essential: localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +ERROR: failure on connection marked as essential: localhost:xxxxx COMMIT; -- kill on ROLLBACK, should be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); @@ -222,7 +222,7 @@ NOTICE: Copying data from local table... ROLLBACK; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport | placementid ---------+------------+-------------+----------+----------+------------- diff --git a/src/test/regress/expected/failure_create_table.out b/src/test/regress/expected/failure_create_table.out index 9a74ae556..7fc3c1b2e 100644 --- a/src/test/regress/expected/failure_create_table.out +++ b/src/test/regress/expected/failure_create_table.out @@ -20,7 +20,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -57,7 +57,7 @@ SELECT create_distributed_table('test_table', 'id'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ----------- @@ -89,7 +89,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -120,7 +120,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_shard_ddl_comman (1 row) SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
@@ -154,7 +154,7 @@ BEGIN; (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -225,7 +225,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); (1 row) SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -284,7 +284,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table'); ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ----------- @@ -347,7 +347,7 @@ SELECT citus.mitmproxy('conn.kill()'); BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -384,7 +384,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -463,7 +463,7 @@ SELECT citus.mitmproxy('conn.kill()'); BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -496,7 +496,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -533,7 +533,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -615,7 +615,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT master_create_worker_shards('test_table_2', 4, 2); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
@@ -653,7 +653,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="^PREPARE TRANSACTION").k SELECT master_create_worker_shards('test_table_2', 4, 2); ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ----------- diff --git a/src/test/regress/expected/failure_cte_subquery.out b/src/test/regress/expected/failure_cte_subquery.out index 9bf9e2531..e59d1c3d7 100644 --- a/src/test/regress/expected/failure_cte_subquery.out +++ b/src/test/regress/expected/failure_cte_subquery.out @@ -52,7 +52,7 @@ FROM ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- kill at the second copy (pull) SELECT citus.mitmproxy('conn.onQuery(query="SELECT user_id FROM cte_failure.events_table_16000002").kill()'); mitmproxy @@ -83,7 +83,7 @@ FROM ORDER BY 1 DESC LIMIT 5 ) as foo WHERE foo.user_id = cte.user_id; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -117,7 +117,7 @@ FROM ORDER BY 1 DESC LIMIT 5 ) as foo WHERE foo.user_id = cte.user_id; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -257,7 +257,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM").kill()'); WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *) INSERT INTO users_table SELECT * FROM cte_delete; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -290,7 +290,7 @@ INSERT INTO users_table SELECT * FROM cte_delete; ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- verify contents are the same SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -373,7 +373,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode = 'sequential'; WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *) INSERT INTO users_table SELECT * FROM cte_delete; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
diff --git a/src/test/regress/expected/failure_ddl.out b/src/test/regress/expected/failure_ddl.out index dd63b47a1..bf8d303c9 100644 --- a/src/test/regress/expected/failure_ddl.out +++ b/src/test/regress/expected/failure_ddl.out @@ -37,7 +37,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -71,7 +71,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -104,7 +104,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -223,15 +223,15 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT").kill()'); ALTER TABLE test_table ADD COLUMN new_column INT; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: failed to commit transaction on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: failed to commit transaction on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: could not commit transaction for shard 100802 on any active node WARNING: could not commit transaction for shard 100800 on any active node SELECT citus.mitmproxy('conn.allow()'); @@ -289,9 +289,9 @@ SET LOCAL client_min_messages TO WARNING; ALTER TABLE test_table DROP COLUMN new_column; ROLLBACK; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- now cancel just after the worker sends response to -- but Postgres doesn't accepts interrupts during COMMIT and ROLLBACK -- so should not cancel at all, so not an effective test but adding in @@ -349,7 +349,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) ALTER TABLE test_table DROP COLUMN new_column; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
@@ -383,7 +383,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) ALTER TABLE test_table DROP COLUMN new_column; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -416,7 +416,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil (1 row) ALTER TABLE test_table DROP COLUMN new_column; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -450,7 +450,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki ALTER TABLE test_table DROP COLUMN new_column; ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ----------- @@ -720,7 +720,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -754,7 +754,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -787,7 +787,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -821,7 +821,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki ALTER TABLE test_table ADD COLUMN new_column INT; ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ----------- @@ -1048,7 +1048,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -1081,7 +1081,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
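Besides kill(), the same statements are also driven through cancellation by splicing the test backend's pid into the rule (the pid is a psql variable captured earlier in each file, presumably via pg_backend_pid()). Cancelled runs typically end in "canceling statement due to user request" with no worker address, which is why those hunks are largely untouched by this normalization. The representative form used in the files that follow:

    SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')');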
@@ -1093,7 +1093,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").aft (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. diff --git a/src/test/regress/expected/failure_insert_select_pushdown.out b/src/test/regress/expected/failure_insert_select_pushdown.out index a27290627..3dac1df2e 100644 --- a/src/test/regress/expected/failure_insert_select_pushdown.out +++ b/src/test/regress/expected/failure_insert_select_pushdown.out @@ -44,7 +44,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown" (1 row) INSERT INTO events_summary SELECT user_id, event_id, count(*) FROM events_table GROUP BY 1,2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -98,7 +98,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown" (1 row) INSERT INTO events_table SELECT * FROM events_table; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. diff --git a/src/test/regress/expected/failure_insert_select_via_coordinator.out b/src/test/regress/expected/failure_insert_select_via_coordinator.out index 735194327..ab90cfe40 100644 --- a/src/test/regress/expected/failure_insert_select_via_coordinator.out +++ b/src/test/regress/expected/failure_insert_select_via_coordinator.out @@ -56,7 +56,7 @@ INSERT INTO events_summary SELECT event_id, event_type, count(*) FROM events_tab ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- kill data push SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()'); mitmproxy @@ -68,7 +68,7 @@ INSERT INTO events_summary SELECT event_id, event_type, count(*) FROM events_tab ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- cancel coordinator pull query SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')'); mitmproxy @@ -112,7 +112,7 @@ INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- kill data push SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()'); mitmproxy @@ -124,7 +124,7 @@ INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- cancel coordinator pull query SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')'); mitmproxy @@ -170,7 +170,7 @@ INSERT INTO events_reference_distributed SELECT * FROM events_reference; ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- kill data push SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()'); mitmproxy @@ -182,7 +182,7 @@ INSERT INTO events_reference_distributed SELECT * FROM events_reference; ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- cancel coordinator pull query SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')'); mitmproxy diff --git a/src/test/regress/expected/failure_multi_dml.out b/src/test/regress/expected/failure_multi_dml.out index 0552e8e80..85609126d 100644 --- a/src/test/regress/expected/failure_multi_dml.out +++ b/src/test/regress/expected/failure_multi_dml.out @@ -33,7 +33,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^DELETE").kill()'); BEGIN; DELETE FROM dml_test WHERE id = 1; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -96,7 +96,7 @@ BEGIN; DELETE FROM dml_test WHERE id = 1; DELETE FROM dml_test WHERE id = 2; INSERT INTO dml_test VALUES (5, 'Epsilon'); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -154,7 +154,7 @@ DELETE FROM dml_test WHERE id = 1; DELETE FROM dml_test WHERE id = 2; INSERT INTO dml_test VALUES (5, 'Epsilon'); UPDATE dml_test SET name = 'alpha' WHERE id = 1; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
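To fail the Nth matching statement instead of the first, the rules chain an after(n) step; the multi-row INSERT hunks further below rely on this to let one placement succeed before killing the next:

    SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).kill()');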
@@ -392,10 +392,10 @@ UPDATE dml_test SET name = 'alpha' WHERE id = 1; UPDATE dml_test SET name = 'gamma' WHERE id = 3; COMMIT; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: failed to commit transaction on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx --- should see all changes, but they only went to one placement (other is unhealthy) SELECT * FROM dml_test ORDER BY id ASC; id | name @@ -444,7 +444,7 @@ UPDATE dml_test SET name = 'alpha' WHERE id = 1; UPDATE dml_test SET name = 'gamma' WHERE id = 3; COMMIT; ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx --- shouldn't see any changes after failed COMMIT SELECT * FROM dml_test ORDER BY id ASC; id | name diff --git a/src/test/regress/expected/failure_multi_row_insert.out b/src/test/regress/expected/failure_multi_row_insert.out index 07f637102..2eac35bfe 100644 --- a/src/test/regress/expected/failure_multi_row_insert.out +++ b/src/test/regress/expected/failure_multi_row_insert.out @@ -43,7 +43,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); (1 row) INSERT INTO distributed_table VALUES (1,1), (1,2), (1,3); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -58,7 +58,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); (1 row) INSERT INTO distributed_table VALUES (1,7), (5,8); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -73,7 +73,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); (1 row) INSERT INTO distributed_table VALUES (1,11), (6,12); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -93,7 +93,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).kill()'); (1 row) INSERT INTO distributed_table VALUES (1,15), (6,16); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -113,7 +113,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); (1 row) INSERT INTO distributed_table VALUES (2,19),(1,20); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
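After a one-sided COMMIT failure like the one above, the changes survive on a single placement and the other is left unhealthy; the file verifies this through the table contents. The placement state itself can be inspected with a catalog query of roughly this shape (the shardstate = 3 convention for invalid placements is an assumption here, not something these hunks show):

    -- list placements marked invalid after the injected failure
    SELECT shardid, shardstate, nodename, nodeport
    FROM pg_dist_shard_placement
    WHERE shardstate = 3;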
diff --git a/src/test/regress/expected/failure_multi_shard_update_delete.out b/src/test/regress/expected/failure_multi_shard_update_delete.out index ff160802e..9cf343283 100644 --- a/src/test/regress/expected/failure_multi_shard_update_delete.out +++ b/src/test/regress/expected/failure_multi_shard_update_delete.out @@ -63,7 +63,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); -- issue a multi shard delete DELETE FROM t2 WHERE b = 2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -82,7 +82,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005"). (1 row) DELETE FROM t2 WHERE b = 2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -145,7 +145,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); -- issue a multi shard update UPDATE t2 SET c = 4 WHERE b = 2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -164,7 +164,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").kill( (1 row) UPDATE t2 SET c = 4 WHERE b = 2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -221,7 +221,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); -- issue a multi shard delete DELETE FROM t2 WHERE b = 2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -240,7 +240,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005"). (1 row) DELETE FROM t2 WHERE b = 2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -303,7 +303,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); -- issue a multi shard update UPDATE t2 SET c = 4 WHERE b = 2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -322,7 +322,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").kill( (1 row) UPDATE t2 SET c = 4 WHERE b = 2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
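These hunks distinguish two granularities of failure by how the query pattern is anchored: a bare verb matches the command on whichever connection sends it first, while a shard-qualified relation name fails only a single placement query. Both rules appear verbatim above:

    -- fails the multi-shard command on the first matching connection
    SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()');
    -- fails only the statement sent for one particular shard
    SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").kill()');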
@@ -396,7 +396,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); (1 row) DELETE FROM r1 WHERE a = 2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -414,7 +414,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); (1 row) DELETE FROM t2 WHERE b = 2; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -465,7 +465,7 @@ RETURNING *; ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx --- verify nothing is updated SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -496,7 +496,7 @@ UPDATE t3 SET c = q.c FROM ( SELECT b, max(c) as c FROM t2 GROUP BY b) q WHERE t3.b = q.b RETURNING *; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -552,7 +552,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t3_201013").kill( (1 row) UPDATE t3 SET b = 2 WHERE b = 1; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -587,7 +587,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO -- following will fail UPDATE t3 SET b = 2 WHERE b = 1; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -606,7 +606,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO (1 row) UPDATE t3 SET b = 1 WHERE b = 2 RETURNING *; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -626,7 +626,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO (1 row) UPDATE t3 SET b = 2 WHERE b = 1; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -661,7 +661,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO -- following will fail UPDATE t3 SET b = 2 WHERE b = 1; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
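Between injections the file asserts that no partial update leaked through, using a single aggregate over both candidate values rather than two separate counts, as in the hunks above:

    SELECT count(*) FILTER (WHERE b = 1) AS b1,
           count(*) FILTER (WHERE b = 2) AS b2
    FROM t3;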
diff --git a/src/test/regress/expected/failure_mx_metadata_sync.out b/src/test/regress/expected/failure_mx_metadata_sync.out index c47fbe278..3e781aaa1 100644 --- a/src/test/regress/expected/failure_mx_metadata_sync.out +++ b/src/test/regress/expected/failure_mx_metadata_sync.out @@ -48,7 +48,7 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- Failure to drop all tables in pg_dist_partition SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_table").cancel(' || :pid || ')'); mitmproxy @@ -68,7 +68,7 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- Failure to truncate pg_dist_node in the worker SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").cancel(' || :pid || ')'); mitmproxy @@ -88,7 +88,7 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- Failure to populate pg_dist_node in the worker SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").cancel(' || :pid || ')'); mitmproxy @@ -108,7 +108,7 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- Verify that coordinator knows worker does not have valid metadata SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; hasmetadata @@ -147,7 +147,7 @@ SELECT create_distributed_table('t2', 'id'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.onParse(query="^INSERT INTO pg_dist_shard").cancel(' || :pid || ')'); mitmproxy ----------- diff --git a/src/test/regress/expected/failure_ref_tables.out b/src/test/regress/expected/failure_ref_tables.out index 5727cb90c..2bb66d36c 100644 --- a/src/test/regress/expected/failure_ref_tables.out +++ b/src/test/regress/expected/failure_ref_tables.out @@ -33,7 +33,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); (1 row) INSERT INTO ref_table VALUES (5, 6); -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
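Each cancelled step of the metadata sync above is followed by a check that the coordinator still records the worker as unsynced; the pair of statements the file repeats (worker_2_proxy_port is a psql variable set by the harness) is:

    SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
    -- verify that coordinator knows worker does not have valid metadata
    SELECT hasmetadata FROM pg_dist_node WHERE nodeport = :worker_2_proxy_port;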
@@ -51,7 +51,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); (1 row) UPDATE ref_table SET key=7 RETURNING value; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -71,7 +71,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); BEGIN; DELETE FROM ref_table WHERE key=5; UPDATE ref_table SET key=value; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. diff --git a/src/test/regress/expected/failure_savepoints.out b/src/test/regress/expected/failure_savepoints.out index 02163c4a3..1f410d36c 100644 --- a/src/test/regress/expected/failure_savepoints.out +++ b/src/test/regress/expected/failure_savepoints.out @@ -39,15 +39,15 @@ BEGIN; INSERT INTO artists VALUES (5, 'Asher Lev'); SAVEPOINT s1; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: connection error: localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: connection error: localhost:xxxxx DETAIL: connection not open WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx DELETE FROM artists WHERE id=4; ERROR: current transaction is aborted, commands ignored until end of transaction block RELEASE SAVEPOINT s1; @@ -73,17 +73,17 @@ DELETE FROM artists WHERE id=4; RELEASE SAVEPOINT s1; WARNING: AbortSubTransaction while in COMMIT state WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: connection error: localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: connection error: localhost:xxxxx DETAIL: connection not open WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: savepoint "savepoint_2" does not exist -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx ROLLBACK; SELECT * FROM artists WHERE id IN (4, 5); id | name @@ -104,9 +104,9 @@ SAVEPOINT s1; DELETE FROM artists WHERE id=4; ROLLBACK TO SAVEPOINT s1; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx COMMIT; ERROR: could not make changes to shard 100950 on any node SELECT * FROM artists WHERE id IN (4, 5); @@ -131,15 +131,15 @@ INSERT INTO artists VALUES (5, 'Jacob Kahn'); RELEASE SAVEPOINT s2; WARNING: AbortSubTransaction while in COMMIT state WARNING: connection not open -CONTEXT: while executing command on localhost:9060 
-WARNING: connection error: localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: connection error: localhost:xxxxx DETAIL: connection not open WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx COMMIT; SELECT * FROM artists WHERE id IN (4, 5); id | name @@ -162,9 +162,9 @@ SAVEPOINT s2; DELETE FROM artists WHERE id=5; ROLLBACK TO SAVEPOINT s2; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx COMMIT; ERROR: could not make changes to shard 100950 on any node SELECT * FROM artists WHERE id IN (4, 5); @@ -213,7 +213,7 @@ ROLLBACK TO SAVEPOINT s1; WARNING: connection not open WARNING: connection not open WARNING: connection not open -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx WARNING: connection not open WARNING: connection not open COMMIT; @@ -248,7 +248,7 @@ BEGIN; INSERT INTO researchers VALUES (7, 4, 'Jan Plaza'); SAVEPOINT s1; WARNING: connection not open -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx WARNING: connection not open WARNING: connection not open ERROR: connection not open @@ -290,7 +290,7 @@ WARNING: connection not open WARNING: connection not open RELEASE SAVEPOINT s1; COMMIT; -ERROR: failure on connection marked as essential: localhost:9060 +ERROR: failure on connection marked as essential: localhost:xxxxx -- should see correct results from healthy placement and one bad placement SELECT * FROM researchers WHERE lab_id = 4; id | lab_id | name @@ -321,7 +321,7 @@ ROLLBACK TO s1; RELEASE SAVEPOINT s1; WARNING: AbortSubTransaction while in COMMIT state WARNING: connection not open -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx WARNING: connection not open WARNING: connection not open WARNING: savepoint "savepoint_3" does not exist diff --git a/src/test/regress/expected/failure_single_mod.out b/src/test/regress/expected/failure_single_mod.out index da2cb8b40..cbdb49c2b 100644 --- a/src/test/regress/expected/failure_single_mod.out +++ b/src/test/regress/expected/failure_single_mod.out @@ -27,7 +27,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); (1 row) INSERT INTO mod_test VALUES (2, 6); -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -63,7 +63,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); (1 row) UPDATE mod_test SET value='ok' WHERE key=2 RETURNING key; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
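The savepoint hunks above walk the same failure through every subtransaction boundary; stripped of the injected warnings, the skeleton each block repeats is roughly (statements collected from the hunks above):

    BEGIN;
    INSERT INTO artists VALUES (5, 'Asher Lev');
    SAVEPOINT s1;                       -- the proxy may kill the worker here
    DELETE FROM artists WHERE id = 4;
    ROLLBACK TO SAVEPOINT s1;           -- or RELEASE SAVEPOINT s1
    COMMIT;
    SELECT * FROM artists WHERE id IN (4, 5);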
@@ -102,7 +102,7 @@ INSERT INTO mod_test VALUES (2, 6); INSERT INTO mod_test VALUES (2, 7); DELETE FROM mod_test WHERE key=2 AND value = '7'; UPDATE mod_test SET value='ok' WHERE key=2; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. diff --git a/src/test/regress/expected/failure_single_select.out b/src/test/regress/expected/failure_single_select.out index 2f07f7f5c..3a8553338 100644 --- a/src/test/regress/expected/failure_single_select.out +++ b/src/test/regress/expected/failure_single_select.out @@ -28,7 +28,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()'); (1 row) SELECT * FROM select_test WHERE key = 3; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -38,7 +38,7 @@ DETAIL: server closed the connection unexpectedly (1 row) SELECT * FROM select_test WHERE key = 3; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -57,7 +57,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()'); BEGIN; INSERT INTO select_test VALUES (3, 'more data'); SELECT * FROM select_test WHERE key = 3; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -69,7 +69,7 @@ DETAIL: server closed the connection unexpectedly INSERT INTO select_test VALUES (3, 'even more data'); SELECT * FROM select_test WHERE key = 3; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -160,7 +160,7 @@ SELECT * FROM select_test WHERE key = 3; INSERT INTO select_test VALUES (3, 'even more data'); SELECT * FROM select_test WHERE key = 3; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -187,7 +187,7 @@ SELECT recover_prepared_transactions(); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx -- bug from https://github.com/citusdata/citus/issues/1926 SET citus.max_cached_conns_per_worker TO 0; -- purge cache DROP TABLE select_test; @@ -215,7 +215,7 @@ SELECT * FROM select_test WHERE key = 1; (1 row) SELECT * FROM select_test WHERE key = 1; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
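One detail worth noting in failure_single_select: before the final case the file drops the connection cache, presumably so the next statement opens a fresh, killable connection rather than reusing one that dodges the rule (the file's own comment says only "purge cache"):

    SET citus.max_cached_conns_per_worker TO 0;  -- purge cache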
diff --git a/src/test/regress/expected/failure_truncate.out b/src/test/regress/expected/failure_truncate.out index 4c86ff171..487f47407 100644 --- a/src/test/regress/expected/failure_truncate.out +++ b/src/test/regress/expected/failure_truncate.out @@ -44,7 +44,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) TRUNCATE test_table; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -102,7 +102,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) TRUNCATE test_table; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -159,7 +159,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test (1 row) TRUNCATE test_table; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -284,15 +284,15 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT").kill()'); TRUNCATE test_table; WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: failed to commit transaction on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 -WARNING: failed to commit transaction on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx WARNING: could not commit transaction for shard 120002 on any active node WARNING: could not commit transaction for shard 120000 on any active node SELECT citus.mitmproxy('conn.allow()'); @@ -365,7 +365,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()'); TRUNCATE reference_table CASCADE; ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ----------- @@ -433,7 +433,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE").after(2).kill()'); (1 row) TRUNCATE reference_table CASCADE; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
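For the reference-table cases above, the interesting failure point is two-phase commit: the rule fires on the PREPARE TRANSACTION statement itself, so the result is a plain "connection not open" error rather than the connection-error/DETAIL pair seen elsewhere:

    SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()');
    TRUNCATE reference_table CASCADE;
    -- expected: ERROR: connection not open
    --           CONTEXT: while executing command on localhost:xxxxx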
@@ -506,7 +506,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki TRUNCATE reference_table CASCADE; ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ----------- @@ -577,7 +577,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) TRUNCATE test_table; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -635,7 +635,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) TRUNCATE test_table; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -692,7 +692,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE truncate_failure.tes (1 row) TRUNCATE test_table; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -750,7 +750,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="^PREPARE TRANSACTION").k TRUNCATE test_table; ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ----------- @@ -956,7 +956,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) TRUNCATE test_table; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -1014,7 +1014,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) TRUNCATE test_table; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -1071,7 +1071,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test (1 row) TRUNCATE test_table; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
@@ -1129,7 +1129,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki TRUNCATE test_table; ERROR: connection not open -CONTEXT: while executing command on localhost:9060 +CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ----------- diff --git a/src/test/regress/expected/failure_vacuum.out b/src/test/regress/expected/failure_vacuum.out index 430d21b6d..e7961d4e2 100644 --- a/src/test/regress/expected/failure_vacuum.out +++ b/src/test/regress/expected/failure_vacuum.out @@ -31,7 +31,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").kill()'); (1 row) VACUUM vacuum_test; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -42,7 +42,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()'); (1 row) ANALYZE vacuum_test; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -111,7 +111,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").kill()'); (1 row) VACUUM vacuum_test, other_vacuum_test; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. diff --git a/src/test/regress/expected/failure_vacuum_1.out b/src/test/regress/expected/failure_vacuum_1.out index 52cb95d32..5b153efa8 100644 --- a/src/test/regress/expected/failure_vacuum_1.out +++ b/src/test/regress/expected/failure_vacuum_1.out @@ -31,7 +31,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").kill()'); (1 row) VACUUM vacuum_test; -ERROR: connection error: localhost:9060 +ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -42,7 +42,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()'); (1 row) ANALYZE vacuum_test; -WARNING: connection error: localhost:9060 +WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. diff --git a/src/test/regress/expected/foreign_key_restriction_enforcement.out b/src/test/regress/expected/foreign_key_restriction_enforcement.out index 5efcbd9dd..2769a83a9 100644 --- a/src/test/regress/expected/foreign_key_restriction_enforcement.out +++ b/src/test/regress/expected/foreign_key_restriction_enforcement.out @@ -473,7 +473,7 @@ DETAIL: Reference relation "transitive_reference_table" is modified, which migh UPDATE on_update_fkey_table SET value_1 = 101 WHERE id = 1; ERROR: insert or update on table "on_update_fkey_table_2380002" violates foreign key constraint "fkey_2380002" DETAIL: Key (value_1)=(101) is not present in table "reference_table_2380001". 
-CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx UPDATE on_update_fkey_table SET value_1 = 101 WHERE id = 2; ERROR: current transaction is aborted, commands ignored until end of transaction block UPDATE on_update_fkey_table SET value_1 = 101 WHERE id = 3; diff --git a/src/test/regress/expected/foreign_key_to_reference_table.out b/src/test/regress/expected/foreign_key_to_reference_table.out index dd59151c2..eefe93e32 100644 --- a/src/test/regress/expected/foreign_key_to_reference_table.out +++ b/src/test/regress/expected/foreign_key_to_reference_table.out @@ -454,7 +454,7 @@ ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFER INSERT INTO referencing_table VALUES(1, 1); ERROR: insert or update on table "referencing_table_7000141" violates foreign key constraint "fkey_ref_7000141" DETAIL: Key (ref_id)=(1) is not present in table "referenced_table_7000140". -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx -- test insert to referencing while there is corresponding value in referenced table INSERT INTO referenced_table SELECT x, x from generate_series(1,1000) as f(x); INSERT INTO referencing_table SELECT x, x from generate_series(1,500) as f(x); @@ -463,7 +463,7 @@ INSERT INTO referencing_table SELECT x, x from generate_series(1,500) as f(x); DELETE FROM referenced_table WHERE id > 3; ERROR: update or delete on table "referenced_table_7000140" violates foreign key constraint "fkey_ref_7000143" on table "referencing_table_7000143" DETAIL: Key (id)=(4) is still referenced from table "referencing_table_7000143". -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx -- test delete from referenced table while there is NO corresponding value in referencing table DELETE FROM referenced_table WHERE id = 501; -- test cascading truncate @@ -1570,7 +1570,7 @@ INSERT INTO test_table_2 VALUES (4,2147483648); -- should fail since there is a bigint out of integer range > (2^32 - 1) ALTER TABLE test_table_2 ALTER COLUMN value_1 SET DATA TYPE int; ERROR: integer out of range -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; count ------- @@ -1816,7 +1816,7 @@ ALTER TABLE referencing_table_4 ADD CONSTRAINT fkey_to_ref FOREIGN KEY (value_1) INSERT INTO referencing_table VALUES (0, 5); ERROR: insert or update on table "referencing_table_4_7000540" violates foreign key constraint "fkey_7000540" DETAIL: Key (id)=(0) is not present in table "referencing_table_0_7000524". -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx -- should succeed on partitioning_test_0 INSERT INTO referencing_table VALUES (0, 1); SELECT * FROM referencing_table; @@ -1829,7 +1829,7 @@ SELECT * FROM referencing_table; INSERT INTO referencing_table VALUES (0, 5); ERROR: insert or update on table "referencing_table_4_7000540" violates foreign key constraint "fkey_to_ref_7000540" DETAIL: Key (value_1)=(5) is not present in table "referenced_table_7000512". 
-CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx INSERT INTO referenced_table VALUES(5,5); -- should succeed since both of the foreign constraints are positive INSERT INTO referencing_table VALUES (0, 5); diff --git a/src/test/regress/expected/intermediate_result_pruning.out b/src/test/regress/expected/intermediate_result_pruning.out index f9850c7d8..129b1c449 100644 --- a/src/test/regress/expected/intermediate_result_pruning.out +++ b/src/test/regress/expected/intermediate_result_pruning.out @@ -49,8 +49,8 @@ FROM some_values_1 JOIN table_2 USING (key); DEBUG: generating subplan 5_1 for CTE some_values_1: SELECT key FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) -DEBUG: Subplan 5_1 will be sent to localhost:57637 -DEBUG: Subplan 5_1 will be sent to localhost:57638 +DEBUG: Subplan 5_1 will be sent to localhost:xxxxx +DEBUG: Subplan 5_1 will be sent to localhost:xxxxx count ------- 2 @@ -67,7 +67,7 @@ FROM some_values_1 JOIN table_2 USING (key) WHERE table_2.key = 1; DEBUG: generating subplan 7_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1) -DEBUG: Subplan 7_1 will be sent to localhost:57637 +DEBUG: Subplan 7_1 will be sent to localhost:xxxxx count ------- 0 @@ -84,8 +84,8 @@ FROM some_values_1 JOIN ref_table USING (key); DEBUG: generating subplan 9_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('9_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.ref_table USING (key)) -DEBUG: Subplan 9_1 will be sent to localhost:57637 -DEBUG: Subplan 9_1 will be sent to localhost:57638 +DEBUG: Subplan 9_1 will be sent to localhost:xxxxx +DEBUG: Subplan 9_1 will be sent to localhost:xxxxx count ------- 2 @@ -104,8 +104,8 @@ FROM DEBUG: generating subplan 11_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) DEBUG: generating subplan 11_2 for CTE some_values_2: SELECT key, random() AS random FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT 
intermediate_result.key, intermediate_result.random FROM read_intermediate_result('11_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1) -DEBUG: Subplan 11_1 will be sent to localhost:57638 -DEBUG: Subplan 11_2 will be sent to localhost:57637 +DEBUG: Subplan 11_1 will be sent to localhost:xxxxx +DEBUG: Subplan 11_2 will be sent to localhost:xxxxx count ------- 0 @@ -124,9 +124,9 @@ FROM DEBUG: generating subplan 14_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) DEBUG: generating subplan 14_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('14_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3) -DEBUG: Subplan 14_1 will be sent to localhost:57637 -DEBUG: Subplan 14_1 will be sent to localhost:57638 -DEBUG: Subplan 14_2 will be sent to localhost:57638 +DEBUG: Subplan 14_1 will be sent to localhost:xxxxx +DEBUG: Subplan 14_1 will be sent to localhost:xxxxx +DEBUG: Subplan 14_2 will be sent to localhost:xxxxx count ------- 1 @@ -146,9 +146,9 @@ FROM DEBUG: generating subplan 17_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) DEBUG: generating subplan 17_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('17_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3) -DEBUG: Subplan 17_1 will be sent to localhost:57638 -DEBUG: Subplan 17_1 will be sent to localhost:57637 -DEBUG: Subplan 17_2 will be sent to localhost:57638 +DEBUG: Subplan 17_1 will be sent to localhost:xxxxx +DEBUG: Subplan 17_1 will be sent to localhost:xxxxx +DEBUG: Subplan 17_2 will be sent to localhost:xxxxx count ------- 1 @@ -168,9 +168,9 @@ FROM DEBUG: generating subplan 20_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value 
OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) DEBUG: generating subplan 20_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1) DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('20_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3) -DEBUG: Subplan 20_1 will be sent to localhost:57638 -DEBUG: Subplan 20_1 will be sent to localhost:57637 -DEBUG: Subplan 20_2 will be sent to localhost:57638 +DEBUG: Subplan 20_1 will be sent to localhost:xxxxx +DEBUG: Subplan 20_1 will be sent to localhost:xxxxx +DEBUG: Subplan 20_2 will be sent to localhost:xxxxx count ------- 0 @@ -191,8 +191,8 @@ FROM DEBUG: generating subplan 23_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) DEBUG: generating subplan 23_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('23_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1) DEBUG: Plan 23 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('23_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('23_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1) -DEBUG: Subplan 23_1 will be sent to localhost:57637 -DEBUG: Subplan 23_2 will be sent to localhost:57637 +DEBUG: Subplan 23_1 will be sent to localhost:xxxxx +DEBUG: Subplan 23_2 will be sent to localhost:xxxxx count ------- 0 @@ -210,10 +210,10 @@ FROM DEBUG: generating subplan 26_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) DEBUG: generating subplan 26_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT 
count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('26_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.<>) 3)
-DEBUG: Subplan 26_1 will be sent to localhost:57637
-DEBUG: Subplan 26_1 will be sent to localhost:57638
-DEBUG: Subplan 26_2 will be sent to localhost:57637
-DEBUG: Subplan 26_2 will be sent to localhost:57638
+DEBUG: Subplan 26_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 26_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 26_2 will be sent to localhost:xxxxx
+DEBUG: Subplan 26_2 will be sent to localhost:xxxxx
 count
 -------
 1
@@ -233,10 +233,10 @@ FROM
 DEBUG: generating subplan 29_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
 DEBUG: generating subplan 29_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('29_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3)
 DEBUG: Plan 29 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('29_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('29_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.<>) 3)
-DEBUG: Subplan 29_1 will be sent to localhost:57637
-DEBUG: Subplan 29_1 will be sent to localhost:57638
-DEBUG: Subplan 29_2 will be sent to localhost:57637
-DEBUG: Subplan 29_2 will be sent to localhost:57638
+DEBUG: Subplan 29_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 29_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 29_2 will be sent to localhost:xxxxx
+DEBUG: Subplan 29_2 will be sent to localhost:xxxxx
 count
 -------
 0
@@ -253,8 +253,8 @@ FROM
 (some_values_1 JOIN ref_table USING (key)) JOIN table_2 USING (key);
 DEBUG: generating subplan 32_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.ref_table WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
 DEBUG: Plan 32 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('32_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.ref_table USING (key)) JOIN intermediate_result_pruning.table_2 USING (key))
-DEBUG: Subplan 32_1 will be sent to localhost:57637
-DEBUG: Subplan 32_1 will be sent to localhost:57638
+DEBUG: Subplan 32_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 32_1 will be sent to localhost:xxxxx
 count
 -------
 2
@@ -288,8 +288,8 @@ FROM
 DEBUG: generating subplan 35_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
 DEBUG: generating subplan 35_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('35_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)
 DEBUG: Plan 35 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('35_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2
-DEBUG: Subplan 35_1 will be sent to localhost:57637
-DEBUG: Subplan 35_2 will be sent to localhost:57637
+DEBUG: Subplan 35_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 35_2 will be sent to localhost:xxxxx
 count
 -------
 0
@@ -316,10 +316,10 @@ DEBUG: generating subplan 39_1 for CTE some_values_1: SELECT key, random() AS r
 DEBUG: generating subplan 39_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('39_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)
 DEBUG: Plan 39 query after replacing subqueries and CTEs: SELECT DISTINCT key FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('39_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2
 DEBUG: Plan 38 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key FROM read_intermediate_result('38_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) top_cte JOIN intermediate_result_pruning.table_2 USING (key))
-DEBUG: Subplan 38_1 will be sent to localhost:57637
-DEBUG: Subplan 38_1 will be sent to localhost:57638
-DEBUG: Subplan 39_1 will be sent to localhost:57637
-DEBUG: Subplan 39_2 will be sent to localhost:57638
+DEBUG: Subplan 38_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 38_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 39_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 39_2 will be sent to localhost:xxxxx
 count
 -------
 0
@@ -346,9 +346,9 @@ DEBUG: generating subplan 43_1 for CTE some_values_1: SELECT key, random() AS r
 DEBUG: generating subplan 43_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('43_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)
 DEBUG: Plan 43 query after replacing subqueries and CTEs: SELECT DISTINCT key FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('43_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2
 DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) top_cte JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 2)
-DEBUG: Subplan 42_1 will be sent to localhost:57638
-DEBUG: Subplan 43_1 will be sent to localhost:57637
-DEBUG: Subplan 43_2 will be sent to localhost:57637
+DEBUG: Subplan 42_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 43_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 43_2 will be sent to localhost:xxxxx
 count
 -------
 0
@@ -367,12 +367,12 @@ DEBUG: generating subplan 46_1 for CTE some_values_1: SELECT key, random() AS r
 DEBUG: generating subplan 46_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('46_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)
 DEBUG: generating subplan 46_3 for CTE some_values_3: SELECT some_values_2.key FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('46_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('46_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key))
 DEBUG: Plan 46 query after replacing subqueries and CTEs: SELECT some_values_3.key, ref_table.key, ref_table.value FROM ((SELECT intermediate_result.key FROM read_intermediate_result('46_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) some_values_3 JOIN intermediate_result_pruning.ref_table ON (true))
-DEBUG: Subplan 46_1 will be sent to localhost:57637
-DEBUG: Subplan 46_1 will be sent to localhost:57638
-DEBUG: Subplan 46_2 will be sent to localhost:57637
-DEBUG: Subplan 46_2 will be sent to localhost:57638
-DEBUG: Subplan 46_3 will be sent to localhost:57637
-DEBUG: Subplan 46_3 will be sent to localhost:57638
+DEBUG: Subplan 46_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 46_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 46_2 will be sent to localhost:xxxxx
+DEBUG: Subplan 46_2 will be sent to localhost:xxxxx
+DEBUG: Subplan 46_3 will be sent to localhost:xxxxx
+DEBUG: Subplan 46_3 will be sent to localhost:xxxxx
 key | key | value
 -----+-----+-------
 (0 rows)
@@ -387,8 +387,8 @@ SELECT count(*) FROM some_values_2 JOIN some_values_1 USING (key);
 DEBUG: generating subplan 50_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
 DEBUG: generating subplan 50_2 for CTE some_values_2: SELECT key, random() AS random FROM intermediate_result_pruning.table_2 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
 DEBUG: Plan 50 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('50_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('50_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key))
-DEBUG: Subplan 50_1 will be sent to localhost:57638
-DEBUG: Subplan 50_2 will be sent to localhost:57638
+DEBUG: Subplan 50_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 50_2 will be sent to localhost:xxxxx
 count
 -------
 2
@@ -404,8 +404,8 @@ SELECT count(*) FROM some_values_2 JOIN some_values_1 USING (key) WHERE false;
 DEBUG: generating subplan 53_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
 DEBUG: generating subplan 53_2 for CTE some_values_2: SELECT key, random() AS random FROM intermediate_result_pruning.table_2 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
 DEBUG: Plan 53 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('53_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('53_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE false
-DEBUG: Subplan 53_1 will be sent to localhost:57637
-DEBUG: Subplan 53_2 will be sent to localhost:57637
+DEBUG: Subplan 53_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 53_2 will be sent to localhost:xxxxx
 count
 -------
 0
@@ -426,8 +426,8 @@ FROM
 DEBUG: generating subplan 56_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
 DEBUG: generating subplan 56_2 for CTE some_values_3: SELECT key, random() AS random FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('56_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1
 DEBUG: Plan 56 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('56_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_3
-DEBUG: Subplan 56_1 will be sent to localhost:57638
-DEBUG: Subplan 56_2 will be sent to localhost:57637
+DEBUG: Subplan 56_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 56_2 will be sent to localhost:xxxxx
 count
 -------
 2
@@ -479,15 +479,15 @@ DEBUG: generating subplan 59_4 for subquery SELECT avg((table_2.value)::integer
 DEBUG: generating subplan 59_5 for subquery SELECT min(table_1.value) AS min FROM (SELECT intermediate_result.avg_ev_type FROM read_intermediate_result('59_4'::text, 'binary'::citus_copy_format) intermediate_result(avg_ev_type numeric)) level_5, intermediate_result_pruning.table_1 WHERE ((level_5.avg_ev_type OPERATOR(pg_catalog.=) (table_1.key)::numeric) AND (table_1.key OPERATOR(pg_catalog.>) 111)) GROUP BY level_5.avg_ev_type
 DEBUG: generating subplan 59_6 for subquery SELECT avg((level_6.min)::integer) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('59_5'::text, 'binary'::citus_copy_format) intermediate_result(min text)) level_6, intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) (level_6.min)::integer) GROUP BY table_1.value
 DEBUG: Plan 59 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('59_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar
-DEBUG: Subplan 59_1 will be sent to localhost:57638
-DEBUG: Subplan 59_2 will be sent to localhost:57637
-DEBUG: Subplan 59_3 will be sent to localhost:57637
-DEBUG: Subplan 59_3 will be sent to localhost:57638
-DEBUG: Subplan 59_4 will be sent to localhost:57637
-DEBUG: Subplan 59_4 will be sent to localhost:57638
-DEBUG: Subplan 59_5 will be sent to localhost:57637
-DEBUG: Subplan 59_5 will be sent to localhost:57638
-DEBUG: Subplan 59_6 will be sent to localhost:57637
+DEBUG: Subplan 59_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 59_2 will be sent to localhost:xxxxx
+DEBUG: Subplan 59_3 will be sent to localhost:xxxxx
+DEBUG: Subplan 59_3 will be sent to localhost:xxxxx
+DEBUG: Subplan 59_4 will be sent to localhost:xxxxx
+DEBUG: Subplan 59_4 will be sent to localhost:xxxxx
+DEBUG: Subplan 59_5 will be sent to localhost:xxxxx
+DEBUG: Subplan 59_5 will be sent to localhost:xxxxx
+DEBUG: Subplan 59_6 will be sent to localhost:xxxxx
 count
 -------
 0
@@ -538,12 +538,12 @@ DEBUG: generating subplan 66_4 for subquery SELECT avg((table_2.value)::integer
 DEBUG: generating subplan 66_5 for subquery SELECT min(table_1.value) AS min FROM (SELECT intermediate_result.avg_ev_type FROM read_intermediate_result('66_4'::text, 'binary'::citus_copy_format) intermediate_result(avg_ev_type numeric)) level_5, intermediate_result_pruning.table_1 WHERE ((level_5.avg_ev_type OPERATOR(pg_catalog.=) (table_1.key)::numeric) AND (table_1.key OPERATOR(pg_catalog.=) 111)) GROUP BY level_5.avg_ev_type
 DEBUG: generating subplan 66_6 for subquery SELECT avg((level_6.min)::integer) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('66_5'::text, 'binary'::citus_copy_format) intermediate_result(min text)) level_6, intermediate_result_pruning.table_1 WHERE ((table_1.key OPERATOR(pg_catalog.=) (level_6.min)::integer) AND (table_1.key OPERATOR(pg_catalog.=) 4)) GROUP BY table_1.value
 DEBUG: Plan 66 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('66_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar
-DEBUG: Subplan 66_1 will be sent to localhost:57638
-DEBUG: Subplan 66_2 will be sent to localhost:57637
-DEBUG: Subplan 66_3 will be sent to localhost:57637
-DEBUG: Subplan 66_4 will be sent to localhost:57638
-DEBUG: Subplan 66_5 will be sent to localhost:57638
-DEBUG: Subplan 66_6 will be sent to localhost:57637
+DEBUG: Subplan 66_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 66_2 will be sent to localhost:xxxxx
+DEBUG: Subplan 66_3 will be sent to localhost:xxxxx
+DEBUG: Subplan 66_4 will be sent to localhost:xxxxx
+DEBUG: Subplan 66_5 will be sent to localhost:xxxxx
+DEBUG: Subplan 66_6 will be sent to localhost:xxxxx
 count
 -------
 0
@@ -557,8 +557,8 @@ INTERSECT
 DEBUG: generating subplan 73_1 for subquery SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 1)
 DEBUG: generating subplan 73_2 for subquery SELECT key FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.=) 2)
 DEBUG: Plan 73 query after replacing subqueries and CTEs: SELECT intermediate_result.key FROM read_intermediate_result('73_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer) INTERSECT SELECT intermediate_result.key FROM read_intermediate_result('73_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer)
-DEBUG: Subplan 73_1 will be sent to localhost:57638
-DEBUG: Subplan 73_2 will be sent to localhost:57638
+DEBUG: Subplan 73_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 73_2 will be sent to localhost:xxxxx
 key
 -----
 (0 rows)
@@ -585,10 +585,10 @@ DEBUG: generating subplan 77_2 for subquery SELECT key FROM intermediate_result
 DEBUG: Plan 77 query after replacing subqueries and CTEs: SELECT intermediate_result.key FROM read_intermediate_result('77_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer) INTERSECT SELECT intermediate_result.key FROM read_intermediate_result('77_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer)
 DEBUG: generating subplan 76_2 for CTE cte_2: SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 3) INTERSECT SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 4)
 DEBUG: Plan 76 query after replacing subqueries and CTEs: SELECT cte_1.key FROM (SELECT intermediate_result.key FROM read_intermediate_result('76_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_1 UNION SELECT cte_2.key FROM (SELECT intermediate_result.key FROM read_intermediate_result('76_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_2
-DEBUG: Subplan 76_1 will be sent to localhost:57638
-DEBUG: Subplan 77_1 will be sent to localhost:57637
-DEBUG: Subplan 77_2 will be sent to localhost:57637
-DEBUG: Subplan 76_2 will be sent to localhost:57638
+DEBUG: Subplan 76_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 77_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 77_2 will be sent to localhost:xxxxx
+DEBUG: Subplan 76_2 will be sent to localhost:xxxxx
 key
 -----
 (0 rows)
@@ -614,11 +614,11 @@ DEBUG: generating subplan 82_2 for subquery SELECT key FROM intermediate_result
 DEBUG: Plan 82 query after replacing subqueries and CTEs: SELECT intermediate_result.key FROM read_intermediate_result('82_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer) INTERSECT SELECT intermediate_result.key FROM read_intermediate_result('82_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer)
 DEBUG: generating subplan 81_2 for CTE cte_2: SELECT count(*) AS count FROM (intermediate_result_pruning.table_1 JOIN (SELECT intermediate_result.key FROM read_intermediate_result('81_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_1 USING (key))
 DEBUG: Plan 81 query after replacing subqueries and CTEs: SELECT count FROM (SELECT intermediate_result.count FROM read_intermediate_result('81_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) cte_2
-DEBUG: Subplan 81_1 will be sent to localhost:57637
-DEBUG: Subplan 81_1 will be sent to localhost:57638
-DEBUG: Subplan 82_1 will be sent to localhost:57637
-DEBUG: Subplan 82_2 will be sent to localhost:57637
-DEBUG: Subplan 81_2 will be sent to localhost:57638
+DEBUG: Subplan 81_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 81_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 82_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 82_2 will be sent to localhost:xxxxx
+DEBUG: Subplan 81_2 will be sent to localhost:xxxxx
 count
 -------
 0
@@ -636,8 +636,8 @@ WHERE
 foo.key != bar.key;
 DEBUG: generating subplan 86_1 for subquery SELECT key, random() AS random FROM intermediate_result_pruning.table_2
 DEBUG: Plan 86 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1) foo, (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('86_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) bar WHERE (foo.key OPERATOR(pg_catalog.<>) bar.key)
-DEBUG: Subplan 86_1 will be sent to localhost:57637
-DEBUG: Subplan 86_1 will be sent to localhost:57638
+DEBUG: Subplan 86_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 86_1 will be sent to localhost:xxxxx
 count
 -------
 14
@@ -654,7 +654,7 @@ WHERE
 foo.key != bar.key;
 DEBUG: generating subplan 88_1 for subquery SELECT key, random() AS random FROM intermediate_result_pruning.table_2
 DEBUG: Plan 88 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 1)) foo, (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('88_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) bar WHERE (foo.key OPERATOR(pg_catalog.<>) bar.key)
-DEBUG: Subplan 88_1 will be sent to localhost:57637
+DEBUG: Subplan 88_1 will be sent to localhost:xxxxx
 count
 -------
 4
@@ -678,10 +678,10 @@ DEBUG: generating subplan 90_2 for CTE raw_data: DELETE FROM intermediate_resul
 DEBUG: generating subplan 92_1 for subquery SELECT min(key) AS min FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('90_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) select_data WHERE (key OPERATOR(pg_catalog.>) 1)
 DEBUG: Plan 92 query after replacing subqueries and CTEs: DELETE FROM intermediate_result_pruning.table_2 WHERE (key OPERATOR(pg_catalog.>=) (SELECT intermediate_result.min FROM read_intermediate_result('92_1'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) RETURNING key, value
 DEBUG: Plan 90 query after replacing subqueries and CTEs: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('90_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) raw_data
-DEBUG: Subplan 90_1 will be sent to localhost:57637
-DEBUG: Subplan 90_2 will be sent to localhost:57638
-DEBUG: Subplan 92_1 will be sent to localhost:57637
-DEBUG: Subplan 92_1 will be sent to localhost:57638
+DEBUG: Subplan 90_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 90_2 will be sent to localhost:xxxxx
+DEBUG: Subplan 92_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 92_1 will be sent to localhost:xxxxx
 key | value
 -----+-------
 3 | 3
@@ -708,10 +708,10 @@ DEBUG: generating subplan 94_2 for CTE raw_data: DELETE FROM intermediate_resul
 DEBUG: generating subplan 96_1 for subquery SELECT min(key) AS min FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('94_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) select_data WHERE ((key)::double precision OPERATOR(pg_catalog.>) ((1)::double precision OPERATOR(pg_catalog.+) random()))
 DEBUG: Plan 96 query after replacing subqueries and CTEs: DELETE FROM intermediate_result_pruning.table_2 WHERE ((value)::integer OPERATOR(pg_catalog.>=) (SELECT intermediate_result.min FROM read_intermediate_result('96_1'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) RETURNING key, value
 DEBUG: Plan 94 query after replacing subqueries and CTEs: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('94_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) raw_data
-DEBUG: Subplan 94_1 will be sent to localhost:57637
-DEBUG: Subplan 94_2 will be sent to localhost:57638
-DEBUG: Subplan 96_1 will be sent to localhost:57637
-DEBUG: Subplan 96_1 will be sent to localhost:57638
+DEBUG: Subplan 94_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 94_2 will be sent to localhost:xxxxx
+DEBUG: Subplan 96_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 96_1 will be sent to localhost:xxxxx
 key | value
 -----+-------
 3 | 3
@@ -735,8 +735,8 @@ DEBUG: generating subplan 98_1 for CTE raw_data: DELETE FROM intermediate_resul
 DEBUG: generating subplan 99_1 for subquery SELECT min(key) AS min FROM intermediate_result_pruning.table_1 WHERE ((key)::double precision OPERATOR(pg_catalog.>) random())
 DEBUG: Plan 99 query after replacing subqueries and CTEs: DELETE FROM intermediate_result_pruning.table_2 WHERE (((value)::integer OPERATOR(pg_catalog.>=) (SELECT intermediate_result.min FROM read_intermediate_result('99_1'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) AND (key OPERATOR(pg_catalog.=) 6)) RETURNING key, value
 DEBUG: Plan 98 query after replacing subqueries and CTEs: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('98_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) raw_data
-DEBUG: Subplan 98_1 will be sent to localhost:57637
-DEBUG: Subplan 99_1 will be sent to localhost:57637
+DEBUG: Subplan 98_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 99_1 will be sent to localhost:xxxxx
 key | value
 -----+-------
 6 | 6
@@ -758,7 +758,7 @@ DEBUG: volatile functions are not allowed in distributed INSERT ... SELECT quer
 DEBUG: Collecting INSERT ... SELECT results on coordinator
 DEBUG: generating subplan 104_1 for subquery SELECT value FROM intermediate_result_pruning.table_1 WHERE (random() OPERATOR(pg_catalog.>) (1)::double precision)
 DEBUG: Plan 104 query after replacing subqueries and CTEs: SELECT key, value FROM intermediate_result_pruning.table_2 WHERE ((value OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value FROM read_intermediate_result('104_1'::text, 'binary'::citus_copy_format) intermediate_result(value text))) AND (key OPERATOR(pg_catalog.=) 1))
-DEBUG: Subplan 104_1 will be sent to localhost:57637
+DEBUG: Subplan 104_1 will be sent to localhost:xxxxx
 -- a similar query, with more complex subquery
 INSERT INTO table_1
 SELECT * FROM table_2 where key = 1 AND
@@ -787,11 +787,11 @@ DEBUG: Plan 108 query after replacing subqueries and CTEs: SELECT intermediate_
 DEBUG: generating subplan 107_2 for CTE cte_2: SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 3) INTERSECT SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 4)
 DEBUG: generating subplan 107_3 for subquery SELECT cte_1.key FROM (SELECT intermediate_result.key FROM read_intermediate_result('107_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_1 UNION SELECT cte_2.key FROM (SELECT intermediate_result.key FROM read_intermediate_result('107_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_2
 DEBUG: Plan 107 query after replacing subqueries and CTEs: SELECT key, value FROM intermediate_result_pruning.table_2 WHERE ((key OPERATOR(pg_catalog.=) 1) AND ((value)::integer OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.key FROM read_intermediate_result('107_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer))))
-DEBUG: Subplan 107_1 will be sent to localhost:57637
-DEBUG: Subplan 108_1 will be sent to localhost:57638
-DEBUG: Subplan 108_2 will be sent to localhost:57638
-DEBUG: Subplan 107_2 will be sent to localhost:57637
-DEBUG: Subplan 107_3 will be sent to localhost:57637
+DEBUG: Subplan 107_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 108_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 108_2 will be sent to localhost:xxxxx
+DEBUG: Subplan 107_2 will be sent to localhost:xxxxx
+DEBUG: Subplan 107_3 will be sent to localhost:xxxxx
 -- same query, cte is on the FROM clause
 -- and this time the final query (and top-level intermediate result)
 -- hits all the shards because table_2.key != 1
@@ -824,12 +824,12 @@ DEBUG: Plan 115 query after replacing subqueries and CTEs: SELECT intermediate_
 DEBUG: generating subplan 114_2 for CTE cte_2: SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 3) INTERSECT SELECT table_1.key FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 4)
 DEBUG: generating subplan 114_3 for subquery SELECT cte_1.key FROM (SELECT intermediate_result.key FROM read_intermediate_result('114_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_1 UNION SELECT cte_2.key FROM (SELECT intermediate_result.key FROM read_intermediate_result('114_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_2
 DEBUG: Plan 114 query after replacing subqueries and CTEs: SELECT table_2.key, table_2.value FROM intermediate_result_pruning.table_2, (SELECT intermediate_result.key FROM read_intermediate_result('114_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) foo WHERE ((table_2.key OPERATOR(pg_catalog.<>) 1) AND (foo.key OPERATOR(pg_catalog.=) (table_2.value)::integer))
-DEBUG: Subplan 114_1 will be sent to localhost:57637
-DEBUG: Subplan 115_1 will be sent to localhost:57638
-DEBUG: Subplan 115_2 will be sent to localhost:57638
-DEBUG: Subplan 114_2 will be sent to localhost:57637
-DEBUG: Subplan 114_3 will be sent to localhost:57637
-DEBUG: Subplan 114_3 will be sent to localhost:57638
+DEBUG: Subplan 114_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 115_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 115_2 will be sent to localhost:xxxxx
+DEBUG: Subplan 114_2 will be sent to localhost:xxxxx
+DEBUG: Subplan 114_3 will be sent to localhost:xxxxx
+DEBUG: Subplan 114_3 will be sent to localhost:xxxxx
 -- append partitioned/heap-type
 SET citus.replication_model TO statement;
 -- do not print out 'building index pg_toast_xxxxx_index' messages
@@ -887,7 +887,7 @@ WHERE
 data IN (SELECT data FROM range_partitioned);
 DEBUG: generating subplan 120_1 for subquery SELECT data FROM intermediate_result_pruning.range_partitioned
 DEBUG: Plan 120 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.=) 'A'::text) AND (data OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.data FROM read_intermediate_result('120_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer))))
-DEBUG: Subplan 120_1 will be sent to localhost:57637
+DEBUG: Subplan 120_1 will be sent to localhost:xxxxx
 count
 -------
 0
@@ -903,8 +903,8 @@ WHERE
 data IN (SELECT data FROM range_partitioned);
 DEBUG: generating subplan 122_1 for subquery SELECT data FROM intermediate_result_pruning.range_partitioned
 DEBUG: Plan 122 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.>=) 'A'::text) AND (range_column OPERATOR(pg_catalog.<=) 'K'::text) AND (data OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.data FROM read_intermediate_result('122_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer))))
-DEBUG: Subplan 122_1 will be sent to localhost:57637
-DEBUG: Subplan 122_1 will be sent to localhost:57638
+DEBUG: Subplan 122_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 122_1 will be sent to localhost:xxxxx
 count
 -------
 0
@@ -923,8 +923,8 @@ WHERE
 range_partitioned.data IN (SELECT data FROM some_data);
 DEBUG: generating subplan 124_1 for CTE some_data: SELECT data FROM intermediate_result_pruning.range_partitioned
 DEBUG: Plan 124 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.=) ANY (ARRAY['A'::text, 'E'::text])) AND (data OPERATOR(pg_catalog.=) ANY (SELECT some_data.data FROM (SELECT intermediate_result.data FROM read_intermediate_result('124_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer)) some_data)))
-DEBUG: Subplan 124_1 will be sent to localhost:57637
-DEBUG: Subplan 124_1 will be sent to localhost:57638
+DEBUG: Subplan 124_1 will be sent to localhost:xxxxx
+DEBUG: Subplan 124_1 will be sent to localhost:xxxxx
 count
 -------
 0
diff --git a/src/test/regress/expected/intermediate_results.out b/src/test/regress/expected/intermediate_results.out
index b97cac425..9d981808d 100644
--- a/src/test/regress/expected/intermediate_results.out
+++ b/src/test/regress/expected/intermediate_results.out
@@ -486,7 +486,7 @@ ROLLBACK TO SAVEPOINT s1;
 -- fetch from worker 2 should fail
 SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'localhost', :worker_2_port);
 ERROR: could not open file "base/pgsql_job_cache/10_0_200/squares_1.data": No such file or directory
-CONTEXT: while executing command on localhost:57638
+CONTEXT: while executing command on localhost:xxxxx
 ROLLBACK TO SAVEPOINT s1;
 -- still, results aren't available on coordinator yet
 SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'binary') AS res (x int, x2 int);
diff --git a/src/test/regress/expected/isolation_add_remove_node.out b/src/test/regress/expected/isolation_add_remove_node.out
index e6d03b1a4..0cdf114bb 100644
--- a/src/test/regress/expected/isolation_add_remove_node.out
+++ b/src/test/regress/expected/isolation_add_remove_node.out
@@ -234,7 +234,7 @@ step s1-commit:
 COMMIT;
 
 step s2-remove-node-1: <... completed>
-error in steps s1-commit s2-remove-node-1: ERROR: node at "localhost:57637" does not exist
+error in steps s1-commit s2-remove-node-1: ERROR: node at "localhost:xxxxx" does not exist
 step s1-show-nodes:
 SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
 
diff --git a/src/test/regress/expected/isolation_shouldhaveshards.out b/src/test/regress/expected/isolation_shouldhaveshards.out
index faa82b86a..5168a5949 100644
--- a/src/test/regress/expected/isolation_shouldhaveshards.out
+++ b/src/test/regress/expected/isolation_shouldhaveshards.out
@@ -159,7 +159,7 @@ step s2-commit:
 COMMIT;
 
 step s1-noshards: <... completed>
-error in steps s2-commit s1-noshards: ERROR: node at "localhost:57637" does not exist
+error in steps s2-commit s1-noshards: ERROR: node at "localhost:xxxxx" does not exist
 step s1-commit:
 COMMIT;
 
diff --git a/src/test/regress/expected/multi_703_upgrade.out b/src/test/regress/expected/multi_703_upgrade.out
index 8c6204b72..282458ad5 100644
--- a/src/test/regress/expected/multi_703_upgrade.out
+++ b/src/test/regress/expected/multi_703_upgrade.out
@@ -8,7 +8,7 @@ INSERT INTO pg_dist_shard_placement
 (1, 1, 1, 0, 'localhost', :worker_1_port);
 -- if there are no worker nodes which match the shards this should fail
 ALTER EXTENSION citus UPDATE TO '7.0-3';
-ERROR: There is no node at "localhost:57637"
+ERROR: There is no node at "localhost:xxxxx"
 CONTEXT: PL/pgSQL function citus.find_groupid_for_node(text,integer) line 6 at RAISE
 -- if you add a matching worker the upgrade should succeed
 INSERT INTO pg_dist_node (nodename, nodeport, groupid)
diff --git a/src/test/regress/expected/multi_alter_table_add_constraints.out b/src/test/regress/expected/multi_alter_table_add_constraints.out
index 3458427a7..0b307903e 100644
--- a/src/test/regress/expected/multi_alter_table_add_constraints.out
+++ b/src/test/regress/expected/multi_alter_table_add_constraints.out
@@ -31,14 +31,14 @@ INSERT INTO products VALUES(1, 'product_1', 1);
 INSERT INTO products VALUES(1, 'product_1', 1);
 ERROR: duplicate key value violates unique constraint "p_key_1450001"
 DETAIL: Key (product_no)=(1) already exists.
-CONTEXT: while executing command on localhost:57638
+CONTEXT: while executing command on localhost:xxxxx
 ALTER TABLE products DROP CONSTRAINT p_key;
 INSERT INTO products VALUES(1, 'product_1', 1);
 -- Can not create constraint since it conflicts with the existing data
 ALTER TABLE products ADD CONSTRAINT p_key PRIMARY KEY(product_no);
 ERROR: could not create unique index "p_key_1450001"
 DETAIL: Key (product_no)=(1) is duplicated.
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 DROP TABLE products;
 -- Check "PRIMARY KEY CONSTRAINT" with reference table
 CREATE TABLE products_ref (
@@ -62,7 +62,7 @@ INSERT INTO products_ref VALUES(1, 'product_1', 1);
 INSERT INTO products_ref VALUES(1, 'product_1', 1);
 ERROR: duplicate key value violates unique constraint "p_key_1450032"
 DETAIL: Key (product_no)=(1) already exists.
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 DROP TABLE products_ref;
 -- Check "PRIMARY KEY CONSTRAINT" on append table
 CREATE TABLE products_append (
@@ -114,7 +114,7 @@ INSERT INTO unique_test_table VALUES(1, 'Ahmet');
 INSERT INTO unique_test_table VALUES(1, 'Mehmet');
 ERROR: duplicate key value violates unique constraint "unn_id_1450035"
 DETAIL: Key (id)=(1) already exists.
-CONTEXT: while executing command on localhost:57638
+CONTEXT: while executing command on localhost:xxxxx
 ALTER TABLE unique_test_table DROP CONSTRAINT unn_id;
 -- Insert row which will conflict with the next unique constraint command
 INSERT INTO unique_test_table VALUES(1, 'Mehmet');
@@ -122,7 +122,7 @@ INSERT INTO unique_test_table VALUES(1, 'Mehmet');
 ALTER TABLE unique_test_table ADD CONSTRAINT unn_id UNIQUE(id);
 ERROR: could not create unique index "unn_id_1450035"
 DETAIL: Key (id)=(1) is duplicated.
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 -- Can create unique constraint over multiple columns which must include
 -- distribution column
 ALTER TABLE unique_test_table ADD CONSTRAINT unn_id_name UNIQUE(id, name);
@@ -130,7 +130,7 @@ ALTER TABLE unique_test_table ADD CONSTRAINT unn_id_name UNIQUE(id, name);
 INSERT INTO unique_test_table VALUES(1, 'Mehmet');
 ERROR: duplicate key value violates unique constraint "unn_id_name_1450035"
 DETAIL: Key (id, name)=(1, Mehmet) already exists.
-CONTEXT: while executing command on localhost:57638
+CONTEXT: while executing command on localhost:xxxxx
 DROP TABLE unique_test_table;
 -- Check "UNIQUE CONSTRAINT" with reference table
 CREATE TABLE unique_test_table_ref(id int, name varchar(20));
@@ -148,7 +148,7 @@ INSERT INTO unique_test_table_ref VALUES(1, 'Ahmet');
 INSERT INTO unique_test_table_ref VALUES(1, 'Mehmet');
 ERROR: duplicate key value violates unique constraint "unn_id_1450066"
 DETAIL: Key (id)=(1) already exists.
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 -- We can add unique constraint with multiple columns
 ALTER TABLE unique_test_table_ref DROP CONSTRAINT unn_id;
 ALTER TABLE unique_test_table_ref ADD CONSTRAINT unn_id_name UNIQUE(id,name);
@@ -202,12 +202,12 @@ ALTER TABLE products ADD CONSTRAINT p_multi_check CHECK(price > discounted_price
 INSERT INTO products VALUES(1, 'product_1', -1, -2);
 ERROR: new row for relation "products_1450069" violates check constraint "p_check_1450069"
 DETAIL: Failing row contains (1, product_1, -1, -2).
-CONTEXT: while executing command on localhost:57638
+CONTEXT: while executing command on localhost:xxxxx
 INSERT INTO products VALUES(1, 'product_1', 5, 3);
 INSERT INTO products VALUES(1, 'product_1', 2, 3);
 ERROR: new row for relation "products_1450069" violates check constraint "p_multi_check_1450069"
 DETAIL: Failing row contains (1, product_1, 2, 3).
-CONTEXT: while executing command on localhost:57638
+CONTEXT: while executing command on localhost:xxxxx
 DROP TABLE products;
 -- Check "CHECK CONSTRAINT" with reference table
 CREATE TABLE products_ref (
@@ -230,12 +230,12 @@ ALTER TABLE products_ref ADD CONSTRAINT p_multi_check CHECK(price > discounted_p
 INSERT INTO products_ref VALUES(1, 'product_1', -1, -2);
 ERROR: new row for relation "products_ref_1450100" violates check constraint "p_check_1450100"
 DETAIL: Failing row contains (1, product_1, -1, -2).
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 INSERT INTO products_ref VALUES(1, 'product_1', 5, 3);
 INSERT INTO products_ref VALUES(1, 'product_1', 2, 3);
 ERROR: new row for relation "products_ref_1450100" violates check constraint "p_multi_check_1450100"
 DETAIL: Failing row contains (1, product_1, 2, 3).
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 DROP TABLE products_ref;
 -- Check "CHECK CONSTRAINT" with append table
 CREATE TABLE products_append (
@@ -285,7 +285,7 @@ INSERT INTO products VALUES(2,'product_2', 5);
 INSERT INTO products VALUES(2,'product_2', 5);
 ERROR: conflicting key value violates exclusion constraint "exc_pno_name_1450126"
 DETAIL: Key (product_no, name)=(2, product_2) conflicts with existing key (product_no, name)=(2, product_2).
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 DROP TABLE products;
 -- Check "EXCLUSION CONSTRAINT" with reference table
 CREATE TABLE products_ref (
@@ -309,7 +309,7 @@ INSERT INTO products_ref VALUES(1,'product_2', 10);
 INSERT INTO products_ref VALUES(2,'product_2', 5);
 ERROR: conflicting key value violates exclusion constraint "exc_name_1450134"
 DETAIL: Key (name)=(product_2) conflicts with existing key (name)=(product_2).
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 DROP TABLE products_ref;
 -- Check "EXCLUSION CONSTRAINT" with append table
 CREATE TABLE products_append (
@@ -358,7 +358,7 @@ ALTER TABLE products ALTER COLUMN name SET NOT NULL;
 INSERT INTO products VALUES(1,NULL,5);
 ERROR: null value in column "name" violates not-null constraint
 DETAIL: Failing row contains (1, null, 5).
-CONTEXT: while executing command on localhost:57638
+CONTEXT: while executing command on localhost:xxxxx
 INSERT INTO products VALUES(NULL,'product_1', 5);
 ERROR: cannot perform an INSERT with NULL in the partition column
 DROP TABLE products;
@@ -379,7 +379,7 @@ ALTER TABLE products_ref ALTER COLUMN name SET NOT NULL;
 INSERT INTO products_ref VALUES(1,NULL,5);
 ERROR: null value in column "name" violates not-null constraint
 DETAIL: Failing row contains (1, null, 5).
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 INSERT INTO products_ref VALUES(NULL,'product_1', 5);
 DROP TABLE products_ref;
 -- Check "NOT NULL" with append table
diff --git a/src/test/regress/expected/multi_citus_tools.out b/src/test/regress/expected/multi_citus_tools.out
index 53d4b986a..4c6825840 100644
--- a/src/test/regress/expected/multi_citus_tools.out
+++ b/src/test/regress/expected/multi_citus_tools.out
@@ -16,7 +16,7 @@ SELECT * FROM master_run_on_worker(ARRAY['localhost']::text[], ARRAY['666']::int
 false);
 node_name | node_port | success | result
 -----------+-----------+---------+------------------------------------
- localhost | 666 | f | failed to connect to localhost:666
+ localhost | 666 | f | failed to connect to localhost:xxxxx
 (1 row)
 
 SELECT * FROM master_run_on_worker(ARRAY['localhost']::text[], ARRAY['666']::int[],
@@ -24,7 +24,7 @@ SELECT * FROM master_run_on_worker(ARRAY['localhost']::text[], ARRAY['666']::int
 true);
 node_name | node_port | success | result
 -----------+-----------+---------+------------------------------------
- localhost | 666 | f | failed to connect to localhost:666
+ localhost | 666 | f | failed to connect to localhost:xxxxx
 (1 row)
 
 RESET client_min_messages;
diff --git a/src/test/regress/expected/multi_cluster_management.out b/src/test/regress/expected/multi_cluster_management.out
index 972a0da28..552ac55f4 100644
--- a/src/test/regress/expected/multi_cluster_management.out
+++ b/src/test/regress/expected/multi_cluster_management.out
@@ -121,7 +121,7 @@ INSERT INTO test_reference_table VALUES (1, '1');
 -- try to disable a node with active placements see that node is removed
 -- observe that a notification is displayed
 SELECT master_disable_node('localhost', :worker_2_port);
-NOTICE: Node localhost:57638 has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 57638) to activate this node back.
+NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 57638) to activate this node back.
 master_disable_node
 ---------------------
 
@@ -654,7 +654,7 @@ SELECT master_add_secondary_node('localhost', 9994, primaryname => 'localhost',
 (1 row)
 
 SELECT master_add_secondary_node('localhost', 9993, 'localhost', 2000);
-ERROR: node at "localhost:2000" does not exist
+ERROR: node at "localhost:xxxxx" does not exist
 SELECT master_add_secondary_node('localhost', 9992, 'localhost', :worker_1_port, nodecluster => 'second-cluster');
 master_add_secondary_node
 ---------------------------
diff --git a/src/test/regress/expected/multi_create_table_constraints.out b/src/test/regress/expected/multi_create_table_constraints.out
index 90fa5b4a8..a7af3f7ba 100644
--- a/src/test/regress/expected/multi_create_table_constraints.out
+++ b/src/test/regress/expected/multi_create_table_constraints.out
@@ -102,7 +102,7 @@ INSERT INTO uq_two_columns (partition_col, other_col) VALUES (1,1);
 INSERT INTO uq_two_columns (partition_col, other_col) VALUES (1,1);
 ERROR: duplicate key value violates unique constraint "uq_two_columns_partition_col_other_col_key_365008"
 DETAIL: Key (partition_col, other_col)=(1, 1) already exists.
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 CREATE TABLE ex_on_part_col
 (
 partition_col integer,
@@ -119,7 +119,7 @@ INSERT INTO ex_on_part_col (partition_col, other_col) VALUES (1,1);
 INSERT INTO ex_on_part_col (partition_col, other_col) VALUES (1,2);
 ERROR: conflicting key value violates exclusion constraint "ex_on_part_col_partition_col_excl_365012"
 DETAIL: Key (partition_col)=(1) conflicts with existing key (partition_col)=(1).
-CONTEXT: while executing command on localhost:57638
+CONTEXT: while executing command on localhost:xxxxx
 CREATE TABLE ex_on_two_columns
 (
 partition_col integer,
@@ -136,7 +136,7 @@ INSERT INTO ex_on_two_columns (partition_col, other_col) VALUES (1,1);
 INSERT INTO ex_on_two_columns (partition_col, other_col) VALUES (1,1);
 ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_partition_col_other_col_excl_365016"
 DETAIL: Key (partition_col, other_col)=(1, 1) conflicts with existing key (partition_col, other_col)=(1, 1).
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 CREATE TABLE ex_on_two_columns_prt
 (
 partition_col integer,
@@ -155,7 +155,7 @@ INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,101);
 INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,101);
 ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_prt_partition_col_other_col_excl_365020"
 DETAIL: Key (partition_col, other_col)=(1, 101) conflicts with existing key (partition_col, other_col)=(1, 101).
-CONTEXT: while executing command on localhost:57638
+CONTEXT: while executing command on localhost:xxxxx
 CREATE TABLE ex_wrong_operator
 (
 partition_col tsrange,
@@ -181,7 +181,7 @@ INSERT INTO ex_overlaps (partition_col, other_col) VALUES ('[2016-01-01 00:00:00
 INSERT INTO ex_overlaps (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-15 00:00:00, 2016-02-01 00:00:00]');
 ERROR: conflicting key value violates exclusion constraint "ex_overlaps_other_col_partition_col_excl_365027"
 DETAIL: Key (other_col, partition_col)=(["2016-01-15 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]) conflicts with existing key (other_col, partition_col)=(["2016-01-01 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]).
-CONTEXT: while executing command on localhost:57638
+CONTEXT: while executing command on localhost:xxxxx
 -- now show that Citus can distribute unique and EXCLUDE constraints that
 -- include the partition column, for hash-partitioned tables.
 -- However, EXCLUDE constraints must include the partition column with
@@ -225,7 +225,7 @@ INSERT INTO uq_two_columns_named (partition_col, other_col) VALUES (1,1);
 INSERT INTO uq_two_columns_named (partition_col, other_col) VALUES (1,1);
 ERROR: duplicate key value violates unique constraint "uq_two_columns_named_uniq_365036"
 DETAIL: Key (partition_col, other_col)=(1, 1) already exists.
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 CREATE TABLE ex_on_part_col_named
 (
 partition_col integer,
@@ -242,7 +242,7 @@ INSERT INTO ex_on_part_col_named (partition_col, other_col) VALUES (1,1);
 INSERT INTO ex_on_part_col_named (partition_col, other_col) VALUES (1,2);
 ERROR: conflicting key value violates exclusion constraint "ex_on_part_col_named_exclude_365040"
 DETAIL: Key (partition_col)=(1) conflicts with existing key (partition_col)=(1).
-CONTEXT: while executing command on localhost:57638
+CONTEXT: while executing command on localhost:xxxxx
 CREATE TABLE ex_on_two_columns_named
 (
 partition_col integer,
@@ -259,7 +259,7 @@ INSERT INTO ex_on_two_columns_named (partition_col, other_col) VALUES (1,1);
 INSERT INTO ex_on_two_columns_named (partition_col, other_col) VALUES (1,1);
 ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_named_exclude_365044"
 DETAIL: Key (partition_col, other_col)=(1, 1) conflicts with existing key (partition_col, other_col)=(1, 1).
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 CREATE TABLE ex_multiple_excludes
 (
 partition_col integer,
@@ -278,11 +278,11 @@ INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VAL
 INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,1,2);
 ERROR: conflicting key value violates exclusion constraint "ex_multiple_excludes_excl1_365048"
 DETAIL: Key (partition_col, other_col)=(1, 1) conflicts with existing key (partition_col, other_col)=(1, 1).
-CONTEXT: while executing command on localhost:57638
+CONTEXT: while executing command on localhost:xxxxx
 INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,2,1);
 ERROR: conflicting key value violates exclusion constraint "ex_multiple_excludes_excl2_365048"
 DETAIL: Key (partition_col, other_other_col)=(1, 1) conflicts with existing key (partition_col, other_other_col)=(1, 1).
-CONTEXT: while executing command on localhost:57638
+CONTEXT: while executing command on localhost:xxxxx
 CREATE TABLE ex_wrong_operator_named
 (
 partition_col tsrange,
@@ -308,7 +308,7 @@ INSERT INTO ex_overlaps_named (partition_col, other_col) VALUES ('[2016-01-01 00
 INSERT INTO ex_overlaps_named (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-15 00:00:00, 2016-02-01 00:00:00]');
 ERROR: conflicting key value violates exclusion constraint "ex_overlaps_operator_named_exclude_365055"
 DETAIL: Key (other_col, partition_col)=(["2016-01-15 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]) conflicts with existing key (other_col, partition_col)=(["2016-01-01 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]).
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 -- now show that Citus allows unique constraints on range-partitioned tables.
 CREATE TABLE uq_range_tables
 (
diff --git a/src/test/regress/expected/multi_distribution_metadata.out b/src/test/regress/expected/multi_distribution_metadata.out
index ebf001275..4ed0920fb 100644
--- a/src/test/regress/expected/multi_distribution_metadata.out
+++ b/src/test/regress/expected/multi_distribution_metadata.out
@@ -109,21 +109,21 @@ ERROR: could not find valid entry for shard 540005
 SELECT load_shard_placement_array(540001, false);
 load_shard_placement_array
 -----------------------------------
- {localhost:57637,localhost:57638}
+ {localhost:xxxxx,localhost:xxxxx}
 (1 row)
 
 -- only one of which is finalized
 SELECT load_shard_placement_array(540001, true);
 load_shard_placement_array
 ----------------------------
- {localhost:57637}
+ {localhost:xxxxx}
 (1 row)
 
 -- should see error for non-existent shard
 SELECT load_shard_placement_array(540001, false);
 load_shard_placement_array
 -----------------------------------
- {localhost:57637,localhost:57638}
+ {localhost:xxxxx,localhost:xxxxx}
 (1 row)
 
 -- should see column id of 'name'
diff --git a/src/test/regress/expected/multi_foreign_key.out b/src/test/regress/expected/multi_foreign_key.out
index 37f717a43..d388bc902 100644
--- a/src/test/regress/expected/multi_foreign_key.out
+++ b/src/test/regress/expected/multi_foreign_key.out
@@ -135,7 +135,7 @@ SELECT create_distributed_table('referencing_table', 'ref_id', 'hash');
 INSERT INTO referencing_table VALUES(1, 1);
 ERROR: insert or update on table "referencing_table_1350129" violates foreign key constraint "referencing_table_ref_id_fkey_1350129"
 DETAIL: Key (ref_id)=(1) is not present in table "referenced_table_1350097".
-CONTEXT: while executing command on localhost:57638
+CONTEXT: while executing command on localhost:xxxxx
 -- test insert to referencing while there is corresponding value in referenced table
 INSERT INTO referenced_table VALUES(1, 1);
 INSERT INTO referencing_table VALUES(1, 1);
@@ -144,7 +144,7 @@ INSERT INTO referencing_table VALUES(1, 1);
 DELETE FROM referenced_table WHERE id = 1;
 ERROR: update or delete on table "referenced_table_1350097" violates foreign key constraint "referencing_table_ref_id_fkey_1350129" on table "referencing_table_1350129"
 DETAIL: Key (id)=(1) is still referenced from table "referencing_table_1350129".
-CONTEXT: while executing command on localhost:57638
+CONTEXT: while executing command on localhost:xxxxx
 -- test delete from referenced table while there is NO corresponding value in referencing table
 DELETE FROM referencing_table WHERE ref_id = 1;
 DELETE FROM referenced_table WHERE id = 1;
@@ -229,7 +229,7 @@ INSERT INTO referencing_table VALUES(1, 1);
 DELETE FROM referenced_table WHERE id = 1;
 ERROR: update or delete on table "referenced_table_1350225" violates foreign key constraint "referencing_table_ref_id_fkey_1350257" on table "referencing_table_1350257"
 DETAIL: Key (id)=(1) is still referenced from table "referencing_table_1350257".
-CONTEXT: while executing command on localhost:57638
+CONTEXT: while executing command on localhost:xxxxx
 BEGIN;
 DELETE FROM referenced_table WHERE id = 1;
 DELETE FROM referencing_table WHERE ref_id = 1;
@@ -267,7 +267,7 @@ BEGIN;
 DELETE FROM referenced_table WHERE id = 1;
 ERROR: update or delete on table "referenced_table_1350289" violates foreign key constraint "referencing_table_ref_id_fkey_1350321" on table "referencing_table_1350321"
 DETAIL: Key (id)=(1) is still referenced from table "referencing_table_1350321".
-CONTEXT: while executing command on localhost:57638
+CONTEXT: while executing command on localhost:xxxxx
 DELETE FROM referencing_table WHERE ref_id = 1;
 ERROR: current transaction is aborted, commands ignored until end of transaction block
 COMMIT;
@@ -305,7 +305,7 @@ INSERT INTO referencing_table VALUES(1, 1);
 UPDATE referenced_table SET test_column = 10 WHERE id = 1;
 ERROR: update or delete on table "referenced_table_1350353" violates foreign key constraint "referencing_table_ref_id_fkey_1350385" on table "referencing_table_1350385"
 DETAIL: Key (id, test_column)=(1, 1) is still referenced from table "referencing_table_1350385".
-CONTEXT: while executing command on localhost:57638
+CONTEXT: while executing command on localhost:xxxxx
 BEGIN;
 UPDATE referenced_table SET test_column = 10 WHERE id = 1;
 UPDATE referencing_table SET id = 10 WHERE ref_id = 1;
@@ -345,7 +345,7 @@ BEGIN;
 UPDATE referenced_table SET test_column = 20 WHERE id = 1;
 ERROR: update or delete on table "referenced_table_1350417" violates foreign key constraint "referencing_table_ref_id_fkey_1350449" on table "referencing_table_1350449"
 DETAIL: Key (id, test_column)=(1, 1) is still referenced from table "referencing_table_1350449".
-CONTEXT: while executing command on localhost:57638
+CONTEXT: while executing command on localhost:xxxxx
 UPDATE referencing_table SET id = 20 WHERE ref_id = 1;
 ERROR: current transaction is aborted, commands ignored until end of transaction block
 COMMIT;
@@ -406,7 +406,7 @@ SELECT create_distributed_table('referencing_table', 'ref_id', 'hash');
 INSERT INTO referencing_table VALUES(null, 2);
 ERROR: insert or update on table "referencing_table_1350600" violates foreign key constraint "referencing_table_ref_id_fkey_1350600"
 DETAIL: MATCH FULL does not allow mixing of null and nonnull key values.
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 SELECT * FROM referencing_table;
 id | ref_id
 ----+--------
@@ -523,7 +523,7 @@ INSERT INTO referencing_table VALUES(1, 1);
 ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id);
 ERROR: insert or update on table "referencing_table_1350628" violates foreign key constraint "test_constraint_1350628"
 DETAIL: Key (ref_id)=(1) is not present in table "referenced_table_1350624".
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 -- test foreign constraint with correct conditions
 DELETE FROM referencing_table WHERE ref_id = 1;
 ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id);
@@ -532,7 +532,7 @@ ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id)
 INSERT INTO referencing_table VALUES(1, 1);
 ERROR: insert or update on table "referencing_table_1350628" violates foreign key constraint "test_constraint_1350628"
 DETAIL: Key (ref_id)=(1) is not present in table "referenced_table_1350624".
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 -- test insert to referencing while there is corresponding value in referenced table
 INSERT INTO referenced_table VALUES(1, 1);
 INSERT INTO referencing_table VALUES(1, 1);
@@ -541,7 +541,7 @@ INSERT INTO referencing_table VALUES(1, 1);
 DELETE FROM referenced_table WHERE id = 1;
 ERROR: update or delete on table "referenced_table_1350624" violates foreign key constraint "test_constraint_1350628" on table "referencing_table_1350628"
 DETAIL: Key (id)=(1) is still referenced from table "referencing_table_1350628".
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 -- test delete from referenced table while there is NO corresponding value in referencing table
 DELETE FROM referencing_table WHERE ref_id = 1;
 DELETE FROM referenced_table WHERE id = 1;
@@ -571,7 +571,7 @@ INSERT INTO referencing_table VALUES(1, 1);
 DELETE FROM referenced_table WHERE id = 1;
 ERROR: update or delete on table "referenced_table_1350624" violates foreign key constraint "test_constraint_1350628" on table "referencing_table_1350628"
 DETAIL: Key (id)=(1) is still referenced from table "referencing_table_1350628".
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 BEGIN;
 DELETE FROM referenced_table WHERE id = 1;
 DELETE FROM referencing_table WHERE ref_id = 1;
@@ -595,7 +595,7 @@ BEGIN;
 DELETE FROM referenced_table WHERE id = 1;
 ERROR: update or delete on table "referenced_table_1350624" violates foreign key constraint "test_constraint_1350628" on table "referencing_table_1350628"
 DETAIL: Key (id)=(1) is still referenced from table "referencing_table_1350628".
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 DELETE FROM referencing_table WHERE ref_id = 1;
 ERROR: current transaction is aborted, commands ignored until end of transaction block
 COMMIT;
@@ -617,7 +617,7 @@ ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id,
 UPDATE referenced_table SET test_column = 10 WHERE id = 1;
 ERROR: update or delete on table "referenced_table_1350624" violates foreign key constraint "test_constraint_1350628" on table "referencing_table_1350628"
 DETAIL: Key (id, test_column)=(1, 1) is still referenced from table "referencing_table_1350628".
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 BEGIN;
 UPDATE referenced_table SET test_column = 10 WHERE id = 1;
 UPDATE referencing_table SET id = 10 WHERE ref_id = 1;
@@ -641,7 +641,7 @@ BEGIN;
 UPDATE referenced_table SET test_column = 20 WHERE id = 1;
 ERROR: update or delete on table "referenced_table_1350624" violates foreign key constraint "test_constraint_1350628" on table "referencing_table_1350628"
 DETAIL: Key (id, test_column)=(1, 10) is still referenced from table "referencing_table_1350628".
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 UPDATE referencing_table SET id = 20 WHERE ref_id = 1;
 ERROR: current transaction is aborted, commands ignored until end of transaction block
 COMMIT;
@@ -675,7 +675,7 @@ ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id,
 INSERT INTO referencing_table VALUES(null, 2);
 ERROR: insert or update on table "referencing_table_1350631" violates foreign key constraint "test_constraint_1350631"
 DETAIL: MATCH FULL does not allow mixing of null and nonnull key values.
-CONTEXT: while executing command on localhost:57638
+CONTEXT: while executing command on localhost:xxxxx
 SELECT * FROM referencing_table;
 id | ref_id
 ----+--------
@@ -707,7 +707,7 @@ ALTER TABLE cyclic_reference_table2 ADD CONSTRAINT cyclic_constraint2 FOREIGN KE
 INSERT INTO cyclic_reference_table1 VALUES(1, 1);
 ERROR: insert or update on table "cyclic_reference_table1_1350632" violates foreign key constraint "cyclic_constraint1_1350632"
 DETAIL: Key (id, table2_id)=(1, 1) is not present in table "cyclic_reference_table2_1350636".
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 -- proper insertion to table with cyclic dependency
 BEGIN;
 INSERT INTO cyclic_reference_table1 VALUES(1, 1);
@@ -789,7 +789,7 @@ INSERT INTO self_referencing_table1 VALUES(1, 1, 1);
 INSERT INTO self_referencing_table1 VALUES(1, 2, 3);
 ERROR: insert or update on table "self_referencing_table1_1350640" violates foreign key constraint "self_referencing_table1_id_fkey_1350640"
 DETAIL: Key (id, other_column_ref)=(1, 3) is not present in table "self_referencing_table1_1350640".
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 -- verify that rows are actually inserted
 SELECT * FROM self_referencing_table1;
 id | other_column | other_column_ref
@@ -814,7 +814,7 @@ INSERT INTO self_referencing_table2 VALUES(1, 1, 1);
 INSERT INTO self_referencing_table2 VALUES(1, 2, 3);
 ERROR: insert or update on table "self_referencing_table2_1350644" violates foreign key constraint "self_referencing_fk_constraint_1350644"
 DETAIL: Key (id, other_column_ref)=(1, 3) is not present in table "self_referencing_table2_1350644".
-CONTEXT: while executing command on localhost:57637
+CONTEXT: while executing command on localhost:xxxxx
 -- verify that rows are actually inserted
 SELECT * FROM self_referencing_table2;
 id | other_column | other_column_ref
diff --git a/src/test/regress/expected/multi_insert_select_conflict.out b/src/test/regress/expected/multi_insert_select_conflict.out
index 9c174a6fd..252d0bf79 100644
--- a/src/test/regress/expected/multi_insert_select_conflict.out
+++ b/src/test/regress/expected/multi_insert_select_conflict.out
@@ -308,7 +308,7 @@ NOTICE: truncate cascades to table "target_table"
 FROM source_table_1 ON CONFLICT (col_1) DO UPDATE SET col_2 = 55 RETURNING *;
 ERROR: insert or update on table "target_table_1900000" violates foreign key constraint "fkey_1900000"
 DETAIL: Key (col_1)=(1) is not present in table "test_ref_table_1900012".
-CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx ROLLBACK; BEGIN; DELETE FROM test_ref_table WHERE key > 10; diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out index 72cc80e1d..59b760c5c 100644 --- a/src/test/regress/expected/multi_metadata_sync.out +++ b/src/test/regress/expected/multi_metadata_sync.out @@ -1402,7 +1402,7 @@ WHERE logicalrelid='mx_ref'::regclass; \c - - - :master_port SELECT master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "mx_ref" to the node localhost:57638 +NOTICE: Replicating reference table "mx_ref" to the node localhost:xxxxx master_add_node ----------------- 7 @@ -1528,30 +1528,30 @@ SELECT hasmetadata, metadatasynced FROM pg_dist_node WHERE nodeport=:worker_1_po CREATE TABLE dist_table_2(a int); SELECT create_distributed_table('dist_table_2', 'a'); -ERROR: localhost:57637 is a metadata node, but is out of sync +ERROR: localhost:xxxxx is a metadata node, but is out of sync HINT: If the node is up, wait until metadata gets synced to it and try again. SELECT create_reference_table('dist_table_2'); -ERROR: localhost:57637 is a metadata node, but is out of sync +ERROR: localhost:xxxxx is a metadata node, but is out of sync HINT: If the node is up, wait until metadata gets synced to it and try again. ALTER TABLE dist_table_1 ADD COLUMN b int; -ERROR: localhost:57637 is a metadata node, but is out of sync +ERROR: localhost:xxxxx is a metadata node, but is out of sync HINT: If the node is up, wait until metadata gets synced to it and try again. SELECT master_add_node('localhost', :master_port, groupid => 0); -ERROR: localhost:57637 is a metadata node, but is out of sync +ERROR: localhost:xxxxx is a metadata node, but is out of sync HINT: If the node is up, wait until metadata gets synced to it and try again. SELECT master_disable_node('localhost', :worker_1_port); -ERROR: Disabling localhost:57637 failed -DETAIL: localhost:57637 is a metadata node, but is out of sync +ERROR: Disabling localhost:xxxxx failed +DETAIL: localhost:xxxxx is a metadata node, but is out of sync HINT: If you are using MX, try stop_metadata_sync_to_node(hostname, port) for nodes that are down before disabling them. SELECT master_disable_node('localhost', :worker_2_port); -ERROR: Disabling localhost:57638 failed -DETAIL: localhost:57637 is a metadata node, but is out of sync +ERROR: Disabling localhost:xxxxx failed +DETAIL: localhost:xxxxx is a metadata node, but is out of sync HINT: If you are using MX, try stop_metadata_sync_to_node(hostname, port) for nodes that are down before disabling them. SELECT master_remove_node('localhost', :worker_1_port); -ERROR: localhost:57637 is a metadata node, but is out of sync +ERROR: localhost:xxxxx is a metadata node, but is out of sync HINT: If the node is up, wait until metadata gets synced to it and try again. SELECT master_remove_node('localhost', :worker_2_port); -ERROR: localhost:57637 is a metadata node, but is out of sync +ERROR: localhost:xxxxx is a metadata node, but is out of sync HINT: If the node is up, wait until metadata gets synced to it and try again. 
-- master_update_node should succeed SELECT nodeid AS worker_2_nodeid FROM pg_dist_node WHERE nodeport=:worker_2_port \gset diff --git a/src/test/regress/expected/multi_modifying_xacts.out b/src/test/regress/expected/multi_modifying_xacts.out index cb6f1d756..3fe00097f 100644 --- a/src/test/regress/expected/multi_modifying_xacts.out +++ b/src/test/regress/expected/multi_modifying_xacts.out @@ -153,7 +153,7 @@ INSERT INTO labs VALUES (6, 'Bell Labs'); INSERT INTO researchers VALUES (9, 6, 'Leslie Lamport'); ERROR: duplicate key value violates unique constraint "avoid_name_confusion_idx_1200001" DETAIL: Key (lab_id, name)=(6, Leslie Lamport) already exists. -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx ABORT; -- SELECTs may occur after a modification: First check that selecting -- from the modified node works. @@ -377,9 +377,9 @@ DELETE FROM researchers WHERE lab_id = 6; \copy researchers FROM STDIN delimiter ',' COMMIT; WARNING: illegal value -WARNING: failed to commit transaction on localhost:57638 +WARNING: failed to commit transaction on localhost:xxxxx WARNING: illegal value -WARNING: failed to commit transaction on localhost:57637 +WARNING: failed to commit transaction on localhost:xxxxx WARNING: could not commit transaction for shard 1200001 on any active node ERROR: could not commit transaction on any active node \unset VERBOSITY @@ -485,7 +485,7 @@ INSERT INTO objects VALUES (1, 'apple'); INSERT INTO objects VALUES (1, 'orange'); ERROR: duplicate key value violates unique constraint "objects_pkey_1200003" DETAIL: Key (id)=(1) already exists. -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx COMMIT; -- data shouldn't have persisted... SELECT * FROM objects WHERE id = 1; @@ -620,7 +620,7 @@ INSERT INTO objects VALUES (2, 'BAD'); INSERT INTO labs VALUES (9, 'Umbrella Corporation'); COMMIT; WARNING: illegal value -WARNING: failed to commit transaction on localhost:57638 +WARNING: failed to commit transaction on localhost:xxxxx -- data should be persisted SELECT * FROM objects WHERE id = 2; id | name @@ -663,9 +663,9 @@ INSERT INTO labs VALUES (8, 'Aperture Science'); INSERT INTO labs VALUES (9, 'BAD'); COMMIT; WARNING: illegal value -WARNING: failed to commit transaction on localhost:57637 +WARNING: failed to commit transaction on localhost:xxxxx WARNING: illegal value -WARNING: failed to commit transaction on localhost:57638 +WARNING: failed to commit transaction on localhost:xxxxx WARNING: could not commit transaction for shard 1200002 on any active node WARNING: could not commit transaction for shard 1200003 on any active node ERROR: could not commit transaction on any active node @@ -704,7 +704,7 @@ INSERT INTO labs VALUES (8, 'Aperture Science'); INSERT INTO labs VALUES (9, 'BAD'); COMMIT; WARNING: illegal value -WARNING: failed to commit transaction on localhost:57637 +WARNING: failed to commit transaction on localhost:xxxxx WARNING: could not commit transaction for shard 1200002 on any active node \set VERBOSITY default -- data to objects should be persisted, but labs should not... 
@@ -1177,15 +1177,15 @@ ALTER USER test_user RENAME TO test_user_new; \c - test_user - :master_port -- should fail since the worker doesn't have test_user anymore INSERT INTO reference_failure_test VALUES (1, '1'); -ERROR: connection error: localhost:57637 +ERROR: connection error: localhost:xxxxx -- the same as the above, but wrapped within a transaction BEGIN; INSERT INTO reference_failure_test VALUES (1, '1'); -ERROR: connection error: localhost:57637 +ERROR: connection error: localhost:xxxxx COMMIT; BEGIN; COPY reference_failure_test FROM STDIN WITH (FORMAT 'csv'); -ERROR: connection error: localhost:57637 +ERROR: connection error: localhost:xxxxx COMMIT; -- show that no data go through the table and shard states are good SET client_min_messages to 'ERROR'; @@ -1210,8 +1210,8 @@ ORDER BY s.logicalrelid, sp.shardstate; BEGIN; COPY numbers_hash_failure_test FROM STDIN WITH (FORMAT 'csv'); -WARNING: connection error: localhost:57637 -WARNING: connection error: localhost:57637 +WARNING: connection error: localhost:xxxxx +WARNING: connection error: localhost:xxxxx -- some placements are invalid before abort SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) @@ -1232,8 +1232,8 @@ ORDER BY shardid, nodeport; ABORT; -- verify nothing is inserted SELECT count(*) FROM numbers_hash_failure_test; -WARNING: connection error: localhost:57637 -WARNING: connection error: localhost:57637 +WARNING: connection error: localhost:xxxxx +WARNING: connection error: localhost:xxxxx count ------- 0 @@ -1258,8 +1258,8 @@ ORDER BY shardid, nodeport; BEGIN; COPY numbers_hash_failure_test FROM STDIN WITH (FORMAT 'csv'); -WARNING: connection error: localhost:57637 -WARNING: connection error: localhost:57637 +WARNING: connection error: localhost:xxxxx +WARNING: connection error: localhost:xxxxx -- check shard states before commit SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) @@ -1297,8 +1297,8 @@ ORDER BY shardid, nodeport; -- verify data is inserted SELECT count(*) FROM numbers_hash_failure_test; -WARNING: connection error: localhost:57637 -WARNING: connection error: localhost:57637 +WARNING: connection error: localhost:xxxxx +WARNING: connection error: localhost:xxxxx count ------- 2 @@ -1310,7 +1310,7 @@ ALTER USER test_user RENAME TO test_user_new; \c - test_user - :master_port -- fails on all shard placements INSERT INTO numbers_hash_failure_test VALUES (2,2); -ERROR: connection error: localhost:57638 +ERROR: connection error: localhost:xxxxx -- connect back to the master with the proper user to continue the tests \c - :default_user - :master_port SET citus.next_shard_id TO 1200020; diff --git a/src/test/regress/expected/multi_multiuser.out b/src/test/regress/expected/multi_multiuser.out index 28d7f114b..7d01cc32f 100644 --- a/src/test/regress/expected/multi_multiuser.out +++ b/src/test/regress/expected/multi_multiuser.out @@ -421,7 +421,7 @@ INSERT INTO full_access_user_schema.t1 VALUES (1),(2),(3); -- not allowed to create a table SELECT create_distributed_table('full_access_user_schema.t1', 'id'); ERROR: permission denied for schema full_access_user_schema -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx RESET ROLE; SET ROLE usage_access; CREATE TYPE usage_access_type AS ENUM ('a', 'b'); @@ -667,14 +667,14 @@ RESET ROLE; -- super user should not be able to copy files created by a user
SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port); WARNING: could not open file "base/pgsql_job_cache/job_0042/task_000001/p_00001.10": No such file or directory -CONTEXT: while executing command on localhost:57637 -ERROR: could not receive file "base/pgsql_job_cache/job_0042/task_000001/p_00001" from localhost:57637 +CONTEXT: while executing command on localhost:xxxxx +ERROR: could not receive file "base/pgsql_job_cache/job_0042/task_000001/p_00001" from localhost:xxxxx -- different user should not be able to fetch partition file SET ROLE usage_access; SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port); WARNING: could not open file "base/pgsql_job_cache/job_0042/task_000001/p_00001.44518": No such file or directory -CONTEXT: while executing command on localhost:57637 -ERROR: could not receive file "base/pgsql_job_cache/job_0042/task_000001/p_00001" from localhost:57637 +CONTEXT: while executing command on localhost:xxxxx +ERROR: could not receive file "base/pgsql_job_cache/job_0042/task_000001/p_00001" from localhost:xxxxx -- only the user whom created the files should be able to fetch SET ROLE full_access; SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port); diff --git a/src/test/regress/expected/multi_mx_function_call_delegation.out b/src/test/regress/expected/multi_mx_function_call_delegation.out index 2f4f45b69..bd20fe4d9 100644 --- a/src/test/regress/expected/multi_mx_function_call_delegation.out +++ b/src/test/regress/expected/multi_mx_function_call_delegation.out @@ -406,9 +406,9 @@ DETAIL: A distributed function is created. To make sure subsequent commands see select mx_call_func_raise(2); DEBUG: pushing down the function call DEBUG: warning -DETAIL: WARNING from localhost:57638 +DETAIL: WARNING from localhost:xxxxx ERROR: error -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx PL/pgSQL function multi_mx_function_call_delegation.mx_call_func_raise(integer) line 4 at RAISE -- Don't push-down when doing INSERT INTO ... SELECT func(); SET client_min_messages TO ERROR; diff --git a/src/test/regress/expected/multi_mx_modifying_xacts.out b/src/test/regress/expected/multi_mx_modifying_xacts.out index f8a9e03f2..c8b988d56 100644 --- a/src/test/regress/expected/multi_mx_modifying_xacts.out +++ b/src/test/regress/expected/multi_mx_modifying_xacts.out @@ -217,7 +217,7 @@ INSERT INTO objects_mx VALUES (1, 'apple'); INSERT INTO objects_mx VALUES (1, 'orange'); ERROR: duplicate key value violates unique constraint "objects_mx_pkey_1220103" DETAIL: Key (id)=(1) already exists. -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx COMMIT; -- data shouldn't have persisted...
SELECT * FROM objects_mx WHERE id = 1; @@ -342,7 +342,7 @@ INSERT INTO objects_mx VALUES (2, 'BAD'); INSERT INTO labs_mx VALUES (9, 'Umbrella Corporation'); COMMIT; WARNING: illegal value -WARNING: failed to commit transaction on localhost:57637 +WARNING: failed to commit transaction on localhost:xxxxx WARNING: could not commit transaction for shard 1220103 on any active node WARNING: could not commit transaction for shard 1220102 on any active node ERROR: could not commit transaction on any active node @@ -370,7 +370,7 @@ INSERT INTO labs_mx VALUES (8, 'Aperture Science'); INSERT INTO labs_mx VALUES (9, 'BAD'); COMMIT; WARNING: illegal value -WARNING: failed to commit transaction on localhost:57637 +WARNING: failed to commit transaction on localhost:xxxxx WARNING: could not commit transaction for shard 1220103 on any active node WARNING: could not commit transaction for shard 1220102 on any active node ERROR: could not commit transaction on any active node @@ -395,7 +395,7 @@ INSERT INTO labs_mx VALUES (8, 'Aperture Science'); INSERT INTO labs_mx VALUES (9, 'BAD'); COMMIT; WARNING: illegal value -WARNING: failed to commit transaction on localhost:57637 +WARNING: failed to commit transaction on localhost:xxxxx WARNING: could not commit transaction for shard 1220103 on any active node WARNING: could not commit transaction for shard 1220102 on any active node ERROR: could not commit transaction on any active node diff --git a/src/test/regress/expected/multi_mx_node_metadata.out b/src/test/regress/expected/multi_mx_node_metadata.out index c5b5718a1..39d707118 100644 --- a/src/test/regress/expected/multi_mx_node_metadata.out +++ b/src/test/regress/expected/multi_mx_node_metadata.out @@ -173,7 +173,7 @@ SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node; -- Test updating a node when another node is in readonly-mode -------------------------------------------------------------------------- SELECT master_add_node('localhost', :worker_2_port) AS nodeid_2 \gset -NOTICE: Replicating reference table "ref_table" to the node localhost:57638 +NOTICE: Replicating reference table "ref_table" to the node localhost:xxxxx SELECT 1 FROM start_metadata_sync_to_node('localhost', :worker_2_port); ?column? ---------- @@ -383,7 +383,7 @@ SELECT verify_metadata('localhost', :worker_1_port); (1 row) SELECT 1 FROM master_activate_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "ref_table" to the node localhost:57638 +NOTICE: Replicating reference table "ref_table" to the node localhost:xxxxx ?column? ---------- 1 @@ -414,8 +414,8 @@ SELECT wait_until_metadata_sync(); UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid IN (:nodeid_1, :nodeid_2); -- should error out SELECT 1 FROM master_disable_node('localhost', 1); -ERROR: Disabling localhost:1 failed -DETAIL: connection error: localhost:1 +ERROR: Disabling localhost:xxxxx failed +DETAIL: connection error: localhost:xxxxx HINT: If you are using MX, try stop_metadata_sync_to_node(hostname, port) for nodes that are down before disabling them. -- try again after stopping metadata sync SELECT stop_metadata_sync_to_node('localhost', 1); @@ -449,7 +449,7 @@ SELECT wait_until_metadata_sync(); (1 row) SELECT 1 FROM master_activate_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "ref_table" to the node localhost:57638 +NOTICE: Replicating reference table "ref_table" to the node localhost:xxxxx ?column? 
---------- 1 @@ -481,8 +481,8 @@ SELECT wait_until_metadata_sync(); UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid IN (:nodeid_1, :nodeid_2); -- should error out SELECT 1 FROM master_disable_node('localhost', :worker_2_port); -ERROR: Disabling localhost:57638 failed -DETAIL: connection error: localhost:1 +ERROR: Disabling localhost:xxxxx failed +DETAIL: connection error: localhost:xxxxx HINT: If you are using MX, try stop_metadata_sync_to_node(hostname, port) for nodes that are down before disabling them. -- try again after stopping metadata sync SELECT stop_metadata_sync_to_node('localhost', 1); @@ -511,7 +511,7 @@ SELECT wait_until_metadata_sync(); (1 row) SELECT 1 FROM master_activate_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "ref_table" to the node localhost:57638 +NOTICE: Replicating reference table "ref_table" to the node localhost:xxxxx ?column? ---------- 1 diff --git a/src/test/regress/expected/multi_partitioning.out b/src/test/regress/expected/multi_partitioning.out index d0b4a2ec7..5a6600040 100644 --- a/src/test/regress/expected/multi_partitioning.out +++ b/src/test/regress/expected/multi_partitioning.out @@ -190,11 +190,11 @@ ORDER BY INSERT INTO partitioning_hash_test VALUES (8, 5); ERROR: no partition of relation "partitioning_hash_test_1660012" found for row DETAIL: Partition key of the failing row contains (subid) = (5). -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx INSERT INTO partitioning_hash_test VALUES (9, 12); ERROR: no partition of relation "partitioning_hash_test_1660015" found for row DETAIL: Partition key of the failing row contains (subid) = (12). -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx CREATE TABLE partitioning_hash_test_2 (id int, subid int); INSERT INTO partitioning_hash_test_2 VALUES (8, 5); ALTER TABLE partitioning_hash_test ATTACH PARTITION partitioning_hash_test_2 FOR VALUES WITH (MODULUS 3, REMAINDER 2); @@ -375,7 +375,7 @@ SELECT * FROM partitioning_test WHERE id = 7 OR id = 8 ORDER BY 1; UPDATE partitioning_test SET time = '2020-07-07' WHERE id = 7; ERROR: no partition of relation "partitioning_test_1660001" found for row DETAIL: Partition key of the failing row contains ("time") = (2020-07-07). -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx -- UPDATE with subqueries on partitioned table UPDATE partitioning_test @@ -445,7 +445,7 @@ SELECT * FROM partitioning_test_default ORDER BY 1, 2; -- create a new partition (will fail) CREATE TABLE partitioning_test_2014 PARTITION OF partitioning_test FOR VALUES FROM ('2014-01-01') TO ('2015-01-01'); ERROR: updated partition constraint for default partition would be violated by some row -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx BEGIN; ALTER TABLE partitioning_test DETACH PARTITION partitioning_test_default; CREATE TABLE partitioning_test_2014 PARTITION OF partitioning_test FOR VALUES FROM ('2014-01-01') TO ('2015-01-01'); @@ -519,7 +519,7 @@ SELECT * FROM partitioning_test_2009 ORDER BY 1; UPDATE partitioning_test_2009 SET time = time + INTERVAL '6 month'; ERROR: new row for relation "partitioning_test_2009_1660005" violates partition constraint DETAIL: Failing row contains (3, 2010-03-11). 
-CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx -- -- DDL in distributed partitioned tables -- @@ -1210,7 +1210,7 @@ INSERT INTO multi_column_partitioning_0_0_10_0 VALUES(5, -5); INSERT INTO multi_column_partitioning VALUES(10, 1); ERROR: no partition of relation "multi_column_partitioning_1660101" found for row DETAIL: Partition key of the failing row contains (c1, c2) = (10, 1). -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx -- test with MINVALUE/MAXVALUE CREATE TABLE multi_column_partitioning_10_max_20_min PARTITION OF multi_column_partitioning FOR VALUES FROM (10, MAXVALUE) TO (20, MINVALUE); -- test INSERT to partition with MINVALUE/MAXVALUE bounds @@ -1220,7 +1220,7 @@ INSERT INTO multi_column_partitioning_10_max_20_min VALUES(19, -19); INSERT INTO multi_column_partitioning VALUES(20, -20); ERROR: no partition of relation "multi_column_partitioning_1660101" found for row DETAIL: Partition key of the failing row contains (c1, c2) = (20, -20). -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx -- see data is loaded to multi-column partitioned table SELECT * FROM multi_column_partitioning ORDER BY 1, 2; c1 | c2 @@ -1920,7 +1920,7 @@ ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2010 FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); ERROR: insert or update on table "partitioning_test_2010_1660191" violates foreign key constraint "partitioning_reference_fkey_1660179" DETAIL: Key (id)=(1) is not present in table "reference_table_1660177". -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx -- Truncate, so attaching again won't fail TRUNCATE partitioning_test_2010; -- Attach a table which already has the same constraint diff --git a/src/test/regress/expected/multi_remove_node_reference_table.out b/src/test/regress/expected/multi_remove_node_reference_table.out index 662463386..cb72249f4 100644 --- a/src/test/regress/expected/multi_remove_node_reference_table.out +++ b/src/test/regress/expected/multi_remove_node_reference_table.out @@ -18,7 +18,7 @@ SELECT start_metadata_sync_to_node('localhost', :worker_1_port); -- remove non-existing node SELECT master_remove_node('localhost', 55555); -ERROR: node at "localhost:55555" does not exist +ERROR: node at "localhost:xxxxx" does not exist -- remove a node with no reference tables -- verify node exist before removal SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; @@ -218,10 +218,10 @@ WHERE \c - - - :master_port -- remove same node twice SELECT master_remove_node('localhost', :worker_2_port); -ERROR: node at "localhost:57638" does not exist +ERROR: node at "localhost:xxxxx" does not exist -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638 +NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx ?column? ---------- 1 @@ -242,7 +242,7 @@ SELECT master_remove_node('localhost', :worker_2_port); -- re-add the node for the next test SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638 +NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx ?column? 
---------- 1 @@ -458,7 +458,7 @@ WHERE \c - - - :master_port -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638 +NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx ?column? ---------- 1 @@ -583,7 +583,7 @@ SELECT * FROM remove_node_reference_table; \c - - - :master_port -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638 +NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx ?column? ---------- 1 @@ -704,7 +704,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.remove_ -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638 +NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx ?column? ---------- 1 @@ -903,8 +903,8 @@ WHERE \c - - - :master_port -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638 -NOTICE: Replicating reference table "table1" to the node localhost:57638 +NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx +NOTICE: Replicating reference table "table1" to the node localhost:xxxxx ?column? ---------- 1 @@ -1018,8 +1018,8 @@ WHERE \c - - - :master_port -- re-add the node for next tests SELECT 1 FROM master_activate_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638 -NOTICE: Replicating reference table "table1" to the node localhost:57638 +NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx +NOTICE: Replicating reference table "table1" to the node localhost:xxxxx ?column? 
---------- 1 diff --git a/src/test/regress/expected/multi_repartition_join_planning.out b/src/test/regress/expected/multi_repartition_join_planning.out index 7290429d4..ba46b5ed9 100644 --- a/src/test/regress/expected/multi_repartition_join_planning.out +++ b/src/test/regress/expected/multi_repartition_join_planning.out @@ -71,8 +71,8 @@ DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290002 orders ON ((lineitem.l_orderkey OPERATOR(pg_catalog.=) orders.o_orderkey))) WHERE ((lineitem.l_partkey OPERATOR(pg_catalog.<) 1000) AND (orders.o_totalprice OPERATOR(pg_catalog.>) 10.0))" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290003 orders ON ((lineitem.l_orderkey OPERATOR(pg_catalog.=) orders.o_orderkey))) WHERE ((lineitem.l_partkey OPERATOR(pg_catalog.<) 1000) AND (orders.o_totalprice OPERATOR(pg_catalog.>) 10.0))" -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 +DEBUG: assigned task 2 to node localhost:xxxxx +DEBUG: assigned task 1 to node localhost:xxxxx DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [6001,7000] and [1,1000] DEBUG: generated sql query for task 2 @@ -83,8 +83,8 @@ DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 3 DEBUG: pruning merge fetch taskId 3 DETAIL: Creating dependency on merge taskId 6 -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 4 to node localhost:57638 +DEBUG: assigned task 2 to node localhost:xxxxx +DEBUG: assigned task 4 to node localhost:xxxxx DEBUG: join prunable for intervals [1,1000] and [1001,2000] DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [1001,2000] and [1,1000] @@ -103,9 +103,9 @@ DEBUG: pruning merge fetch taskId 3 DETAIL: Creating dependency on merge taskId 8 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 11 -DEBUG: assigned task 4 to node localhost:57637 -DEBUG: assigned task 6 to node localhost:57638 -DEBUG: assigned task 2 to node localhost:57637 +DEBUG: assigned task 4 to node localhost:xxxxx +DEBUG: assigned task 6 to node localhost:xxxxx +DEBUG: assigned task 2 to node localhost:xxxxx DEBUG: completed cleanup query for job 3 DEBUG: completed cleanup query for job 3 DEBUG: completed cleanup query for job 2 @@ -161,14 +161,14 @@ DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity OPERATOR(pg_catalog.<) 5.0)" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity OPERATOR(pg_catalog.<) 5.0)" -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 +DEBUG: assigned task 2 to node localhost:xxxxx +DEBUG: assigned task 1 to node localhost:xxxxx DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE (o_totalprice OPERATOR(pg_catalog.<>) 4.0)" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE (o_totalprice OPERATOR(pg_catalog.<>) 4.0)"
-DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 +DEBUG: assigned task 2 to node localhost:xxxxx +DEBUG: assigned task 1 to node localhost:xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -205,10 +205,10 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 -DEBUG: assigned task 3 to node localhost:57637 -DEBUG: assigned task 6 to node localhost:57638 -DEBUG: assigned task 9 to node localhost:57637 -DEBUG: assigned task 12 to node localhost:57638 +DEBUG: assigned task 3 to node localhost:xxxxx +DEBUG: assigned task 6 to node localhost:xxxxx +DEBUG: assigned task 9 to node localhost:xxxxx +DEBUG: assigned task 12 to node localhost:xxxxx DEBUG: completed cleanup query for job 6 DEBUG: completed cleanup query for job 6 DEBUG: completed cleanup query for job 4 @@ -235,14 +235,14 @@ DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT l_suppkey FROM lineitem_290000 lineitem WHERE true" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT l_suppkey FROM lineitem_290001 lineitem WHERE true" -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 +DEBUG: assigned task 2 to node localhost:xxxxx +DEBUG: assigned task 1 to node localhost:xxxxx DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE true" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE true" -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 +DEBUG: assigned task 2 to node localhost:xxxxx +DEBUG: assigned task 1 to node localhost:xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -279,10 +279,10 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 6 to node localhost:57637 -DEBUG: assigned task 9 to node localhost:57638 -DEBUG: assigned task 12 to node localhost:57637 +DEBUG: assigned task 3 to node localhost:xxxxx +DEBUG: assigned task 6 to node localhost:xxxxx +DEBUG: assigned task 9 to node localhost:xxxxx +DEBUG: assigned task 12 to node localhost:xxxxx DEBUG: completed cleanup query for job 9 DEBUG: completed cleanup query for job 9 DEBUG: completed cleanup query for job 7 @@ -311,14 +311,14 @@ DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT l_suppkey FROM lineitem_290000 lineitem WHERE true" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT l_suppkey FROM lineitem_290001 lineitem WHERE true" -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 +DEBUG: assigned task 2 to node localhost:xxxxx +DEBUG: assigned task 1 to node localhost:xxxxx DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE true" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE true"
-DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 +DEBUG: assigned task 2 to node localhost:xxxxx +DEBUG: assigned task 1 to node localhost:xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -355,10 +355,10 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 -DEBUG: assigned task 3 to node localhost:57637 -DEBUG: assigned task 6 to node localhost:57638 -DEBUG: assigned task 9 to node localhost:57637 -DEBUG: assigned task 12 to node localhost:57638 +DEBUG: assigned task 3 to node localhost:xxxxx +DEBUG: assigned task 6 to node localhost:xxxxx +DEBUG: assigned task 9 to node localhost:xxxxx +DEBUG: assigned task 12 to node localhost:xxxxx DEBUG: completed cleanup query for job 12 DEBUG: completed cleanup query for job 12 DEBUG: completed cleanup query for job 10 @@ -385,14 +385,14 @@ DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT l_suppkey FROM lineitem_290000 lineitem WHERE true" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT l_suppkey FROM lineitem_290001 lineitem WHERE true" -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 +DEBUG: assigned task 2 to node localhost:xxxxx +DEBUG: assigned task 1 to node localhost:xxxxx DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE true" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE true" -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 +DEBUG: assigned task 2 to node localhost:xxxxx +DEBUG: assigned task 1 to node localhost:xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -429,10 +429,10 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 6 to node localhost:57637 -DEBUG: assigned task 9 to node localhost:57638 -DEBUG: assigned task 12 to node localhost:57637 +DEBUG: assigned task 3 to node localhost:xxxxx +DEBUG: assigned task 6 to node localhost:xxxxx +DEBUG: assigned task 9 to node localhost:xxxxx +DEBUG: assigned task 12 to node localhost:xxxxx DEBUG: completed cleanup query for job 15 DEBUG: completed cleanup query for job 15 DEBUG: completed cleanup query for job 13 @@ -461,10 +461,10 @@ DEBUG: generated sql query for task 3 DETAIL: query string: "SELECT s_i_id, s_w_id, s_quantity FROM stock_690006 stock WHERE true" DEBUG: generated sql query for task 4 DETAIL: query string: "SELECT s_i_id, s_w_id, s_quantity FROM stock_690007 stock WHERE true" -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 -DEBUG: assigned task 4 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 +DEBUG: assigned task 2 to node localhost:xxxxx +DEBUG: assigned task 1 to node localhost:xxxxx +DEBUG: assigned task 4 to node localhost:xxxxx +DEBUG: assigned task 3 to node localhost:xxxxx DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT ol_i_id FROM order_line_690000 order_line WHERE true"
DEBUG: generated sql query for task 2 @@ -473,10 +473,10 @@ DEBUG: generated sql query for task 3 DETAIL: query string: "SELECT ol_i_id FROM order_line_690002 order_line WHERE true" DEBUG: generated sql query for task 4 DETAIL: query string: "SELECT ol_i_id FROM order_line_690003 order_line WHERE true" -DEBUG: assigned task 1 to node localhost:57637 -DEBUG: assigned task 2 to node localhost:57638 -DEBUG: assigned task 3 to node localhost:57637 -DEBUG: assigned task 4 to node localhost:57638 +DEBUG: assigned task 1 to node localhost:xxxxx +DEBUG: assigned task 2 to node localhost:xxxxx +DEBUG: assigned task 3 to node localhost:xxxxx +DEBUG: assigned task 4 to node localhost:xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -513,10 +513,10 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 20 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 20 -DEBUG: assigned task 3 to node localhost:57637 -DEBUG: assigned task 6 to node localhost:57638 -DEBUG: assigned task 9 to node localhost:57637 -DEBUG: assigned task 12 to node localhost:57638 +DEBUG: assigned task 3 to node localhost:xxxxx +DEBUG: assigned task 6 to node localhost:xxxxx +DEBUG: assigned task 9 to node localhost:xxxxx +DEBUG: assigned task 12 to node localhost:xxxxx DEBUG: completed cleanup query for job 18 DEBUG: completed cleanup query for job 18 DEBUG: completed cleanup query for job 16 diff --git a/src/test/regress/expected/multi_repartition_join_task_assignment.out b/src/test/regress/expected/multi_repartition_join_task_assignment.out index f504ce3fb..dbcc548ed 100644 --- a/src/test/regress/expected/multi_repartition_join_task_assignment.out +++ b/src/test/regress/expected/multi_repartition_join_task_assignment.out @@ -18,8 +18,8 @@ FROM WHERE o_custkey = c_custkey; DEBUG: Router planner does not support append-partitioned tables. -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 +DEBUG: assigned task 2 to node localhost:xxxxx +DEBUG: assigned task 1 to node localhost:xxxxx DEBUG: join prunable for intervals [1,1000] and [1001,2000] DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [1001,2000] and [1,1000] @@ -32,9 +32,9 @@ DEBUG: pruning merge fetch taskId 3 DETAIL: Creating dependency on merge taskId 6 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 9 -DEBUG: assigned task 4 to node localhost:57637 -DEBUG: assigned task 6 to node localhost:57638 -DEBUG: assigned task 2 to node localhost:57637 +DEBUG: assigned task 4 to node localhost:xxxxx +DEBUG: assigned task 6 to node localhost:xxxxx +DEBUG: assigned task 2 to node localhost:xxxxx count ------- 2985 @@ -52,17 +52,17 @@ WHERE o_custkey = c_custkey AND o_orderkey = l_orderkey; DEBUG: Router planner does not support append-partitioned tables.
-DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 1 to node localhost:57637 +DEBUG: assigned task 2 to node localhost:xxxxx +DEBUG: assigned task 3 to node localhost:xxxxx +DEBUG: assigned task 1 to node localhost:xxxxx DEBUG: join prunable for intervals [1,5986] and [8997,14947] DEBUG: join prunable for intervals [8997,14947] and [1,5986] DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 4 DEBUG: pruning merge fetch taskId 3 DETAIL: Creating dependency on merge taskId 8 -DEBUG: assigned task 4 to node localhost:57637 -DEBUG: assigned task 2 to node localhost:57638 +DEBUG: assigned task 4 to node localhost:xxxxx +DEBUG: assigned task 2 to node localhost:xxxxx count ------- 12000 @@ -77,11 +77,11 @@ FROM WHERE l_partkey = c_nationkey; DEBUG: Router planner does not support append-partitioned tables. -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 1 to node localhost:57637 +DEBUG: assigned task 2 to node localhost:xxxxx +DEBUG: assigned task 1 to node localhost:xxxxx +DEBUG: assigned task 2 to node localhost:xxxxx +DEBUG: assigned task 3 to node localhost:xxxxx +DEBUG: assigned task 1 to node localhost:xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -110,10 +110,10 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 16 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 6 to node localhost:57637 -DEBUG: assigned task 9 to node localhost:57638 -DEBUG: assigned task 12 to node localhost:57637 +DEBUG: assigned task 3 to node localhost:xxxxx +DEBUG: assigned task 6 to node localhost:xxxxx +DEBUG: assigned task 9 to node localhost:xxxxx +DEBUG: assigned task 12 to node localhost:xxxxx count ------- 125 diff --git a/src/test/regress/expected/multi_replicate_reference_table.out b/src/test/regress/expected/multi_replicate_reference_table.out index 5a724afa5..56cc91c73 100644 --- a/src/test/regress/expected/multi_replicate_reference_table.out +++ b/src/test/regress/expected/multi_replicate_reference_table.out @@ -122,7 +122,7 @@ WHERE colocationid IN (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "replicate_reference_table_valid" to the node localhost:57638 +NOTICE: Replicating reference table "replicate_reference_table_valid" to the node localhost:xxxxx ?column? ---------- 1 @@ -247,7 +247,7 @@ WHERE colocationid IN BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "replicate_reference_table_rollback" to the node localhost:57638 +NOTICE: Replicating reference table "replicate_reference_table_rollback" to the node localhost:xxxxx ?column? ---------- 1 @@ -311,7 +311,7 @@ WHERE colocationid IN BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "replicate_reference_table_commit" to the node localhost:57638 +NOTICE: Replicating reference table "replicate_reference_table_commit" to the node localhost:xxxxx ?column? 
---------- 1 @@ -552,7 +552,7 @@ WHERE colocationid IN BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "replicate_reference_table_drop" to the node localhost:57638 +NOTICE: Replicating reference table "replicate_reference_table_drop" to the node localhost:xxxxx ?column? ---------- 1 @@ -616,7 +616,7 @@ WHERE colocationid IN (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "table1" to the node localhost:57638 +NOTICE: Replicating reference table "table1" to the node localhost:xxxxx ?column? ---------- 1 @@ -679,9 +679,9 @@ ORDER BY shardid, nodeport; (0 rows) SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "ref_table_1" to the node localhost:57638 -NOTICE: Replicating reference table "ref_table_2" to the node localhost:57638 -NOTICE: Replicating reference table "ref_table_3" to the node localhost:57638 +NOTICE: Replicating reference table "ref_table_1" to the node localhost:xxxxx +NOTICE: Replicating reference table "ref_table_2" to the node localhost:xxxxx +NOTICE: Replicating reference table "ref_table_3" to the node localhost:xxxxx ?column? ---------- 1 @@ -752,7 +752,7 @@ ORDER BY 1,4,5; -- we should see the two shard placements after activation SELECT 1 FROM master_activate_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "initially_not_replicated_reference_table" to the node localhost:57638 +NOTICE: Replicating reference table "initially_not_replicated_reference_table" to the node localhost:xxxxx ?column? ---------- 1 diff --git a/src/test/regress/expected/multi_router_planner.out b/src/test/regress/expected/multi_router_planner.out index 871ef8a68..f98cda2c9 100644 --- a/src/test/regress/expected/multi_router_planner.out +++ b/src/test/regress/expected/multi_router_planner.out @@ -2402,7 +2402,7 @@ GRANT INSERT ON ALL TABLES IN SCHEMA public TO router_user; -- we will fail to connect to worker 2, since the user does not exist BEGIN; INSERT INTO failure_test VALUES (1, 1); -WARNING: connection error: localhost:57638 +WARNING: connection error: localhost:xxxxx DETAIL: FATAL: role "router_user" does not exist SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN ( @@ -2420,7 +2420,7 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement ROLLBACK; INSERT INTO failure_test VALUES (2, 1); -WARNING: connection error: localhost:57638 +WARNING: connection error: localhost:xxxxx DETAIL: FATAL: role "router_user" does not exist SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN ( diff --git a/src/test/regress/expected/multi_shard_update_delete.out b/src/test/regress/expected/multi_shard_update_delete.out index 7f6715cb1..96a21660f 100644 --- a/src/test/regress/expected/multi_shard_update_delete.out +++ b/src/test/regress/expected/multi_shard_update_delete.out @@ -615,7 +615,7 @@ UPDATE users_test_table as utt SET value_1 = 3 WHERE value_2 > (SELECT value_3 FROM events_test_table as ett WHERE utt.user_id = ett.user_id); ERROR: more than one row returned by a subquery used as an expression -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx -- We can not pushdown a query if the target relation is reference table UPDATE users_reference_copy_table SET value_2 = 5
diff --git a/src/test/regress/expected/multi_sql_function.out b/src/test/regress/expected/multi_sql_function.out index dc4c2547f..47966e7b0 100644 --- a/src/test/regress/expected/multi_sql_function.out +++ b/src/test/regress/expected/multi_sql_function.out @@ -351,7 +351,7 @@ $$ LANGUAGE SQL; SELECT insert_twice(); ERROR: duplicate key value violates unique constraint "table_with_unique_constraint_a_key_1230009" DETAIL: Key (a)=(4) already exists. -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx SQL function "insert_twice" statement 2 SELECT * FROM table_with_unique_constraint ORDER BY a; a diff --git a/src/test/regress/expected/multi_task_assignment_policy.out b/src/test/regress/expected/multi_task_assignment_policy.out index 470860e0b..95d19dd14 100644 --- a/src/test/regress/expected/multi_task_assignment_policy.out +++ b/src/test/regress/expected/multi_task_assignment_policy.out @@ -70,9 +70,9 @@ SET client_min_messages TO DEBUG3; SET citus.task_assignment_policy TO 'greedy'; EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: Router planner does not support append-partitioned tables. -DEBUG: assigned task 1 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 2 to node localhost:57637 +DEBUG: assigned task 1 to node localhost:xxxxx +DEBUG: assigned task 3 to node localhost:xxxxx +DEBUG: assigned task 2 to node localhost:xxxxx QUERY PLAN ---------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) @@ -82,9 +82,9 @@ DEBUG: assigned task 2 to node localhost:57637 EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: Router planner does not support append-partitioned tables. -DEBUG: assigned task 1 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 2 to node localhost:57637 +DEBUG: assigned task 1 to node localhost:xxxxx +DEBUG: assigned task 3 to node localhost:xxxxx +DEBUG: assigned task 2 to node localhost:xxxxx QUERY PLAN ---------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) @@ -96,9 +96,9 @@ DEBUG: assigned task 2 to node localhost:57637 SET citus.task_assignment_policy TO 'first-replica'; EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: Router planner does not support append-partitioned tables. -DEBUG: assigned task 1 to node localhost:57637 -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 +DEBUG: assigned task 1 to node localhost:xxxxx +DEBUG: assigned task 2 to node localhost:xxxxx +DEBUG: assigned task 3 to node localhost:xxxxx QUERY PLAN ---------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) @@ -108,9 +108,9 @@ DEBUG: assigned task 3 to node localhost:57638 EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: Router planner does not support append-partitioned tables.
-DEBUG: assigned task 1 to node localhost:57637 -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 +DEBUG: assigned task 1 to node localhost:xxxxx +DEBUG: assigned task 2 to node localhost:xxxxx +DEBUG: assigned task 3 to node localhost:xxxxx QUERY PLAN ---------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) diff --git a/src/test/regress/expected/multi_transactional_drop_shards.out b/src/test/regress/expected/multi_transactional_drop_shards.out index 307bfa70e..8016d3633 100644 --- a/src/test/regress/expected/multi_transactional_drop_shards.out +++ b/src/test/regress/expected/multi_transactional_drop_shards.out @@ -655,7 +655,7 @@ ORDER BY \c - - - :master_port -- try using the coordinator as a worker and then dropping the table SELECT 1 FROM master_add_node('localhost', :master_port); -NOTICE: Replicating reference table "transactional_drop_reference" to the node localhost:57636 +NOTICE: Replicating reference table "transactional_drop_reference" to the node localhost:xxxxx ?column? ---------- 1 diff --git a/src/test/regress/expected/pg12.out b/src/test/regress/expected/pg12.out index 8e5350b42..70afe9dc7 100644 --- a/src/test/regress/expected/pg12.out +++ b/src/test/regress/expected/pg12.out @@ -286,7 +286,7 @@ NOTICE: Copying data from local table... INSERT INTO collection_users VALUES (1, 1000, 1); ERROR: insert or update on table "collection_users_60028" violates foreign key constraint "collection_users_fkey_60028" DETAIL: Key (key, collection_id)=(1, 1000) is not present in table "collections_list_60016". -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx -- whereas new record with partition should go through INSERT INTO collections_list VALUES (2, 1, '1.2'); INSERT INTO collection_users VALUES (5, 1, 2); diff --git a/src/test/regress/expected/propagate_extension_commands.out b/src/test/regress/expected/propagate_extension_commands.out index cd23afe6e..4286db521 100644 --- a/src/test/regress/expected/propagate_extension_commands.out +++ b/src/test/regress/expected/propagate_extension_commands.out @@ -184,7 +184,7 @@ SELECT create_reference_table('ref_table_2'); -- and add the other node SELECT 1 from master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "ref_table_2" to the node localhost:57638 +NOTICE: Replicating reference table "ref_table_2" to the node localhost:xxxxx ?column? ---------- 1 @@ -365,7 +365,7 @@ BEGIN; COMMIT; -- add the node back SELECT 1 from master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "t3" to the node localhost:57638 +NOTICE: Replicating reference table "t3" to the node localhost:xxxxx ?column? 
---------- 1 diff --git a/src/test/regress/expected/set_operation_and_local_tables.out b/src/test/regress/expected/set_operation_and_local_tables.out index f5f5e0884..08bfe49a1 100644 --- a/src/test/regress/expected/set_operation_and_local_tables.out +++ b/src/test/regress/expected/set_operation_and_local_tables.out @@ -81,7 +81,7 @@ DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT intermediate_r DEBUG: Creating router plan DEBUG: Plan is router executable ERROR: division by zero -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:xxxxx -- we should be able to run set operations with generate series and local tables as well ((SELECT x FROM local_test) UNION ALL (SELECT x FROM test)) INTERSECT (SELECT i FROM generate_series(0, 100) i) ORDER BY 1 DESC; DEBUG: Local tables cannot be used in distributed queries. diff --git a/src/test/regress/expected/sql_procedure.out b/src/test/regress/expected/sql_procedure.out index 3da4f0386..cdec3e3b7 100644 --- a/src/test/regress/expected/sql_procedure.out +++ b/src/test/regress/expected/sql_procedure.out @@ -90,7 +90,7 @@ $$; CALL test_procedure_modify_insert(2,12); ERROR: duplicate key value violates unique constraint "idx_table_100503" DETAIL: Key (id, org_id)=(2, 12) already exists. -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx SQL statement "INSERT INTO test_table VALUES (tt_id, tt_org_id)" PL/pgSQL function test_procedure_modify_insert(integer,integer) line 5 at SQL statement SELECT * FROM test_table ORDER BY 1, 2; @@ -110,7 +110,7 @@ $$; CALL test_procedure_modify_insert_commit(2,30); ERROR: duplicate key value violates unique constraint "idx_table_100503" DETAIL: Key (id, org_id)=(2, 30) already exists. -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx SQL statement "INSERT INTO test_table VALUES (tt_id, tt_org_id)" PL/pgSQL function test_procedure_modify_insert_commit(integer,integer) line 5 at SQL statement SELECT * FROM test_table ORDER BY 1, 2; diff --git a/src/test/regress/expected/with_basics.out b/src/test/regress/expected/with_basics.out index 939689800..30bf7a169 100644 --- a/src/test/regress/expected/with_basics.out +++ b/src/test/regress/expected/with_basics.out @@ -100,7 +100,7 @@ WITH cte AS ( ) SELECT (SELECT * FROM cte); ERROR: more than one row returned by a subquery used as an expression -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:xxxxx WITH cte_basic AS ( SELECT user_id FROM users_table WHERE user_id = 1 )
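
For reference, the port normalization that produces every "localhost:xxxxx" hunk above can be reproduced by hand on a single line of expected output. A minimal sketch, assuming GNU sed with extended regular expressions (sed -E), which is how the regress harness is presumed to apply the normalize file; the sample line is lifted from the hunks above, and the scratch file name is made up for the illustration:

    $ # hypothetical scratch file standing in for a test's raw .out result
    $ echo 'CONTEXT: while executing command on localhost:57637' > /tmp/sample.out
    $ sed -Ef src/test/regress/bin/normalize.sed /tmp/sample.out
    CONTEXT: while executing command on localhost:xxxxx

The harness runs each test's raw output through this same filter before diffing it against the expected file, which is why the expected files above can hard-code "xxxxx" in place of worker ports that vary between runs.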