From e54fba6552a9e68e543f1a156ccf9b53d3a805cd Mon Sep 17 00:00:00 2001
From: Halil Ozan Akgul
Date: Mon, 16 Mar 2020 11:56:59 +0300
Subject: [PATCH] Adds changes to expected files

---
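Note: this patch rewrites environment-specific details in the regression
test expected files: "localhost" host names are dropped, worker port
numbers become the "xxxxx" placeholder, and the "regression" database
name is blanked, so the recorded output no longer depends on where the
suite ran. As a rough sketch of the rewrite (the rule file name and the
exact patterns below are illustrative assumptions, not the shipped
normalization rules), the change amounts to a few sed substitutions
applied to every expected file:

    # normalize.sed (hypothetical name): scrub connection details
    # "localhost:57637" -> ":xxxxx"
    s/localhost:[0-9][0-9]*/:xxxxx/g
    # "(localhost,57637,t,...)" tuples from run_command_on_workers
    s/(localhost,[0-9][0-9]*,/(,xxxxx,/g
    # "host=localhost port=57637 dbname=regression" -> "host= port=xxxxx dbname="
    s/host=localhost/host=/g
    s/port=[0-9][0-9]*/port=xxxxx/g
    s/dbname=regression/dbname=/g

    # applied in place, e.g.: sed -i -f normalize.sed src/test/regress/expected/*.out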
 .../regress/bin/test/expected/different.out | 10 +-
 src/test/regress/expected/add_coordinator.out | 10 +-
 .../regress/expected/aggregate_support.out | 12 +-
 .../expected/alter_role_propagation.out | 68 +--
 src/test/regress/expected/base_enable_mx.out | 4 +-
 src/test/regress/expected/ch_bench_having.out | 18 +-
 .../regress/expected/ch_bench_having_mx.out | 18 +-
 .../expected/coordinator_shouldhaveshards.out | 8 +-
 src/test/regress/expected/cte_inline.out | 14 +-
 src/test/regress/expected/cte_inline_0.out | 8 +-
 .../expected/custom_aggregate_support.out | 64 +--
 .../expected/custom_aggregate_support_1.out | 64 +--
 .../expected/disable_object_propagation.out | 12 +-
 .../expected/distributed_collations.out | 16 +-
 .../distributed_collations_conflict.out | 10 +-
 .../expected/distributed_functions.out | 76 +--
 .../distributed_functions_conflict.out | 4 +-
 .../distributed_intermediate_results.out | 52 +-
 .../expected/distributed_procedure.out | 44 +-
 .../regress/expected/distributed_types.out | 56 +--
 .../expected/distributed_types_conflict.out | 4 +-
 .../distributed_types_xact_add_enum_value.out | 12 +-
 ...istributed_types_xact_add_enum_value_0.out | 12 +-
 .../expected/escape_extension_name.out | 16 +-
 .../expected/escape_extension_name_0.out | 16 +-
 .../expected/failure_1pc_copy_append.out | 70 +--
 .../expected/failure_1pc_copy_hash.out | 54 +--
 .../expected/failure_add_disable_node.out | 86 ++--
 .../failure_connection_establishment.out | 6 +-
 .../regress/expected/failure_copy_on_hash.out | 22 +-
 .../expected/failure_copy_to_reference.out | 16 +-
 ...ure_create_distributed_table_non_empty.out | 86 ++--
 .../failure_create_index_concurrently.out | 6 +-
 .../failure_create_reference_table.out | 18 +-
 .../regress/expected/failure_create_table.out | 108 ++---
 .../regress/expected/failure_cte_subquery.out | 12 +-
 src/test/regress/expected/failure_ddl.out | 268 +++++------
 .../expected/failure_distributed_results.out | 32 +-
 .../failure_insert_select_pushdown.out | 4 +-
 .../failure_insert_select_repartition.out | 16 +-
 .../failure_insert_select_via_coordinator.out | 12 +-
 .../regress/expected/failure_multi_dml.out | 18 +-
 .../expected/failure_multi_row_insert.out | 10 +-
 .../failure_multi_shard_update_delete.out | 34 +-
 .../expected/failure_mx_metadata_sync.out | 28 +-
 .../regress/expected/failure_ref_tables.out | 6 +-
 .../failure_replicated_partitions.out | 2 +-
 .../regress/expected/failure_savepoints.out | 48 +-
 src/test/regress/expected/failure_setup.out | 4 +-
 .../regress/expected/failure_single_mod.out | 6 +-
 .../expected/failure_single_select.out | 14 +-
 .../regress/expected/failure_truncate.out | 40 +-
 src/test/regress/expected/failure_vacuum.out | 6 +-
 .../regress/expected/failure_vacuum_1.out | 4 +-
 .../foreign_key_restriction_enforcement.out | 2 +-
 .../foreign_key_to_reference_table.out | 10 +-
 .../expected/grant_on_schema_propagation.out | 96 ++--
 .../insert_select_connection_leak.out | 2 +-
 .../expected/insert_select_repartition.out | 44 +-
 .../expected/intermediate_result_pruning.out | 140 +++---
 .../regress/expected/intermediate_results.out | 22 +-
 ...add_node_vs_reference_table_operations.out | 96 ++--
 .../expected/isolation_add_remove_node.out | 126 ++---
 .../isolation_alter_role_propagation.out | 36 +-
 .../expected/isolation_append_copy_vs_all.out | 44 +-
 .../isolation_citus_dist_activity.out | 28 +-
 .../expected/isolation_cluster_management.out | 4 +-
 ...ation_copy_placement_vs_copy_placement.out | 12 +-
 ...olation_copy_placement_vs_modification.out | 88 ++--
 .../expected/isolation_copy_vs_all_on_mx.out | 10 +-
 .../isolation_create_restore_point.out | 4 +-
 ...lation_create_table_vs_add_remove_node.out | 102 ++--
 .../regress/expected/isolation_ddl_vs_all.out | 156 +++---
 .../expected/isolation_delete_vs_all.out | 44 +-
 .../isolation_dis2ref_foreign_keys_on_mx.out | 28 +-
 .../isolation_distributed_transaction_id.out | 4 +-
 .../expected/isolation_dml_vs_repair.out | 76 +--
 ...op_alter_index_select_for_update_on_mx.out | 12 +-
 .../expected/isolation_drop_vs_all.out | 44 +-
 ...lation_ensure_dependency_activate_node.out | 450 +++++++++---------
 .../expected/isolation_extension_commands.out | 184 +++----
 .../isolation_get_all_active_transactions.out | 8 +-
 ...lation_get_distributed_wait_queries_mx.out | 64 +--
 .../expected/isolation_hash_copy_vs_all.out | 44 +-
 .../isolation_insert_select_vs_all.out | 92 ++--
 .../isolation_insert_select_vs_all_on_mx.out | 52 +-
 .../expected/isolation_insert_vs_all.out | 88 ++--
 .../isolation_insert_vs_all_on_mx.out | 44 +-
 .../isolation_master_append_table.out | 4 +-
 .../expected/isolation_master_update_node.out | 12 +-
 .../isolation_master_update_node_0.out | 12 +-
 .../isolation_partitioned_copy_vs_all.out | 24 +-
 .../expected/isolation_range_copy_vs_all.out | 44 +-
 .../isolation_ref2ref_foreign_keys_on_mx.out | 144 +++---
 ...ion_ref_select_for_update_vs_all_on_mx.out | 30 +-
 ..._ref_update_delete_upsert_vs_all_on_mx.out | 16 +-
 .../isolation_reference_copy_vs_all.out | 44 +-
 .../expected/isolation_reference_on_mx.out | 36 +-
 ...licate_reference_tables_to_coordinator.out | 12 +-
 .../expected/isolation_select_vs_all.out | 132 ++---
 .../isolation_select_vs_all_on_mx.out | 24 +-
 .../expected/isolation_shouldhaveshards.out | 24 +-
 .../expected/isolation_truncate_vs_all.out | 44 +-
 .../isolation_truncate_vs_all_on_mx.out | 26 +-
 ...tion_update_delete_upsert_vs_all_on_mx.out | 16 +-
 .../expected/isolation_update_node.out | 50 +-
 .../isolation_update_node_lock_writes.out | 12 +-
 .../expected/isolation_update_vs_all.out | 44 +-
 .../expected/isolation_upsert_vs_all.out | 44 +-
 .../expected/local_shard_execution.out | 10 +-
 .../locally_execute_intermediate_results.out | 202 ++++----
 .../regress/expected/materialized_view.out | 2 +-
 .../regress/expected/multi_703_upgrade.out | 6 +-
 .../multi_alter_table_add_constraints.out | 46 +-
 .../expected/multi_cache_invalidation.out | 16 +-
 .../regress/expected/multi_citus_tools.out | 150 +++---
 .../expected/multi_cluster_management.out | 360 +++++++-------
 .../multi_colocated_shard_transfer.out | 184 +++----
 .../expected/multi_colocation_utils.out | 216 ++++-----
 .../regress/expected/multi_create_table.out | 22 +-
 .../multi_create_table_constraints.out | 22 +-
 .../expected/multi_deparse_function.out | 208 ++++----
 .../expected/multi_deparse_procedure.out | 152 +++---
 .../expected/multi_distribution_metadata.out | 6 +-
 .../regress/expected/multi_drop_extension.out | 4 +-
 src/test/regress/expected/multi_explain.out | 92 ++--
 src/test/regress/expected/multi_extension.out | 18 +-
 .../regress/expected/multi_follower_dml.out | 2 +-
 .../multi_follower_select_statements.out | 20 +-
 .../regress/expected/multi_foreign_key.out | 36 +-
 .../expected/multi_generate_ddl_commands.out | 20 +-
 .../expected/multi_having_pushdown.out | 12 +-
 .../expected/multi_index_statements.out | 2 +-
 .../regress/expected/multi_insert_select.out | 6 +-
 .../expected/multi_insert_select_conflict.out | 2 +-
 .../regress/expected/multi_limit_clause.out | 2 +-
 .../expected/multi_master_protocol.out | 6 +-
 .../expected/multi_metadata_attributes.out | 2 +-
 .../regress/expected/multi_metadata_sync.out | 270 +++++------
 .../regress/expected/multi_modifications.out | 2 +-
 .../expected/multi_modifying_xacts.out | 162 +++----
 src/test/regress/expected/multi_multiuser.out | 36 +-
 .../expected/multi_mx_add_coordinator.out | 16 +-
 src/test/regress/expected/multi_mx_call.out | 8 +-
 .../expected/multi_mx_create_table.out | 4 +-
 .../regress/expected/multi_mx_explain.out | 66 +--
 .../multi_mx_function_call_delegation.out | 8 +-
 .../multi_mx_function_table_reference.out | 10 +-
 .../expected/multi_mx_hide_shard_names.out | 74 +--
 .../multi_mx_insert_select_repartition.out | 2 +-
 .../regress/expected/multi_mx_metadata.out | 10 +-
 ...i_mx_modifications_to_reference_tables.out | 4 +-
 .../expected/multi_mx_modifying_xacts.out | 8 +-
 .../expected/multi_mx_node_metadata.out | 120 ++---
 .../expected/multi_mx_partitioning.out | 8 +-
 .../regress/expected/multi_name_lengths.out | 22 +-
 .../multi_null_minmax_value_pruning.out | 4 +-
 .../expected/multi_orderby_limit_pushdown.out | 12 +-
 .../regress/expected/multi_partitioning.out | 24 +-
 .../expected/multi_partitioning_utils.out | 8 +-
 .../regress/expected/multi_prepare_sql.out | 6 +-
 .../expected/multi_read_from_secondaries.out | 12 +-
 .../expected/multi_real_time_transaction.out | 12 +-
 .../expected/multi_reference_table.out | 6 +-
 .../multi_remove_node_reference_table.out | 122 ++---
 .../regress/expected/multi_repair_shards.out | 16 +-
 .../multi_repartition_join_planning.out | 102 ++--
 ...multi_repartition_join_task_assignment.out | 38 +-
 .../multi_replicate_reference_table.out | 98 ++--
 .../regress/expected/multi_router_planner.out | 28 +-
 .../regress/expected/multi_row_insert.out | 4 +-
 .../regress/expected/multi_schema_support.out | 28 +-
 .../expected/multi_select_distinct.out | 60 +--
 .../expected/multi_shard_update_delete.out | 6 +-
 .../regress/expected/multi_sql_function.out | 4 +-
 .../multi_subquery_behavioral_analytics.out | 16 +-
 .../multi_subquery_window_functions.out | 2 +-
 src/test/regress/expected/multi_table_ddl.out | 4 +-
 .../expected/multi_task_assignment_policy.out | 24 +-
 .../regress/expected/multi_test_helpers.out | 6 +-
 .../expected/multi_transaction_recovery.out | 4 +-
 .../multi_transactional_drop_shards.out | 148 +++---
 .../multi_unsupported_worker_operations.out | 34 +-
 .../multi_upgrade_reference_table.out | 6 +-
 src/test/regress/expected/multi_upsert.out | 4 +-
 src/test/regress/expected/multi_utilities.out | 24 +-
 .../expected/multi_utility_warnings.out | 2 +-
 src/test/regress/expected/multi_view.out | 10 +-
 src/test/regress/expected/pg12.out | 4 +-
 .../expected/propagate_extension_commands.out | 60 +--
 .../regress/expected/remove_coordinator.out | 2 +-
 ...licate_reference_tables_to_coordinator.out | 4 +-
 .../expected/replicated_partitioned_table.out | 6 +-
 src/test/regress/expected/sql_procedure.out | 4 +-
 src/test/regress/expected/ssl_by_default.out | 16 +-
 src/test/regress/expected/subquery_basics.out | 10 +-
 .../expected/subquery_prepared_statements.out | 4 +-
 .../regress/expected/upgrade_basic_after.out | 4 +-
 .../upgrade_pg_dist_object_test_before.out | 16 +-
 .../upgrade_rebalance_strategy_before.out | 4 +-
 .../regress/expected/window_functions.out | 10 +-
 201 files changed, 4194 insertions(+), 4194 deletions(-)

diff --git a/src/test/regress/bin/test/expected/different.out b/src/test/regress/bin/test/expected/different.out
index d764492f7..aa86cc305 100644
--- a/src/test/regress/bin/test/expected/different.out
+++ b/src/test/regress/bin/test/expected/different.out
@@ -2,15 +2,15 @@
 +++ file_different.out.modified
 @@ -1,3 +1,2 @@
 -This line is missing in file_different
- Ports are replaced with xxxxx: localhost:2187
+ Ports are replaced with xxxxx: :2187
  This line is the same
 @@ -7,6 +6,8 @@
- Filler 2, localhost:1111
- Filler 3, localhost:111
+ Filler 2, :1111
+ Filler 3, :111
 -This line is missing in file_different
 +This line has been inserted
-+This line has also been inserted, localhost:10812
- Line below will be removed, localhost:2781
++This line has also been inserted, :10812
+ Line below will be removed, :2781
 -This line will be changed ✓
 +This line has been changed ✇
  End of file
diff --git a/src/test/regress/expected/add_coordinator.out b/src/test/regress/expected/add_coordinator.out
index d1e5d7cd8..27d3ebe6b 100644
--- a/src/test/regress/expected/add_coordinator.out
+++ b/src/test/regress/expected/add_coordinator.out
@@ -1,20 +1,20 @@
 --
 -- ADD_COORDINATOR
 --
-SELECT master_add_node('localhost', :master_port, groupid => 0) AS master_nodeid \gset
+SELECT master_add_node('', :master_port, groupid => 0) AS master_nodeid \gset
 -- adding the same node again should return the existing nodeid
-SELECT master_add_node('localhost', :master_port, groupid => 0) = :master_nodeid;
+SELECT master_add_node('', :master_port, groupid => 0) = :master_nodeid;
 ?column?
 ---------------------------------------------------------------------
 t
 (1 row)
 -- adding another node with groupid=0 should error out
-SELECT master_add_node('localhost', 12345, groupid => 0) = :master_nodeid;
+SELECT master_add_node('', 12345, groupid => 0) = :master_nodeid;
 ERROR: group 0 already has a primary node
 -- start_metadata_sync_to_node() for coordinator should raise a notice
-SELECT start_metadata_sync_to_node('localhost', :master_port);
-NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
+SELECT start_metadata_sync_to_node('', :master_port);
+NOTICE: :xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
 start_metadata_sync_to_node
 ---------------------------------------------------------------------
diff --git a/src/test/regress/expected/aggregate_support.out b/src/test/regress/expected/aggregate_support.out
index bf4ddb41d..4a6e9ebe3 100644
--- a/src/test/regress/expected/aggregate_support.out
+++ b/src/test/regress/expected/aggregate_support.out
@@ -220,7 +220,7 @@ create aggregate sumstring(text) (
 );
 select sumstring(valf::text) from aggdata where valf is not null;
 ERROR: function "aggregate_support.sumstring(text)" does not exist
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 select create_distributed_function('sumstring(text)');
 create_distributed_function
 ---------------------------------------------------------------------
@@ -264,8 +264,8 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
 select run_command_on_workers($$create user notsuper$$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,"CREATE ROLE")
- (localhost,57638,t,"CREATE ROLE")
+ (,xxxxx,t,"CREATE ROLE")
+ (,xxxxx,t,"CREATE ROLE")
 (2 rows)
 grant all on schema aggregate_support to notsuper;
@@ -276,8 +276,8 @@ grant all on all tables in schema aggregate_support to notsuper;
 $$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,GRANT)
- (localhost,57638,t,GRANT)
+ (,xxxxx,t,GRANT)
+ (,xxxxx,t,GRANT)
 (2 rows)
 set role notsuper;
@@ -431,6 +431,6 @@ RESET citus.task_executor_type;
 select key, count(distinct aggdata) from aggdata group by key order by 1, 2;
 ERROR: type "aggregate_support.aggdata" does not exist
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 set client_min_messages to error;
 drop schema aggregate_support cascade;
diff --git a/src/test/regress/expected/alter_role_propagation.out b/src/test/regress/expected/alter_role_propagation.out
index 0d31dc733..2232da107 100644
--- a/src/test/regress/expected/alter_role_propagation.out
+++ b/src/test/regress/expected/alter_role_propagation.out
@@ -6,11 +6,11 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
 SELECT run_command_on_workers($$CREATE ROLE alter_role_1 WITH LOGIN;$$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,"CREATE ROLE")
- (localhost,57638,t,"CREATE ROLE")
+ (,xxxxx,t,"CREATE ROLE")
+ (,xxxxx,t,"CREATE ROLE")
 (2 rows)
--- postgres errors out
+-- errors out
 ALTER ROLE alter_role_1 WITH SUPERUSER NOSUPERUSER;
 ERROR: conflicting or redundant options
 -- make sure that we propagate all options accurately
@@ -24,8 +24,8 @@ SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlog
 SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,"(alter_role_1,t,t,t,t,t,t,t,66,,2032)")
- (localhost,57638,t,"(alter_role_1,t,t,t,t,t,t,t,66,,2032)")
+ (,xxxxx,t,"(alter_role_1,t,t,t,t,t,t,t,66,,2032)")
+ (,xxxxx,t,"(alter_role_1,t,t,t,t,t,t,t,66,,2032)")
 (2 rows)
 -- make sure that we propagate all options accurately
@@ -39,8 +39,8 @@ SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlog
 SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,"(alter_role_1,f,f,f,f,f,f,f,0,,2052)")
- (localhost,57638,t,"(alter_role_1,f,f,f,f,f,f,f,0,,2052)")
+ (,xxxxx,t,"(alter_role_1,f,f,f,f,f,f,f,0,,2052)")
+ (,xxxxx,t,"(alter_role_1,f,f,f,f,f,f,f,0,,2052)")
 (2 rows)
 -- make sure that non-existent users are handled properly
@@ -59,8 +59,8 @@ SELECT rolconnlimit FROM pg_authid WHERE rolname = CURRENT_USER;
 SELECT run_command_on_workers($$SELECT rolconnlimit FROM pg_authid WHERE rolname = CURRENT_USER;$$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,123)
- (localhost,57638,t,123)
+ (,xxxxx,t,123)
+ (,xxxxx,t,123)
 (2 rows)
 -- make sure that SESSION_USER just works fine
@@ -74,8 +74,8 @@ SELECT rolconnlimit FROM pg_authid WHERE rolname = SESSION_USER;
 SELECT run_command_on_workers($$SELECT rolconnlimit FROM pg_authid WHERE rolname = SESSION_USER;$$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,124)
- (localhost,57638,t,124)
+ (,xxxxx,t,124)
+ (,xxxxx,t,124)
 (2 rows)
 -- now lets test the passwords in more detail
@@ -89,8 +89,8 @@ SELECT rolpassword is NULL FROM pg_authid WHERE rolname = 'alter_role_1';
 SELECT run_command_on_workers($$SELECT rolpassword is NULL FROM pg_authid WHERE rolname = 'alter_role_1'$$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,t)
- (localhost,57638,t,t)
+ (,xxxxx,t,t)
+ (,xxxxx,t,t)
 (2 rows)
 ALTER ROLE alter_role_1 WITH PASSWORD 'test1';
@@ -103,8 +103,8 @@ SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1';
 SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'$$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,md52f9cc8d65e37edcc45c4a489bdfc699d)
- (localhost,57638,t,md52f9cc8d65e37edcc45c4a489bdfc699d)
+ (,xxxxx,t,md52f9cc8d65e37edcc45c4a489bdfc699d)
+ (,xxxxx,t,md52f9cc8d65e37edcc45c4a489bdfc699d)
 (2 rows)
 ALTER ROLE alter_role_1 WITH ENCRYPTED PASSWORD 'test2';
@@ -117,8 +117,8 @@ SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1';
 SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'$$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,md5e17f7818c5ec023fa87bdb97fd3e842e)
- (localhost,57638,t,md5e17f7818c5ec023fa87bdb97fd3e842e)
+ (,xxxxx,t,md5e17f7818c5ec023fa87bdb97fd3e842e)
+ (,xxxxx,t,md5e17f7818c5ec023fa87bdb97fd3e842e)
 (2 rows)
 ALTER ROLE alter_role_1 WITH ENCRYPTED PASSWORD 'md59cce240038b7b335c6aa9674a6f13e72';
@@ -131,8 +131,8 @@ SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1';
 SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'$$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,md59cce240038b7b335c6aa9674a6f13e72)
- (localhost,57638,t,md59cce240038b7b335c6aa9674a6f13e72)
+ (,xxxxx,t,md59cce240038b7b335c6aa9674a6f13e72)
+ (,xxxxx,t,md59cce240038b7b335c6aa9674a6f13e72)
 (2 rows)
 -- edge case role names
@@ -142,8 +142,8 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
 SELECT run_command_on_workers($$CREATE ROLE "alter_role'1" WITH LOGIN;$$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,"CREATE ROLE")
- (localhost,57638,t,"CREATE ROLE")
+ (,xxxxx,t,"CREATE ROLE")
+ (,xxxxx,t,"CREATE ROLE")
 (2 rows)
 ALTER ROLE "alter_role'1" CREATEROLE;
@@ -156,8 +156,8 @@ SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role''1';
 SELECT run_command_on_workers($$SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role''1'$$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,t)
- (localhost,57638,t,t)
+ (,xxxxx,t,t)
+ (,xxxxx,t,t)
 (2 rows)
 CREATE ROLE "alter_role""1" WITH LOGIN;
@@ -166,8 +166,8 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
 SELECT run_command_on_workers($$CREATE ROLE "alter_role""1" WITH LOGIN;$$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,"CREATE ROLE")
- (localhost,57638,t,"CREATE ROLE")
+ (,xxxxx,t,"CREATE ROLE")
+ (,xxxxx,t,"CREATE ROLE")
 (2 rows)
 ALTER ROLE "alter_role""1" CREATEROLE;
@@ -180,8 +180,8 @@ SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role"1';
 SELECT run_command_on_workers($$SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role"1'$$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,t)
- (localhost,57638,t,t)
+ (,xxxxx,t,t)
+ (,xxxxx,t,t)
 (2 rows)
 -- add node
@@ -195,11 +195,11 @@ SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlog
 SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,"(alter_role_1,t,t,t,t,t,t,t,66,md5ead5c53df946838b1291bba7757f41a7,2032)")
- (localhost,57638,t,"(alter_role_1,t,t,t,t,t,t,t,66,md5ead5c53df946838b1291bba7757f41a7,2032)")
+ (,xxxxx,t,"(alter_role_1,t,t,t,t,t,t,t,66,md5ead5c53df946838b1291bba7757f41a7,2032)")
+ (,xxxxx,t,"(alter_role_1,t,t,t,t,t,t,t,66,md5ead5c53df946838b1291bba7757f41a7,2032)")
 (2 rows)
-SELECT master_remove_node('localhost', :worker_1_port);
+SELECT master_remove_node('', :worker_1_port);
 master_remove_node
 ---------------------------------------------------------------------
@@ -215,10 +215,10 @@ SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlog
 SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57638,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)")
+ (,xxxxx,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)")
 (1 row)
-SELECT 1 FROM master_add_node('localhost', :worker_1_port);
+SELECT 1 FROM master_add_node('', :worker_1_port);
 ?column?
 ---------------------------------------------------------------------
 1
@@ -233,8 +233,8 @@ SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlog
 SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)")
- (localhost,57638,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)")
+ (,xxxxx,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)")
+ (,xxxxx,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)")
 (2 rows)
 -- table belongs to a role
diff --git a/src/test/regress/expected/base_enable_mx.out b/src/test/regress/expected/base_enable_mx.out
index 403921e22..c9ed33062 100644
--- a/src/test/regress/expected/base_enable_mx.out
+++ b/src/test/regress/expected/base_enable_mx.out
@@ -1,13 +1,13 @@
 --
 -- Setup MX data syncing
 --
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
+SELECT start_metadata_sync_to_node('', :worker_1_port);
 start_metadata_sync_to_node
 ---------------------------------------------------------------------
 (1 row)
-SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
+SELECT start_metadata_sync_to_node('', :worker_2_port);
 start_metadata_sync_to_node
 ---------------------------------------------------------------------
diff --git a/src/test/regress/expected/ch_bench_having.out b/src/test/regress/expected/ch_bench_having.out
index e266e37e8..58473767f 100644
--- a/src/test/regress/expected/ch_bench_having.out
+++ b/src/test/regress/expected/ch_bench_having.out
@@ -35,7 +35,7 @@ order by s_i_id;
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Aggregate
 -> Seq Scan on stock_1640000 stock
 -> Distributed Subplan XXX_2
@@ -44,13 +44,13 @@ order by s_i_id;
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Aggregate
 -> Seq Scan on stock_1640000 stock
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: stock.s_i_id
 InitPlan 1 (returns $0)
@@ -81,13 +81,13 @@ order by s_i_id;
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Aggregate
 -> Seq Scan on stock_1640000 stock
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: stock.s_i_id
 -> Seq Scan on stock_1640000 stock
@@ -112,13 +112,13 @@ having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from st
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Aggregate
 -> Seq Scan on stock_1640000 stock
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: stock.s_i_id
 -> Seq Scan on stock_1640000 stock
@@ -142,7 +142,7 @@ order by s_i_id;
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate (cost=40.60..42.60 rows=200 width=12)
 Group Key: s.s_i_id
 -> Seq Scan on stock_1640000 s (cost=0.00..30.40 rows=2040 width=8)
@@ -163,7 +163,7 @@ having (select true);
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate (cost=40.60..42.60 rows=200 width=12)
 Group Key: s.s_i_id
 -> Seq Scan on stock_1640000 s (cost=0.00..30.40 rows=2040 width=8)
diff --git a/src/test/regress/expected/ch_bench_having_mx.out b/src/test/regress/expected/ch_bench_having_mx.out
index 85b109ddc..659911758 100644
--- a/src/test/regress/expected/ch_bench_having_mx.out
+++ b/src/test/regress/expected/ch_bench_having_mx.out
@@ -40,7 +40,7 @@ order by s_i_id;
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Aggregate
 -> Seq Scan on stock_1640000 stock
 -> Distributed Subplan XXX_2
@@ -49,13 +49,13 @@ order by s_i_id;
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Aggregate
 -> Seq Scan on stock_1640000 stock
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: stock.s_i_id
 InitPlan 1 (returns $0)
@@ -86,13 +86,13 @@ order by s_i_id;
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Aggregate
 -> Seq Scan on stock_1640000 stock
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: stock.s_i_id
 -> Seq Scan on stock_1640000 stock
@@ -117,13 +117,13 @@ having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from st
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Aggregate
 -> Seq Scan on stock_1640000 stock
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: stock.s_i_id
 -> Seq Scan on stock_1640000 stock
@@ -147,7 +147,7 @@ order by s_i_id;
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate (cost=40.60..42.60 rows=200 width=12)
 Group Key: s.s_i_id
 -> Seq Scan on stock_1640000 s (cost=0.00..30.40 rows=2040 width=8)
@@ -168,7 +168,7 @@ having (select true);
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate (cost=40.60..42.60 rows=200 width=12)
 Group Key: s.s_i_id
 -> Seq Scan on stock_1640000 s (cost=0.00..30.40 rows=2040 width=8)
diff --git a/src/test/regress/expected/coordinator_shouldhaveshards.out b/src/test/regress/expected/coordinator_shouldhaveshards.out
index 54571f077..ce800809b 100644
--- a/src/test/regress/expected/coordinator_shouldhaveshards.out
+++ b/src/test/regress/expected/coordinator_shouldhaveshards.out
@@ -4,14 +4,14 @@ SET search_path TO coordinator_shouldhaveshards;
 SET citus.next_shard_id TO 1503000;
 -- idempotently add node to allow this test to run without add_coordinator
 SET client_min_messages TO WARNING;
-SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
+SELECT 1 FROM master_add_node('', :master_port, groupid => 0);
 ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
 RESET client_min_messages;
-SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true);
+SELECT 1 FROM master_set_node_property('', :master_port, 'shouldhaveshards', true);
 ?column?
 ---------------------------------------------------------------------
 1
@@ -47,7 +47,7 @@ NOTICE: executing the command locally: SELECT y FROM coordinator_shouldhaveshar
 1
 (1 row)
--- multi-shard queries connect to localhost
+-- multi-shard queries connect to
 SELECT count(*) FROM test;
 count
 ---------------------------------------------------------------------
@@ -123,7 +123,7 @@ END;
 DELETE FROM test;
 DROP TABLE test;
 DROP SCHEMA coordinator_shouldhaveshards CASCADE;
-SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', false);
+SELECT 1 FROM master_set_node_property('', :master_port, 'shouldhaveshards', false);
 ?column?
 ---------------------------------------------------------------------
 1
diff --git a/src/test/regress/expected/cte_inline.out b/src/test/regress/expected/cte_inline.out
index 7c23a3701..4d902fc60 100644
--- a/src/test/regress/expected/cte_inline.out
+++ b/src/test/regress/expected/cte_inline.out
@@ -375,7 +375,7 @@ DETAIL: distribution column value: 1
 Task Count: 1
 Tasks Shown: All
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Aggregate
 -> Seq Scan on test_table_1960000 test_table
 Filter: (key = 1)
@@ -402,12 +402,12 @@ DEBUG: Plan is router executable
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Seq Scan on test_table_1960000 test_table
 Task Count: 1
 Tasks Shown: All
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Aggregate
 -> Function Scan on read_intermediate_result intermediate_result
 Filter: (key = 1)
@@ -484,12 +484,12 @@ DEBUG: Plan is router executable
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Seq Scan on test_table_1960000 test_table
 Task Count: 1
 Tasks Shown: All
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Aggregate
 -> Merge Join
 Merge Cond: (intermediate_result.key = intermediate_result_1.key)
@@ -530,7 +530,7 @@ DEBUG: join prunable for intervals [1073741824,2147483647] and [0,1073741823]
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Aggregate
 -> Hash Join
 Hash Cond: (test_table.key = test_table_1.key)
@@ -833,7 +833,7 @@ DEBUG: Router planner cannot handle multi-shard select queries
 1021
 (1 row)
--- the CTEs are very simple, so postgres
+-- the CTEs are very simple, so
 -- can pull-up the subqueries after inlining
 -- the CTEs, and the query that we send to workers
 -- becomes a join between two tables
diff --git a/src/test/regress/expected/cte_inline_0.out b/src/test/regress/expected/cte_inline_0.out
index 75fea39fb..63227898f 100644
--- a/src/test/regress/expected/cte_inline_0.out
+++ b/src/test/regress/expected/cte_inline_0.out
@@ -342,7 +342,7 @@ DEBUG: Router planner cannot handle multi-shard select queries
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Aggregate
 -> Seq Scan on test_table_1960000 test_table
 Filter: (key = 1)
@@ -409,12 +409,12 @@ DEBUG: Plan is router executable
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Seq Scan on test_table_1960000 test_table
 Task Count: 1
 Tasks Shown: All
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Aggregate
 -> Merge Join
 Merge Cond: (intermediate_result.key = intermediate_result_1.key)
@@ -723,7 +723,7 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c
 DEBUG: Router planner cannot handle multi-shard select queries
 ERROR: cannot pushdown the subquery
 DETAIL: Complex subqueries and CTEs cannot be in the outer part of the outer join
--- the CTEs are very simple, so postgres
+-- the CTEs are very simple, so
 -- can pull-up the subqueries after inlining
 -- the CTEs, and the query that we send to workers
 -- becomes a join between two tables
diff --git a/src/test/regress/expected/custom_aggregate_support.out b/src/test/regress/expected/custom_aggregate_support.out
index f3816363d..69dc0fa05 100644
--- a/src/test/regress/expected/custom_aggregate_support.out
+++ b/src/test/regress/expected/custom_aggregate_support.out
@@ -115,22 +115,22 @@ GROUP BY(1);
 Task Count: 4
 Tasks Shown: All
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@@ -149,22 +149,22 @@ GROUP BY(1);
 Task Count: 4
 Tasks Shown: All
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@@ -184,22 +184,22 @@ GROUP BY(1);
 Task Count: 4
 Tasks Shown: All
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on
 daily_uniques_xxxxxxx daily_uniques
@@ -218,22 +218,22 @@ GROUP BY(1);
 Task Count: 4
 Tasks Shown: All
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@@ -253,22 +253,22 @@ GROUP BY(1);
 Task Count: 4
 Tasks Shown: All
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@@ -287,22 +287,22 @@ GROUP BY(1);
 Task Count: 4
 Tasks Shown: All
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@@ -322,22 +322,22 @@ GROUP BY(1);
 Task Count: 4
 Tasks Shown: All
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@@ -357,25 +357,25 @@ HAVING hll_cardinality(hll_union_agg(unique_users)) > 1;
 Task Count: 4
 Tasks Shown: All
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision)
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host=
 port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision)
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision)
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision)
diff --git a/src/test/regress/expected/custom_aggregate_support_1.out b/src/test/regress/expected/custom_aggregate_support_1.out
index 21dedfaaa..12fd0bfe1 100644
--- a/src/test/regress/expected/custom_aggregate_support_1.out
+++ b/src/test/regress/expected/custom_aggregate_support_1.out
@@ -117,22 +117,22 @@ GROUP BY(1);
 Task Count: 4
 Tasks Shown: All
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@@ -155,22 +155,22 @@ GROUP BY(1);
 Task Count: 4
 Tasks Shown: All
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@@ -192,22 +192,22 @@ GROUP BY(1);
 Task Count: 4
 Tasks Shown: All
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@@ -230,22 +230,22 @@ GROUP BY(1);
 Task Count: 4
 Tasks Shown: All
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host=
 port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@@ -267,22 +267,22 @@ GROUP BY(1);
 Task Count: 4
 Tasks Shown: All
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@@ -305,22 +305,22 @@ GROUP BY(1);
 Task Count: 4
 Tasks Shown: All
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@@ -342,22 +342,22 @@ GROUP BY(1);
 Task Count: 4
 Tasks Shown: All
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@@ -382,7 +382,7 @@ HAVING hll_cardinality(hll_union_agg(unique_users)) > 1;
 Task Count: 4
 Tasks Shown: All
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> GroupAggregate
 Group Key: day
 Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision)
@@ -390,7 +390,7 @@ HAVING hll_cardinality(hll_union_agg(unique_users)) > 1;
 Sort Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> GroupAggregate
 Group Key: day
 Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision)
@@ -398,7 +398,7 @@ HAVING hll_cardinality(hll_union_agg(unique_users)) > 1;
 Sort Key: day
 -> Seq Scan on daily_uniques_xxxxxxx
 daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> GroupAggregate
 Group Key: day
 Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision)
@@ -406,7 +406,7 @@ HAVING hll_cardinality(hll_union_agg(unique_users)) > 1;
 Sort Key: day
 -> Seq Scan on daily_uniques_xxxxxxx daily_uniques
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> GroupAggregate
 Group Key: day
 Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision)
diff --git a/src/test/regress/expected/disable_object_propagation.out b/src/test/regress/expected/disable_object_propagation.out
index 78247223c..456f1b267 100644
--- a/src/test/regress/expected/disable_object_propagation.out
+++ b/src/test/regress/expected/disable_object_propagation.out
@@ -19,7 +19,7 @@ CREATE TYPE tt1 AS (a int , b int);
 CREATE TABLE t2 (a int PRIMARY KEY, b tt1);
 SELECT create_distributed_table('t2', 'a');
 ERROR: type "disabled_object_propagation.tt1" does not exist
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 SELECT 1 FROM run_command_on_workers($$
 BEGIN;
 SET LOCAL citus.enable_ddl_propagation TO off;
@@ -43,7 +43,7 @@ CREATE TYPE tt2 AS ENUM ('a', 'b');
 CREATE TABLE t3 (a int PRIMARY KEY, b tt2);
 SELECT create_distributed_table('t3', 'a');
 ERROR: type "disabled_object_propagation.tt2" does not exist
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 SELECT 1 FROM run_command_on_workers($$
 BEGIN;
 SET LOCAL citus.enable_ddl_propagation TO off;
@@ -99,8 +99,8 @@ SELECT row(nspname, typname, usename)
 $$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,"(disabled_object_propagation,tt3,postgres)")
- (localhost,57638,t,"(disabled_object_propagation,tt3,postgres)")
+ (,xxxxx,t,"(disabled_object_propagation,tt3,)")
+ (,xxxxx,t,"(disabled_object_propagation,tt3,)")
 (2 rows)
 SELECT run_command_on_workers($$
@@ -114,8 +114,8 @@ GROUP BY pg_type.typname;
 $$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,"(tt3,""a int4, b int4"")")
- (localhost,57638,t,"(tt3,""a int4, b int4"")")
+ (,xxxxx,t,"(tt3,""a int4, b int4"")")
+ (,xxxxx,t,"(tt3,""a int4, b int4"")")
 (2 rows)
 -- suppress any warnings during cleanup
diff --git a/src/test/regress/expected/distributed_collations.out b/src/test/regress/expected/distributed_collations.out
index 521147e15..3730e59cb 100644
--- a/src/test/regress/expected/distributed_collations.out
+++ b/src/test/regress/expected/distributed_collations.out
@@ -5,8 +5,8 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
 SELECT run_command_on_workers($$CREATE USER collationuser;$$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,"CREATE ROLE")
- (localhost,57638,t,"CREATE ROLE")
+ (,xxxxx,t,"CREATE ROLE")
+ (,xxxxx,t,"CREATE ROLE")
 (2 rows)
 CREATE SCHEMA collation_tests AUTHORIZATION collationuser;
@@ -25,7 +25,7 @@ WHERE collname like 'german_phonebook%'
 ORDER BY 1,2,3;
 collname | nspname | rolname
 ---------------------------------------------------------------------
- german_phonebook | collation_tests | postgres
+ german_phonebook | collation_tests |
 (1 row)
 \c - - - :master_port
@@ -90,8 +90,8 @@ WHERE collname like 'german_phonebook%'
 ORDER BY 1,2,3;
 collname | nspname | rolname
 ---------------------------------------------------------------------
- german_phonebook | collation_tests | postgres
- german_phonebook_unpropagated | collation_tests | postgres
+ german_phonebook | collation_tests |
+ german_phonebook_unpropagated | collation_tests |
 (2 rows)
 \c - - - :master_port
@@ -108,7 +108,7 @@ ORDER BY 1,2,3;
 collname | nspname | rolname
 ---------------------------------------------------------------------
 german_phonebook2 | collation_tests2 | collationuser
- german_phonebook_unpropagated | collation_tests | postgres
+ german_phonebook_unpropagated | collation_tests |
 (2 rows)
 \c - - - :master_port
@@ -129,7 +129,7 @@ DROP USER collationuser;
 SELECT run_command_on_workers($$DROP USER collationuser;$$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,"DROP ROLE")
- (localhost,57638,t,"DROP ROLE")
+ (,xxxxx,t,"DROP ROLE")
+ (,xxxxx,t,"DROP ROLE")
 (2 rows)
diff --git a/src/test/regress/expected/distributed_collations_conflict.out b/src/test/regress/expected/distributed_collations_conflict.out
index 8643ae290..c50581be5 100644
--- a/src/test/regress/expected/distributed_collations_conflict.out
+++ b/src/test/regress/expected/distributed_collations_conflict.out
@@ -2,8 +2,8 @@ CREATE SCHEMA collation_conflict;
 SELECT run_command_on_workers($$CREATE SCHEMA collation_conflict;$$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,"CREATE SCHEMA")
- (localhost,57638,t,"CREATE SCHEMA")
+ (,xxxxx,t,"CREATE SCHEMA")
+ (,xxxxx,t,"CREATE SCHEMA")
 (2 rows)
 \c - - - :worker_1_port
@@ -34,7 +34,7 @@ WHERE collname like 'caseinsensitive%'
 ORDER BY 1,2,3;
 collname | nspname | rolname
 ---------------------------------------------------------------------
- caseinsensitive | collation_conflict | postgres
+ caseinsensitive | collation_conflict |
 (1 row)
 \c - - - :master_port
@@ -72,8 +72,8 @@ WHERE collname like 'caseinsensitive%'
 ORDER BY 1,2,3;
 collname | nspname | rolname
 ---------------------------------------------------------------------
- caseinsensitive | collation_conflict | postgres
- caseinsensitive(citus_backup_0) | collation_conflict | postgres
+ caseinsensitive | collation_conflict |
+ caseinsensitive(citus_backup_0) | collation_conflict |
 (2 rows)
 \c - - - :master_port
diff --git a/src/test/regress/expected/distributed_functions.out b/src/test/regress/expected/distributed_functions.out
index 5e25f0a4f..e8972674f 100644
--- a/src/test/regress/expected/distributed_functions.out
+++ b/src/test/regress/expected/distributed_functions.out
@@ -5,8 +5,8 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
 SELECT run_command_on_workers($$CREATE USER functionuser;$$);
 run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,"CREATE ROLE")
- (localhost,57638,t,"CREATE ROLE")
+ (,xxxxx,t,"CREATE ROLE")
+ (,xxxxx,t,"CREATE ROLE")
 (2 rows)
 CREATE SCHEMA function_tests AUTHORIZATION functionuser;
@@ -24,7 +24,7 @@ CREATE FUNCTION eq8(macaddr8, macaddr8) RETURNS bool
 LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT;
--- $function$ is what postgres escapes functions with when deparsing
+-- $function$ is what escapes functions with when deparsing
 -- make sure $function$ doesn't cause invalid syntax
 CREATE FUNCTION add_text(text, text) RETURNS text
 AS 'select $function$test$function$ || $1::int || $2::int;'
@@ -97,7 +97,7 @@ CREATE AGGREGATE sum2(int) (
 minitcond =
 '1',
 sortop = ">"
 );
--- Test VARIADIC, example taken from postgres test suite
+-- Test VARIADIC, example taken from test suite
 CREATE AGGREGATE my_rank(VARIADIC "any" ORDER BY VARIADIC "any") (
 stype = internal,
 sfunc = ordered_set_transition_multi,
@@ -172,8 +172,8 @@ WHERE objid = 'eq_mi''xed_param_names(macaddr, macaddr)'::regprocedure;
 SELECT * FROM run_command_on_workers($$SELECT function_tests."eq_mi'xed_param_names"('0123456789ab','ba9876543210');$$) ORDER BY 1,2;
 nodename | nodeport | success | result
 ---------------------------------------------------------------------
- localhost | 57637 | t | f
- localhost | 57638 | t | f
+ | xxxxx | t | f
+ | xxxxx | t | f
 (2 rows)
 -- make sure that none of the active and primary nodes hasmetadata
@@ -210,8 +210,8 @@ SELECT create_distributed_function('dup(macaddr)', '$1', colocate_with := 'strea
 SELECT * FROM run_command_on_workers($$SELECT function_tests.dup('0123456789ab');$$) ORDER BY 1,2;
 nodename | nodeport | success | result
 ---------------------------------------------------------------------
- localhost | 57637 | t | (01:23:45:67:89:ab,"01:23:45:67:89:ab is text")
- localhost | 57638 | t | (01:23:45:67:89:ab,"01:23:45:67:89:ab is text")
+ | xxxxx | t | (01:23:45:67:89:ab,"01:23:45:67:89:ab is text")
+ | xxxxx | t | (01:23:45:67:89:ab,"01:23:45:67:89:ab is text")
 (2 rows)
 SELECT create_distributed_function('eq(macaddr,macaddr)', '$1', colocate_with := 'streaming_table');
@@ -223,8 +223,8 @@ SELECT create_distributed_function('eq(macaddr,macaddr)', '$1', colocate_with :=
 SELECT * FROM run_command_on_workers($$SELECT function_tests.eq('012345689ab','0123456789ab');$$) ORDER BY 1,2;
 nodename | nodeport | success | result
 ---------------------------------------------------------------------
- localhost | 57637 | t | f
- localhost | 57638 | t | f
+ | xxxxx | t | f
+ | xxxxx | t | f
 (2 rows)
 SELECT public.verify_function_is_same_on_workers('function_tests.eq(macaddr,macaddr)');
@@ -366,15 +366,15 @@ SELECT public.verify_function_is_same_on_workers('function_tests.eq2(macaddr,mac
 SELECT * FROM run_command_on_workers($$SELECT function_tests.eq('012346789ab','012345689ab');$$) ORDER BY 1,2;
 nodename | nodeport | success | result
 ---------------------------------------------------------------------
- localhost | 57637 | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
- localhost | 57638 | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
+ | xxxxx | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
+ | xxxxx | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
 (2 rows)
 SELECT * FROM run_command_on_workers($$SELECT function_tests.eq2('012345689ab','012345689ab');$$) ORDER BY 1,2;
 nodename | nodeport | success | result
 ---------------------------------------------------------------------
- localhost | 57637 | t | t
- localhost | 57638 | t | t
+ | xxxxx | t | t
+ | xxxxx | t | t
 (2 rows)
 ALTER ROUTINE eq2(macaddr,macaddr) RENAME TO eq;
 ALTER AGGREGATE sum2(int) RENAME TO sum27;
 SELECT * FROM run_command_on_workers($$SELECT 1 from pg_proc where proname = 'sum27';$$) ORDER BY 1,2;
 nodename | nodeport | success | result
 ---------------------------------------------------------------------
- localhost | 57637 | t | 1
- localhost | 57638 | t | 1
+ | xxxxx | t | 1
+ | xxxxx | t | 1
 (2 rows)
 ALTER AGGREGATE sum27(int) RENAME TO sum2;
@@ -407,8 +407,8 @@ WHERE proname IN ('eq', 'sum2', 'my_rank');
 $$);
 run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"{""(functionuser,function_tests,eq)"",""(functionuser,function_tests,my_rank)"",""(functionuser,function_tests,sum2)""}")
- (localhost,57638,t,"{""(functionuser,function_tests,eq)"",""(functionuser,function_tests,my_rank)"",""(functionuser,function_tests,sum2)""}")
+ (localhost,xxxxx,t,"{""(functionuser,function_tests,eq)"",""(functionuser,function_tests,my_rank)"",""(functionuser,function_tests,sum2)""}")
+ (localhost,xxxxx,t,"{""(functionuser,function_tests,eq)"",""(functionuser,function_tests,my_rank)"",""(functionuser,function_tests,sum2)""}")
(2 rows)
-- change the schema of the function and verify the old schema doesn't exist anymore while
@@ -423,15 +423,15 @@ SELECT public.verify_function_is_same_on_workers('function_tests2.eq(macaddr,mac
SELECT * FROM run_command_on_workers($$SELECT function_tests.eq('0123456789ab','ba9876543210');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
- localhost | 57637 | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
- localhost | 57638 | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
+ localhost | xxxxx | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
+ localhost | xxxxx | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
(2 rows)
SELECT * FROM run_command_on_workers($$SELECT function_tests2.eq('012345689ab','ba9876543210');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | f
- localhost | 57638 | t | f
+ localhost | xxxxx | t | f
+ localhost | xxxxx | t | f
(2 rows)
ALTER ROUTINE function_tests2.eq(macaddr,macaddr) SET SCHEMA function_tests;
@@ -451,8 +451,8 @@ SELECT public.verify_function_is_same_on_workers('function_tests.eq(macaddr,maca
SELECT * FROM run_command_on_workers($$SELECT function_tests.eq('012345689ab','012345689ab');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | f
- localhost | 57638 | t | f
+ localhost | xxxxx | t | f
+ localhost | xxxxx | t | f
(2 rows)
-- distributed functions should not be allowed to depend on an extension, also functions
@@ -468,8 +468,8 @@ DROP FUNCTION eq(macaddr,macaddr);
SELECT * FROM run_command_on_workers($$SELECT function_tests.eq('0123456789ab','ba9876543210');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
- localhost | 57637 | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
- localhost | 57638 | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
+ localhost | xxxxx | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
+ localhost | xxxxx | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
(2 rows)
-- Test DROP for ROUTINE
@@ -489,8 +489,8 @@ DROP ROUTINE eq(macaddr, macaddr);
SELECT * FROM run_command_on_workers($$SELECT function_tests.eq('0123456789ab','ba9876543210');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
- localhost | 57637 | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
- localhost | 57638 | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
+ localhost | xxxxx | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
+ localhost | xxxxx | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
(2 rows)
DROP AGGREGATE function_tests2.sum2(int);
@@ -498,11 +498,11 @@ DROP AGGREGATE function_tests2.sum2(int);
SELECT * FROM run_command_on_workers('SELECT function_tests2.sum2(id) FROM (select 1 id, 2) subq;') ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
- localhost | 57637 | f | ERROR: function function_tests2.sum2(integer) does not exist
- localhost | 57638 | f | ERROR: function function_tests2.sum2(integer) does not exist
+ localhost | xxxxx | f | ERROR: function function_tests2.sum2(integer) does not exist
+ localhost | xxxxx | f | ERROR: function function_tests2.sum2(integer) does not exist
(2 rows)
-- postgres doesn't accept parameter names in the regprocedure input
SELECT create_distributed_function('eq_with_param_names(val1 macaddr, macaddr)', 'val1');
ERROR: invalid type name "val1 macaddr"
-- invalid distribution_arg_name
@@ -557,8 +557,8 @@ ROLLBACK;
SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='eq_with_param_names';$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,0)
- (localhost,57638,t,0)
+ (localhost,xxxxx,t,0)
+ (localhost,xxxxx,t,0)
(2 rows)
-- make sure that none of the active and primary nodes hasmetadata
@@ -586,8 +586,8 @@ select bool_and(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'p
SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='eq_with_param_names';$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,1)
- (localhost,57638,t,1)
+ (localhost,xxxxx,t,1)
+ (localhost,xxxxx,t,1)
(2 rows)
-- valid distribution with distribution_arg_name -- case insensitive
@@ -703,7 +703,7 @@ WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass
-- function with a macaddr8 dist. arg can be colocated with macaddr
-- column of a distributed table. In general, if there is a coercion
-- path, we rely on postgres for implicit coersions, and users for explicit coersions
-- to coerce the values
SELECT create_distributed_function('eq8(macaddr8, macaddr8)', '$1', colocate_with:='replicated_table_func_test_4');
create_distributed_function
@@ -837,7 +837,7 @@ DROP USER functionuser;
SELECT run_command_on_workers($$DROP USER functionuser$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"DROP ROLE")
- (localhost,57638,t,"DROP ROLE")
+ (localhost,xxxxx,t,"DROP ROLE")
+ (localhost,xxxxx,t,"DROP ROLE")
(2 rows)
diff --git a/src/test/regress/expected/distributed_functions_conflict.out b/src/test/regress/expected/distributed_functions_conflict.out
index 995668e64..39d5634b1 100644
--- a/src/test/regress/expected/distributed_functions_conflict.out
+++ b/src/test/regress/expected/distributed_functions_conflict.out
@@ -4,8 +4,8 @@ CREATE SCHEMA proc_conflict;
SELECT run_command_on_workers($$CREATE SCHEMA proc_conflict;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"CREATE SCHEMA")
- (localhost,57638,t,"CREATE SCHEMA")
+ (localhost,xxxxx,t,"CREATE SCHEMA")
+ (localhost,xxxxx,t,"CREATE SCHEMA")
(2 rows)
\c - - - :worker_1_port
diff --git a/src/test/regress/expected/distributed_intermediate_results.out b/src/test/regress/expected/distributed_intermediate_results.out
index 0582c36ab..5d6891582 100644
--- a/src/test/regress/expected/distributed_intermediate_results.out
+++ b/src/test/regress/expected/distributed_intermediate_results.out
@@ -63,19 +63,19 @@ CREATE TABLE distributed_result_info AS
SELECT * FROM distributed_result_info ORDER BY resultId;
resultid | nodeport | rowcount | targetshardid | targetshardindex
---------------------------------------------------------------------
- test_from_4213581_to_0 | 57637 | 33 | 4213584 | 0
- test_from_4213582_to_0 | 57638 | 16 | 4213584 | 0
- test_from_4213582_to_1 | 57638 | 15 | 4213585 | 1
- test_from_4213583_to_1 | 57637 | 36 | 4213585 | 1
+ test_from_4213581_to_0 | xxxxx | 33 | 4213584 | 0
+ test_from_4213582_to_0 | xxxxx | 16 | 4213584 | 0
+ test_from_4213582_to_1 | xxxxx | 15 | 4213585 | 1
+ test_from_4213583_to_1 | xxxxx | 36 | 4213585 | 1
(4 rows)
-- fetch from workers
-SELECT nodeport, fetch_intermediate_results((array_agg(resultId)), 'localhost', nodeport) > 0 AS fetched
+SELECT nodeport, fetch_intermediate_results((array_agg(resultId)), 'localhost', nodeport) > 0 AS fetched
FROM distributed_result_info GROUP BY nodeport ORDER BY nodeport;
nodeport | fetched
---------------------------------------------------------------------
- 57637 | t
- 57638 | t
+ xxxxx | t
+ xxxxx | t
(2 rows)
-- read all fetched result files
@@ -169,31 +169,31 @@ CREATE TABLE distributed_result_info AS
SELECT * FROM distributed_result_info ORDER BY resultId;
resultid | nodeport | rowcount | targetshardid | targetshardindex
---------------------------------------------------------------------
- test_from_4213588_to_0 | 57638 | 7 | 4213592 | 0
- test_from_4213588_to_1 | 57638 | 6 | 4213593 | 1
- test_from_4213588_to_2 | 57638 | 7 | 4213594 | 2
- test_from_4213588_to_3 | 57638 | 4 | 4213595 | 3
- test_from_4213589_to_0 | 57637 | 7 | 4213592 | 0
- test_from_4213589_to_1 | 57637 | 6 | 4213593 | 1
- test_from_4213589_to_2 | 57637 | 8 | 4213594 | 2
- test_from_4213589_to_3 | 57637 | 4 | 4213595 | 3
- test_from_4213590_to_0 | 57638 | 8 | 4213592 | 0
- test_from_4213590_to_1 | 57638 | 6 | 4213593 | 1
- test_from_4213590_to_2 | 57638 | 8 | 4213594 | 2
- test_from_4213590_to_3 | 57638 | 4 | 4213595 | 3
- test_from_4213591_to_0 | 57637 | 8 | 4213592 | 0
- test_from_4213591_to_1 | 57637 | 6 | 4213593 | 1
- test_from_4213591_to_2 | 57637 | 7 | 4213594 | 2
- test_from_4213591_to_3 | 57637 | 4 | 4213595 | 3
+ test_from_4213588_to_0 | xxxxx | 7 | 4213592 | 0
+ test_from_4213588_to_1 | xxxxx | 6 | 4213593 | 1
+ test_from_4213588_to_2 | xxxxx | 7 | 4213594 | 2
+ test_from_4213588_to_3 | xxxxx | 4 | 4213595 | 3
+ test_from_4213589_to_0 | xxxxx | 7 | 4213592 | 0
+ test_from_4213589_to_1 | xxxxx | 6 | 4213593 | 1
+ test_from_4213589_to_2 | xxxxx | 8 | 4213594 | 2
+ test_from_4213589_to_3 | xxxxx | 4 | 4213595 | 3
+ test_from_4213590_to_0 | xxxxx | 8 | 4213592 | 0
+ test_from_4213590_to_1 | xxxxx | 6 | 4213593 | 1
+ test_from_4213590_to_2 | xxxxx | 8 | 4213594 | 2
+ test_from_4213590_to_3 | xxxxx | 4 | 4213595 | 3
+ test_from_4213591_to_0 | xxxxx | 8 | 4213592 | 0
+ test_from_4213591_to_1 | xxxxx | 6 | 4213593 | 1
+ test_from_4213591_to_2 | xxxxx | 7 | 4213594 | 2
+ test_from_4213591_to_3 | xxxxx | 4 | 4213595 | 3
(16 rows)
-- fetch from workers
-SELECT nodeport, fetch_intermediate_results((array_agg(resultId)), 'localhost', nodeport) > 0 AS fetched
+SELECT nodeport, fetch_intermediate_results((array_agg(resultId)), 'localhost', nodeport) > 0 AS fetched
FROM distributed_result_info GROUP BY nodeport ORDER BY nodeport;
nodeport | fetched
---------------------------------------------------------------------
- 57637 | t
- 57638 | t
+ xxxxx | t
+ xxxxx | t
(2 rows)
-- Read all fetched result files. Sum(x) should be 4550, verified by
diff --git a/src/test/regress/expected/distributed_procedure.out b/src/test/regress/expected/distributed_procedure.out
index d819c4294..addeff6b3 100644
--- a/src/test/regress/expected/distributed_procedure.out
+++ b/src/test/regress/expected/distributed_procedure.out
@@ -5,8 +5,8 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
SELECT run_command_on_workers($$CREATE USER procedureuser;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"CREATE ROLE")
- (localhost,57638,t,"CREATE ROLE")
+ (localhost,xxxxx,t,"CREATE ROLE")
+ (localhost,xxxxx,t,"CREATE ROLE")
(2 rows)
CREATE SCHEMA procedure_tests AUTHORIZATION procedureuser;
@@ -58,8 +58,8 @@ SELECT wait_until_metadata_sync();
SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | CALL
- localhost | 57638 | t | CALL
+ localhost | xxxxx | t | CALL
+ localhost | xxxxx | t | CALL
(2 rows)
SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)');
@@ -125,15 +125,15 @@ SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info2(te
SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
- localhost | 57637 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
- localhost | 57638 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
+ localhost | xxxxx | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
+ localhost | xxxxx | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
(2 rows)
SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info2('hello');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | CALL
- localhost | 57638 | t | CALL
+ localhost | xxxxx | t | CALL
+ localhost | xxxxx | t | CALL
(2 rows)
ALTER PROCEDURE raise_info2(text) RENAME TO raise_info;
@@ -154,8 +154,8 @@ WHERE proname = 'raise_info';
$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"(procedureuser,procedure_tests,raise_info)")
- (localhost,57638,t,"(procedureuser,procedure_tests,raise_info)")
+ (localhost,xxxxx,t,"(procedureuser,procedure_tests,raise_info)")
+ (localhost,xxxxx,t,"(procedureuser,procedure_tests,raise_info)")
(2 rows)
-- change the schema of the procedure and verify the old schema doesn't exist anymore while
@@ -170,15 +170,15 @@ SELECT public.verify_function_is_same_on_workers('procedure_tests2.raise_info(te
SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
- localhost | 57637 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
- localhost | 57638 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
+ localhost | xxxxx | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
+ localhost | xxxxx | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
(2 rows)
SELECT * FROM run_command_on_workers($$CALL procedure_tests2.raise_info('hello');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | CALL
- localhost | 57638 | t | CALL
+ localhost | xxxxx | t | CALL
+ localhost | xxxxx | t | CALL
(2 rows)
ALTER PROCEDURE procedure_tests2.raise_info(text) SET SCHEMA procedure_tests;
@@ -187,8 +187,8 @@ DROP PROCEDURE raise_info(text);
SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
- localhost | 57637 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
- localhost | 57638 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
+ localhost | xxxxx | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
+ localhost | xxxxx | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
(2 rows)
SET client_min_messages TO error; -- suppress cascading objects dropping
@@ -196,23 +196,23 @@ DROP SCHEMA procedure_tests CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA procedure_tests CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"DROP SCHEMA")
- (localhost,57638,t,"DROP SCHEMA")
+ (localhost,xxxxx,t,"DROP SCHEMA")
+ (localhost,xxxxx,t,"DROP SCHEMA")
(2 rows)
DROP SCHEMA procedure_tests2 CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA procedure_tests2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"DROP SCHEMA")
- (localhost,57638,t,"DROP SCHEMA")
+ (localhost,xxxxx,t,"DROP SCHEMA")
+ (localhost,xxxxx,t,"DROP SCHEMA")
(2 rows)
DROP USER procedureuser;
SELECT run_command_on_workers($$DROP USER procedureuser;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"DROP ROLE")
- (localhost,57638,t,"DROP ROLE")
+ (localhost,xxxxx,t,"DROP ROLE")
+ (localhost,xxxxx,t,"DROP ROLE")
(2 rows)
diff --git a/src/test/regress/expected/distributed_types.out b/src/test/regress/expected/distributed_types.out
index 33957dfb4..e0a5cda1f 100644
--- a/src/test/regress/expected/distributed_types.out
+++ b/src/test/regress/expected/distributed_types.out
@@ -5,8 +5,8 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
SELECT run_command_on_workers($$CREATE USER typeuser;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"CREATE ROLE")
- (localhost,57638,t,"CREATE ROLE")
+ (localhost,xxxxx,t,"CREATE ROLE")
+ (localhost,xxxxx,t,"CREATE ROLE")
(2 rows)
CREATE SCHEMA type_tests AUTHORIZATION typeuser;
@@ -110,8 +110,8 @@ SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE
SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'type_tests.te2'::regtype;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"yes,no")
- (localhost,57638,t,"yes,no")
+ (localhost,xxxxx,t,"yes,no")
+ (localhost,xxxxx,t,"yes,no")
(2 rows)
-- test some combination of types without ddl propagation, this will prevent the workers
@@ -160,8 +160,8 @@ SELECT typname, usename FROM pg_type, pg_user where typname = 'te4' and typowner
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'te4' and typowner = usesysid;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"(te4,typeuser)")
- (localhost,57638,t,"(te4,typeuser)")
+ (localhost,xxxxx,t,"(te4,typeuser)")
+ (localhost,xxxxx,t,"(te4,typeuser)")
(2 rows)
ALTER TYPE tc6 OWNER TO typeuser;
@@ -174,8 +174,8 @@ SELECT typname, usename FROM pg_type, pg_user where typname = 'tc6' and typowner
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'tc6' and typowner = usesysid;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"(tc6,typeuser)")
- (localhost,57638,t,"(tc6,typeuser)")
+ (localhost,xxxxx,t,"(tc6,typeuser)")
+ (localhost,xxxxx,t,"(tc6,typeuser)")
(2 rows)
-- create a type as a different user
@@ -206,8 +206,8 @@ SELECT typname, usename FROM pg_type, pg_user where typname = 'tc7' and typowner
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'tc7' and typowner = usesysid;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"(tc7,typeuser)")
- (localhost,57638,t,"(tc7,typeuser)")
+ (localhost,xxxxx,t,"(tc7,typeuser)")
+ (localhost,xxxxx,t,"(tc7,typeuser)")
(2 rows)
SELECT typname, usename FROM pg_type, pg_user where typname = 'te5' and typowner = usesysid;
@@ -219,8 +219,8 @@ SELECT typname, usename FROM pg_type, pg_user where typname = 'te5' and typowner
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'te5' and typowner = usesysid;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"(te5,typeuser)")
- (localhost,57638,t,"(te5,typeuser)")
+ (localhost,xxxxx,t,"(te5,typeuser)")
+ (localhost,xxxxx,t,"(te5,typeuser)")
(2 rows)
SELECT typname, usename FROM pg_type, pg_user where typname = 'tc8' and typowner = usesysid;
@@ -232,8 +232,8 @@ SELECT typname, usename FROM pg_type, pg_user where typname = 'tc8' and typowner
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'tc8' and typowner = usesysid;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"(tc8,typeuser)")
- (localhost,57638,t,"(tc8,typeuser)")
+ (localhost,xxxxx,t,"(tc8,typeuser)")
+ (localhost,xxxxx,t,"(tc8,typeuser)")
(2 rows)
SELECT typname, usename FROM pg_type, pg_user where typname = 'te6' and typowner = usesysid;
@@ -245,8 +245,8 @@ SELECT typname, usename FROM pg_type, pg_user where typname = 'te6' and typowner
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'te6' and typowner = usesysid;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"(te6,typeuser)")
- (localhost,57638,t,"(te6,typeuser)")
+ (localhost,xxxxx,t,"(te6,typeuser)")
+ (localhost,xxxxx,t,"(te6,typeuser)")
(2 rows)
-- deleting the enum cascade will remove the type from the table and the workers
@@ -264,8 +264,8 @@ SELECT typname FROM pg_type, pg_user where typname IN ('te3','tc3','tc4','tc5')
SELECT run_command_on_workers($$SELECT typname FROM pg_type, pg_user where typname IN ('te3','tc3','tc4','tc5') and typowner = usesysid ORDER BY typname;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"")
- (localhost,57638,t,"")
+ (localhost,xxxxx,t,"")
+ (localhost,xxxxx,t,"")
(2 rows)
-- make sure attribute names are quoted correctly, no errors indicates types are propagated correctly
@@ -338,8 +338,8 @@ SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'f
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type');$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,0)
- (localhost,57638,t,0)
+ (localhost,xxxxx,t,0)
+ (localhost,xxxxx,t,0)
(2 rows)
-- verify they are still distributed when required
@@ -359,8 +359,8 @@ SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'f
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type');$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,2)
- (localhost,57638,t,2)
+ (localhost,xxxxx,t,2)
+ (localhost,xxxxx,t,2)
(2 rows)
RESET citus.enable_create_type_propagation;
@@ -370,23 +370,23 @@ DROP SCHEMA type_tests CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA type_tests CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"DROP SCHEMA")
- (localhost,57638,t,"DROP SCHEMA")
+ (localhost,xxxxx,t,"DROP SCHEMA")
+ (localhost,xxxxx,t,"DROP SCHEMA")
(2 rows)
DROP SCHEMA type_tests2 CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA type_tests2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"DROP SCHEMA")
- (localhost,57638,t,"DROP SCHEMA")
+ (localhost,xxxxx,t,"DROP SCHEMA")
+ (localhost,xxxxx,t,"DROP SCHEMA")
(2 rows)
DROP USER typeuser;
SELECT run_command_on_workers($$DROP USER typeuser;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"DROP ROLE")
- (localhost,57638,t,"DROP ROLE")
+ (localhost,xxxxx,t,"DROP ROLE")
+ (localhost,xxxxx,t,"DROP ROLE")
(2 rows)
diff --git a/src/test/regress/expected/distributed_types_conflict.out b/src/test/regress/expected/distributed_types_conflict.out
index 077b9c6a2..68e7e4a21 100644
--- a/src/test/regress/expected/distributed_types_conflict.out
+++ b/src/test/regress/expected/distributed_types_conflict.out
@@ -3,8 +3,8 @@ CREATE SCHEMA type_conflict;
SELECT run_command_on_workers($$CREATE SCHEMA type_conflict;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"CREATE SCHEMA")
- (localhost,57638,t,"CREATE SCHEMA")
+ (localhost,xxxxx,t,"CREATE SCHEMA")
+ (localhost,xxxxx,t,"CREATE SCHEMA")
(2 rows)
-- create a type on a worker that should not cause data loss once overwritten with a type
diff --git a/src/test/regress/expected/distributed_types_xact_add_enum_value.out b/src/test/regress/expected/distributed_types_xact_add_enum_value.out
index fbfee78ac..fdded2522 100644
--- a/src/test/regress/expected/distributed_types_xact_add_enum_value.out
+++ b/src/test/regress/expected/distributed_types_xact_add_enum_value.out
@@ -40,8 +40,8 @@ SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE
SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"yes,no")
- (localhost,57638,t,"yes,no")
+ (localhost,xxxxx,t,"yes,no")
+ (localhost,xxxxx,t,"yes,no")
(2 rows)
BEGIN;
@@ -57,8 +57,8 @@ SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE
SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"yes,no,maybe")
- (localhost,57638,t,"yes,no,maybe")
+ (localhost,xxxxx,t,"yes,no,maybe")
+ (localhost,xxxxx,t,"yes,no,maybe")
(2 rows)
-- clear objects
@@ -67,7 +67,7 @@ DROP SCHEMA xact_enum_type CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA xact_enum_type CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"DROP SCHEMA")
- (localhost,57638,t,"DROP SCHEMA")
+ (localhost,xxxxx,t,"DROP SCHEMA")
+ (localhost,xxxxx,t,"DROP SCHEMA")
(2 rows)
diff --git a/src/test/regress/expected/distributed_types_xact_add_enum_value_0.out b/src/test/regress/expected/distributed_types_xact_add_enum_value_0.out
index 398c616c5..2be4253b8 100644
--- a/src/test/regress/expected/distributed_types_xact_add_enum_value_0.out
+++ b/src/test/regress/expected/distributed_types_xact_add_enum_value_0.out
@@ -41,8 +41,8 @@ SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE
SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"yes,no")
- (localhost,57638,t,"yes,no")
+ (localhost,xxxxx,t,"yes,no")
+ (localhost,xxxxx,t,"yes,no")
(2 rows)
BEGIN;
@@ -59,8 +59,8 @@ SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE
SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"yes,no")
- (localhost,57638,t,"yes,no")
+ (localhost,xxxxx,t,"yes,no")
+ (localhost,xxxxx,t,"yes,no")
(2 rows)
-- clear objects
@@ -69,7 +69,7 @@ DROP SCHEMA xact_enum_type CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA xact_enum_type CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,"DROP SCHEMA")
- (localhost,57638,t,"DROP SCHEMA")
+ (localhost,xxxxx,t,"DROP SCHEMA")
+ (localhost,xxxxx,t,"DROP SCHEMA")
(2 rows)
diff --git a/src/test/regress/expected/escape_extension_name.out b/src/test/regress/expected/escape_extension_name.out
index 45ca2a9a0..a6de6288f 100644
--- a/src/test/regress/expected/escape_extension_name.out
+++ b/src/test/regress/expected/escape_extension_name.out
@@ -16,8 +16,8 @@ WHERE name = 'uuid-ossp'
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,1)
- (localhost,57638,t,1)
+ (localhost,xxxxx,t,1)
+ (localhost,xxxxx,t,1)
(2 rows)
SET client_min_messages TO WARNING;
@@ -27,12 +27,12 @@ RESET client_min_messages;
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,0)
- (localhost,57638,t,0)
+ (localhost,xxxxx,t,0)
+ (localhost,xxxxx,t,0)
(2 rows)
-- show that extension recreation on new nodes works also fine with extension names that require escaping
-SELECT 1 from master_remove_node('localhost', :worker_2_port);
+SELECT 1 from master_remove_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
@@ -49,7 +49,7 @@ WHERE name = 'uuid-ossp'
\gset
:uuid_present_command;
-- and add the other node
-SELECT 1 from master_add_node('localhost', :worker_2_port);
+SELECT 1 from master_add_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
@@ -59,8 +59,8 @@ SELECT 1 from master_add_node('localhost', :worker_2_port);
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,1)
- (localhost,57638,t,1)
+ (localhost,xxxxx,t,1)
+ (localhost,xxxxx,t,1)
(2 rows)
SET client_min_messages TO WARNING;
diff --git a/src/test/regress/expected/escape_extension_name_0.out b/src/test/regress/expected/escape_extension_name_0.out
index d27415e4a..07affe965 100644
--- a/src/test/regress/expected/escape_extension_name_0.out
+++ b/src/test/regress/expected/escape_extension_name_0.out
@@ -21,8 +21,8 @@ WHERE name = 'uuid-ossp'
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,0)
- (localhost,57638,t,0)
+ (localhost,xxxxx,t,0)
+ (localhost,xxxxx,t,0)
(2 rows)
SET client_min_messages TO WARNING;
@@ -33,12 +33,12 @@ RESET client_min_messages;
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,0)
- (localhost,57638,t,0)
+ (localhost,xxxxx,t,0)
+ (localhost,xxxxx,t,0)
(2 rows)
-- show that extension recreation on new nodes works also fine with extension names that require escaping
-SELECT 1 from master_remove_node('localhost', :worker_2_port);
+SELECT 1 from master_remove_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
@@ -60,7 +60,7 @@ WHERE name = 'uuid-ossp'
(1 row)
-- and add the other node
-SELECT 1 from master_add_node('localhost', :worker_2_port);
+SELECT 1 from master_add_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
@@ -70,8 +70,8 @@ SELECT 1 from master_add_node('localhost', :worker_2_port);
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,0)
- (localhost,57638,t,0)
+ (localhost,xxxxx,t,0)
+ (localhost,xxxxx,t,0)
(2 rows)
SET client_min_messages TO WARNING;
diff --git a/src/test/regress/expected/failure_1pc_copy_append.out b/src/test/regress/expected/failure_1pc_copy_append.out
index bae1675cb..b67e4e124 100644
--- a/src/test/regress/expected/failure_1pc_copy_append.out
+++ b/src/test/regress/expected/failure_1pc_copy_append.out
@@ -35,10 +35,10 @@ SELECT citus.dump_network_traffic();
dump_network_traffic
---------------------------------------------------------------------
(0,coordinator,"[initial message]")
- (0,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=postgres)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=postgres)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']")
+ (0,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=postgres)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=postgres)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']")
(0,coordinator,"[""Query(query=SELECT worker_apply_shard_ddl_command (100400, 'CREATE TABLE public.copy_test (key integer, value integer)'))""]")
(0,worker,"[""RowDescription(fieldcount=1,fields=['F(name=worker_apply_shard_ddl_command,tableoid=0,colattrnum=0,typoid=2278,typlen=4,typmod=-1,format_code=0)'])"", 'DataRow(columncount=1,columns=[""C(length=0,value=b\\'\\')""])', 'CommandComplete(command=SELECT 1)', 'ReadyForQuery(state=idle)']")
- (0,coordinator,"[""Query(query=SELECT worker_apply_shard_ddl_command (100400, 'ALTER TABLE public.copy_test OWNER TO postgres'))""]")
+ (0,coordinator,"[""Query(query=SELECT worker_apply_shard_ddl_command (100400, 'ALTER TABLE public.copy_test OWNER TO postgres'))""]")
(0,worker,"[""RowDescription(fieldcount=1,fields=['F(name=worker_apply_shard_ddl_command,tableoid=0,colattrnum=0,typoid=2278,typlen=4,typmod=-1,format_code=0)'])"", 'DataRow(columncount=1,columns=[""C(length=0,value=b\\'\\')""])', 'CommandComplete(command=SELECT 1)', 'ReadyForQuery(state=idle)']")
(0,coordinator,"[""Query(query=BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(0, XX, 'XXXX-XX-XX XX:XX:XX.XXXXXX-XX');)""]")
(0,worker,"['CommandComplete(command=BEGIN)', ""RowDescription(fieldcount=1,fields=['F(name=assign_distributed_transaction_id,tableoid=0,colattrnum=0,typoid=2278,typlen=4,typmod=-1,format_code=0)'])"", 'DataRow(columncount=1,columns=[""C(length=0,value=b\\'\\')""])', 'CommandComplete(command=SELECT 1)', 'ReadyForQuery(state=in_transaction_block)']")
@@ -74,14 +74,14 @@ COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' W
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
ORDER BY placementid;
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
+ copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | xxxxx | 100
+ copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
(2 rows)
SELECT count(1) FROM copy_test;
@@ -99,15 +99,15 @@ SELECT citus.mitmproxy('conn.onQuery(query="assign_distributed_transaction_id").
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
-ERROR: failure on connection marked as essential: localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
+ERROR: failure on connection marked as essential: localhost:xxxxx
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
ORDER BY placementid;
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
+ copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | xxxxx | 100
+ copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
(2 rows)
SELECT count(1) FROM copy_test;
@@ -127,14 +127,14 @@ COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' W
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
ORDER BY placementid;
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
+ copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | xxxxx | 100
+ copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
(2 rows)
SELECT count(1) FROM copy_test;
@@ -151,14 +151,14 @@ SELECT citus.mitmproxy('conn.onCopyData().kill()');
(1 row)
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
-ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
+ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
ORDER BY placementid;
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
+ copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | xxxxx | 100
+ copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
(2 rows)
SELECT citus.mitmproxy('conn.onQuery(query="SELECT|COPY").kill()');
@@ -168,7 +168,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT|COPY").kill()');
(1 row)
SELECT count(1) FROM copy_test;
-WARNING: connection error: localhost:xxxxx
+WARNING: connection error: localhost:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -191,8 +191,8 @@ SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
ORDER BY placementid;
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
+ copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | xxxxx | 100
+ copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
(2 rows)
SELECT count(1) FROM copy_test;
@@ -208,17 +208,17 @@ COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' W
WARNING: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
-ERROR: failure on connection marked as essential: localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
+ERROR: failure on connection marked as essential: localhost:xxxxx
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
ORDER BY placementid;
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
+ copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | xxxxx | 100
+ copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
(2 rows)
SELECT count(1) FROM copy_test;
@@ -238,17 +238,17 @@ COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' W
WARNING: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
-CONTEXT: while executing command on localhost:xxxxx
WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
-ERROR: failure on connection marked as essential: localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
WARNING: connection not open
+CONTEXT: while executing command on localhost:xxxxx
+ERROR: failure on connection marked as essential: localhost:xxxxx
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
ORDER BY placementid;
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
+ copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | xxxxx | 100
+ copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
(2 rows)
SELECT count(1) FROM copy_test;
@@ -266,19 +266,19 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
-WARNING: failed to commit transaction on localhost:xxxxx
WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
+WARNING: failed to commit transaction on localhost:xxxxx
WARNING: connection not open
+CONTEXT: while executing command on localhost:xxxxx
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
ORDER BY placementid;
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
- copy_test | 100408 | t | 0 | 3 | 100408 | 1 | 8192 | localhost | 57637 | 112
- copy_test | 100408 | t | 0 | 3 | 100408 | 3 | 8192 | localhost | 9060 | 113
+ copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | xxxxx | 100
+ copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
+ copy_test | 100408 | t | 0 | 3 | 100408 | 1 | 8192 | localhost | xxxxx | 112
+ copy_test | 100408 | t | 0 | 3 | 100408 | 3 | 8192 | localhost | 9060 | 113
(4 rows)
SELECT count(1) FROM copy_test;
diff --git a/src/test/regress/expected/failure_1pc_copy_hash.out b/src/test/regress/expected/failure_1pc_copy_hash.out
index 1a9d36355..ecb8db40f 100644
--- a/src/test/regress/expected/failure_1pc_copy_hash.out
+++ b/src/test/regress/expected/failure_1pc_copy_hash.out
@@ -36,7 +36,7 @@ SELECT citus.dump_network_traffic();
dump_network_traffic
---------------------------------------------------------------------
(0,coordinator,"[initial message]")
- (0,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=postgres)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=postgres)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']")
+ (0,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=postgres)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=postgres)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']")
(0,coordinator,"[""Query(query=BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(0, XX, 'XXXX-XX-XX XX:XX:XX.XXXXXX-XX');)""]")
(0,worker,"['CommandComplete(command=BEGIN)', ""RowDescription(fieldcount=1,fields=['F(name=assign_distributed_transaction_id,tableoid=0,colattrnum=0,typoid=2278,typlen=4,typmod=-1,format_code=0)'])"", 'DataRow(columncount=1,columns=[""C(length=0,value=b\\'\\')""])', 'CommandComplete(command=SELECT 1)', 'ReadyForQuery(state=in_transaction_block)']")
(0,coordinator,"['Query(query=COPY public.copy_test_XXXXXX (key, value) FROM STDIN WITH (FORMAT BINARY))']")
@@ -46,7 +46,7 @@ SELECT citus.dump_network_traffic();
(0,coordinator,"['Query(query=COMMIT)']")
(0,worker,"['CommandComplete(command=COMMIT)', 'ReadyForQuery(state=idle)']")
(1,coordinator,"[initial message]")
- (1,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=postgres)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=postgres)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']")
+ (1,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=postgres)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=postgres)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']")
(1,coordinator,"['Query(query=SELECT count(1) AS count FROM public.copy_test_XXXXXX copy_test)']")
(1,worker,"[""RowDescription(fieldcount=1,fields=['F(name=count,tableoid=0,colattrnum=0,typoid=20,typlen=8,typmod=-1,format_code=0)'])"", 'DataRow(columncount=1,columns=[""C(length=0,value=b\\'\\')""])', 'CommandComplete(command=SELECT 1)', 'ReadyForQuery(state=idle)']")
(14 rows)
@@ -61,9 +61,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="assign_distributed_transaction").kil
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
COPY copy_test, line 1: "0, 0"
-ERROR: failure on connection marked as essential: localhost:xxxxx
+ERROR: failure on connection marked as essential: localhost:xxxxx
CONTEXT: COPY copy_test, line 1: "0, 0"
-- ==== kill the connection when we try to start the COPY ====
-- the query should abort
@@ -77,7 +77,7 @@ COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' W
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
COPY copy_test, line 1: "0, 0"
-- ==== kill the connection when we first start sending data ====
-- the query should abort
@@ -88,7 +88,7 @@ SELECT citus.mitmproxy('conn.onCopyData().killall()'); -- raw rows from the clie
(1 row)
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
-ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
+ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
-- ==== kill the connection when the worker confirms it's received the data ====
-- the query should abort
SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").killall()');
@@ -98,7 +98,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").killall()');
(1 row)
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
-ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
+ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
-- ==== kill the connection when we try to send COMMIT ====
-- the query should succeed, and the placement should be marked inactive
SELECT citus.mitmproxy('conn.allow()');
@@ -129,10 +129,10 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT$").killall()');
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
-WARNING: failed to commit transaction on localhost:xxxxx
WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
+WARNING: failed to commit transaction on localhost:xxxxx
WARNING: connection not open
+CONTEXT: while executing command on localhost:xxxxx
-- the shard is marked invalid
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
(1 row)
@@ -196,8 +196,8 @@ SELECT * FROM pg_dist_shard_placement WHERE shardid IN (
) ORDER BY nodeport, placementid;
shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
- 100400 | 1 | 0 | localhost | 9060 | 100
- 100400 | 3 | 0 | localhost | 57637 | 101
+ 100400 | 1 | 0 | localhost | 9060 | 100
+ 100400 | 3 | 0 | localhost | xxxxx | 101
(2 rows)
-- ==== okay, run some tests where there's only one active shard ====
@@ -219,7 +219,7 @@ SELECT citus.mitmproxy('conn.killall()');
(1 row)
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
-WARNING: connection error: localhost:xxxxx
+WARNING: connection error: localhost:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -250,9 +250,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="assign_distributed_transaction_id").
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
COPY copy_test, line 1: "0, 0"
-ERROR: failure on connection marked as essential: localhost:xxxxx
+ERROR: failure on connection marked as essential: localhost:xxxxx
CONTEXT: COPY copy_test, line 1: "0, 0"
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
(1 row)
@@ -280,7 +280,7 @@ COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' W
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
COPY copy_test, line 1: "0, 0"
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
(1 row)
@@ -305,7 +305,7 @@ SELECT citus.mitmproxy('conn.onCopyData().killall()');
(1 row)
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
-ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
+ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
(1 row)
@@ -330,10 +330,10 @@ COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' W
WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
-WARNING: failed to commit transaction on localhost:xxxxx
WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
+WARNING: failed to commit transaction on localhost:xxxxx
WARNING: connection not open
+CONTEXT: while executing command on localhost:xxxxx
WARNING: could not commit transaction for shard xxxxx on any active node
ERROR: could not commit transaction on any active node
SELECT citus.mitmproxy('conn.allow()');
@@ -357,8 +357,8 @@ SELECT * FROM pg_dist_shard_placement WHERE shardid IN (
) ORDER BY nodeport, placementid;
shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
- 100400 | 1 | 0 | localhost | 9060 | 100
- 100400 | 3 | 0 | localhost | 57637 | 101
+ 100400 | 1 | 0 | localhost | 9060 | 100
+ 100400 | 3 | 0 | localhost | xxxxx | 101
(2 rows)
-- the COMMIT makes it through but the connection dies before we get a response
@@ -370,10 +370,10 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT").killall()');
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
-WARNING: failed to commit transaction on localhost:xxxxx
WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
WARNING: could not commit transaction for shard xxxxx on any active node
ERROR: could not commit transaction on any active node
SELECT citus.mitmproxy('conn.allow()');
@@ -387,8 +387,8 @@ SELECT * FROM pg_dist_shard_placement WHERE shardid IN (
) ORDER BY nodeport, placementid;
shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
- 100400 | 1 | 0 | localhost | 9060 | 100
- 100400 | 3 | 0 | localhost | 57637 | 101
+ 100400 | 1 | 0 | localhost | 9060 | 100
+ 100400 | 3 | 0 | localhost | xxxxx | 101
(2 rows)
SELECT * FROM copy_test;
diff --git a/src/test/regress/expected/failure_add_disable_node.out b/src/test/regress/expected/failure_add_disable_node.out
index 473df0f4a..a2574d0f4 100644
--- a/src/test/regress/expected/failure_add_disable_node.out
+++ b/src/test/regress/expected/failure_add_disable_node.out
@@ -16,8 +16,8 @@ SELECT * FROM master_get_active_worker_nodes()
ORDER BY 1, 2;
node_name | node_port
---------------------------------------------------------------------
- localhost | 9060
- localhost | 57637
+ localhost | 9060
+ localhost | xxxxx
(2 rows)
-- verify there are no tables that could prevent add/remove node operations
@@ -52,8 +52,8 @@ ORDER BY placementid;
200000 | 1
(2 rows)
-SELECT master_disable_node('localhost', :worker_2_proxy_port);
-NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 9060) to activate this node back.
+SELECT master_disable_node('localhost', :worker_2_proxy_port);
+NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 9060) to activate this node back.
master_disable_node
---------------------------------------------------------------------
(1 row)
SELECT * FROM master_get_active_worker_nodes()
ORDER BY 1, 2;
node_name | node_port
---------------------------------------------------------------------
- localhost | 57637
+ localhost | xxxxx
(1 row)
SELECT shardid, shardstate
@@ -82,12 +82,12 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()');
(1 row)
-SELECT master_activate_node('localhost', :worker_2_proxy_port);
-NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx
+SELECT master_activate_node('localhost', :worker_2_proxy_port);
+NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT * FROM master_get_active_worker_nodes()
ORDER BY 1, 2;
node_name | node_port
---------------------------------------------------------------------
- localhost | 57637
+ localhost | xxxxx
(1 row)
SELECT shardid, shardstate
@@ -118,17 +118,17 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE SCHEMA").kill()');
(1 row)
-SELECT master_activate_node('localhost', :worker_2_proxy_port);
+SELECT master_activate_node('localhost', :worker_2_proxy_port);
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
-CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx -- verify node is not activated SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; node_name | node_port --------------------------------------------------------------------- - localhost | 57637 + | xxxxx (1 row) SELECT shardid, shardstate @@ -147,15 +147,15 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backen (1 row) -SELECT master_activate_node('localhost', :worker_2_proxy_port); -NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx +SELECT master_activate_node('', :worker_2_proxy_port); +NOTICE: Replicating reference table "user_table" to the node :xxxxx ERROR: canceling statement due to user request -- verify node is not activated SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; node_name | node_port --------------------------------------------------------------------- - localhost | 57637 + | xxxxx (1 row) SELECT shardid, shardstate @@ -174,11 +174,11 @@ SELECT citus.mitmproxy('conn.allow()'); (1 row) -- master_remove_node fails when there are shards on that worker -SELECT master_remove_node('localhost', :worker_2_proxy_port); +SELECT master_remove_node('', :worker_2_proxy_port); ERROR: you cannot remove the primary node of a node group which has shard placements -- drop event table and re-run remove DROP TABLE event_table; -SELECT master_remove_node('localhost', :worker_2_proxy_port); +SELECT master_remove_node('', :worker_2_proxy_port); master_remove_node --------------------------------------------------------------------- @@ -189,7 +189,7 @@ SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; node_name | node_port --------------------------------------------------------------------- - localhost | 57637 + | xxxxx (1 row) SELECT shardid, shardstate @@ -204,13 +204,13 @@ ORDER BY placementid; -- test master_add_inactive_node -- it does not create any network activity therefore can not -- be injected failure through network -SELECT master_add_inactive_node('localhost', :worker_2_proxy_port); +SELECT master_add_inactive_node('', :worker_2_proxy_port); master_add_inactive_node --------------------------------------------------------------------- 3 (1 row) -SELECT master_remove_node('localhost', :worker_2_proxy_port); +SELECT master_remove_node('', :worker_2_proxy_port); master_remove_node --------------------------------------------------------------------- @@ -233,18 +233,18 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); (1 row) -SELECT master_add_node('localhost', :worker_2_proxy_port); -NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx +SELECT master_add_node('', :worker_2_proxy_port); +NOTICE: Replicating reference table "user_table" to the node :xxxxx ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx -- verify node is not added SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; node_name | node_port --------------------------------------------------------------------- - localhost | 57637 + | xxxxx (1 row) SELECT shardid, shardstate @@ -262,15 +262,15 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backen (1 row) -SELECT master_add_node('localhost', :worker_2_proxy_port); -NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx +SELECT master_add_node('', :worker_2_proxy_port); +NOTICE: Replicating reference table "user_table" to the node :xxxxx ERROR: canceling statement due to user request -- verify node is not added SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; node_name | node_port --------------------------------------------------------------------- - localhost | 57637 + | xxxxx (1 row) SELECT shardid, shardstate @@ -289,8 +289,8 @@ SELECT citus.mitmproxy('conn.allow()'); (1 row) -SELECT master_add_node('localhost', :worker_2_proxy_port); -NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx +SELECT master_add_node('', :worker_2_proxy_port); +NOTICE: Replicating reference table "user_table" to the node :xxxxx master_add_node --------------------------------------------------------------------- 6 @@ -301,8 +301,8 @@ SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; node_name | node_port --------------------------------------------------------------------- - localhost | 9060 - localhost | 57637 + | 9060 + | xxxxx (2 rows) SELECT shardid, shardstate @@ -316,7 +316,7 @@ ORDER BY placementid; (2 rows) -- fail master_add_node by failing copy out operation -SELECT master_remove_node('localhost', :worker_1_port); +SELECT master_remove_node('', :worker_1_port); master_remove_node --------------------------------------------------------------------- @@ -328,16 +328,16 @@ SELECT citus.mitmproxy('conn.onQuery(query="COPY").kill()'); (1 row) -SELECT master_add_node('localhost', :worker_1_port); -NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx -ERROR: could not copy table "user_table_200000" from "localhost:xxxxx" -CONTEXT: while executing command on localhost:xxxxx +SELECT master_add_node('', :worker_1_port); +NOTICE: Replicating reference table "user_table" to the node :xxxxx +ERROR: could not copy table "user_table_200000" from ":xxxxx" +CONTEXT: while executing command on :xxxxx -- verify node is not added SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; node_name | node_port --------------------------------------------------------------------- - localhost | 9060 + | 9060 (1 row) SELECT citus.mitmproxy('conn.allow()'); @@ -346,8 +346,8 @@ SELECT citus.mitmproxy('conn.allow()'); (1 row) -SELECT master_add_node('localhost', :worker_1_port); -NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx +SELECT master_add_node('', :worker_1_port); +NOTICE: Replicating reference table "user_table" to the node :xxxxx master_add_node --------------------------------------------------------------------- 8 @@ -358,8 +358,8 @@ SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; node_name | node_port --------------------------------------------------------------------- - localhost | 9060 - localhost | 57637 + | 9060 + | xxxxx (2 rows) SELECT shardid, shardstate @@ -379,7 +379,7 @@ SELECT * FROM run_command_on_workers('DROP SCHEMA 
IF EXISTS add_remove_node CASC ORDER BY nodeport; nodename | nodeport | success | result --------------------------------------------------------------------- - localhost | 9060 | t | DROP SCHEMA - localhost | 57637 | t | DROP SCHEMA + | 9060 | t | DROP SCHEMA + | xxxxx | t | DROP SCHEMA (2 rows) diff --git a/src/test/regress/expected/failure_connection_establishment.out b/src/test/regress/expected/failure_connection_establishment.out index 778fcad8e..0dd9afcaf 100644 --- a/src/test/regress/expected/failure_connection_establishment.out +++ b/src/test/regress/expected/failure_connection_establishment.out @@ -44,7 +44,7 @@ SELECT citus.mitmproxy('conn.delay(500)'); (1 row) ALTER TABLE products ADD CONSTRAINT p_key PRIMARY KEY(product_no); -ERROR: could not establish any connections to the node localhost:xxxxx after 400 ms +ERROR: could not establish any connections to the node :xxxxx after 400 ms SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -169,7 +169,7 @@ SELECT citus.mitmproxy('conn.delay(500)'); (1 row) SELECT count(*) FROM single_replicatated; -ERROR: could not establish any connections to the node localhost:xxxxx after 400 ms +ERROR: could not establish any connections to the node :xxxxx after 400 ms SET citus.force_max_query_parallelization TO OFF; -- one similar test, but this time on modification queries -- to see that connection establishement failures could @@ -224,7 +224,7 @@ RESET client_min_messages; -- verify get_global_active_transactions works when a timeout happens on a connection SELECT get_global_active_transactions(); WARNING: could not establish connection after 400 ms -WARNING: connection error: localhost:xxxxx +WARNING: connection error: :xxxxx get_global_active_transactions --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/failure_copy_on_hash.out b/src/test/regress/expected/failure_copy_on_hash.out index b867005af..b41d855f9 100644 --- a/src/test/regress/expected/failure_copy_on_hash.out +++ b/src/test/regress/expected/failure_copy_on_hash.out @@ -36,7 +36,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) \COPY test_table FROM stdin delimiter ','; -WARNING: connection error: localhost:xxxxx +WARNING: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
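Every scenario in these failure files follows the same arm/run/disarm cycle: a rule is installed in the proxy through the citus.mitmproxy() UDF, the statement under test is executed against the proxied worker, and conn.allow() restores normal traffic. A minimal sketch of that cycle; the copy_test table and the COPY FROM PROGRAM invocation are borrowed from the 1pc copy hunks above:

    -- arm: drop every connection to the proxied worker
    SELECT citus.mitmproxy('conn.kill()');
    -- the statement under test now fails for placements behind the proxy
    COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1' WITH CSV;
    -- disarm: later statements behave normally again
    SELECT citus.mitmproxy('conn.allow()');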
@@ -69,7 +69,7 @@ SELECT citus.mitmproxy('conn.onCopyData().kill()'); (1 row) \COPY test_table FROM stdin delimiter ','; -ERROR: failed to COPY to shard xxxxx on localhost:xxxxx +ERROR: failed to COPY to shard xxxxx on :xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -124,7 +124,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 1").kill()'); (1 row) \COPY test_table FROM stdin delimiter ','; -ERROR: failed to COPY to shard xxxxx on localhost:xxxxx +ERROR: failed to COPY to shard xxxxx on :xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -179,7 +179,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()'); \COPY test_table FROM stdin delimiter ','; ERROR: connection not open -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -239,7 +239,7 @@ BEGIN; \COPY test_table FROM stdin delimiter ','; ROLLBACK; WARNING: connection not open -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -276,22 +276,22 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) \COPY test_table_2 FROM stdin delimiter ','; -WARNING: connection error: localhost:xxxxx +WARNING: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. CONTEXT: COPY test_table_2, line 1: "1,2" -WARNING: connection error: localhost:xxxxx +WARNING: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. CONTEXT: COPY test_table_2, line 2: "3,4" -WARNING: connection error: localhost:xxxxx +WARNING: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. CONTEXT: COPY test_table_2, line 3: "6,7" -WARNING: connection error: localhost:xxxxx +WARNING: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -341,7 +341,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx COPY test_table_2, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -386,7 +386,7 @@ SELECT citus.mitmproxy('conn.onCopyData().kill()'); (1 row) \COPY test_table_2 FROM stdin delimiter ','; -ERROR: failed to COPY to shard xxxxx on localhost:xxxxx +ERROR: failed to COPY to shard xxxxx on :xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_copy_to_reference.out b/src/test/regress/expected/failure_copy_to_reference.out index e34d24f3f..713d2f49a 100644 --- a/src/test/regress/expected/failure_copy_to_reference.out +++ b/src/test/regress/expected/failure_copy_to_reference.out @@ -35,7 +35,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) \copy test_table FROM STDIN DELIMITER ',' -ERROR: failure on connection marked as essential: localhost:xxxxx +ERROR: failure on connection marked as essential: :xxxxx CONTEXT: COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -63,7 +63,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) \copy test_table FROM STDIN DELIMITER ',' -ERROR: failure on connection marked as essential: localhost:xxxxx +ERROR: failure on connection marked as essential: :xxxxx CONTEXT: COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -122,7 +122,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -178,7 +178,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="^COPY 3").kill()'); (1 row) \copy test_table FROM STDIN DELIMITER ',' -ERROR: failed to COPY to shard xxxxx on localhost:xxxxx +ERROR: failed to COPY to shard xxxxx on :xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -236,7 +236,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -265,7 +265,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").kill()'); \copy test_table FROM STDIN DELIMITER ',' ERROR: connection not open -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -396,7 +396,7 @@ SET LOCAL client_min_messages TO WARNING; \copy test_table FROM STDIN DELIMITER ',' ROLLBACK; WARNING: connection not open -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -429,7 +429,7 @@ SET LOCAL client_min_messages TO WARNING; \copy test_table FROM STDIN DELIMITER ',' ROLLBACK; WARNING: connection not open -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_create_distributed_table_non_empty.out b/src/test/regress/expected/failure_create_distributed_table_non_empty.out index 9402dde3e..aceab8064 100644 --- a/src/test/regress/expected/failure_create_distributed_table_non_empty.out +++ b/src/test/regress/expected/failure_create_distributed_table_non_empty.out @@ -26,7 +26,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -62,7 +62,7 @@ SELECT create_distributed_table('test_table', 'id'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -72,8 +72,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,1) + (,9060,t,0) + (,xxxxx,t,1) (2 rows) -- cancel as soon as the coordinator sends CREATE SCHEMA @@ -97,15 +97,15 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,1) + (,9060,t,0) + (,xxxxx,t,1) (2 rows) SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS create_distributed_table_non_empty_failure$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,"DROP SCHEMA") - (localhost,57637,t,"DROP SCHEMA") + (,9060,t,"DROP SCHEMA") + (,xxxxx,t,"DROP SCHEMA") (2 rows) -- this triggers a schema creation which prevents further transactions around dependency propagation @@ -125,7 +125,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -144,8 +144,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,1) - (localhost,57637,t,1) + (,9060,t,1) + (,xxxxx,t,1) (2 rows) -- cancel as soon as the coordinator sends begin @@ -175,8 +175,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,1) - (localhost,57637,t,1) + (,9060,t,1) + (,xxxxx,t,1) (2 rows) DROP TABLE test_table ; @@ -190,7 +190,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -211,7 +211,7 @@ SELECT create_distributed_table('test_table', 'id'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -227,7 +227,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").kill()'); SELECT create_distributed_table('test_table', 'id'); NOTICE: Copying data from local table... -ERROR: failed to COPY to shard xxxxx on localhost:xxxxx +ERROR: failed to COPY to shard xxxxx on :xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -278,7 +278,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()'); SELECT create_distributed_table('test_table', 'id'); ERROR: connection not open -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -467,7 +467,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -480,8 +480,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) -- Now, cancel the connection just after the COPY started to @@ -514,7 +514,7 @@ SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_ ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -524,8 +524,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) -- Now, cancel the connection when we issue CREATE TABLE on @@ -555,7 +555,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_apply_shard_ddl_comma (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -568,8 +568,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) -- Now run the same tests with 1pc @@ -593,7 +593,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -612,8 +612,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) -- in the first test, cancel the first connection we sent from the coordinator @@ -640,8 +640,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) -- this triggers a schema creation which prevents further transactions around dependency propagation @@ -661,7 +661,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
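After each injected failure, the tests assert that nothing leaked: the coordinator records no shards and the workers hold no stray relations. A sketch of that verification step, with the schema and table names taken from the surrounding hunks:

    -- coordinator side: no shard metadata for the failed table
    SELECT count(*) FROM pg_dist_shard
    WHERE logicalrelid = 'create_distributed_table_non_empty_failure.test_table'::regclass;
    -- worker side: no orphaned shard tables
    SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables
      WHERE table_schema = 'create_distributed_table_non_empty_failure'
      AND table_name LIKE 'test_table%'$$);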
@@ -680,8 +680,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,1) - (localhost,57637,t,1) + (,9060,t,1) + (,xxxxx,t,1) (2 rows) -- cancel as soon as the coordinator sends begin @@ -711,8 +711,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,1) - (localhost,57637,t,1) + (,9060,t,1) + (,xxxxx,t,1) (2 rows) DROP TABLE test_table ; @@ -726,7 +726,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -747,7 +747,7 @@ SELECT create_distributed_table('test_table', 'id'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -762,7 +762,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: failed to COPY to shard xxxxx on localhost:xxxxx +ERROR: failed to COPY to shard xxxxx on :xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -940,7 +940,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -980,7 +980,7 @@ SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_ ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -990,8 +990,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) SELECT citus.mitmproxy('conn.allow()'); diff --git a/src/test/regress/expected/failure_create_index_concurrently.out b/src/test/regress/expected/failure_create_index_concurrently.out index 1e53e7d4f..876c375b6 100644 --- a/src/test/regress/expected/failure_create_index_concurrently.out +++ b/src/test/regress/expected/failure_create_index_concurrently.out @@ -40,7 +40,7 @@ SELECT * FROM run_command_on_workers($$SELECT count(*) FROM pg_indexes WHERE ind WHERE nodeport = :worker_2_proxy_port; nodename | nodeport | success | result --------------------------------------------------------------------- - localhost | 9060 | t | 0 + | 9060 | t | 0 (1 row) DROP TABLE index_test; @@ -151,7 +151,7 @@ SELECT * FROM run_command_on_workers($$SELECT count(*) FROM pg_indexes WHERE ind WHERE nodeport = :worker_2_proxy_port; nodename | nodeport | success | result --------------------------------------------------------------------- - localhost | 9060 | t | 4 + | 9060 | t | 4 (1 row) RESET SEARCH_PATH; @@ -162,6 +162,6 @@ SELECT * FROM run_command_on_workers($$SELECT count(*) FROM pg_indexes WHERE ind WHERE nodeport = :worker_2_proxy_port; nodename | nodeport | success | result --------------------------------------------------------------------- - localhost | 9060 | t | 0 + | 9060 | t | 0 (1 row) diff --git a/src/test/regress/expected/failure_create_reference_table.out b/src/test/regress/expected/failure_create_reference_table.out index a9d15187c..865163410 100644 --- a/src/test/regress/expected/failure_create_reference_table.out +++ b/src/test/regress/expected/failure_create_reference_table.out @@ -25,7 +25,7 @@ SELECT citus.mitmproxy('conn.onQuery().kill()'); (1 row) SELECT create_reference_table('ref_table'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -43,7 +43,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="BEGIN").kill()'); (1 row) SELECT create_reference_table('ref_table'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -76,7 +76,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="SELECT 1").kill()'); (1 row) SELECT create_reference_table('ref_table'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
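Reference tables place one replica on every node, so a single proxied worker is enough to abort the whole create_reference_table() call; the test then checks that no placements were left behind. A sketch, assuming the ref_table from these hunks:

    SELECT citus.mitmproxy('conn.onQuery().kill()');
    SELECT create_reference_table('ref_table');  -- fails on the first worker query
    SELECT citus.mitmproxy('conn.allow()');
    SELECT count(*) FROM pg_dist_shard_placement;  -- expect zero placements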
@@ -110,7 +110,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 3").kill()'); SELECT create_reference_table('ref_table'); NOTICE: Copying data from local table... -ERROR: failed to COPY to shard xxxxx on localhost:xxxxx +ERROR: failed to COPY to shard xxxxx on :xxxxx SELECT count(*) FROM pg_dist_shard_placement; count --------------------------------------------------------------------- @@ -145,7 +145,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki SELECT create_reference_table('ref_table'); ERROR: connection not open -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx SELECT count(*) FROM pg_dist_shard_placement; count --------------------------------------------------------------------- @@ -175,7 +175,7 @@ SELECT shardid, nodeport, shardstate FROM pg_dist_shard_placement ORDER BY shard shardid | nodeport | shardstate --------------------------------------------------------------------- 10000008 | 9060 | 1 - 10000008 | 57637 | 1 + 10000008 | xxxxx | 1 (2 rows) SET client_min_messages TO NOTICE; @@ -202,8 +202,8 @@ SELECT create_reference_table('ref_table'); WARNING: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:xxxxx -ERROR: failure on connection marked as essential: localhost:xxxxx +CONTEXT: while executing command on :xxxxx +ERROR: failure on connection marked as essential: :xxxxx COMMIT; -- kill on ROLLBACK, should be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); @@ -222,7 +222,7 @@ NOTICE: Copying data from local table... ROLLBACK; WARNING: connection not open -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport | placementid --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_create_table.out b/src/test/regress/expected/failure_create_table.out index fe839b791..e1a552345 100644 --- a/src/test/regress/expected/failure_create_table.out +++ b/src/test/regress/expected/failure_create_table.out @@ -20,7 +20,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -39,8 +39,8 @@ SELECT count(*) FROM pg_dist_shard; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) -- kill as soon as the coordinator sends CREATE SCHEMA @@ -57,7 +57,7 @@ SELECT create_distributed_table('test_table', 'id'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -73,8 +73,8 @@ SELECT count(*) FROM pg_dist_shard; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'failure_create_table'$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,1) + (,9060,t,0) + (,xxxxx,t,1) (2 rows) -- this is merely used to get the schema creation propagated. Without there are failures @@ -89,7 +89,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -108,8 +108,8 @@ SELECT count(*) FROM pg_dist_shard; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) -- Now, kill the connection after sending create table command with worker_apply_shard_ddl_command UDF @@ -120,7 +120,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_shard_ddl_comman (1 row) SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -139,8 +139,8 @@ SELECT count(*) FROM pg_dist_shard; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) -- Kill the connection while creating a distributed table in sequential mode on sending create command @@ -154,7 +154,7 @@ BEGIN; (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
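The sequential-mode variant of the same failure runs inside an explicit transaction so that shard creation uses one connection per worker, exercising a different error path than the parallel default. A sketch of the setup; the GUC assignment is an assumption here, borrowed from the failure_cte_subquery hunks later in this patch:

    BEGIN;
    -- assumed GUC: forces one-shard-at-a-time execution
    SET LOCAL citus.multi_shard_modify_mode = 'sequential';
    SELECT create_distributed_table('test_table', 'id');  -- the injected failure aborts here
    ROLLBACK;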
@@ -174,8 +174,8 @@ SELECT count(*) FROM pg_dist_shard; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) -- Now, cancel the connection while creating transaction @@ -204,8 +204,8 @@ SELECT count(*) FROM pg_dist_shard; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) DROP TABLE test_table; @@ -225,7 +225,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); (1 row) SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -244,8 +244,8 @@ SELECT count(*) FROM pg_dist_shard; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backend_pid() || ')'); @@ -271,8 +271,8 @@ SELECT count(*) FROM pg_dist_shard; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) -- Kill and cancel the connection after worker sends "PREPARE TRANSACTION" ack with colocate_with option @@ -284,7 +284,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table'); ERROR: connection not open -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -300,8 +300,8 @@ SELECT count(*) FROM pg_dist_shard; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); @@ -327,8 +327,8 @@ SELECT count(*) FROM pg_dist_shard; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers 
--------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) -- drop tables and schema and recreate to start from a non-distributed schema again @@ -347,7 +347,7 @@ SELECT citus.mitmproxy('conn.kill()'); BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -367,8 +367,8 @@ SELECT count(*) FROM pg_dist_shard; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) -- this is merely used to get the schema creation propagated. Without there are failures @@ -384,7 +384,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -404,8 +404,8 @@ SELECT count(*) FROM pg_dist_shard; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) -- Now, cancel the connection while creating the transaction on @@ -443,8 +443,8 @@ SELECT count(*) FROM pg_dist_shard; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) -- drop tables and schema and recreate to start from a non-distributed schema again @@ -463,7 +463,7 @@ SELECT citus.mitmproxy('conn.kill()'); BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -483,8 +483,8 @@ SELECT count(*) FROM pg_dist_shard; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) -- Kill connection while sending create table command with 1pc. 
@@ -496,7 +496,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -516,8 +516,8 @@ SELECT count(*) FROM pg_dist_shard; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) -- this is merely used to get the schema creation propagated. Without there are failures @@ -533,7 +533,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -553,8 +553,8 @@ SELECT count(*) FROM pg_dist_shard; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) -- Now, cancel the connection while creating transactions on @@ -591,8 +591,8 @@ SELECT count(*) FROM pg_dist_shard; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) DROP TABLE test_table; @@ -615,7 +615,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT master_create_worker_shards('test_table_2', 4, 2); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
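Alongside kills, the suite injects cancellations: the rule splices the coordinator's own backend PID into a cancel request, so the failure surfaces as an ordinary "canceling statement due to user request" rather than a broken connection. A sketch, using the rule text from the hunks above:

    SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backend_pid() || ')');
    SELECT create_distributed_table('test_table', 'id');
    -- ERROR: canceling statement due to user request
    SELECT citus.mitmproxy('conn.allow()');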
@@ -640,8 +640,8 @@ SELECT count(*) FROM pg_dist_shard; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) -- Kill the connection after worker sends "PREPARE TRANSACTION" ack @@ -653,7 +653,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="^PREPARE TRANSACTION").k SELECT master_create_worker_shards('test_table_2', 4, 2); ERROR: connection not open -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -669,8 +669,8 @@ SELECT count(*) FROM pg_dist_shard; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) -- Cancel the connection after sending prepare transaction in master_create_worker_shards @@ -704,8 +704,8 @@ SELECT count(*) FROM pg_dist_shard; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,9060,t,0) - (localhost,57637,t,0) + (,9060,t,0) + (,xxxxx,t,0) (2 rows) DROP SCHEMA failure_create_table CASCADE; diff --git a/src/test/regress/expected/failure_cte_subquery.out b/src/test/regress/expected/failure_cte_subquery.out index bad22eeed..9aa359cd3 100644 --- a/src/test/regress/expected/failure_cte_subquery.out +++ b/src/test/regress/expected/failure_cte_subquery.out @@ -57,7 +57,7 @@ FROM ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx -- kill at the second copy (pull) SELECT citus.mitmproxy('conn.onQuery(query="SELECT user_id FROM cte_failure.events_table_16000002").kill()'); mitmproxy @@ -88,7 +88,7 @@ FROM ORDER BY 1 DESC LIMIT 5 ) as foo WHERE foo.user_id = cte.user_id; -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -122,7 +122,7 @@ FROM ORDER BY 1 DESC LIMIT 5 ) as foo WHERE foo.user_id = cte.user_id; -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
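failure_cte_subquery separates the push of an intermediate result from its later pull by matching the exact worker-side query text, shard name and all. A sketch of the targeted rule; the query string is copied verbatim from the hunk above, and the CTE statement under test is elided:

    -- fail only when this shard is asked to read the materialized CTE back
    SELECT citus.mitmproxy('conn.onQuery(query="SELECT user_id FROM cte_failure.events_table_16000002").kill()');
    -- ... run the CTE query from the test file here: the write succeeds, the read fails ...
    SELECT citus.mitmproxy('conn.allow()');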
@@ -262,7 +262,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM").kill()'); WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *) INSERT INTO users_table SELECT * FROM cte_delete; -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -295,7 +295,7 @@ INSERT INTO users_table SELECT * FROM cte_delete; ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx -- verify contents are the same SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -378,7 +378,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode = 'sequential'; WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *) INSERT INTO users_table SELECT * FROM cte_delete; -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. diff --git a/src/test/regress/expected/failure_ddl.out b/src/test/regress/expected/failure_ddl.out index 8b31e291c..9de51adcb 100644 --- a/src/test/regress/expected/failure_ddl.out +++ b/src/test/regress/expected/failure_ddl.out @@ -37,7 +37,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -71,7 +71,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. @@ -104,7 +104,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection error: localhost:xxxxx +ERROR: connection error: :xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
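failure_ddl probes each phase of a distributed ALTER TABLE separately: connection authentication, the distributed BEGIN, the per-shard worker_apply_shard_ddl_command call, and the commit. A sketch of the per-shard phase, with the rule text taken from these hunks:

    SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kill()');
    ALTER TABLE test_table ADD COLUMN new_column INT;  -- fails while applying shard DDL
    SELECT citus.mitmproxy('conn.allow()');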
@@ -159,10 +159,10 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
 SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
 run_command_on_placements
 ---------------------------------------------------------------------
- (localhost,9060,100800,t,"{key,value}")
- (localhost,9060,100802,t,"{key,value}")
- (localhost,57637,100801,t,"{key,new_column,value}")
- (localhost,57637,100803,t,"{key,new_column,value}")
+ (,9060,100800,t,"{key,value}")
+ (,9060,100802,t,"{key,value}")
+ (,xxxxx,100801,t,"{key,new_column,value}")
+ (,xxxxx,100803,t,"{key,new_column,value}")
 (4 rows)
 -- manually drop & re-create the table for the next tests
@@ -203,10 +203,10 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
 SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
 run_command_on_placements
 ---------------------------------------------------------------------
- (localhost,9060,100800,t,"{key,new_column,value}")
- (localhost,9060,100802,t,"{key,new_column,value}")
- (localhost,57637,100801,t,"{key,new_column,value}")
- (localhost,57637,100803,t,"{key,new_column,value}")
+ (,9060,100800,t,"{key,new_column,value}")
+ (,9060,100802,t,"{key,new_column,value}")
+ (,xxxxx,100801,t,"{key,new_column,value}")
+ (,xxxxx,100803,t,"{key,new_column,value}")
 (4 rows)
 -- the following tests rely the column not exists, so drop manually
@@ -223,15 +223,15 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT").kill()');
 ALTER TABLE test_table ADD COLUMN new_column INT;
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
-WARNING: failed to commit transaction on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
+WARNING: failed to commit transaction on :xxxxx
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
-WARNING: failed to commit transaction on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
+WARNING: failed to commit transaction on :xxxxx
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 WARNING: could not commit transaction for shard xxxxx on any active node
 WARNING: could not commit transaction for shard xxxxx on any active node
 SELECT citus.mitmproxy('conn.allow()');
@@ -250,10 +250,10 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
 SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
 run_command_on_placements
 ---------------------------------------------------------------------
- (localhost,9060,100800,t,"{key,new_column,value}")
- (localhost,9060,100802,t,"{key,new_column,value}")
- (localhost,57637,100801,t,"{key,new_column,value}")
- (localhost,57637,100803,t,"{key,new_column,value}")
+ (,9060,100800,t,"{key,new_column,value}")
+ (,9060,100802,t,"{key,new_column,value}")
+ (,xxxxx,100801,t,"{key,new_column,value}")
+ (,xxxxx,100803,t,"{key,new_column,value}")
 (4 rows)
 -- now cancel just after the worker sends response to
@@ -289,9 +289,9 @@ SET LOCAL client_min_messages TO WARNING;
 ALTER TABLE test_table DROP COLUMN new_column;
 ROLLBACK;
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 -- now cancel just after the worker sends response to
 -- but Postgres doesn't accepts interrupts during COMMIT and ROLLBACK
 -- so should not cancel at all, so not an effective test but adding in
@@ -332,10 +332,10 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
 SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
 run_command_on_placements
 ---------------------------------------------------------------------
- (localhost,9060,100800,t,"{key,new_column,value}")
- (localhost,9060,100802,t,"{key,new_column,value}")
- (localhost,57637,100801,t,"{key,new_column,value}")
- (localhost,57637,100803,t,"{key,new_column,value}")
+ (,9060,100800,t,"{key,new_column,value}")
+ (,9060,100802,t,"{key,new_column,value}")
+ (,xxxxx,100801,t,"{key,new_column,value}")
+ (,xxxxx,100803,t,"{key,new_column,value}")
 (4 rows)
 -- now, lets test with 2PC
@@ -349,7 +349,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()');
 (1 row)
 ALTER TABLE test_table DROP COLUMN new_column;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -383,7 +383,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
 (1 row)
 ALTER TABLE test_table DROP COLUMN new_column;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -416,7 +416,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil
 (1 row)
 ALTER TABLE test_table DROP COLUMN new_column;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -450,7 +450,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki
 ALTER TABLE test_table DROP COLUMN new_column;
 ERROR: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 SELECT citus.mitmproxy('conn.allow()');
 mitmproxy
 ---------------------------------------------------------------------
@@ -466,10 +466,10 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
 SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
 run_command_on_placements
 ---------------------------------------------------------------------
- (localhost,9060,100800,t,"{key,new_column,value}")
- (localhost,9060,100802,t,"{key,new_column,value}")
- (localhost,57637,100801,t,"{key,new_column,value}")
- (localhost,57637,100803,t,"{key,new_column,value}")
+ (,9060,100800,t,"{key,new_column,value}")
+ (,9060,100802,t,"{key,new_column,value}")
+ (,xxxxx,100801,t,"{key,new_column,value}")
+ (,xxxxx,100803,t,"{key,new_column,value}")
 (4 rows)
 -- we should be able to recover the transaction and
@@ -483,10 +483,10 @@ SELECT recover_prepared_transactions();
 SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
 run_command_on_placements
 ---------------------------------------------------------------------
- (localhost,9060,100800,t,"{key,new_column,value}")
- (localhost,9060,100802,t,"{key,new_column,value}")
- (localhost,57637,100801,t,"{key,new_column,value}")
- (localhost,57637,100803,t,"{key,new_column,value}")
+ (,9060,100800,t,"{key,new_column,value}")
+ (,9060,100802,t,"{key,new_column,value}")
+ (,xxxxx,100801,t,"{key,new_column,value}")
+ (,xxxxx,100803,t,"{key,new_column,value}")
 (4 rows)
 -- cancelling on PREPARE should be fine, everything should be rollbacked
@@ -513,10 +513,10 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
 SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
 run_command_on_placements
 ---------------------------------------------------------------------
- (localhost,9060,100800,t,"{key,new_column,value}")
- (localhost,9060,100802,t,"{key,new_column,value}")
- (localhost,57637,100801,t,"{key,new_column,value}")
- (localhost,57637,100803,t,"{key,new_column,value}")
+ (,9060,100800,t,"{key,new_column,value}")
+ (,9060,100802,t,"{key,new_column,value}")
+ (,xxxxx,100801,t,"{key,new_column,value}")
+ (,xxxxx,100803,t,"{key,new_column,value}")
 (4 rows)
 -- we should be able to recover the transaction and
@@ -530,10 +530,10 @@ SELECT recover_prepared_transactions();
 SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
 run_command_on_placements
 ---------------------------------------------------------------------
- (localhost,9060,100800,t,"{key,new_column,value}")
- (localhost,9060,100802,t,"{key,new_column,value}")
- (localhost,57637,100801,t,"{key,new_column,value}")
- (localhost,57637,100803,t,"{key,new_column,value}")
+ (,9060,100800,t,"{key,new_column,value}")
+ (,9060,100802,t,"{key,new_column,value}")
+ (,xxxxx,100801,t,"{key,new_column,value}")
+ (,xxxxx,100803,t,"{key,new_column,value}")
 (4 rows)
 -- killing on command complete of COMMIT PREPARE, we should see that the command succeeds
@@ -560,10 +560,10 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
 SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
 run_command_on_placements
 ---------------------------------------------------------------------
- (localhost,9060,100800,t,"{key,value}")
- (localhost,9060,100802,t,"{key,value}")
- (localhost,57637,100801,t,"{key,value}")
- (localhost,57637,100803,t,"{key,value}")
+ (,9060,100800,t,"{key,value}")
+ (,9060,100802,t,"{key,value}")
+ (,xxxxx,100801,t,"{key,value}")
+ (,xxxxx,100803,t,"{key,value}")
 (4 rows)
 -- we shouldn't have any prepared transactions in the workers
@@ -576,10 +576,10 @@ SELECT recover_prepared_transactions();
 SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
 run_command_on_placements
 ---------------------------------------------------------------------
- (localhost,9060,100800,t,"{key,value}")
- (localhost,9060,100802,t,"{key,value}")
- (localhost,57637,100801,t,"{key,value}")
- (localhost,57637,100803,t,"{key,value}")
+ (,9060,100800,t,"{key,value}")
+ (,9060,100802,t,"{key,value}")
+ (,xxxxx,100801,t,"{key,value}")
+ (,xxxxx,100803,t,"{key,value}")
 (4 rows)
 -- kill as soon as the coordinator sends COMMIT
@@ -607,10 +607,10 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
 SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
 run_command_on_placements
 ---------------------------------------------------------------------
- (localhost,9060,100800,t,"{key,value}")
- (localhost,9060,100802,t,"{key,value}")
- (localhost,57637,100801,t,"{key,new_column,value}")
- (localhost,57637,100803,t,"{key,new_column,value}")
+ (,9060,100800,t,"{key,value}")
+ (,9060,100802,t,"{key,value}")
+ (,xxxxx,100801,t,"{key,new_column,value}")
+ (,xxxxx,100803,t,"{key,new_column,value}")
 (4 rows)
 -- we should be able to recover the transaction and
@@ -624,10 +624,10 @@ SELECT recover_prepared_transactions();
 SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
 run_command_on_placements
 ---------------------------------------------------------------------
- (localhost,9060,100800,t,"{key,new_column,value}")
- (localhost,9060,100802,t,"{key,new_column,value}")
- (localhost,57637,100801,t,"{key,new_column,value}")
- (localhost,57637,100803,t,"{key,new_column,value}")
+ (,9060,100800,t,"{key,new_column,value}")
+ (,9060,100802,t,"{key,new_column,value}")
+ (,xxxxx,100801,t,"{key,new_column,value}")
+ (,xxxxx,100803,t,"{key,new_column,value}")
 (4 rows)
 -- finally, test failing on ROLLBACK with 2PC
@@ -658,10 +658,10 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
 SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
 run_command_on_placements
 ---------------------------------------------------------------------
- (localhost,9060,100800,t,"{key,new_column,value}")
- (localhost,9060,100802,t,"{key,new_column,value}")
- (localhost,57637,100801,t,"{key,new_column,value}")
- (localhost,57637,100803,t,"{key,new_column,value}")
+ (,9060,100800,t,"{key,new_column,value}")
+ (,9060,100802,t,"{key,new_column,value}")
+ (,xxxxx,100801,t,"{key,new_column,value}")
+ (,xxxxx,100803,t,"{key,new_column,value}")
 (4 rows)
 -- but now kill just after the worker sends response to
@@ -692,10 +692,10 @@ SELECT recover_prepared_transactions();
 SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
 run_command_on_placements
 ---------------------------------------------------------------------
- (localhost,9060,100800,t,"{key,new_column,value}")
- (localhost,9060,100802,t,"{key,new_column,value}")
- (localhost,57637,100801,t,"{key,new_column,value}")
- (localhost,57637,100803,t,"{key,new_column,value}")
+ (,9060,100800,t,"{key,new_column,value}")
+ (,9060,100802,t,"{key,new_column,value}")
+ (,xxxxx,100801,t,"{key,new_column,value}")
+ (,xxxxx,100803,t,"{key,new_column,value}")
 (4 rows)
 -- another set of tests with 2PC and replication factor = 2
@@ -720,7 +720,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()');
 (1 row)
 ALTER TABLE test_table ADD COLUMN new_column INT;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -754,7 +754,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
 (1 row)
 ALTER TABLE test_table ADD COLUMN new_column INT;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -787,7 +787,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil
 (1 row)
 ALTER TABLE test_table ADD COLUMN new_column INT;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -821,7 +821,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki
 ALTER TABLE test_table ADD COLUMN new_column INT;
 ERROR: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 SELECT citus.mitmproxy('conn.allow()');
 mitmproxy
 ---------------------------------------------------------------------
@@ -842,14 +842,14 @@ SELECT recover_prepared_transactions();
 SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
 run_command_on_placements
 ---------------------------------------------------------------------
- (localhost,9060,100804,t,"{key,value}")
- (localhost,9060,100805,t,"{key,value}")
- (localhost,9060,100806,t,"{key,value}")
- (localhost,9060,100807,t,"{key,value}")
- (localhost,57637,100804,t,"{key,value}")
- (localhost,57637,100805,t,"{key,value}")
- (localhost,57637,100806,t,"{key,value}")
- (localhost,57637,100807,t,"{key,value}")
+ (,9060,100804,t,"{key,value}")
+ (,9060,100805,t,"{key,value}")
+ (,9060,100806,t,"{key,value}")
+ (,9060,100807,t,"{key,value}")
+ (,xxxxx,100804,t,"{key,value}")
+ (,xxxxx,100805,t,"{key,value}")
+ (,xxxxx,100806,t,"{key,value}")
+ (,xxxxx,100807,t,"{key,value}")
 (8 rows)
 -- killing on command complete of COMMIT PREPARE, we should see that the command succeeds
@@ -876,14 +876,14 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
 SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
 run_command_on_placements
 ---------------------------------------------------------------------
- (localhost,9060,100804,t,"{key,new_column,value}")
- (localhost,9060,100805,t,"{key,new_column,value}")
- (localhost,9060,100806,t,"{key,new_column,value}")
- (localhost,9060,100807,t,"{key,new_column,value}")
- (localhost,57637,100804,t,"{key,new_column,value}")
- (localhost,57637,100805,t,"{key,new_column,value}")
- (localhost,57637,100806,t,"{key,new_column,value}")
- (localhost,57637,100807,t,"{key,new_column,value}")
+ (,9060,100804,t,"{key,new_column,value}")
+ (,9060,100805,t,"{key,new_column,value}")
+ (,9060,100806,t,"{key,new_column,value}")
+ (,9060,100807,t,"{key,new_column,value}")
+ (,xxxxx,100804,t,"{key,new_column,value}")
+ (,xxxxx,100805,t,"{key,new_column,value}")
+ (,xxxxx,100806,t,"{key,new_column,value}")
+ (,xxxxx,100807,t,"{key,new_column,value}")
 (8 rows)
 -- we shouldn't have any prepared transactions in the workers
@@ -896,14 +896,14 @@ SELECT recover_prepared_transactions();
 SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
 run_command_on_placements
 ---------------------------------------------------------------------
- (localhost,9060,100804,t,"{key,new_column,value}")
- (localhost,9060,100805,t,"{key,new_column,value}")
- (localhost,9060,100806,t,"{key,new_column,value}")
- (localhost,9060,100807,t,"{key,new_column,value}")
- (localhost,57637,100804,t,"{key,new_column,value}")
- (localhost,57637,100805,t,"{key,new_column,value}")
- (localhost,57637,100806,t,"{key,new_column,value}")
- (localhost,57637,100807,t,"{key,new_column,value}")
+ (,9060,100804,t,"{key,new_column,value}")
+ (,9060,100805,t,"{key,new_column,value}")
+ (,9060,100806,t,"{key,new_column,value}")
+ (,9060,100807,t,"{key,new_column,value}")
+ (,xxxxx,100804,t,"{key,new_column,value}")
+ (,xxxxx,100805,t,"{key,new_column,value}")
+ (,xxxxx,100806,t,"{key,new_column,value}")
+ (,xxxxx,100807,t,"{key,new_column,value}")
 (8 rows)
 -- kill as soon as the coordinator sends COMMIT
@@ -931,14 +931,14 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
 SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
 run_command_on_placements
 ---------------------------------------------------------------------
- (localhost,9060,100804,t,"{key,new_column,value}")
- (localhost,9060,100805,t,"{key,new_column,value}")
- (localhost,9060,100806,t,"{key,new_column,value}")
- (localhost,9060,100807,t,"{key,new_column,value}")
- (localhost,57637,100804,t,"{key,value}")
- (localhost,57637,100805,t,"{key,value}")
- (localhost,57637,100806,t,"{key,value}")
- (localhost,57637,100807,t,"{key,value}")
+ (,9060,100804,t,"{key,new_column,value}")
+ (,9060,100805,t,"{key,new_column,value}")
+ (,9060,100806,t,"{key,new_column,value}")
+ (,9060,100807,t,"{key,new_column,value}")
+ (,xxxxx,100804,t,"{key,value}")
+ (,xxxxx,100805,t,"{key,value}")
+ (,xxxxx,100806,t,"{key,value}")
+ (,xxxxx,100807,t,"{key,value}")
 (8 rows)
 -- we should be able to recover the transaction and
@@ -952,14 +952,14 @@ SELECT recover_prepared_transactions();
 SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
 run_command_on_placements
 ---------------------------------------------------------------------
- (localhost,9060,100804,t,"{key,value}")
- (localhost,9060,100805,t,"{key,value}")
- (localhost,9060,100806,t,"{key,value}")
- (localhost,9060,100807,t,"{key,value}")
- (localhost,57637,100804,t,"{key,value}")
- (localhost,57637,100805,t,"{key,value}")
- (localhost,57637,100806,t,"{key,value}")
- (localhost,57637,100807,t,"{key,value}")
+ (,9060,100804,t,"{key,value}")
+ (,9060,100805,t,"{key,value}")
+ (,9060,100806,t,"{key,value}")
+ (,9060,100807,t,"{key,value}")
+ (,xxxxx,100804,t,"{key,value}")
+ (,xxxxx,100805,t,"{key,value}")
+ (,xxxxx,100806,t,"{key,value}")
+ (,xxxxx,100807,t,"{key,value}")
 (8 rows)
 -- finally, test failing on ROLLBACK with 2PC
@@ -990,14 +990,14 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
 SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
 run_command_on_placements
 ---------------------------------------------------------------------
- (localhost,9060,100804,t,"{key,value}")
- (localhost,9060,100805,t,"{key,value}")
- (localhost,9060,100806,t,"{key,value}")
- (localhost,9060,100807,t,"{key,value}")
- (localhost,57637,100804,t,"{key,value}")
- (localhost,57637,100805,t,"{key,value}")
- (localhost,57637,100806,t,"{key,value}")
- (localhost,57637,100807,t,"{key,value}")
+ (,9060,100804,t,"{key,value}")
+ (,9060,100805,t,"{key,value}")
+ (,9060,100806,t,"{key,value}")
+ (,9060,100807,t,"{key,value}")
+ (,xxxxx,100804,t,"{key,value}")
+ (,xxxxx,100805,t,"{key,value}")
+ (,xxxxx,100806,t,"{key,value}")
+ (,xxxxx,100807,t,"{key,value}")
 (8 rows)
 -- but now kill just after the worker sends response to
@@ -1028,14 +1028,14 @@ SELECT recover_prepared_transactions();
 SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
 run_command_on_placements
 ---------------------------------------------------------------------
- (localhost,9060,100804,t,"{key,value}")
- (localhost,9060,100805,t,"{key,value}")
- (localhost,9060,100806,t,"{key,value}")
- (localhost,9060,100807,t,"{key,value}")
- (localhost,57637,100804,t,"{key,value}")
- (localhost,57637,100805,t,"{key,value}")
- (localhost,57637,100806,t,"{key,value}")
- (localhost,57637,100807,t,"{key,value}")
+ (,9060,100804,t,"{key,value}")
+ (,9060,100805,t,"{key,value}")
+ (,9060,100806,t,"{key,value}")
+ (,9060,100807,t,"{key,value}")
+ (,xxxxx,100804,t,"{key,value}")
+ (,xxxxx,100805,t,"{key,value}")
+ (,xxxxx,100806,t,"{key,value}")
+ (,xxxxx,100807,t,"{key,value}")
 (8 rows)
 -- now do some tests with sequential mode
@@ -1048,7 +1048,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
 (1 row)
 ALTER TABLE test_table ADD COLUMN new_column INT;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -1081,7 +1081,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil
 (1 row)
 ALTER TABLE test_table ADD COLUMN new_column INT;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -1093,7 +1093,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").aft
 (1 row)
 ALTER TABLE test_table ADD COLUMN new_column INT;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
diff --git a/src/test/regress/expected/failure_distributed_results.out b/src/test/regress/expected/failure_distributed_results.out
index 5681a5173..cda344cfb 100644
--- a/src/test/regress/expected/failure_distributed_results.out
+++ b/src/test/regress/expected/failure_distributed_results.out
@@ -50,16 +50,16 @@ SELECT * FROM distributed_result_info ORDER BY resultId;
 resultid | nodeport | rowcount | targetshardid | targetshardindex
 ---------------------------------------------------------------------
 test_from_100800_to_0 | 9060 | 22 | 100805 | 0
- test_from_100801_to_0 | 57637 | 2 | 100805 | 0
- test_from_100801_to_1 | 57637 | 15 | 100806 | 1
+ test_from_100801_to_0 | xxxxx | 2 | 100805 | 0
+ test_from_100801_to_1 | xxxxx | 15 | 100806 | 1
 test_from_100802_to_1 | 9060 | 10 | 100806 | 1
 test_from_100802_to_2 | 9060 | 5 | 100807 | 2
- test_from_100803_to_2 | 57637 | 18 | 100807 | 2
- test_from_100803_to_3 | 57637 | 4 | 100808 | 3
+ test_from_100803_to_2 | xxxxx | 18 | 100807 | 2
+ test_from_100803_to_3 | xxxxx | 4 | 100808 | 3
 test_from_100804_to_3 | 9060 | 24 | 100808 | 3
 (8 rows)
-SELECT fetch_intermediate_results('{test_from_100802_to_1,test_from_100802_to_2}'::text[], 'localhost', :worker_2_port) > 0 AS fetched;
+SELECT fetch_intermediate_results('{test_from_100802_to_1,test_from_100802_to_2}'::text[], '', :worker_2_port) > 0 AS fetched;
 fetched
 ---------------------------------------------------------------------
 t
@@ -73,7 +73,7 @@ SELECT count(*), sum(x) FROM
 (1 row)
 ROLLBACk;
--- with failure, results from 100802 should be retried and succeed on 57637
+-- with failure, results from 100802 should be retried and succeed on xxxxx
 SELECT citus.mitmproxy('conn.onQuery(query="worker_partition_query_result.*test_from_100802").kill()');
 mitmproxy
 ---------------------------------------------------------------------
@@ -85,7 +85,7 @@ CREATE TABLE distributed_result_info AS
 SELECT resultId, nodeport, rowcount, targetShardId, targetShardIndex
 FROM partition_task_list_results('test', $$ SELECT * FROM source_table $$, 'target_table')
 NATURAL JOIN pg_dist_node;
-WARNING: connection error: localhost:xxxxx
+WARNING: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -93,23 +93,23 @@ SELECT * FROM distributed_result_info ORDER BY resultId;
 resultid | nodeport | rowcount | targetshardid | targetshardindex
 ---------------------------------------------------------------------
 test_from_100800_to_0 | 9060 | 22 | 100805 | 0
- test_from_100801_to_0 | 57637 | 2 | 100805 | 0
- test_from_100801_to_1 | 57637 | 15 | 100806 | 1
- test_from_100802_to_1 | 57637 | 10 | 100806 | 1
- test_from_100802_to_2 | 57637 | 5 | 100807 | 2
- test_from_100803_to_2 | 57637 | 18 | 100807 | 2
- test_from_100803_to_3 | 57637 | 4 | 100808 | 3
+ test_from_100801_to_0 | xxxxx | 2 | 100805 | 0
+ test_from_100801_to_1 | xxxxx | 15 | 100806 | 1
+ test_from_100802_to_1 | xxxxx | 10 | 100806 | 1
+ test_from_100802_to_2 | xxxxx | 5 | 100807 | 2
+ test_from_100803_to_2 | xxxxx | 18 | 100807 | 2
+ test_from_100803_to_3 | xxxxx | 4 | 100808 | 3
 test_from_100804_to_3 | 9060 | 24 | 100808 | 3
 (8 rows)
 -- fetch from worker 2 should fail
 SAVEPOINT s1;
-SELECT fetch_intermediate_results('{test_from_100802_to_1,test_from_100802_to_2}'::text[], 'localhost', :worker_2_port) > 0 AS fetched;
+SELECT fetch_intermediate_results('{test_from_100802_to_1,test_from_100802_to_2}'::text[], '', :worker_2_port) > 0 AS fetched;
 ERROR: could not open file "base/pgsql_job_cache/xx_x_xxx/test_from_100802_to_1.data": No such file or directory
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 ROLLBACK TO SAVEPOINT s1;
 -- fetch from worker 1 should succeed
-SELECT fetch_intermediate_results('{test_from_100802_to_1,test_from_100802_to_2}'::text[], 'localhost', :worker_1_port) > 0 AS fetched;
+SELECT fetch_intermediate_results('{test_from_100802_to_1,test_from_100802_to_2}'::text[], '', :worker_1_port) > 0 AS fetched;
 fetched
 ---------------------------------------------------------------------
 t
diff --git a/src/test/regress/expected/failure_insert_select_pushdown.out b/src/test/regress/expected/failure_insert_select_pushdown.out
index b92b49b36..27cb860df 100644
--- a/src/test/regress/expected/failure_insert_select_pushdown.out
+++ b/src/test/regress/expected/failure_insert_select_pushdown.out
@@ -44,7 +44,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown"
 (1 row)
 INSERT INTO events_summary SELECT user_id, event_id, count(*) FROM events_table GROUP BY 1,2;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -98,7 +98,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown"
 (1 row)
 INSERT INTO events_table SELECT * FROM events_table;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
diff --git a/src/test/regress/expected/failure_insert_select_repartition.out b/src/test/regress/expected/failure_insert_select_repartition.out
index a0d91e0ef..cec472f7f 100644
--- a/src/test/regress/expected/failure_insert_select_repartition.out
+++ b/src/test/regress/expected/failure_insert_select_repartition.out
@@ -54,7 +54,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_partition_query_result").kill
 (1 row)
 INSERT INTO target_table SELECT * FROM source_table;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -70,11 +70,11 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_partition_query_result").kill
 (1 row)
 INSERT INTO target_table SELECT * FROM replicated_source_table;
-WARNING: connection error: localhost:xxxxx
+WARNING: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
-WARNING: connection error: localhost:xxxxx
+WARNING: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -106,7 +106,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="fetch_intermediate_results").kill()'
 (1 row)
 INSERT INTO target_table SELECT * FROM source_table;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -122,7 +122,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="fetch_intermediate_results").kill()'
 (1 row)
 INSERT INTO target_table SELECT * FROM replicated_source_table;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -143,7 +143,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="read_intermediate_results").kill()')
 (1 row)
 INSERT INTO target_table SELECT * FROM source_table;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -159,7 +159,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="read_intermediate_results").kill()')
 (1 row)
 INSERT INTO target_table SELECT * FROM replicated_source_table;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -179,7 +179,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="read_intermediate_results").kill()')
 (1 row)
 INSERT INTO replicated_target_table SELECT * FROM source_table;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
diff --git a/src/test/regress/expected/failure_insert_select_via_coordinator.out b/src/test/regress/expected/failure_insert_select_via_coordinator.out
index b46a73c27..7193624c4 100644
--- a/src/test/regress/expected/failure_insert_select_via_coordinator.out
+++ b/src/test/regress/expected/failure_insert_select_via_coordinator.out
@@ -56,7 +56,7 @@ INSERT INTO events_summary SELECT event_id, event_type, count(*) FROM events_tab
 ERROR: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 -- kill data push
 SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()');
 mitmproxy
@@ -68,7 +68,7 @@ INSERT INTO events_summary SELECT event_id, event_type, count(*) FROM events_tab
 ERROR: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 -- cancel coordinator pull query
 SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')');
 mitmproxy
@@ -112,7 +112,7 @@ INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP
 ERROR: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 -- kill data push
 SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()');
 mitmproxy
@@ -124,7 +124,7 @@ INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP
 ERROR: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 -- cancel coordinator pull query
 SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')');
 mitmproxy
@@ -170,7 +170,7 @@ INSERT INTO events_reference_distributed SELECT * FROM events_reference;
 ERROR: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 -- kill data push
 SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()');
 mitmproxy
@@ -182,7 +182,7 @@ INSERT INTO events_reference_distributed SELECT * FROM events_reference;
 ERROR: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 -- cancel coordinator pull query
 SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')');
 mitmproxy
diff --git a/src/test/regress/expected/failure_multi_dml.out b/src/test/regress/expected/failure_multi_dml.out
index 89f48536a..c4feb1150 100644
--- a/src/test/regress/expected/failure_multi_dml.out
+++ b/src/test/regress/expected/failure_multi_dml.out
@@ -33,7 +33,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^DELETE").kill()');
 BEGIN;
 DELETE FROM dml_test WHERE id = 1;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -96,7 +96,7 @@ BEGIN;
 DELETE FROM dml_test WHERE id = 1;
 DELETE FROM dml_test WHERE id = 2;
 INSERT INTO dml_test VALUES (5, 'Epsilon');
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -154,7 +154,7 @@ DELETE FROM dml_test WHERE id = 1;
 DELETE FROM dml_test WHERE id = 2;
 INSERT INTO dml_test VALUES (5, 'Epsilon');
 UPDATE dml_test SET name = 'alpha' WHERE id = 1;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -208,7 +208,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").kill()');
 -- error message that is caused during commit.
 -- we'll test for the txn side-effects to ensure it didn't run
 SELECT master_run_on_worker(
- ARRAY['localhost']::text[],
+ ARRAY['']::text[],
 ARRAY[:master_port]::int[],
 ARRAY['
 BEGIN;
@@ -223,7 +223,7 @@ COMMIT;
 );
 master_run_on_worker
 ---------------------------------------------------------------------
- (localhost,57636,t,BEGIN)
+ (,xxxxx,t,BEGIN)
 (1 row)
 SELECT citus.mitmproxy('conn.allow()');
@@ -392,10 +392,10 @@ UPDATE dml_test SET name = 'alpha' WHERE id = 1;
 UPDATE dml_test SET name = 'gamma' WHERE id = 3;
 COMMIT;
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
-WARNING: failed to commit transaction on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
+WARNING: failed to commit transaction on :xxxxx
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 --- should see all changes, but they only went to one placement (other is unhealthy)
 SELECT * FROM dml_test ORDER BY id ASC;
 id | name
@@ -444,7 +444,7 @@ UPDATE dml_test SET name = 'alpha' WHERE id = 1;
 UPDATE dml_test SET name = 'gamma' WHERE id = 3;
 COMMIT;
 ERROR: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 --- shouldn't see any changes after failed COMMIT
 SELECT * FROM dml_test ORDER BY id ASC;
 id | name
diff --git a/src/test/regress/expected/failure_multi_row_insert.out b/src/test/regress/expected/failure_multi_row_insert.out
index a5ab9b357..3e83878c3 100644
--- a/src/test/regress/expected/failure_multi_row_insert.out
+++ b/src/test/regress/expected/failure_multi_row_insert.out
@@ -43,7 +43,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
 (1 row)
 INSERT INTO distributed_table VALUES (1,1), (1,2), (1,3);
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -58,7 +58,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
 (1 row)
 INSERT INTO distributed_table VALUES (1,7), (5,8);
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -73,7 +73,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
 (1 row)
 INSERT INTO distributed_table VALUES (1,11), (6,12);
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -93,7 +93,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).kill()');
 (1 row)
 INSERT INTO distributed_table VALUES (1,15), (6,16);
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -113,7 +113,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
 (1 row)
 INSERT INTO distributed_table VALUES (2,19),(1,20);
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
diff --git a/src/test/regress/expected/failure_multi_shard_update_delete.out b/src/test/regress/expected/failure_multi_shard_update_delete.out
index cebd7f8c6..098d85d19 100644
--- a/src/test/regress/expected/failure_multi_shard_update_delete.out
+++ b/src/test/regress/expected/failure_multi_shard_update_delete.out
@@ -63,7 +63,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()');
 -- issue a multi shard delete
 DELETE FROM t2 WHERE b = 2;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -82,7 +82,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").
 (1 row)
 DELETE FROM t2 WHERE b = 2;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -145,7 +145,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
 -- issue a multi shard update
 UPDATE t2 SET c = 4 WHERE b = 2;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -164,7 +164,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").kill(
 (1 row)
 UPDATE t2 SET c = 4 WHERE b = 2;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -221,7 +221,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()');
 -- issue a multi shard delete
 DELETE FROM t2 WHERE b = 2;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -240,7 +240,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").
 (1 row)
 DELETE FROM t2 WHERE b = 2;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -303,7 +303,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
 -- issue a multi shard update
 UPDATE t2 SET c = 4 WHERE b = 2;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -322,7 +322,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").kill(
 (1 row)
 UPDATE t2 SET c = 4 WHERE b = 2;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -396,7 +396,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()');
 (1 row)
 DELETE FROM r1 WHERE a = 2;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -414,7 +414,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()');
 (1 row)
 DELETE FROM t2 WHERE b = 2;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -465,7 +465,7 @@ RETURNING *;
 ERROR: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 --- verify nothing is updated
 SELECT citus.mitmproxy('conn.allow()');
 mitmproxy
@@ -496,7 +496,7 @@ UPDATE t3 SET c = q.c FROM (
 SELECT b, max(c) as c FROM t2 GROUP BY b) q
 WHERE t3.b = q.b
 RETURNING *;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -552,7 +552,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t3_201013").kill(
 (1 row)
 UPDATE t3 SET b = 2 WHERE b = 1;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -587,7 +587,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO
 -- following will fail
 UPDATE t3 SET b = 2 WHERE b = 1;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -606,7 +606,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO
 (1 row)
 UPDATE t3 SET b = 1 WHERE b = 2 RETURNING *;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -626,7 +626,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO
 (1 row)
 UPDATE t3 SET b = 2 WHERE b = 1;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -661,7 +661,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO
 -- following will fail
 UPDATE t3 SET b = 2 WHERE b = 1;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
diff --git a/src/test/regress/expected/failure_mx_metadata_sync.out b/src/test/regress/expected/failure_mx_metadata_sync.out
index a4aeb7704..66ccb644a 100644
--- a/src/test/regress/expected/failure_mx_metadata_sync.out
+++ b/src/test/regress/expected/failure_mx_metadata_sync.out
@@ -36,7 +36,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET grou
 (1 row)
-SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
+SELECT start_metadata_sync_to_node('', :worker_2_proxy_port);
 ERROR: canceling statement due to user request
 SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET groupid").kill()');
 mitmproxy
@@ -44,11 +44,11 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET grou
 (1 row)
-SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
+SELECT start_metadata_sync_to_node('', :worker_2_proxy_port);
 ERROR: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 -- Failure to drop all tables in pg_dist_partition
 SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_table").cancel(' || :pid || ')');
 mitmproxy
@@ -56,7 +56,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_tabl
 (1 row)
-SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
+SELECT start_metadata_sync_to_node('', :worker_2_proxy_port);
 ERROR: canceling statement due to user request
 SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_table").kill()');
 mitmproxy
@@ -64,11 +64,11 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_tabl
 (1 row)
-SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
+SELECT start_metadata_sync_to_node('', :worker_2_proxy_port);
 ERROR: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 -- Failure to truncate pg_dist_node in the worker
 SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").cancel(' || :pid || ')');
 mitmproxy
@@ -76,7 +76,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").can
 (1 row)
-SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
+SELECT start_metadata_sync_to_node('', :worker_2_proxy_port);
 ERROR: canceling statement due to user request
 SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").kill()');
 mitmproxy
@@ -84,11 +84,11 @@ SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").kil
 (1 row)
-SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
+SELECT start_metadata_sync_to_node('', :worker_2_proxy_port);
 ERROR: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 -- Failure to populate pg_dist_node in the worker
 SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").cancel(' || :pid || ')');
 mitmproxy
@@ -96,7 +96,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").cancel('
 (1 row)
-SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
+SELECT start_metadata_sync_to_node('', :worker_2_proxy_port);
 ERROR: canceling statement due to user request
 SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").kill()');
 mitmproxy
@@ -104,11 +104,11 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").kill()')
 (1 row)
-SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
+SELECT start_metadata_sync_to_node('', :worker_2_proxy_port);
 ERROR: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 -- Verify that coordinator knows worker does not have valid metadata
 SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port;
 hasmetadata
@@ -123,7 +123,7 @@ SELECT citus.mitmproxy('conn.allow()');
 (1 row)
-SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
+SELECT start_metadata_sync_to_node('', :worker_2_proxy_port);
 start_metadata_sync_to_node
 ---------------------------------------------------------------------
@@ -147,7 +147,7 @@ SELECT create_distributed_table('t2', 'id');
 ERROR: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 SELECT citus.mitmproxy('conn.onParse(query="^INSERT INTO pg_dist_shard").cancel(' || :pid || ')');
 mitmproxy
 ---------------------------------------------------------------------
diff --git a/src/test/regress/expected/failure_ref_tables.out b/src/test/regress/expected/failure_ref_tables.out
index a397f8dfd..8eb752cf8 100644
--- a/src/test/regress/expected/failure_ref_tables.out
+++ b/src/test/regress/expected/failure_ref_tables.out
@@ -33,7 +33,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
 (1 row)
 INSERT INTO ref_table VALUES (5, 6);
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -51,7 +51,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
 (1 row)
 UPDATE ref_table SET key=7 RETURNING value;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -71,7 +71,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
 BEGIN;
 DELETE FROM ref_table WHERE key=5;
 UPDATE ref_table SET key=value;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
diff --git a/src/test/regress/expected/failure_replicated_partitions.out b/src/test/regress/expected/failure_replicated_partitions.out
index 3d6d748a8..5fff85027 100644
--- a/src/test/regress/expected/failure_replicated_partitions.out
+++ b/src/test/regress/expected/failure_replicated_partitions.out
@@ -29,7 +29,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
 (1 row)
 INSERT INTO partitioned_table VALUES (0, 0);
-WARNING: connection error: localhost:xxxxx
+WARNING: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
diff --git a/src/test/regress/expected/failure_savepoints.out b/src/test/regress/expected/failure_savepoints.out
index a2a113e9a..fbe13aead 100644
--- a/src/test/regress/expected/failure_savepoints.out
+++ b/src/test/regress/expected/failure_savepoints.out
@@ -39,15 +39,15 @@ BEGIN;
 INSERT INTO artists VALUES (5, 'Asher Lev');
 SAVEPOINT s1;
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
-WARNING: connection error: localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
+WARNING: connection error: :xxxxx
 DETAIL: connection not open
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 ERROR: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 DELETE FROM artists WHERE id=4;
 ERROR: current transaction is aborted, commands ignored until end of transaction block
 RELEASE SAVEPOINT s1;
@@ -73,17 +73,17 @@ DELETE FROM artists WHERE id=4;
 RELEASE SAVEPOINT s1;
 WARNING: AbortSubTransaction while in COMMIT state
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
-WARNING: connection error: localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
+WARNING: connection error: :xxxxx
 DETAIL: connection not open
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 WARNING: savepoint "savepoint_2" does not exist
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 ERROR: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 ROLLBACK;
 SELECT * FROM artists WHERE id IN (4, 5);
 id | name
@@ -104,9 +104,9 @@ SAVEPOINT s1;
 DELETE FROM artists WHERE id=4;
 ROLLBACK TO SAVEPOINT s1;
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 COMMIT;
 ERROR: could not make changes to shard xxxxx on any node
 SELECT * FROM artists WHERE id IN (4, 5);
 id | name
@@ -131,15 +131,15 @@ INSERT INTO artists VALUES (5, 'Jacob Kahn');
 RELEASE SAVEPOINT s2;
 WARNING: AbortSubTransaction while in COMMIT state
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
-WARNING: connection error: localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
+WARNING: connection error: :xxxxx
 DETAIL: connection not open
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 ERROR: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 COMMIT;
 SELECT * FROM artists WHERE id IN (4, 5);
 id | name
@@ -162,9 +162,9 @@ SAVEPOINT s2;
 DELETE FROM artists WHERE id=5;
 ROLLBACK TO SAVEPOINT s2;
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 COMMIT;
 ERROR: could not make changes to shard xxxxx on any node
 SELECT * FROM artists WHERE id IN (4, 5);
@@ -213,7 +213,7 @@ ROLLBACK TO SAVEPOINT s1;
 WARNING: connection not open
 WARNING: connection not open
 WARNING: connection not open
-WARNING: connection error: localhost:xxxxx
+WARNING: connection error: :xxxxx
 WARNING: connection not open
 WARNING: connection not open
 COMMIT;
@@ -248,7 +248,7 @@ BEGIN;
 INSERT INTO researchers VALUES (7, 4, 'Jan Plaza');
 SAVEPOINT s1;
 WARNING: connection not open
-WARNING: connection error: localhost:xxxxx
+WARNING: connection error: :xxxxx
 WARNING: connection not open
 WARNING: connection not open
 ERROR: connection not open
@@ -290,7 +290,7 @@ WARNING: connection not open
 WARNING: connection not open
 RELEASE SAVEPOINT s1;
 COMMIT;
-ERROR: failure on connection marked as essential: localhost:xxxxx
+ERROR: failure on connection marked as essential: :xxxxx
 -- should see correct results from healthy placement and one bad placement
 SELECT * FROM researchers WHERE lab_id = 4;
 id | lab_id | name
@@ -321,7 +321,7 @@ ROLLBACK TO s1;
 RELEASE SAVEPOINT s1;
 WARNING: AbortSubTransaction while in COMMIT state
 WARNING: connection not open
-WARNING: connection error: localhost:xxxxx
+WARNING: connection error: :xxxxx
 WARNING: connection not open
 WARNING: connection not open
 WARNING: savepoint "savepoint_3" does not exist
diff --git a/src/test/regress/expected/failure_setup.out b/src/test/regress/expected/failure_setup.out
index 4cbb4b0a4..c7cc82480 100644
--- a/src/test/regress/expected/failure_setup.out
+++ b/src/test/regress/expected/failure_setup.out
@@ -5,13 +5,13 @@ SELECT citus.mitmproxy('conn.allow()');
 (1 row)
 -- add the workers
-SELECT master_add_node('localhost', :worker_1_port);
+SELECT master_add_node('', :worker_1_port);
 master_add_node
 ---------------------------------------------------------------------
 1
 (1 row)
-SELECT master_add_node('localhost', :worker_2_proxy_port); -- an mitmproxy which forwards to the second worker
+SELECT master_add_node('', :worker_2_proxy_port); -- an mitmproxy which forwards to the second worker
 master_add_node
 ---------------------------------------------------------------------
 2
diff --git a/src/test/regress/expected/failure_single_mod.out b/src/test/regress/expected/failure_single_mod.out
index 65c5ffbef..28c3af897 100644
--- a/src/test/regress/expected/failure_single_mod.out
+++ b/src/test/regress/expected/failure_single_mod.out
@@ -27,7 +27,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
 (1 row)
 INSERT INTO mod_test VALUES (2, 6);
-WARNING: connection error: localhost:xxxxx
+WARNING: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -63,7 +63,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
 (1 row)
 UPDATE mod_test SET value='ok' WHERE key=2 RETURNING key;
-WARNING: connection error: localhost:xxxxx
+WARNING: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
@@ -102,7 +102,7 @@ INSERT INTO mod_test VALUES (2, 6);
 INSERT INTO mod_test VALUES (2, 7);
 DELETE FROM mod_test WHERE key=2 AND value = '7';
 UPDATE mod_test SET value='ok' WHERE key=2;
-WARNING: connection error: localhost:xxxxx
+WARNING: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
diff --git a/src/test/regress/expected/failure_single_select.out b/src/test/regress/expected/failure_single_select.out
index d5087779f..51cde7834 100644
--- a/src/test/regress/expected/failure_single_select.out
+++ b/src/test/regress/expected/failure_single_select.out
@@ -28,7 +28,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()');
 (1 row)
 
 SELECT * FROM select_test WHERE key = 3;
-WARNING: connection error: localhost:xxxxx
+WARNING: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
@@ -38,7 +38,7 @@ DETAIL: server closed the connection unexpectedly
 (1 row)
 
 SELECT * FROM select_test WHERE key = 3;
-WARNING: connection error: localhost:xxxxx
+WARNING: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
@@ -57,7 +57,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()');
 BEGIN;
 INSERT INTO select_test VALUES (3, 'more data');
 SELECT * FROM select_test WHERE key = 3;
-WARNING: connection error: localhost:xxxxx
+WARNING: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
@@ -69,7 +69,7 @@ DETAIL: server closed the connection unexpectedly
 
 INSERT INTO select_test VALUES (3, 'even more data');
 SELECT * FROM select_test WHERE key = 3;
-WARNING: connection error: localhost:xxxxx
+WARNING: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
@@ -160,7 +160,7 @@ SELECT * FROM select_test WHERE key = 3;
 
 INSERT INTO select_test VALUES (3, 'even more data');
 SELECT * FROM select_test WHERE key = 3;
-WARNING: connection error: localhost:xxxxx
+WARNING: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
@@ -187,7 +187,7 @@ SELECT recover_prepared_transactions();
 ERROR: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 -- bug from https://github.com/citusdata/citus/issues/1926
 SET citus.max_cached_conns_per_worker TO 0; -- purge cache
 DROP TABLE select_test;
@@ -215,7 +215,7 @@ SELECT * FROM select_test WHERE key = 1;
 (1 row)
 
 SELECT * FROM select_test WHERE key = 1;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
diff --git a/src/test/regress/expected/failure_truncate.out b/src/test/regress/expected/failure_truncate.out
index 28cbe1b46..fe24fbb2f 100644
--- a/src/test/regress/expected/failure_truncate.out
+++ b/src/test/regress/expected/failure_truncate.out
@@ -44,7 +44,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()');
 (1 row)
 
 TRUNCATE test_table;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
@@ -102,7 +102,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
 (1 row)
 
 TRUNCATE test_table;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
@@ -159,7 +159,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test
 (1 row)
 
 TRUNCATE test_table;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
@@ -284,15 +284,15 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT").kill()');
 
 TRUNCATE test_table;
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
-WARNING: failed to commit transaction on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
+WARNING: failed to commit transaction on :xxxxx
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
-WARNING: failed to commit transaction on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
+WARNING: failed to commit transaction on :xxxxx
 WARNING: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 WARNING: could not commit transaction for shard xxxxx on any active node
 WARNING: could not commit transaction for shard xxxxx on any active node
 SELECT citus.mitmproxy('conn.allow()');
@@ -365,7 +365,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()');
 
 TRUNCATE reference_table CASCADE;
 ERROR: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 SELECT citus.mitmproxy('conn.allow()');
 mitmproxy
 ---------------------------------------------------------------------
@@ -433,7 +433,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE").after(2).kill()');
 (1 row)
 
 TRUNCATE reference_table CASCADE;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
@@ -506,7 +506,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki
 
 TRUNCATE reference_table CASCADE;
 ERROR: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 SELECT citus.mitmproxy('conn.allow()');
 mitmproxy
 ---------------------------------------------------------------------
@@ -577,7 +577,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()');
 (1 row)
 
 TRUNCATE test_table;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
@@ -635,7 +635,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
 (1 row)
 
 TRUNCATE test_table;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
@@ -692,7 +692,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE truncate_failure.tes
 (1 row)
 
 TRUNCATE test_table;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
@@ -750,7 +750,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="^PREPARE TRANSACTION").k
 
 TRUNCATE test_table;
 ERROR: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 SELECT citus.mitmproxy('conn.allow()');
 mitmproxy
 ---------------------------------------------------------------------
@@ -956,7 +956,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()');
 (1 row)
 
 TRUNCATE test_table;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
@@ -1014,7 +1014,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
 (1 row)
 
 TRUNCATE test_table;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
@@ -1071,7 +1071,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test
 (1 row)
 
 TRUNCATE test_table;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
@@ -1129,7 +1129,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki
 
 TRUNCATE test_table;
 ERROR: connection not open
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 SELECT citus.mitmproxy('conn.allow()');
 mitmproxy
 ---------------------------------------------------------------------
diff --git a/src/test/regress/expected/failure_vacuum.out b/src/test/regress/expected/failure_vacuum.out
index 66801ec22..d997839f5 100644
--- a/src/test/regress/expected/failure_vacuum.out
+++ b/src/test/regress/expected/failure_vacuum.out
@@ -31,7 +31,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").kill()');
 (1 row)
 
 VACUUM vacuum_test;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
@@ -42,7 +42,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()');
 (1 row)
 
 ANALYZE vacuum_test;
-WARNING: connection error: localhost:xxxxx
+WARNING: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
@@ -111,7 +111,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").kill()');
 (1 row)
 
 VACUUM vacuum_test, other_vacuum_test;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
diff --git a/src/test/regress/expected/failure_vacuum_1.out b/src/test/regress/expected/failure_vacuum_1.out
index c13096f6d..99d4d96ea 100644
--- a/src/test/regress/expected/failure_vacuum_1.out
+++ b/src/test/regress/expected/failure_vacuum_1.out
@@ -31,7 +31,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").kill()');
 (1 row)
 
 VACUUM vacuum_test;
-ERROR: connection error: localhost:xxxxx
+ERROR: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
@@ -42,7 +42,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()');
 (1 row)
 
 ANALYZE vacuum_test;
-WARNING: connection error: localhost:xxxxx
+WARNING: connection error: :xxxxx
 DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
diff --git a/src/test/regress/expected/foreign_key_restriction_enforcement.out b/src/test/regress/expected/foreign_key_restriction_enforcement.out
index 0fcade1c5..c55014e18 100644
--- a/src/test/regress/expected/foreign_key_restriction_enforcement.out
+++ b/src/test/regress/expected/foreign_key_restriction_enforcement.out
@@ -473,7 +473,7 @@ DETAIL: Reference relation "transitive_reference_table" is modified, which migh
 UPDATE on_update_fkey_table SET value_1 = 101 WHERE id = 1;
 ERROR: insert or update on table "on_update_fkey_table_xxxxxxx" violates foreign key constraint "fkey_xxxxxxx"
 DETAIL: Key (value_1)=(101) is not present in table "reference_table_2380001".
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 UPDATE on_update_fkey_table SET value_1 = 101 WHERE id = 2;
 ERROR: current transaction is aborted, commands ignored until end of transaction block
 UPDATE on_update_fkey_table SET value_1 = 101 WHERE id = 3;
diff --git a/src/test/regress/expected/foreign_key_to_reference_table.out b/src/test/regress/expected/foreign_key_to_reference_table.out
index 9f30cd236..d2ea41695 100644
--- a/src/test/regress/expected/foreign_key_to_reference_table.out
+++ b/src/test/regress/expected/foreign_key_to_reference_table.out
@@ -454,7 +454,7 @@ ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFER
 INSERT INTO referencing_table VALUES(1, 1);
 ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "fkey_ref_xxxxxxx"
 DETAIL: Key (ref_id)=(X) is not present in table "referenced_table_xxxxxxx".
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 -- test insert to referencing while there is corresponding value in referenced table
 INSERT INTO referenced_table SELECT x, x from generate_series(1,1000) as f(x);
 INSERT INTO referencing_table SELECT x, x from generate_series(1,500) as f(x);
@@ -463,7 +463,7 @@ INSERT INTO referencing_table SELECT x, x from generate_series(1,500) as f(x);
 DELETE FROM referenced_table WHERE id > 3;
 ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "fkey_ref_xxxxxxx" on table "referencing_table_xxxxxxx"
 DETAIL: Key (id)=(X) is still referenced from table "referencing_table_xxxxxxx".
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 -- test delete from referenced table while there is NO corresponding value in referencing table
 DELETE FROM referenced_table WHERE id = 501;
 -- test cascading truncate
@@ -1570,7 +1570,7 @@ INSERT INTO test_table_2 VALUES (4,2147483648);
 -- should fail since there is a bigint out of integer range > (2^32 - 1)
 ALTER TABLE test_table_2 ALTER COLUMN value_1 SET DATA TYPE int;
 ERROR: integer out of range
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
 count
 ---------------------------------------------------------------------
@@ -1816,7 +1816,7 @@ ALTER TABLE referencing_table_4 ADD CONSTRAINT fkey_to_ref FOREIGN KEY (value_1)
 INSERT INTO referencing_table VALUES (0, 5);
 ERROR: insert or update on table "referencing_table_4_7000540" violates foreign key constraint "fkey_xxxxxxx"
 DETAIL: Key (id)=(X) is not present in table "referencing_table_0_xxxxxxx".
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 -- should succeed on partitioning_test_0
 INSERT INTO referencing_table VALUES (0, 1);
 SELECT * FROM referencing_table;
@@ -1829,7 +1829,7 @@ SELECT * FROM referencing_table;
 INSERT INTO referencing_table VALUES (0, 5);
 ERROR: insert or update on table "referencing_table_4_7000540" violates foreign key constraint "fkey_to_ref_7000540"
 DETAIL: Key (value_1)=(5) is not present in table "referenced_table_xxxxxxx".
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on :xxxxx
 INSERT INTO referenced_table VALUES(5,5);
 -- should succeed since both of the foreign constraints are positive
 INSERT INTO referencing_table VALUES (0, 5);
diff --git a/src/test/regress/expected/grant_on_schema_propagation.out b/src/test/regress/expected/grant_on_schema_propagation.out
index 9c7affd46..ed89fa470 100644
--- a/src/test/regress/expected/grant_on_schema_propagation.out
+++ b/src/test/regress/expected/grant_on_schema_propagation.out
@@ -62,14 +62,14 @@ SELECT create_distributed_table('another_dist_schema.dist_table', 'id');
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema';
 nspname | nspacl
 ---------------------------------------------------------------------
- dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U/postgres,role_3=U*C/role_1,=UC/role_1,=U/postgres}
+ dist_schema | {=UC/,role_1=U*C*/,role_2=U/,role_3=U*C/role_1,=UC/role_1,=U/}
 (1 row)
 
 \c - - - :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema';
 nspname | nspacl
 ---------------------------------------------------------------------
- dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U/postgres,role_3=U*C/role_1,=UC/role_1,=U/postgres}
+ dist_schema | {=UC/,role_1=U*C*/,role_2=U/,role_3=U*C/role_1,=UC/role_1,=U/}
 (1 row)
 
 \c - - - :master_port
@@ -78,17 +78,17 @@ GRANT ALL ON SCHEMA dist_schema, another_dist_schema, non_dist_schema TO role_1,
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- another_dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/postgres,role_3=U*C*/postgres}
- dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/postgres,role_3=U*C/role_1,=UC/role_1,=U/postgres,role_3=U*C*/postgres}
- non_dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/postgres,role_3=U*C*/postgres}
+ another_dist_schema | {=UC/,role_1=U*C*/,role_2=U*C*/,role_3=U*C*/}
+ dist_schema | {=UC/,role_1=U*C*/,role_2=U*C*/,role_3=U*C/role_1,=UC/role_1,=U/,role_3=U*C*/}
+ non_dist_schema | {=UC/,role_1=U*C*/,role_2=U*C*/,role_3=U*C*/}
 (3 rows)
 
 \c - - - :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- another_dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/postgres,role_3=U*C*/postgres}
- dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/postgres,role_3=U*C/role_1,=UC/role_1,=U/postgres,role_3=U*C*/postgres}
+ another_dist_schema | {=UC/,role_1=U*C*/,role_2=U*C*/,role_3=U*C*/}
+ dist_schema | {=UC/,role_1=U*C*/,role_2=U*C*/,role_3=U*C/role_1,=UC/role_1,=U/,role_3=U*C*/}
 (2 rows)
 
 \c - - - :master_port
@@ -97,17 +97,17 @@ REVOKE ALL ON SCHEMA dist_schema, another_dist_schema, non_dist_schema FROM role
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- another_dist_schema | {postgres=UC/postgres}
- dist_schema | {postgres=UC/postgres}
- non_dist_schema | {postgres=UC/postgres}
+ another_dist_schema | {=UC/}
+ dist_schema | {=UC/}
+ non_dist_schema | {=UC/}
 (3 rows)
 
 \c - - - :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- another_dist_schema | {postgres=UC/postgres}
- dist_schema | {postgres=UC/postgres}
+ another_dist_schema | {=UC/}
+ dist_schema | {=UC/}
 (2 rows)
 
 \c - - - :master_port
@@ -116,17 +116,17 @@ GRANT USAGE, CREATE ON SCHEMA dist_schema, another_dist_schema, non_dist_schema
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- another_dist_schema | {postgres=UC/postgres,role_1=UC/postgres,role_2=UC/postgres,role_3=UC/postgres}
- dist_schema | {postgres=UC/postgres,role_1=UC/postgres,role_2=UC/postgres,role_3=UC/postgres}
- non_dist_schema | {postgres=UC/postgres,role_1=UC/postgres,role_2=UC/postgres,role_3=UC/postgres}
+ another_dist_schema | {=UC/,role_1=UC/,role_2=UC/,role_3=UC/}
+ dist_schema | {=UC/,role_1=UC/,role_2=UC/,role_3=UC/}
+ non_dist_schema | {=UC/,role_1=UC/,role_2=UC/,role_3=UC/}
 (3 rows)
 
 \c - - - :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- another_dist_schema | {postgres=UC/postgres,role_1=UC/postgres,role_2=UC/postgres,role_3=UC/postgres}
- dist_schema | {postgres=UC/postgres,role_1=UC/postgres,role_2=UC/postgres,role_3=UC/postgres}
+ another_dist_schema | {=UC/,role_1=UC/,role_2=UC/,role_3=UC/}
+ dist_schema | {=UC/,role_1=UC/,role_2=UC/,role_3=UC/}
 (2 rows)
 
 \c - - - :master_port
@@ -135,17 +135,17 @@ REVOKE USAGE, CREATE ON SCHEMA dist_schema, another_dist_schema, non_dist_schema
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- another_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
- dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
- non_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
+ another_dist_schema | {=UC/,role_3=UC/}
+ dist_schema | {=UC/,role_3=UC/}
+ non_dist_schema | {=UC/,role_3=UC/}
 (3 rows)
 
 \c - - - :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- another_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
- dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
+ another_dist_schema | {=UC/,role_3=UC/}
+ dist_schema | {=UC/,role_3=UC/}
 (2 rows)
 
 \c - - - :master_port
@@ -155,8 +155,8 @@ GRANT USAGE ON SCHEMA dist_schema TO role_1, role_3 WITH GRANT OPTION;
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- another_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
- dist_schema | {postgres=UC/postgres,role_3=U*C/postgres,role_1=U*/postgres}
+ another_dist_schema | {=UC/,role_3=UC/}
+ dist_schema | {=UC/,role_3=U*C/,role_1=U*/}
 (2 rows)
 
 \c - - - :master_port
@@ -166,8 +166,8 @@ REVOKE GRANT OPTION FOR USAGE ON SCHEMA dist_schema FROM role_3;
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- another_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
- dist_schema | {postgres=UC/postgres,role_3=UC/postgres,role_1=U*/postgres}
+ another_dist_schema | {=UC/,role_3=UC/}
+ dist_schema | {=UC/,role_3=UC/,role_1=U*/}
 (2 rows)
 
 \c - - - :master_port
@@ -181,8 +181,8 @@ GRANT CREATE ON SCHEMA dist_schema TO CURRENT_USER;
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- another_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
- dist_schema | {postgres=UC/postgres,role_3=UC/postgres,role_1=U*C/postgres}
+ another_dist_schema | {=UC/,role_3=UC/}
+ dist_schema | {=UC/,role_3=UC/,role_1=U*C/}
 (2 rows)
 
 \c - - - :master_port
@@ -212,7 +212,7 @@ SELECT run_command_on_coordinator_and_workers('DROP SCHEMA non_dist_schema');
 -- test if the grantors are propagated correctly
 -- first remove one of the worker nodes
 SET citus.shard_replication_factor TO 1;
-SELECT master_remove_node('localhost', :worker_2_port);
+SELECT master_remove_node('', :worker_2_port);
 master_remove_node
 ---------------------------------------------------------------------
 
@@ -243,19 +243,19 @@ SELECT create_distributed_table('grantor_schema.grantor_table', 'id');
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
+ grantor_schema | {=UC/,role_1=U*C*/,=C/,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
 (1 row)
 
 \c - - - :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
+ grantor_schema | {=UC/,role_1=U*C*/,=C/,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
 (1 row)
 
 \c - - - :master_port
 -- add the previously removed node
-SELECT 1 FROM master_add_node('localhost', :worker_2_port);
+SELECT 1 FROM master_add_node('', :worker_2_port);
 ?column?
 ---------------------------------------------------------------------
 1
@@ -265,14 +265,14 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
+ grantor_schema | {=UC/,role_1=U*C*/,=C/,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
 (1 row)
 
 \c - - - :worker_2_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
+ grantor_schema | {=UC/,role_1=U*C*/,=C/,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
 (1 row)
 
 \c - - - :master_port
@@ -282,14 +282,14 @@ REVOKE USAGE ON SCHEMA grantor_schema FROM role_1 CASCADE;
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- grantor_schema | {postgres=UC/postgres,role_1=C*/postgres,=C/postgres,role_2=C/role_1,role_3=C/role_1,=C/role_1}
+ grantor_schema | {=UC/,role_1=C*/,=C/,role_2=C/role_1,role_3=C/role_1,=C/role_1}
 (1 row)
 
 \c - - - :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- grantor_schema | {postgres=UC/postgres,role_1=C*/postgres,=C/postgres,role_2=C/role_1,role_3=C/role_1,=C/role_1}
+ grantor_schema | {=UC/,role_1=C*/,=C/,role_2=C/role_1,role_3=C/role_1,=C/role_1}
 (1 row)
 
 \c - - - :master_port
@@ -305,14 +305,14 @@ RESET ROLE;
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=UC/role_1,role_3=U*C/role_1,=C/role_1,role_2=U/role_3}
+ grantor_schema | {=UC/,role_1=U*C*/,=C/,role_2=UC/role_1,role_3=U*C/role_1,=C/role_1,role_2=U/role_3}
 (1 row)
 
 \c - - - :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=UC/role_1,role_3=U*C/role_1,=C/role_1,role_2=U/role_3}
+ grantor_schema | {=UC/,role_1=U*C*/,=C/,role_2=UC/role_1,role_3=U*C/role_1,=C/role_1,role_2=U/role_3}
 (1 row)
 
 \c - - - :master_port
@@ -338,14 +338,14 @@ SELECT create_distributed_table('dist_schema.dist_table', 'id');
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema' ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/role_1}
+ dist_schema | {=UC/,role_1=U*C*/,role_2=U*C*/role_1}
 (1 row)
 
 \c - - - :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema' ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/role_1}
+ dist_schema | {=UC/,role_1=U*C*/,role_2=U*C*/role_1}
 (1 row)
 
 \c - - - :master_port
@@ -359,7 +359,7 @@ SELECT run_command_on_coordinator_and_workers('DROP SCHEMA dist_schema CASCADE')
 -- test grants on public schema
 -- first remove one of the worker nodes
 SET citus.shard_replication_factor TO 1;
-SELECT master_remove_node('localhost', :worker_2_port);
+SELECT master_remove_node('', :worker_2_port);
 master_remove_node
 ---------------------------------------------------------------------
 
@@ -382,19 +382,19 @@ RESET ROLE;
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- public | {postgres=UC/postgres,=UC/postgres,role_1=U*C*/postgres,=U/role_1}
+ public | {=UC/,=UC/,role_1=U*C*/,=U/role_1}
 (1 row)
 
 \c - - - :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- public | {postgres=UC/postgres,=UC/postgres,role_1=U*C*/postgres,=U/role_1}
+ public | {=UC/,=UC/,role_1=U*C*/,=U/role_1}
 (1 row)
 
 \c - - - :master_port
 -- add the previously removed node
-SELECT 1 FROM master_add_node('localhost', :worker_2_port);
+SELECT 1 FROM master_add_node('', :worker_2_port);
 ?column?
 ---------------------------------------------------------------------
 1
@@ -404,14 +404,14 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- public | {postgres=UC/postgres,=UC/postgres,role_1=U*C*/postgres,=U/role_1}
+ public | {=UC/,=UC/,role_1=U*C*/,=U/role_1}
 (1 row)
 
 \c - - - :worker_2_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- public | {postgres=UC/postgres,=UC/postgres,role_1=U*C*/postgres,=U/role_1}
+ public | {=UC/,=UC/,role_1=U*C*/,=U/role_1}
 (1 row)
 
 \c - - - :master_port
@@ -421,14 +421,14 @@ REVOKE CREATE, USAGE ON SCHEMA PUBLIC FROM role_1 CASCADE;
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- public | {postgres=UC/postgres,=UC/postgres}
+ public | {=UC/,=UC/}
 (1 row)
 
 \c - - - :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
 nspname | nspacl
 ---------------------------------------------------------------------
- public | {postgres=UC/postgres,=UC/postgres}
+ public | {=UC/,=UC/}
 (1 row)
 
 \c - - - :master_port
diff --git a/src/test/regress/expected/insert_select_connection_leak.out b/src/test/regress/expected/insert_select_connection_leak.out
index a971c608e..3f153e7df 100644
--- a/src/test/regress/expected/insert_select_connection_leak.out
+++ b/src/test/regress/expected/insert_select_connection_leak.out
@@ -33,7 +33,7 @@ EXPLAIN (costs off) INSERT INTO target_table SELECT * FROM source_table;
 Task Count: 64
 Tasks Shown: One of 64
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Seq Scan on source_table_4213581 source_table
 (8 rows)
 
diff --git a/src/test/regress/expected/insert_select_repartition.out b/src/test/regress/expected/insert_select_repartition.out
index b483b3d7f..cf8907a16 100644
--- a/src/test/regress/expected/insert_select_repartition.out
+++ b/src/test/regress/expected/insert_select_repartition.out
@@ -567,7 +567,7 @@ EXPLAIN INSERT INTO target_table SELECT a, max(b) FROM source_table GROUP BY a;
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate (cost=43.90..45.90 rows=200 width=8)
 Group Key: a
 -> Seq Scan on source_table_4213606 source_table (cost=0.00..32.60 rows=2260 width=8)
@@ -747,7 +747,7 @@ EXPLAIN (costs off) INSERT INTO target_table SELECT * FROM source_table;
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Seq Scan on source_table_4213613 source_table
 (8 rows)
 
@@ -760,7 +760,7 @@ EXPLAIN (costs off) INSERT INTO target_table SELECT * FROM source_table WHERE b
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Seq Scan on source_table_4213613 source_table
 Filter: (b IS NOT NULL)
 (9 rows)
@@ -791,14 +791,14 @@ SELECT * FROM target_table ORDER BY b;
 SELECT * FROM run_command_on_placements('target_table', 'select count(*) from %s') ORDER BY shardid, nodeport;
 nodename | nodeport | shardid | success | result
 ---------------------------------------------------------------------
- localhost | 57637 | 4213617 | t | 1
- localhost | 57638 | 4213617 | t | 1
- localhost | 57637 | 4213618 | t | 2
- localhost | 57638 | 4213618 | t | 2
- localhost | 57637 | 4213619 | t | 3
- localhost | 57638 | 4213619 | t | 3
- localhost | 57637 | 4213620 | t | 4
- localhost | 57638 | 4213620 | t | 4
+ | xxxxx | 4213617 | t | 1
+ | xxxxx | 4213617 | t | 1
+ | xxxxx | 4213618 | t | 2
+ | xxxxx | 4213618 | t | 2
+ | xxxxx | 4213619 | t | 3
+ | xxxxx | 4213619 | t | 3
+ | xxxxx | 4213620 | t | 4
+ | xxxxx | 4213620 | t | 4
 (8 rows)
 
 --
@@ -885,14 +885,14 @@ SELECT * FROM target_table ORDER BY a;
 SELECT * FROM run_command_on_placements('target_table', 'select count(*) from %s') ORDER BY shardid, nodeport;
 nodename | nodeport | shardid | success | result
 ---------------------------------------------------------------------
- localhost | 57637 | 4213625 | t | 2
- localhost | 57638 | 4213625 | t | 2
- localhost | 57637 | 4213626 | t | 3
- localhost | 57638 | 4213626 | t | 3
- localhost | 57637 | 4213627 | t | 3
- localhost | 57638 | 4213627 | t | 3
- localhost | 57637 | 4213628 | t | 2
- localhost | 57638 | 4213628 | t | 2
+ | xxxxx | 4213625 | t | 2
+ | xxxxx | 4213625 | t | 2
+ | xxxxx | 4213626 | t | 3
+ | xxxxx | 4213626 | t | 3
+ | xxxxx | 4213627 | t | 3
+ | xxxxx | 4213627 | t | 3
+ | xxxxx | 4213628 | t | 2
+ | xxxxx | 4213628 | t | 2
 (8 rows)
 
 DROP TABLE source_table, target_table;
@@ -947,7 +947,7 @@ EXPLAIN (costs off) INSERT INTO target_table SELECT a AS aa, b AS aa, 1 AS aa, 2
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Seq Scan on source_table_4213629 source_table
 (8 rows)
 
@@ -973,7 +973,7 @@ EXPLAIN (costs off) INSERT INTO target_table SELECT a AS aa, b AS aa, 1 AS aa, 2
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> Seq Scan on source_table_4213629 source_table
 (8 rows)
 
@@ -1149,7 +1149,7 @@ DO UPDATE SET
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
 -> HashAggregate
 Group Key: c1, c2, c3, c4, '-1'::double precision, insert_select_repartition.dist_func(c1, 4)
 -> Seq Scan on source_table_4213644 source_table
diff --git a/src/test/regress/expected/intermediate_result_pruning.out b/src/test/regress/expected/intermediate_result_pruning.out
index 697d2ce21..bd5a80e8b 100644
--- a/src/test/regress/expected/intermediate_result_pruning.out
+++ b/src/test/regress/expected/intermediate_result_pruning.out
@@ -51,8 +51,8 @@ FROM
 some_values_1 JOIN table_2 USING (key);
 DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key))
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
 count
 ---------------------------------------------------------------------
 2
@@ -60,7 +60,7 @@ DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
 
 -- a very basic case, where the intermediate result
 -- should only go to one worker because the final query is a router
--- we use random() to prevent postgres inline the CTE(s)
+-- we use random() to prevent inline the CTE(s)
 WITH some_values_1 AS
 (SELECT key, random() FROM table_1 WHERE value IN ('3', '4'))
 SELECT
@@ -69,7 +69,7 @@ FROM
 some_values_1 JOIN table_2 USING (key)
 WHERE
 table_2.key = 1;
 DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1)
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
 count
 ---------------------------------------------------------------------
 0
@@ -86,8 +86,8 @@ FROM
 some_values_1 JOIN ref_table USING (key);
 DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.ref_table USING (key))
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
 count
 ---------------------------------------------------------------------
 2
@@ -107,7 +107,7 @@ DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS
 DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT key, random() AS random FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1)
 DEBUG: Subplan XXX_1 will be written to local file
-DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_2 will be sent to :xxxxx
 count
 ---------------------------------------------------------------------
 0
@@ -126,9 +126,9 @@ FROM
 DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
 DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key))
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3)
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_2 will be sent to :xxxxx
 count
 ---------------------------------------------------------------------
 1
@@ -148,9 +148,9 @@ FROM
 DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
 DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key))
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3)
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_2 will be sent to :xxxxx
 count
 ---------------------------------------------------------------------
 1
@@ -170,9 +170,9 @@ FROM
 DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
 DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1)
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3)
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_2 will be sent to :xxxxx
 count
 ---------------------------------------------------------------------
 0
@@ -193,8 +193,8 @@ FROM
 DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
 DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1)
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1)
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_2 will be sent to :xxxxx
 count
 ---------------------------------------------------------------------
 0
@@ -212,10 +212,10 @@ FROM
 DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
 DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key))
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.<>) 3)
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_2 will be sent to :xxxxx
+DEBUG: Subplan XXX_2 will be sent to :xxxxx
 count
 ---------------------------------------------------------------------
 1
@@ -235,10 +235,10 @@ FROM
 DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
 DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3)
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.<>) 3)
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_2 will be sent to :xxxxx
+DEBUG: Subplan XXX_2 will be sent to :xxxxx
 count
 ---------------------------------------------------------------------
 0
@@ -255,8 +255,8 @@ FROM
 (some_values_1 JOIN ref_table USING (key)) JOIN table_2 USING (key);
 DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.ref_table WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.ref_table USING (key)) JOIN intermediate_result_pruning.table_2 USING (key))
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
 count
 ---------------------------------------------------------------------
 2
@@ -290,7 +290,7 @@ FROM
 DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
 DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
 DEBUG: Subplan XXX_2 will be written to local file
 count
 ---------------------------------------------------------------------
@@ -318,9 +318,9 @@ DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS
 DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT key FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) top_cte JOIN intermediate_result_pruning.table_2 USING (key))
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
 DEBUG: Subplan XXX_2 will be written to local file
 count
 ---------------------------------------------------------------------
@@ -348,8 +348,8 @@ DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS
 DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT key FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) top_cte JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 2)
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
 DEBUG: Subplan XXX_2 will be written to local file
 count
 ---------------------------------------------------------------------
@@ -369,12 +369,12 @@ DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS
 DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)
 DEBUG: generating subplan XXX_3 for CTE some_values_3: SELECT some_values_2.key FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key))
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT some_values_3.key, ref_table.key, ref_table.value FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) some_values_3 JOIN intermediate_result_pruning.ref_table ON (true))
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_2 will be sent to :xxxxx
+DEBUG: Subplan XXX_2 will be sent to :xxxxx
+DEBUG: Subplan XXX_3 will be sent to :xxxxx
+DEBUG: Subplan XXX_3 will be sent to :xxxxx
 key | key | value
 ---------------------------------------------------------------------
 (0 rows)
@@ -482,13 +482,13 @@ DEBUG: generating subplan XXX_5 for subquery SELECT min(table_1.value) AS min F
 DEBUG: generating subplan XXX_6 for subquery SELECT avg((level_6.min)::integer) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(min text)) level_6, intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) (level_6.min)::integer) GROUP BY table_1.value
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('XXX_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar
 DEBUG: Subplan XXX_1 will be written to local file
-DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_4 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_4 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_5 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_5 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_2 will be sent to :xxxxx
+DEBUG: Subplan XXX_3 will be sent to :xxxxx
+DEBUG: Subplan XXX_3 will be sent to :xxxxx
+DEBUG: Subplan XXX_4 will be sent to :xxxxx
+DEBUG: Subplan XXX_4 will be sent to :xxxxx
+DEBUG: Subplan XXX_5 will be sent to :xxxxx
+DEBUG: Subplan XXX_5 will be sent to :xxxxx
 DEBUG: Subplan XXX_6 will be written to local file
 count
 ---------------------------------------------------------------------
@@ -541,10 +541,10 @@ DEBUG: generating subplan XXX_5 for subquery SELECT min(table_1.value) AS min F
 DEBUG: generating subplan XXX_6 for subquery SELECT avg((level_6.min)::integer) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(min text)) level_6, intermediate_result_pruning.table_1 WHERE ((table_1.key OPERATOR(pg_catalog.=) (level_6.min)::integer) AND (table_1.key OPERATOR(pg_catalog.=) 4)) GROUP BY table_1.value
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('XXX_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar
 DEBUG: Subplan XXX_1 will be written to local file
-DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_4 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_5 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_2 will be sent to :xxxxx
+DEBUG: Subplan XXX_3 will be sent to :xxxxx
+DEBUG: Subplan XXX_4 will be sent to :xxxxx
+DEBUG: Subplan XXX_5 will be sent to :xxxxx
 DEBUG: Subplan XXX_6 will be written to local file
 count
 ---------------------------------------------------------------------
@@ -616,8 +616,8 @@ DEBUG: generating subplan XXX_2 for subquery SELECT key FROM intermediate_resul
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer) INTERSECT SELECT intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer)
 DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT count(*) AS count FROM (intermediate_result_pruning.table_1 JOIN (SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_1 USING (key))
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count FROM (SELECT intermediate_result.count FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) cte_2
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
 DEBUG: Subplan XXX_1 will be written to local file
 DEBUG: Subplan XXX_2 will be written to local file
 DEBUG: Subplan XXX_2 will be written to local file
@@ -638,8 +638,8 @@ WHERE foo.key != bar.key;
 DEBUG: generating subplan XXX_1 for subquery SELECT key, random() AS random FROM intermediate_result_pruning.table_2
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1) foo, (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) bar WHERE (foo.key OPERATOR(pg_catalog.<>) bar.key)
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
 count
 ---------------------------------------------------------------------
 14
@@ -656,7 +656,7 @@ WHERE foo.key != bar.key;
 DEBUG: generating subplan XXX_1 for subquery SELECT key, random() AS random FROM intermediate_result_pruning.table_2
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 1)) foo, (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) bar WHERE (foo.key OPERATOR(pg_catalog.<>) bar.key)
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
 count
 ---------------------------------------------------------------------
 4
@@ -682,8 +682,8 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: DELETE FROM intermed
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) raw_data
 DEBUG: Subplan XXX_1 will be written to local file
 DEBUG: Subplan XXX_2 will be written to local file
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
 key | value
 ---------------------------------------------------------------------
 3 | 3
@@ -712,8 +712,8 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: DELETE FROM intermed
 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) raw_data
 DEBUG: Subplan XXX_1 will be written to local file
 DEBUG: Subplan XXX_2 will be written to local file
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG:
Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx key | value --------------------------------------------------------------------- 3 | 3 @@ -738,7 +738,7 @@ DEBUG: generating subplan XXX_1 for subquery SELECT min(key) AS min FROM interm DEBUG: Plan XXX query after replacing subqueries and CTEs: DELETE FROM intermediate_result_pruning.table_2 WHERE (((value)::integer OPERATOR(pg_catalog.>=) (SELECT intermediate_result.min FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) AND (key OPERATOR(pg_catalog.=) 6)) RETURNING key, value DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) raw_data DEBUG: Subplan XXX_1 will be written to local file -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx key | value --------------------------------------------------------------------- 6 | 6 @@ -760,7 +760,7 @@ DEBUG: volatile functions are not allowed in distributed INSERT ... SELECT quer DEBUG: generating subplan XXX_1 for subquery SELECT value FROM intermediate_result_pruning.table_1 WHERE (random() OPERATOR(pg_catalog.>) (1)::double precision) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT key, value FROM intermediate_result_pruning.table_2 WHERE ((value OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text))) AND (key OPERATOR(pg_catalog.=) 1)) DEBUG: Collecting INSERT ... SELECT results on coordinator -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx -- a similar query, with more complex subquery INSERT INTO table_1 SELECT * FROM table_2 where key = 1 AND @@ -793,7 +793,7 @@ DEBUG: Subplan XXX_1 will be written to local file DEBUG: Subplan XXX_1 will be written to local file DEBUG: Subplan XXX_2 will be written to local file DEBUG: Subplan XXX_2 will be written to local file -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx -- same query, cte is on the FROM clause -- and this time the final query (and top-level intermediate result) -- hits all the shards because table_2.key != 1 @@ -830,8 +830,8 @@ DEBUG: Subplan XXX_1 will be written to local file DEBUG: Subplan XXX_1 will be written to local file DEBUG: Subplan XXX_2 will be written to local file DEBUG: Subplan XXX_2 will be written to local file -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx -- append partitioned/heap-type SET citus.replication_model TO statement; -- do not print out 'building index pg_toast_xxxxx_index' messages @@ -889,7 +889,7 @@ WHERE data IN (SELECT data FROM range_partitioned); DEBUG: generating subplan XXX_1 for subquery SELECT data FROM intermediate_result_pruning.range_partitioned DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.=) 'A'::text) AND (data OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.data FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) 
intermediate_result(data integer)))) -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx count --------------------------------------------------------------------- 0 @@ -905,8 +905,8 @@ WHERE data IN (SELECT data FROM range_partitioned); DEBUG: generating subplan XXX_1 for subquery SELECT data FROM intermediate_result_pruning.range_partitioned DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.>=) 'A'::text) AND (range_column OPERATOR(pg_catalog.<=) 'K'::text) AND (data OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.data FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer)))) -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx count --------------------------------------------------------------------- 0 @@ -925,8 +925,8 @@ WHERE range_partitioned.data IN (SELECT data FROM some_data); DEBUG: generating subplan XXX_1 for CTE some_data: SELECT data FROM intermediate_result_pruning.range_partitioned DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.=) ANY (ARRAY['A'::text, 'E'::text])) AND (data OPERATOR(pg_catalog.=) ANY (SELECT some_data.data FROM (SELECT intermediate_result.data FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer)) some_data))) -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx count --------------------------------------------------------------------- 0 diff --git a/src/test/regress/expected/intermediate_results.out b/src/test/regress/expected/intermediate_results.out index cfdf3382a..b65c45744 100644 --- a/src/test/regress/expected/intermediate_results.out +++ b/src/test/regress/expected/intermediate_results.out @@ -242,7 +242,7 @@ END; -- pipe query output into a result file and create a table to check the result COPY (SELECT s, s*s FROM generate_series(1,5) s) TO PROGRAM - $$psql -h localhost -p 57636 -U postgres -d regression -c "BEGIN; COPY squares FROM STDIN WITH (format result); CREATE TABLE intermediate_results.squares AS SELECT * FROM read_intermediate_result('squares', 'text') AS res(x int, x2 int); END;"$$ + $$psql -h -p xxxxx -U -d regression -c "BEGIN; COPY squares FROM STDIN WITH (format result); CREATE TABLE intermediate_results.squares AS SELECT * FROM read_intermediate_result('squares', 'text') AS res(x int, x2 int); END;"$$ WITH (FORMAT text); SELECT * FROM squares ORDER BY x; x | x2 @@ -429,7 +429,7 @@ SELECT broadcast_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_s 5 (1 row) -SELECT * FROM fetch_intermediate_results(ARRAY['squares_1']::text[], 'localhost', :worker_2_port); +SELECT * FROM fetch_intermediate_results(ARRAY['squares_1']::text[], '', :worker_2_port); fetch_intermediate_results --------------------------------------------------------------------- 111 @@ -445,7 +445,7 @@ SELECT * FROM read_intermediate_result('squares_1', 'binary') AS res (x int, x2 5 | 25 (5 rows) -SELECT * FROM 
fetch_intermediate_results(ARRAY['squares_1']::text[], 'localhost', :worker_1_port); +SELECT * FROM fetch_intermediate_results(ARRAY['squares_1']::text[], '', :worker_1_port); fetch_intermediate_results --------------------------------------------------------------------- 111 @@ -464,14 +464,14 @@ SELECT * FROM read_intermediate_result('squares_1', 'binary') AS res (x int, x2 END; -- multiple results, and some error cases BEGIN; -SELECT store_intermediate_result_on_node('localhost', :worker_1_port, +SELECT store_intermediate_result_on_node('', :worker_1_port, 'squares_1', 'SELECT s, s*s FROM generate_series(1, 2) s'); store_intermediate_result_on_node --------------------------------------------------------------------- (1 row) -SELECT store_intermediate_result_on_node('localhost', :worker_1_port, +SELECT store_intermediate_result_on_node('', :worker_1_port, 'squares_2', 'SELECT s, s*s FROM generate_series(3, 4) s'); store_intermediate_result_on_node --------------------------------------------------------------------- @@ -484,16 +484,16 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], ERROR: result "squares_1" does not exist ROLLBACK TO SAVEPOINT s1; -- fetch from worker 2 should fail -SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'localhost', :worker_2_port); +SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], '', :worker_2_port); ERROR: could not open file "base/pgsql_job_cache/xx_x_xxx/squares_1.data": No such file or directory -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx ROLLBACK TO SAVEPOINT s1; -- still, results aren't available on coordinator yet SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'binary') AS res (x int, x2 int); ERROR: result "squares_1" does not exist ROLLBACK TO SAVEPOINT s1; -- fetch from worker 1 should succeed -SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'localhost', :worker_1_port); +SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], '', :worker_1_port); fetch_intermediate_results --------------------------------------------------------------------- 114 @@ -509,7 +509,7 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], (4 rows) -- fetching again should succeed -SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'localhost', :worker_1_port); +SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], '', :worker_1_port); fetch_intermediate_results --------------------------------------------------------------------- 114 @@ -526,14 +526,14 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], ROLLBACK TO SAVEPOINT s1; -- empty result id list should succeed -SELECT * FROM fetch_intermediate_results(ARRAY[]::text[], 'localhost', :worker_1_port); +SELECT * FROM fetch_intermediate_results(ARRAY[]::text[], '', :worker_1_port); fetch_intermediate_results --------------------------------------------------------------------- 0 (1 row) -- null in result id list should error gracefully -SELECT * FROM fetch_intermediate_results(ARRAY[NULL, 'squares_1', 'squares_2']::text[], 'localhost', :worker_1_port); +SELECT * FROM fetch_intermediate_results(ARRAY[NULL, 'squares_1', 'squares_2']::text[], '', :worker_1_port); ERROR: worker array object cannot contain null values END; -- results should have been deleted 
after transaction commit diff --git a/src/test/regress/expected/isolation_add_node_vs_reference_table_operations.out b/src/test/regress/expected/isolation_add_node_vs_reference_table_operations.out index 4fa0f754a..739de987e 100644 --- a/src/test/regress/expected/isolation_add_node_vs_reference_table_operations.out +++ b/src/test/regress/expected/isolation_add_node_vs_reference_table_operations.out @@ -11,7 +11,7 @@ step s1-begin: BEGIN; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -33,8 +33,8 @@ step s2-print-content: nodeport success result -57637 t 10 -57638 t 10 +xxxxx t 10 +xxxxx t 10 master_remove_node @@ -54,7 +54,7 @@ step s2-copy-to-reference-table: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); step s2-commit: COMMIT; @@ -73,8 +73,8 @@ step s2-print-content: nodeport success result -57637 t 10 -57638 t 10 +xxxxx t 10 +xxxxx t 10 master_remove_node @@ -91,7 +91,7 @@ step s1-begin: BEGIN; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -113,8 +113,8 @@ step s2-print-content: nodeport success result -57637 t 6 -57638 t 6 +xxxxx t 6 +xxxxx t 6 master_remove_node @@ -134,7 +134,7 @@ step s2-insert-to-reference-table: INSERT INTO test_reference_table VALUES (6); step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); step s2-commit: COMMIT; @@ -153,8 +153,8 @@ step s2-print-content: nodeport success result -57637 t 6 -57638 t 6 +xxxxx t 6 +xxxxx t 6 master_remove_node @@ -171,7 +171,7 @@ step s1-begin: BEGIN; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -193,8 +193,8 @@ step s2-print-index-count: nodeport success result -57637 t 1 -57638 t 1 +xxxxx t 1 +xxxxx t 1 master_remove_node @@ -214,7 +214,7 @@ step s2-ddl-on-reference-table: CREATE INDEX reference_index ON test_reference_table(test_id); step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); step s2-commit: COMMIT; @@ -233,8 +233,8 @@ step s2-print-index-count: nodeport success result -57637 t 1 -57638 t 1 +xxxxx t 1 +xxxxx t 1 master_remove_node @@ -251,7 +251,7 @@ step s1-begin: BEGIN; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -276,8 +276,8 @@ step s2-print-content-2: nodeport success result -57637 t 1 -57638 t 1 +xxxxx t 1 +xxxxx t 1 master_remove_node @@ -300,7 +300,7 @@ create_reference_table step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); step s2-commit: COMMIT; @@ -319,8 +319,8 @@ step s2-print-content-2: nodeport success result -57637 t 1 -57638 t 1 +xxxxx t 1 +xxxxx t 1 master_remove_node @@ -334,7 +334,7 @@ step s1-begin: BEGIN; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? 
@@ -356,8 +356,8 @@ step s2-print-content: nodeport success result -57637 t 5 -57638 t 5 +xxxxx t 5 +xxxxx t 5 master_remove_node @@ -374,7 +374,7 @@ step s2-copy-to-reference-table: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); step s2-commit: COMMIT; @@ -393,8 +393,8 @@ step s2-print-content: nodeport success result -57637 t 5 -57638 t 5 +xxxxx t 5 +xxxxx t 5 master_remove_node @@ -408,7 +408,7 @@ step s1-begin: BEGIN; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -430,8 +430,8 @@ step s2-print-content: nodeport success result -57637 t 1 -57638 t 1 +xxxxx t 1 +xxxxx t 1 master_remove_node @@ -448,7 +448,7 @@ step s2-insert-to-reference-table: INSERT INTO test_reference_table VALUES (6); step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); step s2-commit: COMMIT; @@ -467,8 +467,8 @@ step s2-print-content: nodeport success result -57637 t 1 -57638 t 1 +xxxxx t 1 +xxxxx t 1 master_remove_node @@ -482,7 +482,7 @@ step s1-begin: BEGIN; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -504,8 +504,8 @@ step s2-print-index-count: nodeport success result -57637 t 1 -57638 t 1 +xxxxx t 1 +xxxxx t 1 master_remove_node @@ -522,7 +522,7 @@ step s2-ddl-on-reference-table: CREATE INDEX reference_index ON test_reference_table(test_id); step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); step s2-commit: COMMIT; @@ -541,8 +541,8 @@ step s2-print-index-count: nodeport success result -57637 t 1 -57638 t 1 +xxxxx t 1 +xxxxx t 1 master_remove_node @@ -556,7 +556,7 @@ step s1-begin: BEGIN; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -581,8 +581,8 @@ step s2-print-content-2: nodeport success result -57637 t 1 -57638 t 1 +xxxxx t 1 +xxxxx t 1 master_remove_node @@ -602,7 +602,7 @@ create_reference_table step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); step s2-commit: COMMIT; @@ -621,8 +621,8 @@ step s2-print-content-2: nodeport success result -57637 t 1 -57638 t 1 +xxxxx t 1 +xxxxx t 1 master_remove_node diff --git a/src/test/regress/expected/isolation_add_remove_node.out b/src/test/regress/expected/isolation_add_remove_node.out index 5203acbd8..be4727609 100644 --- a/src/test/regress/expected/isolation_add_remove_node.out +++ b/src/test/regress/expected/isolation_add_remove_node.out @@ -8,13 +8,13 @@ step s1-begin: BEGIN; step s1-add-node-1: - SELECT 1 FROM master_add_node('localhost', 57637); + SELECT 1 FROM master_add_node('', xxxxx); ?column? 1 step s2-remove-node-1: - SELECT * FROM master_remove_node('localhost', 57637); + SELECT * FROM master_remove_node('', xxxxx); step s1-commit: COMMIT; @@ -39,13 +39,13 @@ step s1-begin: BEGIN; step s1-add-node-1: - SELECT 1 FROM master_add_node('localhost', 57637); + SELECT 1 FROM master_add_node('', xxxxx); ?column? 
1 step s2-add-node-2: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); step s1-commit: COMMIT; @@ -59,8 +59,8 @@ step s1-show-nodes: nodename nodeport isactive -localhost 57637 t -localhost 57638 t + xxxxx t + xxxxx t master_remove_node @@ -74,13 +74,13 @@ step s1-begin: BEGIN; step s1-add-node-1: - SELECT 1 FROM master_add_node('localhost', 57637); + SELECT 1 FROM master_add_node('', xxxxx); ?column? 1 step s2-add-node-1: - SELECT 1 FROM master_add_node('localhost', 57637); + SELECT 1 FROM master_add_node('', xxxxx); step s1-commit: COMMIT; @@ -94,7 +94,7 @@ step s1-show-nodes: nodename nodeport isactive -localhost 57637 t + xxxxx t master_remove_node @@ -107,13 +107,13 @@ step s1-begin: BEGIN; step s1-add-node-1: - SELECT 1 FROM master_add_node('localhost', 57637); + SELECT 1 FROM master_add_node('', xxxxx); ?column? 1 step s2-add-node-2: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); step s1-abort: ABORT; @@ -127,7 +127,7 @@ step s1-show-nodes: nodename nodeport isactive -localhost 57638 t + xxxxx t master_remove_node @@ -140,13 +140,13 @@ step s1-begin: BEGIN; step s1-add-node-1: - SELECT 1 FROM master_add_node('localhost', 57637); + SELECT 1 FROM master_add_node('', xxxxx); ?column? 1 step s2-add-node-1: - SELECT 1 FROM master_add_node('localhost', 57637); + SELECT 1 FROM master_add_node('', xxxxx); step s1-abort: ABORT; @@ -160,7 +160,7 @@ step s1-show-nodes: nodename nodeport isactive -localhost 57637 t + xxxxx t master_remove_node @@ -170,13 +170,13 @@ starting permutation: s1-add-node-1 s1-add-node-2 s1-begin s1-remove-node-1 s2-r 1 step s1-add-node-1: - SELECT 1 FROM master_add_node('localhost', 57637); + SELECT 1 FROM master_add_node('', xxxxx); ?column? 1 step s1-add-node-2: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -185,13 +185,13 @@ step s1-begin: BEGIN; step s1-remove-node-1: - SELECT * FROM master_remove_node('localhost', 57637); + SELECT * FROM master_remove_node('', xxxxx); master_remove_node step s2-remove-node-2: - SELECT * FROM master_remove_node('localhost', 57638); + SELECT * FROM master_remove_node('', xxxxx); step s1-commit: COMMIT; @@ -213,7 +213,7 @@ starting permutation: s1-add-node-1 s1-begin s1-remove-node-1 s2-remove-node-1 s 1 step s1-add-node-1: - SELECT 1 FROM master_add_node('localhost', 57637); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -222,19 +222,19 @@ step s1-begin: BEGIN; step s1-remove-node-1: - SELECT * FROM master_remove_node('localhost', 57637); + SELECT * FROM master_remove_node('', xxxxx); master_remove_node step s2-remove-node-1: - SELECT * FROM master_remove_node('localhost', 57637); + SELECT * FROM master_remove_node('', xxxxx); step s1-commit: COMMIT; step s2-remove-node-1: <... completed> -error in steps s1-commit s2-remove-node-1: ERROR: node at "localhost:xxxxx" does not exist +error in steps s1-commit s2-remove-node-1: ERROR: node at ":xxxxx" does not exist step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; @@ -248,7 +248,7 @@ starting permutation: s1-add-node-1 s1-begin s1-activate-node-1 s2-activate-node 1 step s1-add-node-1: - SELECT 1 FROM master_add_node('localhost', 57637); + SELECT 1 FROM master_add_node('', xxxxx); ?column? 
@@ -257,13 +257,13 @@ step s1-begin: BEGIN; step s1-activate-node-1: - SELECT 1 FROM master_activate_node('localhost', 57637); + SELECT 1 FROM master_activate_node('', xxxxx); ?column? 1 step s2-activate-node-1: - SELECT 1 FROM master_activate_node('localhost', 57637); + SELECT 1 FROM master_activate_node('', xxxxx); step s1-commit: COMMIT; @@ -277,7 +277,7 @@ step s1-show-nodes: nodename nodeport isactive -localhost 57637 t + xxxxx t master_remove_node @@ -287,7 +287,7 @@ starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-disable-node-1 1 step s1-add-node-1: - SELECT 1 FROM master_add_node('localhost', 57637); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -296,13 +296,13 @@ step s1-begin: BEGIN; step s1-disable-node-1: - SELECT 1 FROM master_disable_node('localhost', 57637); + SELECT 1 FROM master_disable_node('', xxxxx); ?column? 1 step s2-disable-node-1: - SELECT 1 FROM master_disable_node('localhost', 57637); + SELECT 1 FROM master_disable_node('', xxxxx); step s1-commit: COMMIT; @@ -316,7 +316,7 @@ step s1-show-nodes: nodename nodeport isactive -localhost 57637 f + xxxxx f master_remove_node @@ -326,7 +326,7 @@ starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-activate- 1 step s1-add-inactive-1: - SELECT 1 FROM master_add_inactive_node('localhost', 57637); + SELECT 1 FROM master_add_inactive_node('', xxxxx); ?column? @@ -335,13 +335,13 @@ step s1-begin: BEGIN; step s1-activate-node-1: - SELECT 1 FROM master_activate_node('localhost', 57637); + SELECT 1 FROM master_activate_node('', xxxxx); ?column? 1 step s2-activate-node-1: - SELECT 1 FROM master_activate_node('localhost', 57637); + SELECT 1 FROM master_activate_node('', xxxxx); step s1-commit: COMMIT; @@ -355,7 +355,7 @@ step s1-show-nodes: nodename nodeport isactive -localhost 57637 t + xxxxx t master_remove_node @@ -365,7 +365,7 @@ starting permutation: s1-add-inactive-1 s1-begin s1-disable-node-1 s2-disable-no 1 step s1-add-inactive-1: - SELECT 1 FROM master_add_inactive_node('localhost', 57637); + SELECT 1 FROM master_add_inactive_node('', xxxxx); ?column? @@ -374,13 +374,13 @@ step s1-begin: BEGIN; step s1-disable-node-1: - SELECT 1 FROM master_disable_node('localhost', 57637); + SELECT 1 FROM master_disable_node('', xxxxx); ?column? 1 step s2-disable-node-1: - SELECT 1 FROM master_disable_node('localhost', 57637); + SELECT 1 FROM master_disable_node('', xxxxx); step s1-commit: COMMIT; @@ -394,7 +394,7 @@ step s1-show-nodes: nodename nodeport isactive -localhost 57637 f + xxxxx f master_remove_node @@ -404,7 +404,7 @@ starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-activate-node- 1 step s1-add-node-1: - SELECT 1 FROM master_add_node('localhost', 57637); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -413,13 +413,13 @@ step s1-begin: BEGIN; step s1-disable-node-1: - SELECT 1 FROM master_disable_node('localhost', 57637); + SELECT 1 FROM master_disable_node('', xxxxx); ?column? 1 step s2-activate-node-1: - SELECT 1 FROM master_activate_node('localhost', 57637); + SELECT 1 FROM master_activate_node('', xxxxx); step s1-commit: COMMIT; @@ -433,7 +433,7 @@ step s1-show-nodes: nodename nodeport isactive -localhost 57637 t + xxxxx t master_remove_node @@ -443,7 +443,7 @@ starting permutation: s1-add-node-1 s1-begin s1-activate-node-1 s2-disable-node- 1 step s1-add-node-1: - SELECT 1 FROM master_add_node('localhost', 57637); + SELECT 1 FROM master_add_node('', xxxxx); ?column? 
@@ -452,13 +452,13 @@ step s1-begin: BEGIN; step s1-activate-node-1: - SELECT 1 FROM master_activate_node('localhost', 57637); + SELECT 1 FROM master_activate_node('', xxxxx); ?column? 1 step s2-disable-node-1: - SELECT 1 FROM master_disable_node('localhost', 57637); + SELECT 1 FROM master_disable_node('', xxxxx); step s1-commit: COMMIT; @@ -472,7 +472,7 @@ step s1-show-nodes: nodename nodeport isactive -localhost 57637 f + xxxxx f master_remove_node @@ -482,7 +482,7 @@ starting permutation: s1-add-inactive-1 s1-begin s1-disable-node-1 s2-activate-n 1 step s1-add-inactive-1: - SELECT 1 FROM master_add_inactive_node('localhost', 57637); + SELECT 1 FROM master_add_inactive_node('', xxxxx); ?column? @@ -491,13 +491,13 @@ step s1-begin: BEGIN; step s1-disable-node-1: - SELECT 1 FROM master_disable_node('localhost', 57637); + SELECT 1 FROM master_disable_node('', xxxxx); ?column? 1 step s2-activate-node-1: - SELECT 1 FROM master_activate_node('localhost', 57637); + SELECT 1 FROM master_activate_node('', xxxxx); step s1-commit: COMMIT; @@ -511,7 +511,7 @@ step s1-show-nodes: nodename nodeport isactive -localhost 57637 t + xxxxx t master_remove_node @@ -521,7 +521,7 @@ starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-disable-n 1 step s1-add-inactive-1: - SELECT 1 FROM master_add_inactive_node('localhost', 57637); + SELECT 1 FROM master_add_inactive_node('', xxxxx); ?column? @@ -530,13 +530,13 @@ step s1-begin: BEGIN; step s1-activate-node-1: - SELECT 1 FROM master_activate_node('localhost', 57637); + SELECT 1 FROM master_activate_node('', xxxxx); ?column? 1 step s2-disable-node-1: - SELECT 1 FROM master_disable_node('localhost', 57637); + SELECT 1 FROM master_disable_node('', xxxxx); step s1-commit: COMMIT; @@ -550,7 +550,7 @@ step s1-show-nodes: nodename nodeport isactive -localhost 57637 f + xxxxx f master_remove_node @@ -560,7 +560,7 @@ starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-disable-n 1 step s1-add-inactive-1: - SELECT 1 FROM master_add_inactive_node('localhost', 57637); + SELECT 1 FROM master_add_inactive_node('', xxxxx); ?column? @@ -569,13 +569,13 @@ step s1-begin: BEGIN; step s1-activate-node-1: - SELECT 1 FROM master_activate_node('localhost', 57637); + SELECT 1 FROM master_activate_node('', xxxxx); ?column? 1 step s2-disable-node-1: - SELECT 1 FROM master_disable_node('localhost', 57637); + SELECT 1 FROM master_disable_node('', xxxxx); step s1-abort: ABORT; @@ -589,7 +589,7 @@ step s1-show-nodes: nodename nodeport isactive -localhost 57637 f + xxxxx f master_remove_node @@ -599,7 +599,7 @@ starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-disable-node-1 1 step s1-add-node-1: - SELECT 1 FROM master_add_node('localhost', 57637); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -608,13 +608,13 @@ step s1-begin: BEGIN; step s1-disable-node-1: - SELECT 1 FROM master_disable_node('localhost', 57637); + SELECT 1 FROM master_disable_node('', xxxxx); ?column? 
1 step s2-disable-node-1: - SELECT 1 FROM master_disable_node('localhost', 57637); + SELECT 1 FROM master_disable_node('', xxxxx); step s1-abort: ABORT; @@ -628,7 +628,7 @@ step s1-show-nodes: nodename nodeport isactive -localhost 57637 f + xxxxx f master_remove_node diff --git a/src/test/regress/expected/isolation_alter_role_propagation.out b/src/test/regress/expected/isolation_alter_role_propagation.out index 4e30be626..c5183c6f0 100644 --- a/src/test/regress/expected/isolation_alter_role_propagation.out +++ b/src/test/regress/expected/isolation_alter_role_propagation.out @@ -3,8 +3,8 @@ Parsed test spec with 2 sessions starting permutation: s1-enable-propagation s2-enable-propagation s1-begin s1-alter-role-1 s2-add-node s1-commit run_command_on_workers -(localhost,57637,t,"CREATE ROLE") -(localhost,57638,t,"CREATE ROLE") +(,xxxxx,t,"CREATE ROLE") +(,xxxxx,t,"CREATE ROLE") step s1-enable-propagation: SET citus.enable_alter_role_propagation to ON; @@ -18,7 +18,7 @@ step s1-alter-role-1: ALTER ROLE alter_role_1 NOSUPERUSER; step s2-add-node: - SELECT 1 FROM master_add_node('localhost', 57637); + SELECT 1 FROM master_add_node('', xxxxx); step s1-commit: COMMIT; @@ -29,14 +29,14 @@ step s2-add-node: <... completed> 1 run_command_on_workers -(localhost,57637,t,"DROP ROLE") -(localhost,57638,t,"DROP ROLE") +(,xxxxx,t,"DROP ROLE") +(,xxxxx,t,"DROP ROLE") starting permutation: s1-enable-propagation s2-enable-propagation s1-begin s1-add-node s2-alter-role-1 s1-commit run_command_on_workers -(localhost,57637,t,"CREATE ROLE") -(localhost,57638,t,"CREATE ROLE") +(,xxxxx,t,"CREATE ROLE") +(,xxxxx,t,"CREATE ROLE") step s1-enable-propagation: SET citus.enable_alter_role_propagation to ON; @@ -47,7 +47,7 @@ step s1-begin: BEGIN; step s1-add-node: - SELECT 1 FROM master_add_node('localhost', 57637); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -61,14 +61,14 @@ step s1-commit: step s2-alter-role-1: <... completed> run_command_on_workers -(localhost,57637,t,"DROP ROLE") -(localhost,57638,t,"DROP ROLE") +(,xxxxx,t,"DROP ROLE") +(,xxxxx,t,"DROP ROLE") starting permutation: s1-enable-propagation s2-enable-propagation s1-begin s1-alter-role-1 s2-alter-role-1 s1-commit run_command_on_workers -(localhost,57637,t,"CREATE ROLE") -(localhost,57638,t,"CREATE ROLE") +(,xxxxx,t,"CREATE ROLE") +(,xxxxx,t,"CREATE ROLE") step s1-enable-propagation: SET citus.enable_alter_role_propagation to ON; @@ -91,14 +91,14 @@ step s2-alter-role-1: <... 
completed> error in steps s1-commit s2-alter-role-1: ERROR: tuple concurrently updated run_command_on_workers -(localhost,57637,t,"DROP ROLE") -(localhost,57638,t,"DROP ROLE") +(,xxxxx,t,"DROP ROLE") +(,xxxxx,t,"DROP ROLE") starting permutation: s1-enable-propagation s2-enable-propagation s1-begin s1-alter-role-1 s2-alter-role-2 s1-commit run_command_on_workers -(localhost,57637,t,"CREATE ROLE") -(localhost,57638,t,"CREATE ROLE") +(,xxxxx,t,"CREATE ROLE") +(,xxxxx,t,"CREATE ROLE") step s1-enable-propagation: SET citus.enable_alter_role_propagation to ON; @@ -119,5 +119,5 @@ step s1-commit: run_command_on_workers -(localhost,57637,t,"DROP ROLE") -(localhost,57638,t,"DROP ROLE") +(,xxxxx,t,"DROP ROLE") +(,xxxxx,t,"DROP ROLE") diff --git a/src/test/regress/expected/isolation_append_copy_vs_all.out b/src/test/regress/expected/isolation_append_copy_vs_all.out index 65f8de20f..0aafd4d66 100644 --- a/src/test/regress/expected/isolation_append_copy_vs_all.out +++ b/src/test/regress/expected/isolation_append_copy_vs_all.out @@ -177,8 +177,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -198,8 +198,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -218,8 +218,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); run_command_on_workers -(localhost,57637,t,1) -(localhost,57638,t,1) +(,xxxxx,t,1) +(,xxxxx,t,1) starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -238,8 +238,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -259,8 +259,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -279,8 +279,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) 
+(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count create_distributed_table @@ -518,8 +518,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -539,8 +539,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table @@ -560,8 +560,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table @@ -581,8 +581,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table @@ -601,8 +601,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count create_distributed_table diff --git a/src/test/regress/expected/isolation_citus_dist_activity.out b/src/test/regress/expected/isolation_citus_dist_activity.out index 355c59e89..4fa802cae 100644 --- a/src/test/regress/expected/isolation_citus_dist_activity.out +++ b/src/test/regress/expected/isolation_citus_dist_activity.out @@ -34,7 +34,7 @@ query query_hostname query_hostport master_query_host_namemaster_query_ ALTER TABLE test_table ADD COLUMN x INT; -coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression +coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead regression step s3-view-worker: SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; @@ -42,16 +42,16 @@ query query_hostname query_hostport master_query_host_namemaster_query_ SELECT worker_apply_shard_ddl_command (1300004, 'public', ' ALTER TABLE test_table ADD COLUMN x INT; -')localhost 57638 
coordinator_host57636 idle in transactionClient ClientRead postgres regression +') xxxxx coordinator_host57636 idle in transactionClient ClientRead regression SELECT worker_apply_shard_ddl_command (1300003, 'public', ' ALTER TABLE test_table ADD COLUMN x INT; -')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression +') xxxxx coordinator_host57636 idle in transactionClient ClientRead regression SELECT worker_apply_shard_ddl_command (1300002, 'public', ' ALTER TABLE test_table ADD COLUMN x INT; -')localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression +') xxxxx coordinator_host57636 idle in transactionClient ClientRead regression SELECT worker_apply_shard_ddl_command (1300001, 'public', ' ALTER TABLE test_table ADD COLUMN x INT; -')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression +') xxxxx coordinator_host57636 idle in transactionClient ClientRead regression step s2-rollback: ROLLBACK; @@ -96,13 +96,13 @@ query query_hostname query_hostport master_query_host_namemaster_query_ INSERT INTO test_table VALUES (100, 100); -coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression +coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead regression step s3-view-worker: SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname -INSERT INTO public.test_table_1300008 (column1, column2) VALUES (100, 100)localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression +INSERT INTO public.test_table_1300008 (column1, column2) VALUES (100, 100) xxxxx coordinator_host57636 idle in transactionClient ClientRead regression step s2-rollback: ROLLBACK; @@ -150,16 +150,16 @@ query query_hostname query_hostport master_query_host_namemaster_query_ SELECT count(*) FROM test_table; -coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression +coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead regression step s3-view-worker: SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname -SELECT count(*) AS count FROM test_table_1300014 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression -SELECT count(*) AS count FROM test_table_1300013 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression -SELECT count(*) AS count FROM test_table_1300012 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression -SELECT count(*) AS count FROM test_table_1300011 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression +SELECT count(*) 
AS count FROM test_table_1300014 test_table WHERE truelocalhost xxxxx coordinator_host57636 idle in transactionClient ClientRead regression +SELECT count(*) AS count FROM test_table_1300013 test_table WHERE truelocalhost xxxxx coordinator_host57636 idle in transactionClient ClientRead regression +SELECT count(*) AS count FROM test_table_1300012 test_table WHERE truelocalhost xxxxx coordinator_host57636 idle in transactionClient ClientRead regression +SELECT count(*) AS count FROM test_table_1300011 test_table WHERE truelocalhost xxxxx coordinator_host57636 idle in transactionClient ClientRead regression step s2-rollback: ROLLBACK; @@ -207,13 +207,13 @@ query query_hostname query_hostport master_query_host_namemaster_query_ SELECT count(*) FROM test_table WHERE column1 = 55; -coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression +coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead regression step s3-view-worker: SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname -SELECT count(*) AS count FROM public.test_table_1300017 test_table WHERE (column1 OPERATOR(pg_catalog.=) 55)localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression +SELECT count(*) AS count FROM public.test_table_1300017 test_table WHERE (column1 OPERATOR(pg_catalog.=) 55) xxxxx coordinator_host57636 idle in transactionClient ClientRead regression step s2-rollback: ROLLBACK; diff --git a/src/test/regress/expected/isolation_cluster_management.out b/src/test/regress/expected/isolation_cluster_management.out index 505157100..66b08af4f 100644 --- a/src/test/regress/expected/isolation_cluster_management.out +++ b/src/test/regress/expected/isolation_cluster_management.out @@ -2,8 +2,8 @@ Parsed test spec with 1 sessions starting permutation: s1a step s1a: - SELECT 1 FROM master_add_node('localhost', 57637); - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); + SELECT 1 FROM master_add_node('', xxxxx); ?column? 
diff --git a/src/test/regress/expected/isolation_copy_placement_vs_copy_placement.out b/src/test/regress/expected/isolation_copy_placement_vs_copy_placement.out index 95a9a59cf..24037c23a 100644 --- a/src/test/regress/expected/isolation_copy_placement_vs_copy_placement.out +++ b/src/test/regress/expected/isolation_copy_placement_vs_copy_placement.out @@ -8,19 +8,19 @@ step s2-load-cache: COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV; step s2-set-placement-inactive: - UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638; + UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = xxxxx; step s2-begin: BEGIN; step s2-repair-placement: - SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); + SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), '', xxxxx, '', xxxxx); master_copy_shard_placement step s1-repair-placement: - SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); + SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), '', xxxxx, '', xxxxx); step s2-commit: COMMIT; @@ -30,19 +30,19 @@ error in steps s2-commit s1-repair-placement: ERROR: target placement must be i starting permutation: s2-set-placement-inactive s2-begin s2-repair-placement s1-repair-placement s2-commit step s2-set-placement-inactive: - UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638; + UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = xxxxx; step s2-begin: BEGIN; step s2-repair-placement: - SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); + SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), '', xxxxx, '', xxxxx); master_copy_shard_placement step s1-repair-placement: - SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); + SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), '', xxxxx, '', xxxxx); step s2-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_copy_placement_vs_modification.out b/src/test/regress/expected/isolation_copy_placement_vs_modification.out index 24abf82b1..23784da8d 100644 --- a/src/test/regress/expected/isolation_copy_placement_vs_modification.out +++ b/src/test/regress/expected/isolation_copy_placement_vs_modification.out @@ -18,13 +18,13 @@ count 1 step s2-set-placement-inactive: - UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; + UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = xxxxx; step s2-begin: BEGIN; step s2-repair-placement: - SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_copy_shard_placement((SELECT * FROM selected_shard), '', xxxxx, '', xxxxx); master_copy_shard_placement @@ -51,8 +51,8 @@ step s2-print-content: nodeport success result -57637 t 5 -57638 t 5 +xxxxx t 5 +xxxxx t 5 starting permutation: 
s1-load-cache s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-delete s2-commit s1-commit s2-print-content step s1-load-cache: @@ -72,13 +72,13 @@ count 1 step s2-set-placement-inactive: - UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; + UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = xxxxx; step s2-begin: BEGIN; step s2-repair-placement: - SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_copy_shard_placement((SELECT * FROM selected_shard), '', xxxxx, '', xxxxx); master_copy_shard_placement @@ -105,8 +105,8 @@ step s2-print-content: nodeport success result -57637 t -57638 t +xxxxx t +xxxxx t starting permutation: s1-load-cache s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-insert s2-commit s1-commit s2-print-content step s1-load-cache: @@ -123,13 +123,13 @@ count 0 step s2-set-placement-inactive: - UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; + UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = xxxxx; step s2-begin: BEGIN; step s2-repair-placement: - SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_copy_shard_placement((SELECT * FROM selected_shard), '', xxxxx, '', xxxxx); master_copy_shard_placement @@ -156,8 +156,8 @@ step s2-print-content: nodeport success result -57637 t 10 -57638 t 10 +xxxxx t 10 +xxxxx t 10 starting permutation: s1-load-cache s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-copy s2-commit s1-commit s2-print-content step s1-load-cache: @@ -174,13 +174,13 @@ count 0 step s2-set-placement-inactive: - UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; + UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = xxxxx; step s2-begin: BEGIN; step s2-repair-placement: - SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_copy_shard_placement((SELECT * FROM selected_shard), '', xxxxx, '', xxxxx); master_copy_shard_placement @@ -207,8 +207,8 @@ step s2-print-content: nodeport success result -57637 t 5 -57638 t 5 +xxxxx t 5 +xxxxx t 5 starting permutation: s1-load-cache s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-ddl s2-commit s1-commit s2-print-index-count step s1-load-cache: @@ -225,13 +225,13 @@ count 0 step s2-set-placement-inactive: - UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; + UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = xxxxx; step s2-begin: BEGIN; step s2-repair-placement: - SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_copy_shard_placement((SELECT * FROM selected_shard), '', xxxxx, '', xxxxx); master_copy_shard_placement @@ -256,10 +256,10 @@ step s2-print-index-count: nodeport success result -57637 t 1 -57637 t 1 -57638 t 1 -57638 t 1 +xxxxx t 1 +xxxxx t 1 +xxxxx t 1 +xxxxx t 1 starting permutation: s1-insert 
s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-update s2-commit s1-commit s2-print-content step s1-insert: @@ -276,13 +276,13 @@ count 1 step s2-set-placement-inactive: - UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; + UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = xxxxx; step s2-begin: BEGIN; step s2-repair-placement: - SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_copy_shard_placement((SELECT * FROM selected_shard), '', xxxxx, '', xxxxx); master_copy_shard_placement @@ -309,8 +309,8 @@ step s2-print-content: nodeport success result -57637 t 5 -57638 t 5 +xxxxx t 5 +xxxxx t 5 starting permutation: s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-delete s2-commit s1-commit s2-print-content step s1-insert: @@ -327,13 +327,13 @@ count 1 step s2-set-placement-inactive: - UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; + UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = xxxxx; step s2-begin: BEGIN; step s2-repair-placement: - SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_copy_shard_placement((SELECT * FROM selected_shard), '', xxxxx, '', xxxxx); master_copy_shard_placement @@ -360,8 +360,8 @@ step s2-print-content: nodeport success result -57637 t -57638 t +xxxxx t +xxxxx t starting permutation: s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-insert s2-commit s1-commit s2-print-content step s1-begin: @@ -375,13 +375,13 @@ count 0 step s2-set-placement-inactive: - UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; + UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = xxxxx; step s2-begin: BEGIN; step s2-repair-placement: - SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_copy_shard_placement((SELECT * FROM selected_shard), '', xxxxx, '', xxxxx); master_copy_shard_placement @@ -408,8 +408,8 @@ step s2-print-content: nodeport success result -57637 t 10 -57638 t 10 +xxxxx t 10 +xxxxx t 10 starting permutation: s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-copy s2-commit s1-commit s2-print-content step s1-begin: @@ -423,13 +423,13 @@ count 0 step s2-set-placement-inactive: - UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; + UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = xxxxx; step s2-begin: BEGIN; step s2-repair-placement: - SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_copy_shard_placement((SELECT * FROM selected_shard), '', xxxxx, '', xxxxx); master_copy_shard_placement @@ -456,8 +456,8 @@ step s2-print-content: nodeport success result -57637 t 5 -57638 t 5 +xxxxx t 5 +xxxxx t 5 starting permutation: s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-ddl s2-commit s1-commit s2-print-index-count step s1-begin: 
@@ -471,13 +471,13 @@ count 0 step s2-set-placement-inactive: - UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; + UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = xxxxx; step s2-begin: BEGIN; step s2-repair-placement: - SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_copy_shard_placement((SELECT * FROM selected_shard), '', xxxxx, '', xxxxx); master_copy_shard_placement @@ -502,7 +502,7 @@ step s2-print-index-count: nodeport success result -57637 t 1 -57637 t 1 -57638 t 1 -57638 t 1 +xxxxx t 1 +xxxxx t 1 +xxxxx t 1 +xxxxx t 1 diff --git a/src/test/regress/expected/isolation_copy_vs_all_on_mx.out b/src/test/regress/expected/isolation_copy_vs_all_on_mx.out index 52f882f2d..cfeff1184 100644 --- a/src/test/regress/expected/isolation_copy_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_copy_vs_all_on_mx.out @@ -2,7 +2,7 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -20,7 +20,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -73,7 +73,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-begin s2-coordinator-drop s1-commit-worker s2-commit s1-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -122,7 +122,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -140,7 +140,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node diff --git a/src/test/regress/expected/isolation_create_restore_point.out b/src/test/regress/expected/isolation_create_restore_point.out index c0bb77d6b..8367d4bf7 100644 --- a/src/test/regress/expected/isolation_create_restore_point.out +++ b/src/test/regress/expected/isolation_create_restore_point.out @@ -166,7 +166,7 @@ step s1-begin: SET citus.multi_shard_commit_protocol TO '2pc'; step s1-add-node: - SELECT 1 FROM master_add_inactive_node('localhost', 9999); + SELECT 1 FROM master_add_inactive_node('', 9999); ?column? 
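Throughout the hunks above, the expected output is rewritten so that literal worker hostnames are blanked and the well-known regress worker ports 57637/57638 become the placeholder xxxxx. As a rough, hypothetical sketch of that masking rule (an assumption about its shape, not the test harness's actual mechanism), the same rewrite can be reproduced in psql with regexp_replace:

    -- hypothetical re-creation of the masking applied in these expected files
    SELECT regexp_replace(
             regexp_replace('(localhost,57637,t,2)', 'localhost', '', 'g'),
             '5763[0-9]', 'xxxxx', 'g');
    -- result: (,xxxxx,t,2)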
@@ -191,7 +191,7 @@ step s1-begin: SET citus.multi_shard_commit_protocol TO '2pc'; step s1-remove-node: - SELECT master_remove_node('localhost', 9999); + SELECT master_remove_node('', 9999); master_remove_node diff --git a/src/test/regress/expected/isolation_create_table_vs_add_remove_node.out b/src/test/regress/expected/isolation_create_table_vs_add_remove_node.out index ed61cf589..1fdc7d702 100644 --- a/src/test/regress/expected/isolation_create_table_vs_add_remove_node.out +++ b/src/test/regress/expected/isolation_create_table_vs_add_remove_node.out @@ -3,12 +3,12 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-add-node-2 s2-create-table-1 s1-commit s1-show-placements s2-select node_name node_port -localhost 57637 + xxxxx step s1-begin: BEGIN; step s1-add-node-2: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -38,10 +38,10 @@ step s1-show-placements: nodename nodeport -localhost 57637 -localhost 57637 -localhost 57638 -localhost 57638 + xxxxx + xxxxx + xxxxx + xxxxx step s2-select: SELECT * FROM dist_table; @@ -55,12 +55,12 @@ master_remove_node starting permutation: s1-begin s1-add-node-2 s2-create-table-1 s1-abort s1-show-placements s2-select node_name node_port -localhost 57637 + xxxxx step s1-begin: BEGIN; step s1-add-node-2: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -90,10 +90,10 @@ step s1-show-placements: nodename nodeport -localhost 57637 -localhost 57637 -localhost 57637 -localhost 57637 + xxxxx + xxxxx + xxxxx + xxxxx step s2-select: SELECT * FROM dist_table; @@ -106,7 +106,7 @@ master_remove_node starting permutation: s2-begin s2-create-table-1 s1-add-node-2 s2-commit s1-show-placements s2-select node_name node_port -localhost 57637 + xxxxx step s2-begin: BEGIN; @@ -120,7 +120,7 @@ create_distributed_table step s1-add-node-2: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); step s2-commit: COMMIT; @@ -141,10 +141,10 @@ step s1-show-placements: nodename nodeport -localhost 57637 -localhost 57637 -localhost 57637 -localhost 57637 + xxxxx + xxxxx + xxxxx + xxxxx step s2-select: SELECT * FROM dist_table; @@ -158,9 +158,9 @@ master_remove_node starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-table-1 s1-commit s1-show-placements s2-select node_name node_port -localhost 57637 + xxxxx step s1-add-node-2: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -169,7 +169,7 @@ step s1-begin: BEGIN; step s1-remove-node-2: - SELECT * FROM master_remove_node('localhost', 57638); + SELECT * FROM master_remove_node('', xxxxx); master_remove_node @@ -199,10 +199,10 @@ step s1-show-placements: nodename nodeport -localhost 57637 -localhost 57637 -localhost 57637 -localhost 57637 + xxxxx + xxxxx + xxxxx + xxxxx step s2-select: SELECT * FROM dist_table; @@ -215,9 +215,9 @@ master_remove_node starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-table-1 s1-abort s1-show-placements s2-select node_name node_port -localhost 57637 + xxxxx step s1-add-node-2: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? 
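In the isolation_create_restore_point.out hunks above, only the hostname argument is blanked; the literal port 9999 passed to master_add_inactive_node and master_remove_node is left intact. That is consistent with a masking rule that targets only the 5763x regress ports, as the sketch below illustrates (again an assumed rule for illustration, not its actual definition):

    -- 9999 does not match the assumed 5763[0-9] pattern, so it survives masking
    SELECT regexp_replace('master_remove_node(''localhost'', 9999)',
                          '5763[0-9]', 'xxxxx', 'g');
    -- result: master_remove_node('localhost', 9999)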
@@ -226,7 +226,7 @@ step s1-begin: BEGIN; step s1-remove-node-2: - SELECT * FROM master_remove_node('localhost', 57638); + SELECT * FROM master_remove_node('', xxxxx); master_remove_node @@ -256,10 +256,10 @@ step s1-show-placements: nodename nodeport -localhost 57637 -localhost 57637 -localhost 57638 -localhost 57638 + xxxxx + xxxxx + xxxxx + xxxxx step s2-select: SELECT * FROM dist_table; @@ -273,9 +273,9 @@ master_remove_node starting permutation: s1-add-node-2 s2-begin s2-create-table-1 s1-remove-node-2 s2-commit s1-show-placements s2-select node_name node_port -localhost 57637 + xxxxx step s1-add-node-2: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -293,7 +293,7 @@ create_distributed_table step s1-remove-node-2: - SELECT * FROM master_remove_node('localhost', 57638); + SELECT * FROM master_remove_node('', xxxxx); step s2-commit: COMMIT; @@ -312,10 +312,10 @@ step s1-show-placements: nodename nodeport -localhost 57637 -localhost 57637 -localhost 57638 -localhost 57638 + xxxxx + xxxxx + xxxxx + xxxxx step s2-select: SELECT * FROM dist_table; @@ -329,9 +329,9 @@ master_remove_node starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-table-2 s1-commit s2-select node_name node_port -localhost 57637 + xxxxx step s1-add-node-2: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -340,7 +340,7 @@ step s1-begin: BEGIN; step s1-remove-node-2: - SELECT * FROM master_remove_node('localhost', 57638); + SELECT * FROM master_remove_node('', xxxxx); master_remove_node @@ -367,9 +367,9 @@ master_remove_node starting permutation: s1-add-node-2 s2-begin s2-create-table-2 s1-remove-node-2 s2-commit s2-select node_name node_port -localhost 57637 + xxxxx step s1-add-node-2: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -387,7 +387,7 @@ create_distributed_table step s1-remove-node-2: - SELECT * FROM master_remove_node('localhost', 57638); + SELECT * FROM master_remove_node('', xxxxx); step s2-commit: COMMIT; @@ -407,9 +407,9 @@ master_remove_node starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-append-table s1-commit s2-select node_name node_port -localhost 57637 + xxxxx step s1-add-node-2: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -418,7 +418,7 @@ step s1-begin: BEGIN; step s1-remove-node-2: - SELECT * FROM master_remove_node('localhost', 57638); + SELECT * FROM master_remove_node('', xxxxx); master_remove_node @@ -451,9 +451,9 @@ master_remove_node starting permutation: s1-add-node-2 s2-begin s2-create-append-table s1-remove-node-2 s2-commit s2-select node_name node_port -localhost 57637 + xxxxx step s1-add-node-2: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? 
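The s1-show-placements output above prints one nodename/nodeport row per shard placement, so after masking only the port placeholder column remains. The step's query text is elided from these hunks; in essence it is a lookup against pg_dist_shard_placement, and a minimal sketch, assuming the spec's table is the dist_table referenced by s2-select, would be:

    -- minimal placement listing; dist_table is the table used by this spec
    SELECT nodename, nodeport
    FROM pg_dist_shard_placement
    WHERE shardid IN (SELECT shardid FROM pg_dist_shard
                      WHERE logicalrelid = 'dist_table'::regclass)
    ORDER BY nodename, nodeport;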
@@ -474,7 +474,7 @@ create_distributed_table 1 step s1-remove-node-2: - SELECT * FROM master_remove_node('localhost', 57638); + SELECT * FROM master_remove_node('', xxxxx); step s2-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_ddl_vs_all.out b/src/test/regress/expected/isolation_ddl_vs_all.out index fbf8677b0..4ad438f8d 100644 --- a/src/test/regress/expected/isolation_ddl_vs_all.out +++ b/src/test/regress/expected/isolation_ddl_vs_all.out @@ -16,8 +16,8 @@ step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) restore_isolation_tester_func @@ -36,8 +36,8 @@ error in steps s1-commit s2-ddl-create-index-concurrently: ERROR: relation "ddl step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) restore_isolation_tester_func @@ -57,13 +57,13 @@ step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func @@ -83,13 +83,13 @@ step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) restore_isolation_tester_func @@ -109,13 +109,13 @@ step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) restore_isolation_tester_func @@ -133,13 +133,13 @@ step s2-ddl-create-index-concurrently: <... 
completed> step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) restore_isolation_tester_func @@ -159,8 +159,8 @@ step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func @@ -180,8 +180,8 @@ step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) restore_isolation_tester_func @@ -201,13 +201,13 @@ step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) restore_isolation_tester_func @@ -225,13 +225,13 @@ step s2-ddl-create-index-concurrently: <... 
completed> step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) restore_isolation_tester_func @@ -251,8 +251,8 @@ step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) restore_isolation_tester_func @@ -273,8 +273,8 @@ step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) restore_isolation_tester_func @@ -296,8 +296,8 @@ step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) restore_isolation_tester_func @@ -317,8 +317,8 @@ step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) restore_isolation_tester_func @@ -343,8 +343,8 @@ step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers -(localhost,57637,t,4) -(localhost,57638,t,4) +(,xxxxx,t,4) +(,xxxxx,t,4) restore_isolation_tester_func @@ -367,8 +367,8 @@ step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func @@ -388,8 +388,8 @@ step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func @@ -414,8 +414,8 @@ step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func @@ -438,8 +438,8 @@ step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE 
''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) restore_isolation_tester_func @@ -459,8 +459,8 @@ step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) restore_isolation_tester_func @@ -485,8 +485,8 @@ step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) restore_isolation_tester_func @@ -508,8 +508,8 @@ step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) restore_isolation_tester_func @@ -529,8 +529,8 @@ step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) restore_isolation_tester_func @@ -555,8 +555,8 @@ step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers -(localhost,57637,t,4) -(localhost,57638,t,4) +(,xxxxx,t,4) +(,xxxxx,t,4) restore_isolation_tester_func @@ -576,8 +576,8 @@ step s1-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) restore_isolation_tester_func @@ -595,8 +595,8 @@ step s2-ddl-create-index-concurrently: <... completed> step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) restore_isolation_tester_func @@ -619,8 +619,8 @@ step s2-ddl-create-index-concurrently: <... 
completed> step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers -(localhost,57637,t,4) -(localhost,57638,t,4) +(,xxxxx,t,4) +(,xxxxx,t,4) restore_isolation_tester_func @@ -642,8 +642,8 @@ step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func @@ -663,8 +663,8 @@ step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func @@ -689,8 +689,8 @@ step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func @@ -712,8 +712,8 @@ step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) restore_isolation_tester_func @@ -733,8 +733,8 @@ step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) restore_isolation_tester_func @@ -759,8 +759,8 @@ step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) restore_isolation_tester_func diff --git a/src/test/regress/expected/isolation_delete_vs_all.out b/src/test/regress/expected/isolation_delete_vs_all.out index 849c8ca7e..1325a85ec 100644 --- a/src/test/regress/expected/isolation_delete_vs_all.out +++ b/src/test/regress/expected/isolation_delete_vs_all.out @@ -77,8 +77,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) restore_isolation_tester_func @@ -103,8 +103,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) restore_isolation_tester_func @@ -126,8 +126,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE 
''delete_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) restore_isolation_tester_func @@ -151,8 +151,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) restore_isolation_tester_func @@ -177,8 +177,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func @@ -202,8 +202,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) restore_isolation_tester_func @@ -313,8 +313,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) restore_isolation_tester_func @@ -339,8 +339,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) restore_isolation_tester_func @@ -364,8 +364,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) restore_isolation_tester_func @@ -390,8 +390,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func @@ -415,8 +415,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) restore_isolation_tester_func diff --git a/src/test/regress/expected/isolation_dis2ref_foreign_keys_on_mx.out b/src/test/regress/expected/isolation_dis2ref_foreign_keys_on_mx.out index 22ca208ae..dd8bd23ad 100644 --- a/src/test/regress/expected/isolation_dis2ref_foreign_keys_on_mx.out +++ b/src/test/regress/expected/isolation_dis2ref_foreign_keys_on_mx.out @@ -2,7 +2,7 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-insert s1-rollback-worker s2-commit-worker s1-stop-connection 
s2-stop-connection s3-display step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -20,7 +20,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -81,7 +81,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -99,7 +99,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -156,7 +156,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-insert-select s1-rollback-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -174,7 +174,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -236,7 +236,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -254,7 +254,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -314,7 +314,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-copy s1-rollback-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -332,7 +332,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); 
start_session_level_connection_to_node @@ -393,7 +393,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -411,7 +411,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -469,7 +469,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-select-for-udpate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -487,7 +487,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node diff --git a/src/test/regress/expected/isolation_distributed_transaction_id.out b/src/test/regress/expected/isolation_distributed_transaction_id.out index 8a9bfe565..575ad9d8b 100644 --- a/src/test/regress/expected/isolation_distributed_transaction_id.out +++ b/src/test/regress/expected/isolation_distributed_transaction_id.out @@ -86,8 +86,8 @@ step s1-verify-current-xact-is-on-worker: nodeport xact_exists -57637 t -57638 t +xxxxx t +xxxxx t step s1-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_dml_vs_repair.out b/src/test/regress/expected/isolation_dml_vs_repair.out index 499632c66..6797bdefc 100644 --- a/src/test/regress/expected/isolation_dml_vs_repair.out +++ b/src/test/regress/expected/isolation_dml_vs_repair.out @@ -1,11 +1,11 @@ Parsed test spec with 2 sessions -starting permutation: s2-invalidate-57637 s1-begin s1-insertone s2-repair s1-commit +starting permutation: s2-invalidate-xxxxx s1-begin s1-insertone s2-repair s1-commit master_create_worker_shards -step s2-invalidate-57637: - UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; +step s2-invalidate-xxxxx: + UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx; step s1-begin: BEGIN; @@ -14,7 +14,7 @@ step s1-insertone: INSERT INTO test_dml_vs_repair VALUES(1, 1); step s2-repair: - SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637); + SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), '', xxxxx, '', xxxxx); step s1-commit: COMMIT; @@ -24,15 +24,15 @@ master_copy_shard_placement -starting permutation: s1-insertone 
s2-invalidate-57637 s1-begin s1-insertall s2-repair s1-commit +starting permutation: s1-insertone s2-invalidate-xxxxx s1-begin s1-insertall s2-repair s1-commit master_create_worker_shards step s1-insertone: INSERT INTO test_dml_vs_repair VALUES(1, 1); -step s2-invalidate-57637: - UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; +step s2-invalidate-xxxxx: + UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx; step s1-begin: BEGIN; @@ -41,7 +41,7 @@ step s1-insertall: INSERT INTO test_dml_vs_repair SELECT test_id, data+1 FROM test_dml_vs_repair; step s2-repair: - SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637); + SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), '', xxxxx, '', xxxxx); step s1-commit: COMMIT; @@ -51,18 +51,18 @@ master_copy_shard_placement -starting permutation: s2-invalidate-57637 s2-begin s2-repair s1-insertone s2-commit s2-invalidate-57638 s1-display s2-invalidate-57637 s2-revalidate-57638 s1-display +starting permutation: s2-invalidate-xxxxx s2-begin s2-repair s1-insertone s2-commit s2-invalidate-xxxxx s1-display s2-invalidate-xxxxx s2-revalidate-xxxxx s1-display master_create_worker_shards -step s2-invalidate-57637: - UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; +step s2-invalidate-xxxxx: + UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx; step s2-begin: BEGIN; step s2-repair: - SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637); + SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), '', xxxxx, '', xxxxx); master_copy_shard_placement @@ -74,8 +74,8 @@ step s2-commit: COMMIT; step s1-insertone: <... 
completed> -step s2-invalidate-57638: - UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; +step s2-invalidate-xxxxx: + UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx; step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; @@ -83,11 +83,11 @@ step s1-display: test_id data 1 1 -step s2-invalidate-57637: - UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; +step s2-invalidate-xxxxx: + UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx; -step s2-revalidate-57638: - UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; +step s2-revalidate-xxxxx: + UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx; step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; @@ -96,12 +96,12 @@ test_id data 1 1 -starting permutation: s2-invalidate-57637 s1-prepared-insertone s2-begin s2-repair s1-prepared-insertone s2-commit s2-invalidate-57638 s1-display s2-invalidate-57637 s2-revalidate-57638 s1-display +starting permutation: s2-invalidate-xxxxx s1-prepared-insertone s2-begin s2-repair s1-prepared-insertone s2-commit s2-invalidate-xxxxx s1-display s2-invalidate-xxxxx s2-revalidate-xxxxx s1-display master_create_worker_shards -step s2-invalidate-57637: - UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; +step s2-invalidate-xxxxx: + UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx; step s1-prepared-insertone: EXECUTE insertone; @@ -110,7 +110,7 @@ step s2-begin: BEGIN; step s2-repair: - SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637); + SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), '', xxxxx, '', xxxxx); master_copy_shard_placement @@ -122,8 +122,8 @@ step s2-commit: COMMIT; step s1-prepared-insertone: <... 
completed> -step s2-invalidate-57638: - UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; +step s2-invalidate-xxxxx: + UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx; step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; @@ -132,11 +132,11 @@ test_id data 1 1 1 1 -step s2-invalidate-57637: - UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; +step s2-invalidate-xxxxx: + UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx; -step s2-revalidate-57638: - UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; +step s2-revalidate-xxxxx: + UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx; step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; @@ -146,12 +146,12 @@ test_id data 1 1 1 1 -starting permutation: s2-invalidate-57637 s1-insertone s1-prepared-insertall s2-begin s2-repair s1-prepared-insertall s2-commit s2-invalidate-57638 s1-display s2-invalidate-57637 s2-revalidate-57638 s1-display +starting permutation: s2-invalidate-xxxxx s1-insertone s1-prepared-insertall s2-begin s2-repair s1-prepared-insertall s2-commit s2-invalidate-xxxxx s1-display s2-invalidate-xxxxx s2-revalidate-xxxxx s1-display master_create_worker_shards -step s2-invalidate-57637: - UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; +step s2-invalidate-xxxxx: + UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx; step s1-insertone: INSERT INTO test_dml_vs_repair VALUES(1, 1); @@ -163,7 +163,7 @@ step s2-begin: BEGIN; step s2-repair: - SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637); + SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), '', xxxxx, '', xxxxx); master_copy_shard_placement @@ -175,8 +175,8 @@ step s2-commit: COMMIT; step s1-prepared-insertall: <... 
completed> -step s2-invalidate-57638: - UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; +step s2-invalidate-xxxxx: + UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx; step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; @@ -187,11 +187,11 @@ test_id data 1 2 1 2 1 3 -step s2-invalidate-57637: - UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; +step s2-invalidate-xxxxx: + UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx; -step s2-revalidate-57638: - UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; +step s2-revalidate-xxxxx: + UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx; step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; diff --git a/src/test/regress/expected/isolation_drop_alter_index_select_for_update_on_mx.out b/src/test/regress/expected/isolation_drop_alter_index_select_for_update_on_mx.out index 10c988f33..c28349feb 100644 --- a/src/test/regress/expected/isolation_drop_alter_index_select_for_update_on_mx.out +++ b/src/test/regress/expected/isolation_drop_alter_index_select_for_update_on_mx.out @@ -2,7 +2,7 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-alter s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -20,7 +20,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -77,7 +77,7 @@ step s1-index: CREATE INDEX dist_table_index ON dist_table (id); step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -115,7 +115,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -133,7 +133,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT 
start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -181,7 +181,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node diff --git a/src/test/regress/expected/isolation_drop_vs_all.out b/src/test/regress/expected/isolation_drop_vs_all.out index 06c50699c..4e18feb93 100644 --- a/src/test/regress/expected/isolation_drop_vs_all.out +++ b/src/test/regress/expected/isolation_drop_vs_all.out @@ -37,8 +37,8 @@ ERROR: relation "drop_hash" does not exist step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) restore_isolation_tester_func @@ -62,8 +62,8 @@ ERROR: relation "drop_hash" does not exist step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) restore_isolation_tester_func @@ -84,8 +84,8 @@ ERROR: relation "drop_hash" does not exist step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) restore_isolation_tester_func @@ -108,8 +108,8 @@ ERROR: relation "drop_hash" does not exist step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func @@ -133,8 +133,8 @@ ERROR: relation "drop_hash" does not exist step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func @@ -157,8 +157,8 @@ ERROR: relation "drop_hash" does not exist step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func @@ -220,8 +220,8 @@ ERROR: relation "drop_hash" does not exist step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) restore_isolation_tester_func @@ -244,8 +244,8 @@ ERROR: relation "drop_hash" does not exist step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) 
+(,xxxxx,t,0) restore_isolation_tester_func @@ -267,8 +267,8 @@ ERROR: relation "drop_hash" does not exist step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func @@ -291,8 +291,8 @@ ERROR: relation "drop_hash" does not exist step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func @@ -314,8 +314,8 @@ ERROR: relation "drop_hash" does not exist step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func diff --git a/src/test/regress/expected/isolation_ensure_dependency_activate_node.out b/src/test/regress/expected/isolation_ensure_dependency_activate_node.out index a17b47667..c8665da1b 100644 --- a/src/test/regress/expected/isolation_ensure_dependency_activate_node.out +++ b/src/test/regress/expected/isolation_ensure_dependency_activate_node.out @@ -5,7 +5,7 @@ starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-pub 1 step s1-print-distributed-objects: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; -- print if the schema has been created @@ -17,7 +17,7 @@ step s1-print-distributed-objects: -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); - SELECT master_remove_node('localhost', 57638); + SELECT master_remove_node('', xxxxx); ?column? @@ -30,22 +30,22 @@ count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) master_remove_node @@ -53,7 +53,7 @@ step s1-begin: BEGIN; step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? 
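Each (,xxxxx,t,0) tuple above is a run_command_on_workers record: the function returns one (nodename,nodeport,success,result) row per worker, which is why every count appears twice in this two-worker cluster. The function-existence check from the s1-print-distributed-objects step, run on its own, looks like this:

    -- mirrors the worker-side check used by s1-print-distributed-objects
    SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
    -- one record per worker, rendered after masking as (,xxxxx,t,0)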
@@ -96,22 +96,22 @@ count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) master_remove_node @@ -122,7 +122,7 @@ starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-work 1 step s1-print-distributed-objects: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; -- print if the schema has been created @@ -134,7 +134,7 @@ step s1-print-distributed-objects: -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); - SELECT master_remove_node('localhost', 57638); + SELECT master_remove_node('', xxxxx); ?column? @@ -147,22 +147,22 @@ count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) master_remove_node @@ -173,7 +173,7 @@ step s2-begin: BEGIN; step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -219,22 +219,22 @@ count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) master_remove_node @@ -245,7 +245,7 @@ starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-public-s 1 step s1-print-distributed-objects: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; -- print if the schema has been created @@ -257,7 +257,7 @@ step s1-print-distributed-objects: -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); - SELECT master_remove_node('localhost', 57638); + SELECT master_remove_node('', xxxxx); ?column? 
@@ -270,22 +270,22 @@ count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) master_remove_node @@ -309,7 +309,7 @@ create_distributed_table step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); step s2-commit: COMMIT; @@ -342,22 +342,22 @@ count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) master_remove_node @@ -368,7 +368,7 @@ starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-cre 1 step s1-print-distributed-objects: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; -- print if the schema has been created @@ -380,7 +380,7 @@ step s1-print-distributed-objects: -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); - SELECT master_remove_node('localhost', 57638); + SELECT master_remove_node('', xxxxx); ?column? @@ -393,22 +393,22 @@ count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) master_remove_node @@ -416,7 +416,7 @@ step s1-begin: BEGIN; step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -461,22 +461,22 @@ count 1 run_command_on_workers -(localhost,57637,t,1) -(localhost,57638,t,1) +(,xxxxx,t,1) +(,xxxxx,t,1) count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) count 0 run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) master_remove_node @@ -487,7 +487,7 @@ starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-work 1 step s1-print-distributed-objects: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; -- print if the schema has been created @@ -499,7 +499,7 @@ step s1-print-distributed-objects: -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); - SELECT master_remove_node('localhost', 57638); + SELECT master_remove_node('', xxxxx); ?column? 
@@ -512,22 +512,22 @@ count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 master_remove_node
@@ -538,7 +538,7 @@ step s2-begin:
     BEGIN;
 step s1-add-worker:
-    SELECT 1 FROM master_add_node('localhost', 57638);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -586,22 +586,22 @@ count
 1
 run_command_on_workers
-(localhost,57637,t,1)
-(localhost,57638,t,1)
+(,xxxxx,t,1)
+(,xxxxx,t,1)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 master_remove_node
@@ -612,7 +612,7 @@ starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-s
 1
 step s1-print-distributed-objects:
-    SELECT 1 FROM master_add_node('localhost', 57638);
+    SELECT 1 FROM master_add_node('', xxxxx);
     -- print an overview of all distributed objects
     SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
     -- print if the schema has been created
@@ -624,7 +624,7 @@ step s1-print-distributed-objects:
     -- print if the function has been created
     SELECT count(*) FROM pg_proc WHERE proname='add';
     SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
-    SELECT master_remove_node('localhost', 57638);
+    SELECT master_remove_node('', xxxxx);
 ?column?
@@ -637,22 +637,22 @@ count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 master_remove_node
@@ -677,7 +677,7 @@ create_distributed_table
 step s1-add-worker:
-    SELECT 1 FROM master_add_node('localhost', 57638);
+    SELECT 1 FROM master_add_node('', xxxxx);
 step s2-commit:
     COMMIT;
@@ -711,22 +711,22 @@ count
 1
 run_command_on_workers
-(localhost,57637,t,1)
-(localhost,57638,t,1)
+(,xxxxx,t,1)
+(,xxxxx,t,1)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 master_remove_node
@@ -737,7 +737,7 @@ starting permutation: s1-print-distributed-objects s2-create-schema s1-begin s2-
 1
 step s1-print-distributed-objects:
-    SELECT 1 FROM master_add_node('localhost', 57638);
+    SELECT 1 FROM master_add_node('', xxxxx);
     -- print an overview of all distributed objects
     SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
     -- print if the schema has been created
@@ -749,7 +749,7 @@ step s1-print-distributed-objects:
     -- print if the function has been created
     SELECT count(*) FROM pg_proc WHERE proname='add';
     SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
-    SELECT master_remove_node('localhost', 57638);
+    SELECT master_remove_node('', xxxxx);
 ?column?
@@ -762,22 +762,22 @@ count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 master_remove_node
@@ -795,7 +795,7 @@ step s3-begin:
     BEGIN;
 step s1-add-worker:
-    SELECT 1 FROM master_add_node('localhost', 57638);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -855,22 +855,22 @@ count
 1
 run_command_on_workers
-(localhost,57637,t,1)
-(localhost,57638,t,1)
+(,xxxxx,t,1)
+(,xxxxx,t,1)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 master_remove_node
@@ -881,7 +881,7 @@ starting permutation: s1-print-distributed-objects s1-add-worker s2-create-schem
 1
 step s1-print-distributed-objects:
-    SELECT 1 FROM master_add_node('localhost', 57638);
+    SELECT 1 FROM master_add_node('', xxxxx);
     -- print an overview of all distributed objects
     SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
     -- print if the schema has been created
@@ -893,7 +893,7 @@ step s1-print-distributed-objects:
     -- print if the function has been created
     SELECT count(*) FROM pg_proc WHERE proname='add';
     SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
-    SELECT master_remove_node('localhost', 57638);
+    SELECT master_remove_node('', xxxxx);
 ?column?
@@ -906,27 +906,27 @@ count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 master_remove_node
 step s1-add-worker:
-    SELECT 1 FROM master_add_node('localhost', 57638);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -992,22 +992,22 @@ count
 1
 run_command_on_workers
-(localhost,57637,t,1)
-(localhost,57638,t,1)
+(,xxxxx,t,1)
+(,xxxxx,t,1)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 master_remove_node
@@ -1018,7 +1018,7 @@ starting permutation: s1-print-distributed-objects s1-begin s2-begin s3-begin s1
 1
 step s1-print-distributed-objects:
-    SELECT 1 FROM master_add_node('localhost', 57638);
+    SELECT 1 FROM master_add_node('', xxxxx);
     -- print an overview of all distributed objects
     SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
     -- print if the schema has been created
@@ -1030,7 +1030,7 @@ step s1-print-distributed-objects:
     -- print if the function has been created
     SELECT count(*) FROM pg_proc WHERE proname='add';
     SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
-    SELECT master_remove_node('localhost', 57638);
+    SELECT master_remove_node('', xxxxx);
 ?column?
@@ -1043,22 +1043,22 @@ count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 master_remove_node
@@ -1072,7 +1072,7 @@ step s3-begin:
     BEGIN;
 step s1-add-worker:
-    SELECT 1 FROM master_add_node('localhost', 57638);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -1138,22 +1138,22 @@ count
 1
 run_command_on_workers
-(localhost,57637,t,1)
-(localhost,57638,t,1)
+(,xxxxx,t,1)
+(,xxxxx,t,1)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 master_remove_node
@@ -1164,7 +1164,7 @@ starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-pub
 1
 step s1-print-distributed-objects:
-    SELECT 1 FROM master_add_node('localhost', 57638);
+    SELECT 1 FROM master_add_node('', xxxxx);
     -- print an overview of all distributed objects
     SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
     -- print if the schema has been created
@@ -1176,7 +1176,7 @@ step s1-print-distributed-objects:
     -- print if the function has been created
     SELECT count(*) FROM pg_proc WHERE proname='add';
     SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
-    SELECT master_remove_node('localhost', 57638);
+    SELECT master_remove_node('', xxxxx);
 ?column?
@@ -1189,22 +1189,22 @@ count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 master_remove_node
@@ -1212,7 +1212,7 @@ step s1-begin:
     BEGIN;
 step s1-add-worker:
-    SELECT 1 FROM master_add_node('localhost', 57638);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -1249,22 +1249,22 @@ count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 1
 run_command_on_workers
-(localhost,57637,t,1)
-(localhost,57638,t,1)
+(,xxxxx,t,1)
+(,xxxxx,t,1)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 master_remove_node
@@ -1275,7 +1275,7 @@ starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-
 1
 step s1-print-distributed-objects:
-    SELECT 1 FROM master_add_node('localhost', 57638);
+    SELECT 1 FROM master_add_node('', xxxxx);
     -- print an overview of all distributed objects
     SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
     -- print if the schema has been created
@@ -1287,7 +1287,7 @@ step s1-print-distributed-objects:
     -- print if the function has been created
     SELECT count(*) FROM pg_proc WHERE proname='add';
     SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
-    SELECT master_remove_node('localhost', 57638);
+    SELECT master_remove_node('', xxxxx);
 ?column?
@@ -1300,22 +1300,22 @@ count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 master_remove_node
@@ -1329,7 +1329,7 @@ step s2-create-type:
     CREATE TYPE tt1 AS (a int, b int);
 step s1-add-worker:
-    SELECT 1 FROM master_add_node('localhost', 57638);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -1359,22 +1359,22 @@ count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 1
 run_command_on_workers
-(localhost,57637,t,1)
-(localhost,57638,t,1)
+(,xxxxx,t,1)
+(,xxxxx,t,1)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 master_remove_node
@@ -1385,7 +1385,7 @@ starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-s
 1
 step s1-print-distributed-objects:
-    SELECT 1 FROM master_add_node('localhost', 57638);
+    SELECT 1 FROM master_add_node('', xxxxx);
     -- print an overview of all distributed objects
     SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
     -- print if the schema has been created
@@ -1397,7 +1397,7 @@ step s1-print-distributed-objects:
     -- print if the function has been created
     SELECT count(*) FROM pg_proc WHERE proname='add';
     SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
-    SELECT master_remove_node('localhost', 57638);
+    SELECT master_remove_node('', xxxxx);
 ?column?
@@ -1410,22 +1410,22 @@ count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 master_remove_node
@@ -1453,7 +1453,7 @@ create_distributed_table
 step s1-add-worker:
-    SELECT 1 FROM master_add_node('localhost', 57638);
+    SELECT 1 FROM master_add_node('', xxxxx);
 step s2-commit:
     COMMIT;
@@ -1488,22 +1488,22 @@ count
 1
 run_command_on_workers
-(localhost,57637,t,1)
-(localhost,57638,t,1)
+(,xxxxx,t,1)
+(,xxxxx,t,1)
 count
 1
 run_command_on_workers
-(localhost,57637,t,1)
-(localhost,57638,t,1)
+(,xxxxx,t,1)
+(,xxxxx,t,1)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 master_remove_node
@@ -1514,7 +1514,7 @@ starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-pub
 1
 step s1-print-distributed-objects:
-    SELECT 1 FROM master_add_node('localhost', 57638);
+    SELECT 1 FROM master_add_node('', xxxxx);
     -- print an overview of all distributed objects
     SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
     -- print if the schema has been created
@@ -1526,7 +1526,7 @@ step s1-print-distributed-objects:
     -- print if the function has been created
     SELECT count(*) FROM pg_proc WHERE proname='add';
     SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
-    SELECT master_remove_node('localhost', 57638);
+    SELECT master_remove_node('', xxxxx);
 ?column?
@@ -1539,22 +1539,22 @@ count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 master_remove_node
@@ -1562,7 +1562,7 @@ step s1-begin:
     BEGIN;
 step s1-add-worker:
-    SELECT 1 FROM master_add_node('localhost', 57638);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -1615,22 +1615,22 @@ count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 1
 run_command_on_workers
-(localhost,57637,t,1)
-(localhost,57638,t,1)
+(,xxxxx,t,1)
+(,xxxxx,t,1)
 master_remove_node
@@ -1641,7 +1641,7 @@ starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-
 1
 step s1-print-distributed-objects:
-    SELECT 1 FROM master_add_node('localhost', 57638);
+    SELECT 1 FROM master_add_node('', xxxxx);
     -- print an overview of all distributed objects
     SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
     -- print if the schema has been created
@@ -1653,7 +1653,7 @@ step s1-print-distributed-objects:
     -- print if the function has been created
     SELECT count(*) FROM pg_proc WHERE proname='add';
     SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
-    SELECT master_remove_node('localhost', 57638);
+    SELECT master_remove_node('', xxxxx);
 ?column?
@@ -1666,22 +1666,22 @@ count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 master_remove_node
@@ -1711,7 +1711,7 @@ wait_until_metadata_sync
 step s1-add-worker:
-    SELECT 1 FROM master_add_node('localhost', 57638);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -1747,22 +1747,22 @@ count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 1
 run_command_on_workers
-(localhost,57637,t,1)
-(localhost,57638,t,1)
+(,xxxxx,t,1)
+(,xxxxx,t,1)
 master_remove_node
@@ -1773,7 +1773,7 @@ starting permutation: s1-print-distributed-objects s2-begin s2-create-schema s2-
 1
 step s1-print-distributed-objects:
-    SELECT 1 FROM master_add_node('localhost', 57638);
+    SELECT 1 FROM master_add_node('', xxxxx);
     -- print an overview of all distributed objects
     SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
     -- print if the schema has been created
@@ -1785,7 +1785,7 @@ step s1-print-distributed-objects:
     -- print if the function has been created
     SELECT count(*) FROM pg_proc WHERE proname='add';
     SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
-    SELECT master_remove_node('localhost', 57638);
+    SELECT master_remove_node('', xxxxx);
 ?column?
@@ -1798,22 +1798,22 @@ count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 master_remove_node
@@ -1844,7 +1844,7 @@ step s1-begin:
     BEGIN;
 step s1-add-worker:
-    SELECT 1 FROM master_add_node('localhost', 57638);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -1881,22 +1881,22 @@ count
 1
 run_command_on_workers
-(localhost,57637,t,1)
-(localhost,57638,t,1)
+(,xxxxx,t,1)
+(,xxxxx,t,1)
 count
 0
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 count
 1
 run_command_on_workers
-(localhost,57637,t,1)
-(localhost,57638,t,1)
+(,xxxxx,t,1)
+(,xxxxx,t,1)
 master_remove_node
diff --git a/src/test/regress/expected/isolation_extension_commands.out b/src/test/regress/expected/isolation_extension_commands.out
index 7af348d84..4f49150cf 100644
--- a/src/test/regress/expected/isolation_extension_commands.out
+++ b/src/test/regress/expected/isolation_extension_commands.out
@@ -5,7 +5,7 @@ step s1-begin:
     BEGIN;
 step s1-add-node-1:
-    SELECT 1 FROM master_add_node('localhost', 57637);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -32,16 +32,16 @@ extname extversion nspname
 seg 1.1 public
 run_command_on_workers
-(localhost,57637,t,seg)
-(localhost,57638,t,seg)
+(,xxxxx,t,seg)
+(,xxxxx,t,seg)
 run_command_on_workers
-(localhost,57637,t,1.1)
-(localhost,57638,t,1.1)
+(,xxxxx,t,1.1)
+(,xxxxx,t,1.1)
 run_command_on_workers
-(localhost,57637,t,public)
-(localhost,57638,t,public)
+(,xxxxx,t,public)
+(,xxxxx,t,public)
 master_remove_node
@@ -52,7 +52,7 @@ step s1-begin:
     BEGIN;
 step s1-add-node-1:
-    SELECT 1 FROM master_add_node('localhost', 57637);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -79,16 +79,16 @@ extname extversion nspname
 seg 1.2 public
 run_command_on_workers
-(localhost,57637,t,seg)
-(localhost,57638,t,seg)
+(,xxxxx,t,seg)
+(,xxxxx,t,seg)
 run_command_on_workers
-(localhost,57637,t,1.2)
-(localhost,57638,t,1.2)
+(,xxxxx,t,1.2)
+(,xxxxx,t,1.2)
 run_command_on_workers
-(localhost,57637,t,public)
-(localhost,57638,t,public)
+(,xxxxx,t,public)
+(,xxxxx,t,public)
 master_remove_node
@@ -96,7 +96,7 @@ master_remove_node
 starting permutation: s1-add-node-1 s1-begin s1-remove-node-1 s2-drop-extension s1-commit s1-print
 step s1-add-node-1:
-    SELECT 1 FROM master_add_node('localhost', 57637);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -105,7 +105,7 @@ step s1-begin:
     BEGIN;
 step s1-remove-node-1:
-    SELECT 1 FROM master_remove_node('localhost', 57637);
+    SELECT 1 FROM master_remove_node('', xxxxx);
 ?column?
@@ -131,13 +131,13 @@ extname extversion nspname
 run_command_on_workers
-(localhost,57638,t,"")
+(,xxxxx,t,"")
 run_command_on_workers
-(localhost,57638,t,"")
+(,xxxxx,t,"")
 run_command_on_workers
-(localhost,57638,t,"")
+(,xxxxx,t,"")
 master_remove_node
@@ -147,7 +147,7 @@ step s1-begin:
     BEGIN;
 step s1-add-node-1:
-    SELECT 1 FROM master_add_node('localhost', 57637);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -174,16 +174,16 @@ extname extversion nspname
 seg 1.3 schema1
 run_command_on_workers
-(localhost,57637,t,seg)
-(localhost,57638,t,seg)
+(,xxxxx,t,seg)
+(,xxxxx,t,seg)
 run_command_on_workers
-(localhost,57637,t,1.2)
-(localhost,57638,t,1.3)
+(,xxxxx,t,1.2)
+(,xxxxx,t,1.3)
 run_command_on_workers
-(localhost,57637,t,public)
-(localhost,57638,t,schema1)
+(,xxxxx,t,public)
+(,xxxxx,t,schema1)
 master_remove_node
@@ -194,7 +194,7 @@ step s1-begin:
     BEGIN;
 step s1-add-node-1:
-    SELECT 1 FROM master_add_node('localhost', 57637);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -220,16 +220,16 @@ extname extversion nspname
 run_command_on_workers
-(localhost,57637,t,"")
-(localhost,57638,t,"")
+(,xxxxx,t,"")
+(,xxxxx,t,"")
 run_command_on_workers
-(localhost,57637,t,"")
-(localhost,57638,t,"")
+(,xxxxx,t,"")
+(,xxxxx,t,"")
 run_command_on_workers
-(localhost,57637,t,"")
-(localhost,57638,t,"")
+(,xxxxx,t,"")
+(,xxxxx,t,"")
 master_remove_node
@@ -237,7 +237,7 @@ master_remove_node
 starting permutation: s1-add-node-1 s1-create-extension-with-schema2 s1-begin s1-remove-node-1 s2-alter-extension-set-schema3 s1-commit s1-print
 step s1-add-node-1:
-    SELECT 1 FROM master_add_node('localhost', 57637);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -249,7 +249,7 @@ step s1-begin:
     BEGIN;
 step s1-remove-node-1:
-    SELECT 1 FROM master_remove_node('localhost', 57637);
+    SELECT 1 FROM master_remove_node('', xxxxx);
 ?column?
@@ -276,20 +276,20 @@ extname extversion nspname
 seg 1.3 schema3
 run_command_on_workers
-(localhost,57638,t,seg)
+(,xxxxx,t,seg)
 run_command_on_workers
-(localhost,57638,t,1.3)
+(,xxxxx,t,1.3)
 run_command_on_workers
-(localhost,57638,t,schema3)
+(,xxxxx,t,schema3)
 master_remove_node
 starting permutation: s1-add-node-1 s2-drop-extension s1-begin s1-remove-node-1 s2-create-extension-with-schema1 s1-commit s1-print
 step s1-add-node-1:
-    SELECT 1 FROM master_add_node('localhost', 57637);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -301,7 +301,7 @@ step s1-begin:
     BEGIN;
 step s1-remove-node-1:
-    SELECT 1 FROM master_remove_node('localhost', 57637);
+    SELECT 1 FROM master_remove_node('', xxxxx);
 ?column?
@@ -328,20 +328,20 @@ extname extversion nspname
 seg 1.3 schema1
 run_command_on_workers
-(localhost,57638,t,seg)
+(,xxxxx,t,seg)
 run_command_on_workers
-(localhost,57638,t,1.3)
+(,xxxxx,t,1.3)
 run_command_on_workers
-(localhost,57638,t,schema1)
+(,xxxxx,t,schema1)
 master_remove_node
 starting permutation: s2-add-node-1 s2-drop-extension s2-remove-node-1 s2-begin s2-create-extension-version-11 s1-add-node-1 s2-commit s1-print
 step s2-add-node-1:
-    SELECT 1 FROM master_add_node('localhost', 57637);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -350,7 +350,7 @@ step s2-drop-extension:
     drop extension seg;
 step s2-remove-node-1:
-    SELECT 1 FROM master_remove_node('localhost', 57637);
+    SELECT 1 FROM master_remove_node('', xxxxx);
 ?column?
@@ -362,7 +362,7 @@ step s2-create-extension-version-11:
     CREATE extension seg VERSION "1.1";
 step s1-add-node-1:
-    SELECT 1 FROM master_add_node('localhost', 57637);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -385,16 +385,16 @@ extname extversion nspname
 seg 1.1 public
 run_command_on_workers
-(localhost,57637,t,"")
-(localhost,57638,t,"")
+(,xxxxx,t,"")
+(,xxxxx,t,"")
 run_command_on_workers
-(localhost,57637,t,"")
-(localhost,57638,t,"")
+(,xxxxx,t,"")
+(,xxxxx,t,"")
 run_command_on_workers
-(localhost,57637,t,"")
-(localhost,57638,t,"")
+(,xxxxx,t,"")
+(,xxxxx,t,"")
 master_remove_node
@@ -405,7 +405,7 @@ step s2-drop-extension:
     drop extension seg;
 step s2-add-node-1:
-    SELECT 1 FROM master_add_node('localhost', 57637);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -414,7 +414,7 @@ step s2-create-extension-version-11:
     CREATE extension seg VERSION "1.1";
 step s2-remove-node-1:
-    SELECT 1 FROM master_remove_node('localhost', 57637);
+    SELECT 1 FROM master_remove_node('', xxxxx);
 ?column?
@@ -426,7 +426,7 @@ step s2-alter-extension-update-to-version-12:
     ALTER extension seg update to "1.2";
 step s1-add-node-1:
-    SELECT 1 FROM master_add_node('localhost', 57637);
+    SELECT 1 FROM master_add_node('', xxxxx);
 step s2-commit:
     COMMIT;
@@ -450,16 +450,16 @@ extname extversion nspname
 seg 1.2 public
 run_command_on_workers
-(localhost,57637,t,seg)
-(localhost,57638,t,seg)
+(,xxxxx,t,seg)
+(,xxxxx,t,seg)
 run_command_on_workers
-(localhost,57637,t,1.1)
-(localhost,57638,t,1.2)
+(,xxxxx,t,1.1)
+(,xxxxx,t,1.2)
 run_command_on_workers
-(localhost,57637,t,public)
-(localhost,57638,t,public)
+(,xxxxx,t,public)
+(,xxxxx,t,public)
 master_remove_node
@@ -467,7 +467,7 @@ master_remove_node
 starting permutation: s2-add-node-1 s2-begin s2-drop-extension s1-remove-node-1 s2-commit s1-print
 step s2-add-node-1:
-    SELECT 1 FROM master_add_node('localhost', 57637);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -479,7 +479,7 @@ step s2-drop-extension:
     drop extension seg;
 step s1-remove-node-1:
-    SELECT 1 FROM master_remove_node('localhost', 57637);
+    SELECT 1 FROM master_remove_node('', xxxxx);
 step s2-commit:
     COMMIT;
@@ -502,13 +502,13 @@ extname extversion nspname
 run_command_on_workers
-(localhost,57638,t,"")
+(,xxxxx,t,"")
 run_command_on_workers
-(localhost,57638,t,"")
+(,xxxxx,t,"")
 run_command_on_workers
-(localhost,57638,t,"")
+(,xxxxx,t,"")
 master_remove_node
@@ -521,7 +521,7 @@ step s2-create-extension-with-schema1:
     CREATE extension seg with schema schema1;
 step s1-add-node-1:
-    SELECT 1 FROM master_add_node('localhost', 57637);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -544,16 +544,16 @@ extname extversion nspname
 seg 1.3 schema1
 run_command_on_workers
-(localhost,57637,t,"")
-(localhost,57638,t,"")
+(,xxxxx,t,"")
+(,xxxxx,t,"")
 run_command_on_workers
-(localhost,57637,t,"")
-(localhost,57638,t,"")
+(,xxxxx,t,"")
+(,xxxxx,t,"")
 run_command_on_workers
-(localhost,57637,t,"")
-(localhost,57638,t,"")
+(,xxxxx,t,"")
+(,xxxxx,t,"")
 master_remove_node
@@ -564,7 +564,7 @@ step s2-drop-extension:
     drop extension seg;
 step s2-add-node-1:
-    SELECT 1 FROM master_add_node('localhost', 57637);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -579,7 +579,7 @@ step s2-alter-extension-version-13:
     ALTER extension seg update to "1.3";
 step s1-remove-node-1:
-    SELECT 1 FROM master_remove_node('localhost', 57637);
+    SELECT 1 FROM master_remove_node('', xxxxx);
 step s2-commit:
     COMMIT;
@@ -603,13 +603,13 @@ extname extversion nspname
 seg 1.3 schema2
 run_command_on_workers
-(localhost,57638,t,seg)
+(,xxxxx,t,seg)
 run_command_on_workers
-(localhost,57638,t,1.3)
+(,xxxxx,t,1.3)
 run_command_on_workers
-(localhost,57638,t,schema2)
+(,xxxxx,t,schema2)
 master_remove_node
@@ -619,7 +619,7 @@ step s2-drop-extension:
     drop extension seg;
 step s2-add-node-1:
-    SELECT 1 FROM master_add_node('localhost', 57637);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -631,7 +631,7 @@ step s2-create-extension-version-11:
     CREATE extension seg VERSION "1.1";
 step s1-remove-node-1:
-    SELECT 1 FROM master_remove_node('localhost', 57637);
+    SELECT 1 FROM master_remove_node('', xxxxx);
 ?column?
@@ -654,13 +654,13 @@ extname extversion nspname
 seg 1.1 public
 run_command_on_workers
-(localhost,57638,t,"")
+(,xxxxx,t,"")
 run_command_on_workers
-(localhost,57638,t,"")
+(,xxxxx,t,"")
 run_command_on_workers
-(localhost,57638,t,"")
+(,xxxxx,t,"")
 master_remove_node
@@ -670,7 +670,7 @@ step s2-drop-extension:
     drop extension seg;
 step s2-add-node-1:
-    SELECT 1 FROM master_add_node('localhost', 57637);
+    SELECT 1 FROM master_add_node('', xxxxx);
 ?column?
@@ -679,7 +679,7 @@ step s2-create-extension-version-11:
     CREATE extension seg VERSION "1.1";
 step s2-remove-node-1:
-    SELECT 1 FROM master_remove_node('localhost', 57637);
+    SELECT 1 FROM master_remove_node('', xxxxx);
 ?column?
@@ -691,7 +691,7 @@ step s2-drop-extension:
     drop extension seg;
 step s1-add-node-1:
-    SELECT 1 FROM master_add_node('localhost', 57637);
+    SELECT 1 FROM master_add_node('', xxxxx);
 step s2-commit:
     COMMIT;
@@ -714,16 +714,16 @@ extname extversion nspname
 run_command_on_workers
-(localhost,57637,t,seg)
-(localhost,57638,t,"")
+(,xxxxx,t,seg)
+(,xxxxx,t,"")
 run_command_on_workers
-(localhost,57637,t,1.3)
-(localhost,57638,t,"")
+(,xxxxx,t,1.3)
+(,xxxxx,t,"")
 run_command_on_workers
-(localhost,57637,t,schema2)
-(localhost,57638,t,"")
+(,xxxxx,t,schema2)
+(,xxxxx,t,"")
 master_remove_node
diff --git a/src/test/regress/expected/isolation_get_all_active_transactions.out b/src/test/regress/expected/isolation_get_all_active_transactions.out
index 3bc0437b0..a4bbb0b5a 100644
--- a/src/test/regress/expected/isolation_get_all_active_transactions.out
+++ b/src/test/regress/expected/isolation_get_all_active_transactions.out
@@ -3,8 +3,8 @@ Parsed test spec with 3 sessions
 starting permutation: s1-grant s1-begin-insert s2-begin-insert s3-as-admin s3-as-user-1 s3-as-readonly s3-as-monitor s1-commit s2-commit
 run_command_on_workers
-(localhost,57637,t,"GRANT ROLE")
-(localhost,57638,t,"GRANT ROLE")
+(,xxxxx,t,"GRANT ROLE")
+(,xxxxx,t,"GRANT ROLE")
 step s1-grant:
     GRANT ALL ON test_table TO test_user_1;
     SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_1');
@@ -82,5 +82,5 @@ step s2-commit:
 run_command_on_workers
-(localhost,57637,f,"ERROR: role ""test_user_1"" cannot be dropped because some objects depend on it")
-(localhost,57638,f,"ERROR: role ""test_user_1"" cannot be dropped because some objects depend on it")
+(,xxxxx,f,"ERROR: role ""test_user_1"" cannot be dropped because some objects depend on it")
+(,xxxxx,f,"ERROR: role ""test_user_1"" cannot be dropped because some objects depend on it")
diff --git a/src/test/regress/expected/isolation_get_distributed_wait_queries_mx.out b/src/test/regress/expected/isolation_get_distributed_wait_queries_mx.out
index 56db49f47..57aaa896f 100644
--- a/src/test/regress/expected/isolation_get_distributed_wait_queries_mx.out
+++ b/src/test/regress/expected/isolation_get_distributed_wait_queries_mx.out
@@ -8,7 +8,7 @@ step s1-update-ref-table-from-coordinator:
     UPDATE ref_table SET value_1 = 15;
 step s2-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57638);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -29,7 +29,7 @@ blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_
 UPDATE ref_table SET value_1 = 12 WHERE user_id = 1
 UPDATE ref_table SET value_1 = 15;
-localhost coordinator_host57638 57636
+ coordinator_host57638 xxxxx
 step s1-commit:
     COMMIT;
@@ -55,7 +55,7 @@ restore_isolation_tester_func
 starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
 step s1-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57637);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -73,7 +73,7 @@ run_commands_on_session_level_connection_to_node
 step s2-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57638);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -92,7 +92,7 @@ step s3-select-distributed-waiting-queries:
 blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port
-UPDATE ref_table SET value_1 = 12 WHERE user_id = 1UPDATE ref_table SET value_1 = 12 WHERE user_id = 1localhost localhost 57638 57637
+UPDATE ref_table SET value_1 = 12 WHERE user_id = 1UPDATE ref_table SET value_1 = 12 WHERE user_id = 1localhost xxxxx xxxxx
 step s1-commit-worker:
     SELECT run_commands_on_session_level_connection_to_node('COMMIT');
@@ -127,7 +127,7 @@ restore_isolation_tester_func
 starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-dist-table s2-start-session-level-connection s2-begin-on-worker s2-update-dist-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
 step s1-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57637);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -145,7 +145,7 @@ run_commands_on_session_level_connection_to_node
 step s2-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57638);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -164,7 +164,7 @@ step s3-select-distributed-waiting-queries:
 blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port
-UPDATE tt1 SET value_1 = 5UPDATE tt1 SET value_1 = 4localhost localhost 57638 57637
+UPDATE tt1 SET value_1 = 5UPDATE tt1 SET value_1 = 4localhost xxxxx xxxxx
 step s1-commit-worker:
     SELECT run_commands_on_session_level_connection_to_node('COMMIT');
@@ -199,7 +199,7 @@
 restore_isolation_tester_func
 starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete-from-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
 step s1-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57637);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -217,7 +217,7 @@ run_commands_on_session_level_connection_to_node
 step s2-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57638);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -236,7 +236,7 @@ step s3-select-distributed-waiting-queries:
 blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port
-UPDATE ref_table SET value_1 = 12 WHERE user_id = 1DELETE FROM ref_table WHERE user_id = 1localhost localhost 57638 57637
+UPDATE ref_table SET value_1 = 12 WHERE user_id = 1DELETE FROM ref_table WHERE user_id = 1localhost xxxxx xxxxx
 step s1-commit-worker:
     SELECT run_commands_on_session_level_connection_to_node('COMMIT');
@@ -271,7 +271,7 @@ restore_isolation_tester_func
 starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
 step s1-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57637);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -289,7 +289,7 @@ run_commands_on_session_level_connection_to_node
 step s2-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57638);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -308,7 +308,7 @@ step s3-select-distributed-waiting-queries:
 blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port
-UPDATE ref_table SET value_1 = 12 WHERE user_id = 1INSERT INTO ref_table VALUES(8,81),(9,91)localhost localhost 57638 57637
+UPDATE ref_table SET value_1 = 12 WHERE user_id = 1INSERT INTO ref_table VALUES(8,81),(9,91) xxxxx xxxxx
 step s1-commit-worker:
     SELECT run_commands_on_session_level_connection_to_node('COMMIT');
@@ -343,7 +343,7 @@ restore_isolation_tester_func
 starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
 step s1-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57637);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -361,7 +361,7 @@ run_commands_on_session_level_connection_to_node
 step s2-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57638);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -413,7 +413,7 @@ restore_isolation_tester_func
 starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
 step s1-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57637);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -431,7 +431,7 @@ run_commands_on_session_level_connection_to_node
 step s2-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57638);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -450,7 +450,7 @@ step s3-select-distributed-waiting-queries:
 blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port
-UPDATE ref_table SET value_1 = 12 WHERE user_id = 1COPY ref_table FROM PROGRAM 'echo 10, 101 && echo 11, 111' WITH CSVlocalhost localhost 57638 57637
+UPDATE ref_table SET value_1 = 12 WHERE user_id = 1COPY ref_table FROM PROGRAM 'echo 10, 101 && echo 11, 111' WITH CSVlocalhost xxxxx xxxxx
 step s1-commit-worker:
     SELECT run_commands_on_session_level_connection_to_node('COMMIT');
@@ -485,7 +485,7 @@ restore_isolation_tester_func
 starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
 step s1-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57637);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -503,7 +503,7 @@ run_commands_on_session_level_connection_to_node
 step s2-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57638);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -555,7 +555,7 @@ restore_isolation_tester_func
 starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-copy-to-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
 step s1-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57637);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -573,7 +573,7 @@ run_commands_on_session_level_connection_to_node
 step s2-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57638);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -625,7 +625,7 @@ restore_isolation_tester_func
 starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
 step s1-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57637);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -643,7 +643,7 @@ run_commands_on_session_level_connection_to_node
 step s2-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57638);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -662,7 +662,7 @@ step s3-select-distributed-waiting-queries:
 blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port
-UPDATE ref_table SET value_1 = 12 WHERE user_id = 1SELECT * FROM ref_table FOR UPDATElocalhost localhost 57638 57637
+UPDATE ref_table SET value_1 = 12 WHERE user_id = 1SELECT * FROM ref_table FOR UPDATElocalhost xxxxx xxxxx
 step s1-commit-worker:
     SELECT run_commands_on_session_level_connection_to_node('COMMIT');
@@ -697,7 +697,7 @@ restore_isolation_tester_func
 starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-begin s1-alter-table s3-select-distributed-waiting-queries s2-commit-worker s1-commit s2-stop-connection
 step s2-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57638);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -727,7 +727,7 @@ blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_
 ALTER TABLE ref_table ADD CONSTRAINT rf_p_key PRIMARY KEY(user_id);
-INSERT INTO ref_table VALUES(8,81),(9,91)coordinator_hostlocalhost 57636 57638
+INSERT INTO ref_table VALUES(8,81),(9,91)coordinator_hostlocalhost xxxxx xxxxx
 step s2-commit-worker:
     SELECT run_commands_on_session_level_connection_to_node('COMMIT');
@@ -767,7 +767,7 @@ blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_
 UPDATE tt1 SET value_1 = 4;
 UPDATE tt1 SET value_1 = 4;
-coordinator_hostcoordinator_host57636 57636
+coordinator_hostcoordinator_host57636 xxxxx
 step s1-commit:
     COMMIT;
@@ -778,7 +778,7 @@ restore_isolation_tester_func
 starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-dist-table s4-start-session-level-connection s4-begin-on-worker s4-update-dist-table s3-select-distributed-waiting-queries s1-commit-worker s4-commit-worker s1-stop-connection s4-stop-connection
 step s1-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57637);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -796,7 +796,7 @@ run_commands_on_session_level_connection_to_node
 step s4-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57637);
+    SELECT start_session_level_connection_to_node('', xxxxx);
 start_session_level_connection_to_node
@@ -815,7 +815,7 @@ step s3-select-distributed-waiting-queries:
 blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port
-UPDATE tt1 SET value_1 = 5UPDATE tt1 SET value_1 = 4localhost localhost 57637 57637
+UPDATE tt1 SET value_1 = 5UPDATE tt1 SET value_1 = 4localhost xxxxx xxxxx
 step s1-commit-worker:
     SELECT run_commands_on_session_level_connection_to_node('COMMIT');
diff --git a/src/test/regress/expected/isolation_hash_copy_vs_all.out b/src/test/regress/expected/isolation_hash_copy_vs_all.out
index b3a90ec0b..950053520 100644
--- a/src/test/regress/expected/isolation_hash_copy_vs_all.out
+++ b/src/test/regress/expected/isolation_hash_copy_vs_all.out
@@ -177,8 +177,8 @@ count
 step s1-show-indexes:
     SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%''');
 run_command_on_workers
-(localhost,57637,t,2)
-(localhost,57638,t,2)
+(,xxxxx,t,2)
+(,xxxxx,t,2)
 starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
 create_distributed_table
@@ -198,8 +198,8 @@ count
 step s1-show-indexes:
     SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%''');
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
 create_distributed_table
@@ -218,8 +218,8 @@ count
 step s1-show-indexes:
     SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%''');
 run_command_on_workers
-(localhost,57637,t,2)
-(localhost,57638,t,2)
+(,xxxxx,t,2)
+(,xxxxx,t,2)
 starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns
 create_distributed_table
@@ -238,8 +238,8 @@ count
 step s1-show-columns:
     SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
 run_command_on_workers
-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)
 starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
 create_distributed_table
@@ -259,8 +259,8 @@ count
 step s1-show-columns:
     SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
 run_command_on_workers
-(localhost,57637,t,"")
-(localhost,57638,t,"")
+(,xxxxx,t,"")
+(,xxxxx,t,"")
 starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
 create_distributed_table
@@ -279,8 +279,8 @@ count
 step s1-show-columns:
     SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
 run_command_on_workers
-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)
 starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count
 create_distributed_table
@@ -611,8 +611,8 @@ count
 step s1-show-indexes:
     SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%''');
 run_command_on_workers
-(localhost,57637,t,2)
-(localhost,57638,t,2)
+(,xxxxx,t,2)
+(,xxxxx,t,2)
 starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes
 create_distributed_table
@@ -632,8 +632,8 @@ count
 step s1-show-indexes:
     SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%''');
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns
 create_distributed_table
@@ -653,8 +653,8 @@ count
 step s1-show-columns:
     SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
 run_command_on_workers
-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)
 starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns
 create_distributed_table
@@ -674,8 +674,8 @@ count
 step s1-show-columns:
     SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
 run_command_on_workers
-(localhost,57637,t,"")
-(localhost,57638,t,"")
+(,xxxxx,t,"")
+(,xxxxx,t,"")
 starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns
 create_distributed_table
@@ -694,8 +694,8 @@ count
 step s1-show-columns:
     SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
 run_command_on_workers
-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)
 starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count
 create_distributed_table
diff --git a/src/test/regress/expected/isolation_insert_select_vs_all.out b/src/test/regress/expected/isolation_insert_select_vs_all.out
index 7987f87c5..7d1aa8891 100644
--- a/src/test/regress/expected/isolation_insert_select_vs_all.out
+++ b/src/test/regress/expected/isolation_insert_select_vs_all.out
@@ -119,8 +119,8 @@ count
 step s1-show-indexes-inserted:
     SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%''');
 run_command_on_workers
-(localhost,57637,t,2)
-(localhost,57638,t,2)
+(,xxxxx,t,2)
+(,xxxxx,t,2)
 starting permutation: s1-initialize s1-ddl-create-index-on-inserted s1-begin s1-insert-select s2-ddl-drop-index-on-inserted s1-commit s1-select-count s1-show-indexes-inserted
 create_distributed_table
@@ -145,8 +145,8 @@ count
 step s1-show-indexes-inserted:
     SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%''');
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-create-index-concurrently-on-inserted s1-commit s1-select-count s1-show-indexes-inserted
 create_distributed_table
@@ -170,8 +170,8 @@ count
 step s1-show-indexes-inserted:
     SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%''');
 run_command_on_workers
-(localhost,57637,t,2)
-(localhost,57638,t,2)
+(,xxxxx,t,2)
+(,xxxxx,t,2)
 starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-add-column-on-inserted s1-commit s1-select-count s1-show-columns-inserted
 create_distributed_table
@@ -195,8 +195,8 @@ count
 step s1-show-columns-inserted:
     SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
 run_command_on_workers
-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)
 starting permutation: s1-initialize s1-ddl-add-column-on-inserted s1-begin s1-insert-select s2-ddl-drop-column-on-inserted s1-commit s1-select-count s1-show-columns-inserted
 create_distributed_table
@@ -221,8 +221,8 @@ count
 step s1-show-columns-inserted:
     SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
 run_command_on_workers
-(localhost,57637,t,"")
-(localhost,57638,t,"")
+(,xxxxx,t,"")
+(,xxxxx,t,"")
 starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-rename-column-on-inserted s1-commit s1-select-count s1-show-columns-inserted s1-show-columns-inserted
 create_distributed_table
@@ -246,13 +246,13 @@ count
 step s1-show-columns-inserted:
     SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
 run_command_on_workers
-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)
 step s1-show-columns-inserted:
     SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
 run_command_on_workers
-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)
 starting permutation: s1-initialize s1-begin s1-insert-select s2-table-size-on-inserted s1-commit s1-select-count
 create_distributed_table
@@ -440,8 +440,8 @@ count
 step s1-show-indexes-selected:
     SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%''');
 run_command_on_workers
-(localhost,57637,t,2)
-(localhost,57638,t,2)
+(,xxxxx,t,2)
+(,xxxxx,t,2)
 starting permutation: s1-initialize s1-ddl-create-index-on-selected s1-begin s1-insert-select s2-ddl-drop-index-on-selected s1-commit s1-select-count s1-show-indexes-selected
 create_distributed_table
@@ -466,8 +466,8 @@ count
 step s1-show-indexes-selected:
     SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%''');
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-create-index-concurrently-on-selected s1-commit s1-select-count s1-show-indexes-selected
 create_distributed_table
@@ -491,8 +491,8 @@ count
 step s1-show-indexes-selected:
     SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%''');
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-add-column-on-selected s1-commit s1-select-count s1-show-columns-selected
 create_distributed_table
@@ -516,8 +516,8 @@ count
 step s1-show-columns-selected:
     SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
 run_command_on_workers
-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)
 starting permutation: s1-initialize s1-ddl-add-column-on-selected s1-begin s1-insert-select s2-ddl-drop-column-on-selected s1-commit s1-select-count s1-show-columns-selected
 create_distributed_table
@@ -542,8 +542,8 @@ count
 step s1-show-columns-selected:
     SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
 run_command_on_workers
-(localhost,57637,t,"")
-(localhost,57638,t,"")
+(,xxxxx,t,"")
+(,xxxxx,t,"")
 starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-rename-column-on-selected s1-commit s1-select-count s1-show-columns-selected
 create_distributed_table
@@ -567,8 +567,8 @@ count
 step s1-show-columns-selected:
     SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
 run_command_on_workers
-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)
 starting permutation: s1-initialize s1-begin s1-insert-select s2-table-size-on-selected s1-commit s1-select-count
 create_distributed_table
@@ -759,8 +759,8 @@ count
 step s1-show-indexes-inserted:
     SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%''');
 run_command_on_workers
-(localhost,57637,t,2)
-(localhost,57638,t,2)
+(,xxxxx,t,2)
+(,xxxxx,t,2)
 starting permutation: s1-initialize s1-ddl-create-index-on-inserted s1-begin s1-ddl-drop-index-on-inserted s2-insert-select s1-commit s1-select-count s1-show-indexes-inserted
 create_distributed_table
@@ -785,8 +785,8 @@ count
 step s1-show-indexes-inserted:
     SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%''');
 run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
 starting permutation: s1-initialize s1-begin s1-ddl-add-column-on-inserted s2-insert-select s1-commit s1-select-count s1-show-columns-inserted
 create_distributed_table
@@ -810,8 +810,8 @@ count
 step s1-show-columns-inserted:
     SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
 run_command_on_workers
-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)
 starting permutation: s1-initialize s1-ddl-add-column-on-inserted s1-begin s1-ddl-drop-column-on-inserted s2-insert-select s1-commit s1-select-count s1-show-columns-inserted
 create_distributed_table
@@ -836,8 +836,8 @@ count
 step s1-show-columns-inserted:
     SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
 run_command_on_workers
-(localhost,57637,t,"")
-(localhost,57638,t,"")
+(,xxxxx,t,"")
+(,xxxxx,t,"")
 starting permutation: s1-initialize s1-begin s1-ddl-rename-column-on-inserted s2-insert-select s1-commit s1-select-count s1-show-columns-inserted
 create_distributed_table
@@ -861,8 +861,8 @@ count
 step s1-show-columns-inserted:
     SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
 run_command_on_workers
-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)
 starting permutation: s1-initialize s1-begin s1-table-size-on-inserted s2-insert-select s1-commit s1-select-count
 create_distributed_table
@@ -1052,8 +1052,8 @@ count
 step s1-show-indexes-selected:
     SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%''');
run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) starting permutation: s1-initialize s1-ddl-create-index-on-selected s1-begin s1-ddl-drop-index-on-selected s2-insert-select s1-commit s1-select-count s1-show-indexes-selected create_distributed_table @@ -1078,8 +1078,8 @@ count step s1-show-indexes-selected: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) starting permutation: s1-initialize s1-begin s1-ddl-add-column-on-selected s2-insert-select s1-commit s1-select-count s1-show-columns-selected create_distributed_table @@ -1104,8 +1104,8 @@ count step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-ddl-add-column-on-selected s1-begin s1-ddl-drop-column-on-selected s2-insert-select s1-commit s1-select-count s1-show-columns-selected create_distributed_table @@ -1130,8 +1130,8 @@ count step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") starting permutation: s1-initialize s1-begin s1-ddl-rename-column-on-selected s2-insert-select s1-commit s1-select-count s1-show-columns-selected create_distributed_table @@ -1155,8 +1155,8 @@ count step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-begin s1-table-size-on-selected s2-insert-select s1-commit s1-select-count create_distributed_table diff --git a/src/test/regress/expected/isolation_insert_select_vs_all_on_mx.out b/src/test/regress/expected/isolation_insert_select_vs_all_on_mx.out index 178c5a4dd..679a35510 100644 --- a/src/test/regress/expected/isolation_insert_select_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_insert_select_vs_all_on_mx.out @@ -2,7 +2,7 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-colocated-insert-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -20,7 +20,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -73,7 +73,7 @@ restore_isolation_tester_func 
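The *_on_mx specs below drive their statements through persistent worker connections rather than the isolation tester's own backends. start_session_level_connection_to_node and its companions are test-only UDFs from the regression suite, not user-facing API; a minimal sketch of the life cycle each permutation follows, with dist_table standing in as an illustrative target (the host/port shown are the unnormalized values this patch masks):

    -- Open a persistent connection to the first worker.
    SELECT start_session_level_connection_to_node('localhost', 57637);
    -- Each command below runs on that same connection, so they share one
    -- worker-side transaction across isolation steps.
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');
    SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table VALUES (1)');  -- dist_table is illustrative
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');
    SELECT stop_session_level_connection_to_node();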
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-colocated-insert-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -91,7 +91,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -144,7 +144,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-insert-select-via-coordinator s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -162,7 +162,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -215,7 +215,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-insert-select-via-coordinator s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -233,7 +233,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -286,7 +286,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-insert s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -304,7 +304,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -357,7 +357,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT 
start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -375,7 +375,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -428,7 +428,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -446,7 +446,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -499,7 +499,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -517,7 +517,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -570,7 +570,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -588,7 +588,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -641,7 +641,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -659,7 +659,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', 
xxxxx); start_session_level_connection_to_node @@ -712,7 +712,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-begin s2-coordinator-drop s1-commit-worker s2-commit s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -767,7 +767,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-begin s2-coordinator-drop s1-commit-worker s2-commit s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -822,7 +822,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -840,7 +840,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -893,7 +893,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -911,7 +911,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node diff --git a/src/test/regress/expected/isolation_insert_vs_all.out b/src/test/regress/expected/isolation_insert_vs_all.out index 197130b47..0ffd3cfa3 100644 --- a/src/test/regress/expected/isolation_insert_vs_all.out +++ b/src/test/regress/expected/isolation_insert_vs_all.out @@ -143,8 +143,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-insert s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -164,8 +164,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) starting permutation: 
s1-initialize s1-begin s1-insert s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -184,8 +184,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) starting permutation: s1-initialize s1-begin s1-insert s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -204,8 +204,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-insert s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -225,8 +225,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") starting permutation: s1-initialize s1-begin s1-insert s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -245,8 +245,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-begin s1-insert s2-table-size s1-commit s1-select-count create_distributed_table @@ -387,8 +387,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-insert s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -408,8 +408,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-insert s1-commit s1-select-count s1-show-columns create_distributed_table @@ -428,8 +428,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-insert s1-commit s1-select-count s1-show-columns create_distributed_table @@ -449,8 +449,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers 
-(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-insert s1-commit s1-select-count s1-show-columns create_distributed_table @@ -469,8 +469,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-begin s1-table-size s2-insert s1-commit s1-select-count create_distributed_table @@ -610,8 +610,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-insert-multi-row s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -631,8 +631,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -651,8 +651,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -671,8 +671,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-insert-multi-row s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -692,8 +692,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -712,8 +712,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-table-size s1-commit s1-select-count create_distributed_table @@ -854,8 +854,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE 
''insert_hash%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-insert-multi-row s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -875,8 +875,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-insert-multi-row s1-commit s1-select-count s1-show-columns create_distributed_table @@ -895,8 +895,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-insert-multi-row s1-commit s1-select-count s1-show-columns create_distributed_table @@ -916,8 +916,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-insert-multi-row s1-commit s1-select-count s1-show-columns create_distributed_table @@ -936,8 +936,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-begin s1-table-size s2-insert-multi-row s1-commit s1-select-count create_distributed_table diff --git a/src/test/regress/expected/isolation_insert_vs_all_on_mx.out b/src/test/regress/expected/isolation_insert_vs_all_on_mx.out index 5a70f0bf3..42c7df266 100644 --- a/src/test/regress/expected/isolation_insert_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_insert_vs_all_on_mx.out @@ -2,7 +2,7 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-insert s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -20,7 +20,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -73,7 +73,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-multi-row s2-start-session-level-connection s2-begin-on-worker s2-insert s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection step 
s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -91,7 +91,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -144,7 +144,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-insert-multi-row s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -162,7 +162,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -215,7 +215,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-multi-row s2-start-session-level-connection s2-begin-on-worker s2-insert-multi-row s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -233,7 +233,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -286,7 +286,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -304,7 +304,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -357,7 +357,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-insert-select s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -375,7 +375,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', 
xxxxx); start_session_level_connection_to_node @@ -428,7 +428,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -446,7 +446,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -499,7 +499,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-multi-row s2-start-session-level-connection s2-begin-on-worker s2-update-multi-row s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -517,7 +517,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -570,7 +570,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -588,7 +588,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -641,7 +641,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -659,7 +659,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -713,7 +713,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 
57637);
+	SELECT start_session_level_connection_to_node('', xxxxx);

start_session_level_connection_to_node

@@ -731,7 +731,7 @@ run_commands_on_session_level_connection_to_node

step s2-start-session-level-connection:
-	SELECT start_session_level_connection_to_node('localhost', 57638);
+	SELECT start_session_level_connection_to_node('', xxxxx);

start_session_level_connection_to_node

diff --git a/src/test/regress/expected/isolation_master_append_table.out b/src/test/regress/expected/isolation_master_append_table.out
index 539384e2d..183119a31 100644
--- a/src/test/regress/expected/isolation_master_append_table.out
+++ b/src/test/regress/expected/isolation_master_append_table.out
@@ -9,7 +9,7 @@ step s2-begin:
 step s1-master_append_table_to_shard:
 	SELECT
-	master_append_table_to_shard(shardid, 'table_to_be_appended', 'localhost', 57636)
+	master_append_table_to_shard(shardid, 'table_to_be_appended', '', xxxxx)
 	FROM
 	pg_dist_shard
 	WHERE
@@ -20,7 +20,7 @@ master_append_table_to_shard

0.0426667
step s2-master_append_table_to_shard:
	SELECT
-	master_append_table_to_shard(shardid, 'table_to_be_appended', 'localhost', 57636)
+	master_append_table_to_shard(shardid, 'table_to_be_appended', '', xxxxx)
	FROM
	pg_dist_shard
	WHERE

diff --git a/src/test/regress/expected/isolation_master_update_node.out b/src/test/regress/expected/isolation_master_update_node.out
index cc746278e..3306ade4f 100644
--- a/src/test/regress/expected/isolation_master_update_node.out
+++ b/src/test/regress/expected/isolation_master_update_node.out
@@ -9,10 +9,10 @@ step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100);
 step s2-begin: BEGIN;
 step s2-update-node-1: -- update a specific node by address
-	SELECT master_update_node(nodeid, 'localhost', nodeport + 10)
 	FROM pg_dist_node
-	WHERE nodename = 'localhost'
-	AND nodeport = 57637;
+	SELECT master_update_node(nodeid, '', nodeport + 10)
 	FROM pg_dist_node
+	WHERE nodename = ''
+	AND nodeport = xxxxx;
 step s1-abort: ABORT;
 step s2-update-node-1: <... completed>
@@ -34,10 +34,10 @@ step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100);
 step s2-begin: BEGIN;
 step s2-update-node-1-force: -- update a specific node by address (force)
-	SELECT master_update_node(nodeid, 'localhost', nodeport + 10, force => true, lock_cooldown => 100)
 	FROM pg_dist_node
-	WHERE nodename = 'localhost'
-	AND nodeport = 57637;
+	SELECT master_update_node(nodeid, '', nodeport + 10, force => true, lock_cooldown => 100)
 	FROM pg_dist_node
+	WHERE nodename = ''
+	AND nodeport = xxxxx;
 step s2-update-node-1-force: <... completed>
 master_update_node
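The two hunks around this point exercise master_update_node, which rewrites a node's address in pg_dist_node and blocks while concurrent transactions hold the node lock. The statements below are the spec's own, shown unnormalized; per the spec's comments, the second form is meant to take the lock over from a blocking backend once lock_cooldown milliseconds have passed:

    -- Plain variant: waits for conflicting transactions before updating.
    SELECT master_update_node(nodeid, 'localhost', nodeport + 10)
    FROM pg_dist_node
    WHERE nodename = 'localhost' AND nodeport = 57637;

    -- Forced variant: takes over the lock after lock_cooldown ms.
    SELECT master_update_node(nodeid, 'localhost', nodeport + 10,
                              force => true, lock_cooldown => 100)
    FROM pg_dist_node
    WHERE nodename = 'localhost' AND nodeport = 57637;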
diff --git a/src/test/regress/expected/isolation_master_update_node_0.out b/src/test/regress/expected/isolation_master_update_node_0.out
index 8dbc71029..0876861ad 100644
--- a/src/test/regress/expected/isolation_master_update_node_0.out
+++ b/src/test/regress/expected/isolation_master_update_node_0.out
@@ -9,10 +9,10 @@ step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100);
 step s2-begin: BEGIN;
 step s2-update-node-1: -- update a specific node by address
-	SELECT master_update_node(nodeid, 'localhost', nodeport + 10)
 	FROM pg_dist_node
-	WHERE nodename = 'localhost'
-	AND nodeport = 57637;
+	SELECT master_update_node(nodeid, '', nodeport + 10)
 	FROM pg_dist_node
+	WHERE nodename = ''
+	AND nodeport = xxxxx;
 step s1-abort: ABORT;
 step s2-update-node-1: <... completed>
@@ -34,10 +34,10 @@ step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100);
 step s2-begin: BEGIN;
 step s2-update-node-1-force: -- update a specific node by address (force)
-	SELECT master_update_node(nodeid, 'localhost', nodeport + 10, force => true, lock_cooldown => 100)
 	FROM pg_dist_node
-	WHERE nodename = 'localhost'
-	AND nodeport = 57637;
+	SELECT master_update_node(nodeid, '', nodeport + 10, force => true, lock_cooldown => 100)
 	FROM pg_dist_node
+	WHERE nodename = ''
+	AND nodeport = xxxxx;
 step s2-update-node-1-force: <... completed>
 master_update_node

diff --git a/src/test/regress/expected/isolation_partitioned_copy_vs_all.out b/src/test/regress/expected/isolation_partitioned_copy_vs_all.out
index 3b578a581..8ac062ae6 100644
--- a/src/test/regress/expected/isolation_partitioned_copy_vs_all.out
+++ b/src/test/regress/expected/isolation_partitioned_copy_vs_all.out
@@ -177,8 +177,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');

run_command_on_workers

-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)

starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
create_distributed_table

@@ -198,8 +198,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');

run_command_on_workers

-(localhost,57637,t,"")
-(localhost,57638,t,"")
+(,xxxxx,t,"")
+(,xxxxx,t,"")

starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
create_distributed_table

@@ -218,8 +218,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');

run_command_on_workers

-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)

starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count
create_distributed_table

@@ -455,8 +455,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');

run_command_on_workers

-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)

starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table

@@ -476,8 +476,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');

run_command_on_workers

-(localhost,57637,t,"")
-(localhost,57638,t,"")
+(,xxxxx,t,"")
+(,xxxxx,t,"")

starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table

@@ -496,8 +496,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name =
''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count create_distributed_table diff --git a/src/test/regress/expected/isolation_range_copy_vs_all.out b/src/test/regress/expected/isolation_range_copy_vs_all.out index 5e0e7e474..1cfa831d6 100644 --- a/src/test/regress/expected/isolation_range_copy_vs_all.out +++ b/src/test/regress/expected/isolation_range_copy_vs_all.out @@ -177,8 +177,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -198,8 +198,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -218,8 +218,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%'''); run_command_on_workers -(localhost,57637,t,1) -(localhost,57638,t,1) +(,xxxxx,t,1) +(,xxxxx,t,1) starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -238,8 +238,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -259,8 +259,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -279,8 +279,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count create_distributed_table @@ -532,8 +532,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%'''); run_command_on_workers -(localhost,57637,t,2) -(localhost,57638,t,2) +(,xxxxx,t,2) +(,xxxxx,t,2) starting permutation: s1-initialize s1-ddl-create-index 
s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes
create_distributed_table

@@ -553,8 +553,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%''');

run_command_on_workers

-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)

starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table

@@ -574,8 +574,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');

run_command_on_workers

-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)

starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table

@@ -595,8 +595,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');

run_command_on_workers

-(localhost,57637,t,"")
-(localhost,57638,t,"")
+(,xxxxx,t,"")
+(,xxxxx,t,"")

starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table

@@ -615,8 +615,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');

run_command_on_workers

-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)

starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count
create_distributed_table

diff --git a/src/test/regress/expected/isolation_ref2ref_foreign_keys_on_mx.out b/src/test/regress/expected/isolation_ref2ref_foreign_keys_on_mx.out
index 1dab78f92..06eb6dee5 100644
--- a/src/test/regress/expected/isolation_ref2ref_foreign_keys_on_mx.out
+++ b/src/test/regress/expected/isolation_ref2ref_foreign_keys_on_mx.out
@@ -2,7 +2,7 @@ Parsed test spec with 2 sessions

starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-update-table-1 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
step s2-start-session-level-connection:
-	SELECT start_session_level_connection_to_node('localhost', 57637);
+	SELECT start_session_level_connection_to_node('', xxxxx);

start_session_level_connection_to_node

@@ -20,15 +20,15 @@ run_commands_on_session_level_connection_to_node

step s1-start-session-level-connection:
-	SELECT start_session_level_connection_to_node('localhost', 57637);
+	SELECT start_session_level_connection_to_node('', xxxxx);

start_session_level_connection_to_node

step s1-view-locks:
	SELECT * FROM master_run_on_worker(
-	ARRAY['localhost']::text[],
-	ARRAY[57637]::int[],
+	ARRAY['']::text[],
+	ARRAY[xxxxx]::int[],
	ARRAY[$$
	SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode)
	FROM (SELECT mode, count(*) count FROM pg_locks
@@ -37,7 +37,7 @@ step s1-view-locks:

node_name node_port success result

-localhost 57637 t {"(ExclusiveLock,1)","(ShareLock,1)"}
+ xxxxx t {"(ExclusiveLock,1)","(ShareLock,1)"}
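The s1-view-locks step above aggregates pg_locks on a worker through master_run_on_worker, which takes parallel arrays of hosts, ports, and commands and returns (node_name, node_port, success, result) rows like those in the hunks. The hunk elides the tail of the inner query, so the GROUP BY and the trailing argument below are assumptions made only to keep the sketch runnable:

    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],   -- unnormalized worker host
        ARRAY[57637]::int[],          -- unnormalized worker port
        ARRAY[$$
            SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode)
            FROM (SELECT mode, count(*) count FROM pg_locks
                  GROUP BY mode) t    -- GROUP BY assumed; the hunk cuts off here
        $$]::text[],
        false);                       -- parallel flag assumed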
step s2-rollback-worker:
	SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');

@@ -46,8 +46,8 @@ run_commands_on_session_level_connection_to_node

step s1-view-locks:
	SELECT * FROM master_run_on_worker(
-	ARRAY['localhost']::text[],
-	ARRAY[57637]::int[],
+	ARRAY['']::text[],
+	ARRAY[xxxxx]::int[],
	ARRAY[$$
	SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode)
	FROM (SELECT mode, count(*) count FROM pg_locks
@@ -56,7 +56,7 @@ step s1-view-locks:

node_name node_port success result

-localhost 57637 t
+ xxxxx t

step s1-stop-connection:
	SELECT stop_session_level_connection_to_node();

@@ -75,7 +75,7 @@ restore_isolation_tester_func

starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-delete-table-1 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
step s2-start-session-level-connection:
-	SELECT start_session_level_connection_to_node('localhost', 57637);
+	SELECT start_session_level_connection_to_node('', xxxxx);

start_session_level_connection_to_node

@@ -93,15 +93,15 @@ run_commands_on_session_level_connection_to_node

step s1-start-session-level-connection:
-	SELECT start_session_level_connection_to_node('localhost', 57637);
+	SELECT start_session_level_connection_to_node('', xxxxx);

start_session_level_connection_to_node

step s1-view-locks:
	SELECT * FROM master_run_on_worker(
-	ARRAY['localhost']::text[],
-	ARRAY[57637]::int[],
+	ARRAY['']::text[],
+	ARRAY[xxxxx]::int[],
	ARRAY[$$
	SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode)
	FROM (SELECT mode, count(*) count FROM pg_locks
@@ -110,7 +110,7 @@ step s1-view-locks:

node_name node_port success result

-localhost 57637 t {"(ExclusiveLock,1)","(ShareLock,1)"}
+ xxxxx t {"(ExclusiveLock,1)","(ShareLock,1)"}

step s2-rollback-worker:
	SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');

@@ -119,8 +119,8 @@ run_commands_on_session_level_connection_to_node

step s1-view-locks:
	SELECT * FROM master_run_on_worker(
-	ARRAY['localhost']::text[],
-	ARRAY[57637]::int[],
+	ARRAY['']::text[],
+	ARRAY[xxxxx]::int[],
	ARRAY[$$
	SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode)
	FROM (SELECT mode, count(*) count FROM pg_locks
@@ -129,7 +129,7 @@ step s1-view-locks:

node_name node_port success result

-localhost 57637 t
+ xxxxx t

step s1-stop-connection:
	SELECT stop_session_level_connection_to_node();

@@ -148,7 +148,7 @@ restore_isolation_tester_func

starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-update-table-2 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
step s2-start-session-level-connection:
-	SELECT start_session_level_connection_to_node('localhost', 57637);
+	SELECT start_session_level_connection_to_node('', xxxxx);

start_session_level_connection_to_node

@@ -166,15 +166,15 @@ run_commands_on_session_level_connection_to_node

step s1-start-session-level-connection:
-	SELECT start_session_level_connection_to_node('localhost', 57637);
+	SELECT start_session_level_connection_to_node('', xxxxx);

start_session_level_connection_to_node

step s1-view-locks:
	SELECT * FROM master_run_on_worker(
-	ARRAY['localhost']::text[],
-	ARRAY[57637]::int[],
+	ARRAY['']::text[],
+	ARRAY[xxxxx]::int[],
	ARRAY[$$
	SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode)
	FROM (SELECT mode, count(*) count FROM pg_locks
@@ -183,7 +183,7 @@ step s1-view-locks:

node_name node_port success result

-localhost 57637 t {"(ExclusiveLock,2)","(ShareLock,1)"}
+ xxxxx t {"(ExclusiveLock,2)","(ShareLock,1)"}

step s2-rollback-worker:
	SELECT
run_commands_on_session_level_connection_to_node('ROLLBACK'); @@ -192,8 +192,8 @@ run_commands_on_session_level_connection_to_node step s1-view-locks: SELECT * FROM master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57637]::int[], + ARRAY['']::text[], + ARRAY[xxxxx]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM (SELECT mode, count(*) count FROM pg_locks @@ -202,7 +202,7 @@ step s1-view-locks: node_name node_port success result -localhost 57637 t + xxxxx t step s1-stop-connection: SELECT stop_session_level_connection_to_node(); @@ -221,7 +221,7 @@ restore_isolation_tester_func starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-delete-table-2 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -239,15 +239,15 @@ run_commands_on_session_level_connection_to_node step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node step s1-view-locks: SELECT * FROM master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57637]::int[], + ARRAY['']::text[], + ARRAY[xxxxx]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM (SELECT mode, count(*) count FROM pg_locks @@ -256,7 +256,7 @@ step s1-view-locks: node_name node_port success result -localhost 57637 t {"(ExclusiveLock,2)","(ShareLock,1)"} + xxxxx t {"(ExclusiveLock,2)","(ShareLock,1)"} step s2-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); @@ -265,8 +265,8 @@ run_commands_on_session_level_connection_to_node step s1-view-locks: SELECT * FROM master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57637]::int[], + ARRAY['']::text[], + ARRAY[xxxxx]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM (SELECT mode, count(*) count FROM pg_locks @@ -275,7 +275,7 @@ step s1-view-locks: node_name node_port success result -localhost 57637 t + xxxxx t step s1-stop-connection: SELECT stop_session_level_connection_to_node(); @@ -294,7 +294,7 @@ restore_isolation_tester_func starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-update-table-3 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -312,15 +312,15 @@ run_commands_on_session_level_connection_to_node step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node step s1-view-locks: SELECT * FROM master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57637]::int[], + ARRAY['']::text[], + ARRAY[xxxxx]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM (SELECT mode, count(*) count FROM pg_locks @@ -329,7 +329,7 @@ step s1-view-locks: node_name node_port success result -localhost 57637 t {"(ExclusiveLock,3)","(ShareLock,1)"} + xxxxx t 
{"(ExclusiveLock,3)","(ShareLock,1)"} step s2-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); @@ -338,8 +338,8 @@ run_commands_on_session_level_connection_to_node step s1-view-locks: SELECT * FROM master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57637]::int[], + ARRAY['']::text[], + ARRAY[xxxxx]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM (SELECT mode, count(*) count FROM pg_locks @@ -348,7 +348,7 @@ step s1-view-locks: node_name node_port success result -localhost 57637 t + xxxxx t step s1-stop-connection: SELECT stop_session_level_connection_to_node(); @@ -367,7 +367,7 @@ restore_isolation_tester_func starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-delete-table-3 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -385,15 +385,15 @@ run_commands_on_session_level_connection_to_node step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node step s1-view-locks: SELECT * FROM master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57637]::int[], + ARRAY['']::text[], + ARRAY[xxxxx]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM (SELECT mode, count(*) count FROM pg_locks @@ -402,7 +402,7 @@ step s1-view-locks: node_name node_port success result -localhost 57637 t {"(ExclusiveLock,3)","(ShareLock,1)"} + xxxxx t {"(ExclusiveLock,3)","(ShareLock,1)"} step s2-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); @@ -411,8 +411,8 @@ run_commands_on_session_level_connection_to_node step s1-view-locks: SELECT * FROM master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57637]::int[], + ARRAY['']::text[], + ARRAY[xxxxx]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM (SELECT mode, count(*) count FROM pg_locks @@ -421,7 +421,7 @@ step s1-view-locks: node_name node_port success result -localhost 57637 t + xxxxx t step s1-stop-connection: SELECT stop_session_level_connection_to_node(); @@ -440,7 +440,7 @@ restore_isolation_tester_func starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-table-1 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -458,15 +458,15 @@ run_commands_on_session_level_connection_to_node step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node step s1-view-locks: SELECT * FROM master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57637]::int[], + ARRAY['']::text[], + ARRAY[xxxxx]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM (SELECT mode, count(*) count FROM pg_locks @@ -475,7 +475,7 @@ step s1-view-locks: node_name node_port success result -localhost 57637 t 
{"(RowExclusiveLock,1)","(ShareLock,1)"} + xxxxx t {"(RowExclusiveLock,1)","(ShareLock,1)"} step s2-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); @@ -484,8 +484,8 @@ run_commands_on_session_level_connection_to_node step s1-view-locks: SELECT * FROM master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57637]::int[], + ARRAY['']::text[], + ARRAY[xxxxx]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM (SELECT mode, count(*) count FROM pg_locks @@ -494,7 +494,7 @@ step s1-view-locks: node_name node_port success result -localhost 57637 t + xxxxx t step s1-stop-connection: SELECT stop_session_level_connection_to_node(); @@ -513,7 +513,7 @@ restore_isolation_tester_func starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-table-2 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -531,15 +531,15 @@ run_commands_on_session_level_connection_to_node step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node step s1-view-locks: SELECT * FROM master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57637]::int[], + ARRAY['']::text[], + ARRAY[xxxxx]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM (SELECT mode, count(*) count FROM pg_locks @@ -548,7 +548,7 @@ step s1-view-locks: node_name node_port success result -localhost 57637 t {"(RowExclusiveLock,2)","(ShareLock,1)"} + xxxxx t {"(RowExclusiveLock,2)","(ShareLock,1)"} step s2-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); @@ -557,8 +557,8 @@ run_commands_on_session_level_connection_to_node step s1-view-locks: SELECT * FROM master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57637]::int[], + ARRAY['']::text[], + ARRAY[xxxxx]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM (SELECT mode, count(*) count FROM pg_locks @@ -567,7 +567,7 @@ step s1-view-locks: node_name node_port success result -localhost 57637 t + xxxxx t step s1-stop-connection: SELECT stop_session_level_connection_to_node(); @@ -586,7 +586,7 @@ restore_isolation_tester_func starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-table-3 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -604,15 +604,15 @@ run_commands_on_session_level_connection_to_node step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node step s1-view-locks: SELECT * FROM master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57637]::int[], + ARRAY['']::text[], + ARRAY[xxxxx]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM (SELECT mode, count(*) count FROM pg_locks @@ -621,7 +621,7 @@ step 
s1-view-locks: node_name node_port success result -localhost 57637 t {"(RowExclusiveLock,3)","(ShareLock,1)"} + xxxxx t {"(RowExclusiveLock,3)","(ShareLock,1)"} step s2-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); @@ -630,8 +630,8 @@ run_commands_on_session_level_connection_to_node step s1-view-locks: SELECT * FROM master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57637]::int[], + ARRAY['']::text[], + ARRAY[xxxxx]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM (SELECT mode, count(*) count FROM pg_locks @@ -640,7 +640,7 @@ step s1-view-locks: node_name node_port success result -localhost 57637 t + xxxxx t step s1-stop-connection: SELECT stop_session_level_connection_to_node(); diff --git a/src/test/regress/expected/isolation_ref_select_for_update_vs_all_on_mx.out b/src/test/regress/expected/isolation_ref_select_for_update_vs_all_on_mx.out index 7855dadea..e5d47bca3 100644 --- a/src/test/regress/expected/isolation_ref_select_for_update_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_ref_select_for_update_vs_all_on_mx.out @@ -5,7 +5,7 @@ create_reference_table step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -23,7 +23,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -74,7 +74,7 @@ create_reference_table step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -92,7 +92,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -149,7 +149,7 @@ create_reference_table step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -167,7 +167,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -217,7 +217,7 @@ create_reference_table step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -235,7 +235,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -286,7 +286,7 @@ create_reference_table step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -304,7 +304,7 @@ run_commands_on_session_level_connection_to_node step 
s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -361,7 +361,7 @@ create_reference_table step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -379,7 +379,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -427,7 +427,7 @@ create_reference_table step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -445,7 +445,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -496,7 +496,7 @@ create_reference_table step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node diff --git a/src/test/regress/expected/isolation_ref_update_delete_upsert_vs_all_on_mx.out b/src/test/regress/expected/isolation_ref_update_delete_upsert_vs_all_on_mx.out index 0b82dab0e..543209743 100644 --- a/src/test/regress/expected/isolation_ref_update_delete_upsert_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_ref_update_delete_upsert_vs_all_on_mx.out @@ -8,7 +8,7 @@ step s1-add-primary-key: ALTER TABLE ref_table ADD CONSTRAINT pri_key PRIMARY KEY (id); step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -26,7 +26,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -82,7 +82,7 @@ create_reference_table step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -100,7 +100,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -160,7 +160,7 @@ step s1-add-primary-key: ALTER TABLE ref_table ADD CONSTRAINT pri_key PRIMARY KEY (id); step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -178,7 +178,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); 
start_session_level_connection_to_node @@ -230,7 +230,7 @@ create_reference_table step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -248,7 +248,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node diff --git a/src/test/regress/expected/isolation_reference_copy_vs_all.out b/src/test/regress/expected/isolation_reference_copy_vs_all.out index 1cba83a40..95029b9ac 100644 --- a/src/test/regress/expected/isolation_reference_copy_vs_all.out +++ b/src/test/regress/expected/isolation_reference_copy_vs_all.out @@ -180,8 +180,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers -(localhost,57637,t,1) -(localhost,57638,t,1) +(,xxxxx,t,1) +(,xxxxx,t,1) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_reference_table @@ -201,8 +201,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_reference_table @@ -221,8 +221,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers -(localhost,57637,t,1) -(localhost,57638,t,1) +(,xxxxx,t,1) +(,xxxxx,t,1) starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_reference_table @@ -241,8 +241,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_reference_table @@ -262,8 +262,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_reference_table @@ -282,8 +282,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count create_reference_table @@ 
-504,8 +504,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers -(localhost,57637,t,1) -(localhost,57638,t,1) +(,xxxxx,t,1) +(,xxxxx,t,1) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes create_reference_table @@ -525,8 +525,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns create_reference_table @@ -546,8 +546,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns create_reference_table @@ -567,8 +567,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns create_reference_table @@ -587,8 +587,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count create_reference_table diff --git a/src/test/regress/expected/isolation_reference_on_mx.out b/src/test/regress/expected/isolation_reference_on_mx.out index 1fed9e597..bcdceb79d 100644 --- a/src/test/regress/expected/isolation_reference_on_mx.out +++ b/src/test/regress/expected/isolation_reference_on_mx.out @@ -2,7 +2,7 @@ Parsed test spec with 2 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -20,7 +20,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -68,7 +68,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete-from-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker 
s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -86,7 +86,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -134,7 +134,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -152,7 +152,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -200,7 +200,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -218,7 +218,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -265,7 +265,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -283,7 +283,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -331,7 +331,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -349,7 +349,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 
57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -396,7 +396,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-copy-to-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -414,7 +414,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -461,7 +461,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -479,7 +479,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -527,7 +527,7 @@ restore_isolation_tester_func starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-begin s1-alter-table s2-commit-worker s1-commit s2-stop-connection step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -572,7 +572,7 @@ restore_isolation_tester_func starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-select-from-ref-table s1-begin s1-alter-table s2-commit-worker s1-commit s2-stop-connection step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node diff --git a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out index 110178365..e22550d55 100644 --- a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out @@ -79,10 +79,10 @@ query query_hostname query_hostport master_query_host_namemaster_query_ update ref_table set a = a + 1; -coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression +coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead regression update ref_table set a = a + 1; -localhost 57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression + xxxxx coordinator_host57636 idle in transactionClient ClientRead regression step s2-view-worker: SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, 
wait_event_type, wait_event, usename, datname @@ -94,10 +94,10 @@ step s2-view-worker: query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname -UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression -UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression -UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression -UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)localhost 57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression +UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1) xxxxx coordinator_host57636 idle in transactionClient ClientRead regression +UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1) xxxxx coordinator_host57636 idle in transactionClient ClientRead regression +UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead regression +UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1) xxxxx coordinator_host57636 idle in transactionClient ClientRead regression step s2-end: END; diff --git a/src/test/regress/expected/isolation_select_vs_all.out b/src/test/regress/expected/isolation_select_vs_all.out index b1136066b..140c4663e 100644 --- a/src/test/regress/expected/isolation_select_vs_all.out +++ b/src/test/regress/expected/isolation_select_vs_all.out @@ -367,8 +367,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers -(localhost,57637,t,1) -(localhost,57638,t,1) +(,xxxxx,t,1) +(,xxxxx,t,1) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-router-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -391,8 +391,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -413,8 +413,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers -(localhost,57637,t,1) -(localhost,57638,t,1) +(,xxxxx,t,1) +(,xxxxx,t,1) starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -436,8 +436,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-router-select s2-ddl-drop-column s1-commit 
s1-select-count s1-show-columns create_distributed_table @@ -460,8 +460,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -483,8 +483,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-begin s1-router-select s2-table-size s1-commit s1-select-count create_distributed_table @@ -693,8 +693,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers -(localhost,57637,t,1) -(localhost,57638,t,1) +(,xxxxx,t,1) +(,xxxxx,t,1) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-router-select s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -717,8 +717,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-router-select s1-commit s1-select-count s1-show-columns create_distributed_table @@ -740,8 +740,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-router-select s1-commit s1-select-count s1-show-columns create_distributed_table @@ -764,8 +764,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-router-select s1-commit s1-select-count s1-show-columns create_distributed_table @@ -787,8 +787,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-begin s1-table-size s2-router-select s1-commit s1-select-count create_distributed_table @@ -1028,8 +1028,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers -(localhost,57637,t,1) -(localhost,57638,t,1) 
+(,xxxxx,t,1) +(,xxxxx,t,1) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-real-time-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -1056,8 +1056,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -1082,8 +1082,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers -(localhost,57637,t,1) -(localhost,57638,t,1) +(,xxxxx,t,1) +(,xxxxx,t,1) starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -1109,8 +1109,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-real-time-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -1137,8 +1137,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -1164,8 +1164,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-begin s1-real-time-select s2-table-size s1-commit s1-select-count create_distributed_table @@ -1370,8 +1370,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers -(localhost,57637,t,1) -(localhost,57638,t,1) +(,xxxxx,t,1) +(,xxxxx,t,1) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-real-time-select s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -1398,8 +1398,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-real-time-select s1-commit s1-select-count s1-show-columns create_distributed_table @@ -1425,8 +1425,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' 
ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-real-time-select s1-commit s1-select-count s1-show-columns create_distributed_table @@ -1453,8 +1453,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-real-time-select s1-commit s1-select-count s1-show-columns create_distributed_table @@ -1480,8 +1480,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-begin s1-table-size s2-real-time-select s1-commit s1-select-count create_distributed_table @@ -1718,8 +1718,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers -(localhost,57637,t,1) -(localhost,57638,t,1) +(,xxxxx,t,1) +(,xxxxx,t,1) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-task-tracker-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -1749,8 +1749,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -1778,8 +1778,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers -(localhost,57637,t,1) -(localhost,57638,t,1) +(,xxxxx,t,1) +(,xxxxx,t,1) starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -1808,8 +1808,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-task-tracker-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -1839,8 +1839,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns 
create_distributed_table @@ -1869,8 +1869,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-table-size s1-commit s1-select-count create_distributed_table @@ -2105,8 +2105,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers -(localhost,57637,t,1) -(localhost,57638,t,1) +(,xxxxx,t,1) +(,xxxxx,t,1) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-task-tracker-select s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -2136,8 +2136,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-task-tracker-select s1-commit s1-select-count s1-show-columns create_distributed_table @@ -2166,8 +2166,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-task-tracker-select s1-commit s1-select-count s1-show-columns create_distributed_table @@ -2197,8 +2197,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-task-tracker-select s1-commit s1-select-count s1-show-columns create_distributed_table @@ -2227,8 +2227,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) +(,xxxxx,t,new_column) +(,xxxxx,t,new_column) starting permutation: s1-initialize s1-begin s1-table-size s2-task-tracker-select s1-commit s1-select-count create_distributed_table diff --git a/src/test/regress/expected/isolation_select_vs_all_on_mx.out b/src/test/regress/expected/isolation_select_vs_all_on_mx.out index 5c5ce570f..1418e44f6 100644 --- a/src/test/regress/expected/isolation_select_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_select_vs_all_on_mx.out @@ -2,7 +2,7 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT 
start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -20,7 +20,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -67,7 +67,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-insert-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -85,7 +85,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -138,7 +138,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-delete s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -156,7 +156,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -209,7 +209,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -227,7 +227,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -280,7 +280,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-begin s2-index s1-commit-worker s2-commit s1-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -324,7 +324,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); 
start_session_level_connection_to_node @@ -342,7 +342,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -389,7 +389,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node diff --git a/src/test/regress/expected/isolation_shouldhaveshards.out b/src/test/regress/expected/isolation_shouldhaveshards.out index 8de483c5e..911948eb6 100644 --- a/src/test/regress/expected/isolation_shouldhaveshards.out +++ b/src/test/regress/expected/isolation_shouldhaveshards.out @@ -5,7 +5,7 @@ starting permutation: s1-add-second-node s1-begin s2-begin s2-create-distributed 1 step s1-add-second-node: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -26,7 +26,7 @@ create_distributed_table step s1-noshards: - SELECT * from master_set_node_property('localhost', 57637, 'shouldhaveshards', false); + SELECT * from master_set_node_property('', xxxxx, 'shouldhaveshards', false); step s2-commit: COMMIT; @@ -45,8 +45,8 @@ step s2-shardcounts: nodeport count -57637 2 -57638 2 +xxxxx 2 +xxxxx 2 master_remove_node @@ -57,7 +57,7 @@ starting permutation: s1-add-second-node s1-begin s2-begin s1-noshards s2-create 1 step s1-add-second-node: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('', xxxxx); ?column? @@ -69,7 +69,7 @@ step s2-begin: BEGIN; step s1-noshards: - SELECT * from master_set_node_property('localhost', 57637, 'shouldhaveshards', false); + SELECT * from master_set_node_property('', xxxxx, 'shouldhaveshards', false); master_set_node_property @@ -97,7 +97,7 @@ step s2-shardcounts: nodeport count -57638 4 +xxxxx 4 master_remove_node @@ -114,13 +114,13 @@ step s2-begin: BEGIN; step s1-noshards: - SELECT * from master_set_node_property('localhost', 57637, 'shouldhaveshards', false); + SELECT * from master_set_node_property('', xxxxx, 'shouldhaveshards', false); master_set_node_property step s2-update-node: - select * from master_update_node((select nodeid from pg_dist_node where nodeport = 57637), 'localhost', 57638) + select * from master_update_node((select nodeid from pg_dist_node where nodeport = xxxxx), '', xxxxx) step s1-commit: COMMIT; @@ -147,19 +147,19 @@ step s2-begin: BEGIN; step s2-update-node: - select * from master_update_node((select nodeid from pg_dist_node where nodeport = 57637), 'localhost', 57638) + select * from master_update_node((select nodeid from pg_dist_node where nodeport = xxxxx), '', xxxxx) master_update_node step s1-noshards: - SELECT * from master_set_node_property('localhost', 57637, 'shouldhaveshards', false); + SELECT * from master_set_node_property('', xxxxx, 'shouldhaveshards', false); step s2-commit: COMMIT; step s1-noshards: <... 
completed> -error in steps s2-commit s1-noshards: ERROR: node at "localhost:xxxxx" does not exist +error in steps s2-commit s1-noshards: ERROR: node at ":xxxxx" does not exist step s1-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_truncate_vs_all.out b/src/test/regress/expected/isolation_truncate_vs_all.out index 930c5ef25..f03cef9c4 100644 --- a/src/test/regress/expected/isolation_truncate_vs_all.out +++ b/src/test/regress/expected/isolation_truncate_vs_all.out @@ -77,8 +77,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''truncate_append%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) restore_isolation_tester_func @@ -103,8 +103,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''truncate_append%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) restore_isolation_tester_func @@ -126,8 +126,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''truncate_append%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) restore_isolation_tester_func @@ -151,8 +151,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func @@ -177,8 +177,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func @@ -202,8 +202,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func @@ -379,8 +379,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''truncate_append%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) restore_isolation_tester_func @@ -405,8 +405,8 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''truncate_append%'''); run_command_on_workers -(localhost,57637,t,0) -(localhost,57638,t,0) +(,xxxxx,t,0) +(,xxxxx,t,0) restore_isolation_tester_func @@ -430,8 +430,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func @@ -456,8 +456,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 
LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func @@ -481,8 +481,8 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers -(localhost,57637,t,"") -(localhost,57638,t,"") +(,xxxxx,t,"") +(,xxxxx,t,"") restore_isolation_tester_func diff --git a/src/test/regress/expected/isolation_truncate_vs_all_on_mx.out b/src/test/regress/expected/isolation_truncate_vs_all_on_mx.out index 9432fcb58..91043b644 100644 --- a/src/test/regress/expected/isolation_truncate_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_truncate_vs_all_on_mx.out @@ -2,7 +2,7 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-truncate s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -20,7 +20,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -74,7 +74,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -92,7 +92,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -146,7 +146,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -164,7 +164,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -218,7 +218,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT 
start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -236,7 +236,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -290,7 +290,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -308,7 +308,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -368,7 +368,7 @@ step s1-alter: ALTER TABLE truncate_table DROP value; step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -413,7 +413,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -431,7 +431,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node diff --git a/src/test/regress/expected/isolation_update_delete_upsert_vs_all_on_mx.out b/src/test/regress/expected/isolation_update_delete_upsert_vs_all_on_mx.out index e52a21b8d..826367b4f 100644 --- a/src/test/regress/expected/isolation_update_delete_upsert_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_update_delete_upsert_vs_all_on_mx.out @@ -5,7 +5,7 @@ create_distributed_table step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -23,7 +23,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -79,7 +79,7 @@ create_distributed_table step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -97,7 +97,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT 
start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -153,7 +153,7 @@ create_distributed_table step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -171,7 +171,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -225,7 +225,7 @@ create_distributed_table step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node @@ -243,7 +243,7 @@ run_commands_on_session_level_connection_to_node step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('', xxxxx); start_session_level_connection_to_node diff --git a/src/test/regress/expected/isolation_update_node.out b/src/test/regress/expected/isolation_update_node.out index 7b22761dc..6a80d1e4d 100644 --- a/src/test/regress/expected/isolation_update_node.out +++ b/src/test/regress/expected/isolation_update_node.out @@ -3,15 +3,15 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-update-node-1 s2-update-node-2 s1-commit s1-show-nodes nodeid nodename nodeport -22 localhost 57637 -23 localhost 57638 +22 xxxxx +23 xxxxx step s1-begin: BEGIN; step s1-update-node-1: SELECT 1 FROM master_update_node( - (select nodeid from pg_dist_node where nodeport = 57637), - 'localhost', + (select nodeid from pg_dist_node where nodeport = xxxxx), + '', 58637); ?column? @@ -19,8 +19,8 @@ step s1-update-node-1: 1 step s2-update-node-2: SELECT 1 FROM master_update_node( - (select nodeid from pg_dist_node where nodeport = 57638), - 'localhost', + (select nodeid from pg_dist_node where nodeport = xxxxx), + '', 58638); step s1-commit: @@ -37,23 +37,23 @@ step s1-show-nodes: nodeid nodename nodeport isactive -22 localhost 58637 t -23 localhost 58638 t +22 58637 t +23 58638 t nodeid nodename nodeport starting permutation: s1-begin s1-update-node-1 s2-begin s2-update-node-1 s1-commit s2-abort s1-show-nodes nodeid nodename nodeport -24 localhost 57637 -25 localhost 57638 +24 xxxxx +25 xxxxx step s1-begin: BEGIN; step s1-update-node-1: SELECT 1 FROM master_update_node( - (select nodeid from pg_dist_node where nodeport = 57637), - 'localhost', + (select nodeid from pg_dist_node where nodeport = xxxxx), + '', 58637); ?column? 
@@ -64,8 +64,8 @@ step s2-begin:
step s2-update-node-1:
SELECT 1 FROM master_update_node(
- (select nodeid from pg_dist_node where nodeport = 57637),
- 'localhost',
+ (select nodeid from pg_dist_node where nodeport = xxxxx),
+ '',
58637);
step s1-commit:
@@ -85,30 +85,30 @@ step s1-show-nodes:
nodeid nodename nodeport isactive
-25 localhost 57638 t
-24 localhost 58637 t
+25 xxxxx t
+24 58637 t
nodeid nodename nodeport
starting permutation: s1-begin s1-update-node-1 s2-start-metadata-sync-node-2 s1-commit s2-verify-metadata
nodeid nodename nodeport
-26 localhost 57637
-27 localhost 57638
+26 xxxxx
+27 xxxxx
step s1-begin:
BEGIN;
step s1-update-node-1:
SELECT 1 FROM master_update_node(
- (select nodeid from pg_dist_node where nodeport = 57637),
- 'localhost',
+ (select nodeid from pg_dist_node where nodeport = xxxxx),
+ '',
58637);
?column?
1
step s2-start-metadata-sync-node-2:
- SELECT start_metadata_sync_to_node('localhost', 57638);
+ SELECT start_metadata_sync_to_node('', xxxxx);
step s1-commit:
COMMIT;
@@ -120,16 +120,16 @@ start_metadata_sync_to_node
step s2-verify-metadata:
SELECT nodeid, groupid, nodename, nodeport FROM pg_dist_node ORDER BY nodeid;
SELECT master_run_on_worker(
- ARRAY['localhost'], ARRAY[57638],
+ ARRAY[''], ARRAY[xxxxx],
ARRAY['SELECT jsonb_agg(ROW(nodeid, groupid, nodename, nodeport) ORDER BY nodeid) FROM pg_dist_node'],
false);
nodeid groupid nodename nodeport
-26 26 localhost 58637
-27 27 localhost 57638
+26 26 58637
+27 27 xxxxx
master_run_on_worker
-(localhost,57638,t,"[{""f1"": 26, ""f2"": 26, ""f3"": ""localhost"", ""f4"": 58637}, {""f1"": 27, ""f2"": 27, ""f3"": ""localhost"", ""f4"": 57638}]")
+(,xxxxx,t,"[{""f1"": 26, ""f2"": 26, ""f3"": """", ""f4"": 58637}, {""f1"": 27, ""f2"": 27, ""f3"": """", ""f4"": xxxxx}]")
nodeid nodename nodeport
diff --git a/src/test/regress/expected/isolation_update_node_lock_writes.out b/src/test/regress/expected/isolation_update_node_lock_writes.out
index dcaa5b991..07fdb9af5 100644
--- a/src/test/regress/expected/isolation_update_node_lock_writes.out
+++ b/src/test/regress/expected/isolation_update_node_lock_writes.out
@@ -9,9 +9,9 @@ step s1-begin:
step s1-update-node-1:
SELECT 1 FROM master_update_node(
- (select nodeid from pg_dist_node where nodeport = 57637),
- 'localhost',
- 57638);
+ (select nodeid from pg_dist_node where nodeport = xxxxx),
+ '',
+ xxxxx);
?column?
@@ -49,9 +49,9 @@ step s2-insert:
step s1-update-node-1:
SELECT 1 FROM master_update_node(
- (select nodeid from pg_dist_node where nodeport = 57637),
- 'localhost',
- 57638);
+ (select nodeid from pg_dist_node where nodeport = xxxxx),
+ '',
+ xxxxx);
step s2-commit:
COMMIT;
diff --git a/src/test/regress/expected/isolation_update_vs_all.out b/src/test/regress/expected/isolation_update_vs_all.out
index 42090fcf2..72dee876c 100644
--- a/src/test/regress/expected/isolation_update_vs_all.out
+++ b/src/test/regress/expected/isolation_update_vs_all.out
@@ -97,8 +97,8 @@ count
step s1-show-indexes:
SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''update_hash%''');
run_command_on_workers
-(localhost,57637,t,2)
-(localhost,57638,t,2)
+(,xxxxx,t,2)
+(,xxxxx,t,2)
restore_isolation_tester_func
@@ -123,8 +123,8 @@ count
step s1-show-indexes:
SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''update_hash%''');
run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
restore_isolation_tester_func
@@ -146,8 +146,8 @@ count
step s1-show-indexes:
SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''update_hash%''');
run_command_on_workers
-(localhost,57637,t,2)
-(localhost,57638,t,2)
+(,xxxxx,t,2)
+(,xxxxx,t,2)
restore_isolation_tester_func
@@ -171,8 +171,8 @@ count
step s1-show-columns:
SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)
restore_isolation_tester_func
@@ -197,8 +197,8 @@ count
step s1-show-columns:
SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
-(localhost,57637,t,"")
-(localhost,57638,t,"")
+(,xxxxx,t,"")
+(,xxxxx,t,"")
restore_isolation_tester_func
@@ -222,8 +222,8 @@ count
step s1-show-columns:
SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)
restore_isolation_tester_func
@@ -373,8 +373,8 @@ count
step s1-show-indexes:
SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''update_hash%''');
run_command_on_workers
-(localhost,57637,t,2)
-(localhost,57638,t,2)
+(,xxxxx,t,2)
+(,xxxxx,t,2)
restore_isolation_tester_func
@@ -399,8 +399,8 @@ count
step s1-show-indexes:
SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''update_hash%''');
run_command_on_workers
-(localhost,57637,t,0)
-(localhost,57638,t,0)
+(,xxxxx,t,0)
+(,xxxxx,t,0)
restore_isolation_tester_func
@@ -424,8 +424,8 @@ count
step s1-show-columns:
SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)
restore_isolation_tester_func
@@ -450,8 +450,8 @@ count
step s1-show-columns:
SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
-(localhost,57637,t,"")
-(localhost,57638,t,"")
+(,xxxxx,t,"")
+(,xxxxx,t,"")
restore_isolation_tester_func
@@ -476,8 +476,8 @@ count
step s1-show-columns:
SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)
restore_isolation_tester_func
diff --git a/src/test/regress/expected/isolation_upsert_vs_all.out b/src/test/regress/expected/isolation_upsert_vs_all.out
index 9438d39da..b25da8a83 100644
--- a/src/test/regress/expected/isolation_upsert_vs_all.out
+++ b/src/test/regress/expected/isolation_upsert_vs_all.out
@@ -117,8 +117,8 @@ count
step s1-show-indexes:
SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''upsert_hash%''');
run_command_on_workers
-(localhost,57637,t,4)
-(localhost,57638,t,4)
+(,xxxxx,t,4)
+(,xxxxx,t,4)
restore_isolation_tester_func
@@ -143,8 +143,8 @@ count
step s1-show-indexes:
SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''upsert_hash%''');
run_command_on_workers
-(localhost,57637,t,2)
-(localhost,57638,t,2)
+(,xxxxx,t,2)
+(,xxxxx,t,2)
restore_isolation_tester_func
@@ -166,8 +166,8 @@ count
step s1-show-indexes:
SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''upsert_hash%''');
run_command_on_workers
-(localhost,57637,t,4)
-(localhost,57638,t,4)
+(,xxxxx,t,4)
+(,xxxxx,t,4)
restore_isolation_tester_func
@@ -191,8 +191,8 @@ count
step s1-show-columns:
SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)
restore_isolation_tester_func
@@ -217,8 +217,8 @@ count
step s1-show-columns:
SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
-(localhost,57637,t,"")
-(localhost,57638,t,"")
+(,xxxxx,t,"")
+(,xxxxx,t,"")
restore_isolation_tester_func
@@ -242,8 +242,8 @@ count
step s1-show-columns:
SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)
restore_isolation_tester_func
@@ -413,8 +413,8 @@ count
step s1-show-indexes:
SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''upsert_hash%''');
run_command_on_workers
-(localhost,57637,t,4)
-(localhost,57638,t,4)
+(,xxxxx,t,4)
+(,xxxxx,t,4)
restore_isolation_tester_func
@@ -439,8 +439,8 @@ count
step s1-show-indexes:
SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''upsert_hash%''');
run_command_on_workers
-(localhost,57637,t,2)
-(localhost,57638,t,2)
+(,xxxxx,t,2)
+(,xxxxx,t,2)
restore_isolation_tester_func
@@ -464,8 +464,8 @@ count
step s1-show-columns:
SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)
restore_isolation_tester_func
@@ -490,8 +490,8 @@ count
step s1-show-columns:
SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
-(localhost,57637,t,"")
-(localhost,57638,t,"")
+(,xxxxx,t,"")
+(,xxxxx,t,"")
restore_isolation_tester_func
@@ -516,8 +516,8 @@ count
step s1-show-columns:
SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
-(localhost,57637,t,new_column)
-(localhost,57638,t,new_column)
+(,xxxxx,t,new_column)
+(,xxxxx,t,new_column)
restore_isolation_tester_func
diff --git a/src/test/regress/expected/local_shard_execution.out b/src/test/regress/expected/local_shard_execution.out
index 2275661bf..d4f8e3fc9 100644
--- a/src/test/regress/expected/local_shard_execution.out
+++ b/src/test/regress/expected/local_shard_execution.out
@@ -213,7 +213,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20;
Task Count: 1
Tasks Shown: All
-> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
-> Index Scan using distributed_table_pkey_1470001 on distributed_table_1470001 distributed_table
Index Cond: (key = 1)
Filter: (age = 20)
@@ -226,7 +226,7 @@ EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM distribute
Task Count: 1
Tasks Shown: All
-> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
-> Index Scan using distributed_table_pkey_1470001 on distributed_table_1470001 distributed_table (actual rows=1 loops=1)
Index Cond: (key = 1)
Filter: (age = 20)
@@ -239,7 +239,7 @@ EXPLAIN (COSTS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20;
Task Count: 1
Tasks Shown: All
-> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
-> Delete on distributed_table_1470001 distributed_table
-> Index Scan using distributed_table_pkey_1470001 on distributed_table_1470001 distributed_table
Index Cond: (key = 1)
@@ -253,7 +253,7 @@ EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) DELETE FROM distributed_ta
Task Count: 1
Tasks Shown: All
-> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host= port=xxxxx dbname=
-> Delete on distributed_table_1470001 distributed_table (actual rows=0 loops=1)
-> Index Scan using distributed_table_pkey_1470001 on distributed_table_1470001 distributed_table (actual rows=0 loops=1)
Index Cond: (key = 1)
@@ -1550,7 +1550,7 @@ SELECT create_distributed_function('register_for_event(int,int,invite_resp)', 'p
(1 row)
--- call 7 times to make sure it works after the 5th time(postgres binds values after the 5th time)
+-- call 7 times to make sure it works after the 5th time( binds values after the 5th time)
-- after 6th, the local execution caches the local plans and uses it
-- execute it both locally and remotely
CALL register_for_event(16, 1, 'yes');
diff --git a/src/test/regress/expected/locally_execute_intermediate_results.out b/src/test/regress/expected/locally_execute_intermediate_results.out
index d245c324b..e099a4b9d 100644
--- a/src/test/regress/expected/locally_execute_intermediate_results.out
+++ b/src/test/regress/expected/locally_execute_intermediate_results.out
@@ -74,8 +74,8 @@ DEBUG: generating subplan XXX_3 for subquery SELECT key FROM (SELECT intermedia
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM locally_execute_intermediate_results.table_2 WHERE (key OPERATOR(pg_catalog.>) (SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer))) GROUP BY key HAVING (max(value) OPERATOR(pg_catalog.>) (SELECT cte_1.max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1))
DEBUG: Subplan XXX_1 will be written to local file
DEBUG: Subplan XXX_2 will be written to local file
-DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_3 will be sent to :xxxxx
+DEBUG: Subplan XXX_3 will be sent to :xxxxx
NOTICE: executing the command locally: SELECT key FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2 ORDER BY key LIMIT 1
count
---------------------------------------------------------------------
@@ -99,8 +99,8 @@ DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM lo
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT max(key) AS max FROM locally_execute_intermediate_results.table_2
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM locally_execute_intermediate_results.table_2 WHERE (key OPERATOR(pg_catalog.>) (SELECT cte_2.max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max integer)) cte_2)) GROUP BY key HAVING (max(value) OPERATOR(pg_catalog.>) (SELECT cte_1.max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1))
DEBUG: Subplan XXX_1 will be written to local file
-DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_2 will be sent to :xxxxx
+DEBUG: Subplan XXX_2 will be sent to :xxxxx
count
---------------------------------------------------------------------
(0 rows)
@@ -235,8 +235,8 @@ DEBUG: generating subplan XXX_1 for CTE a: SELECT key, value FROM locally_execu
DEBUG: push down of limit count: 1
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count, a.key FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN locally_execute_intermediate_results.table_2 USING (key)) GROUP BY a.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>) (SELECT a_1.value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a_1))
DEBUG: Subplan XXX_1 will be written to local file
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
+DEBUG: Subplan XXX_1 will be sent to :xxxxx
count | key
--------------------------------------------------------------------- (0 rows) @@ -254,8 +254,8 @@ DEBUG: generating subplan XXX_1 for CTE a: SELECT key, value FROM locally_execu DEBUG: generating subplan XXX_2 for subquery SELECT max(value) AS max FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count, a.key FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN locally_execute_intermediate_results.table_2 USING (key)) GROUP BY a.key HAVING (max(table_2.value) OPERATOR(pg_catalog.=) (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text))) DEBUG: Subplan XXX_1 will be written to local file -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx DEBUG: Subplan XXX_2 will be written to local file NOTICE: executing the command locally: SELECT max(value) AS max FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a count | key @@ -308,7 +308,7 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte_3.key, ta DEBUG: Subplan XXX_1 will be written to local file DEBUG: Subplan XXX_2 will be written to local file NOTICE: executing the command locally: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_1 -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx NOTICE: executing the command locally: SELECT max(key) AS key FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2 key | value --------------------------------------------------------------------- @@ -326,8 +326,8 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte_3.key, re DEBUG: Subplan XXX_1 will be written to local file DEBUG: Subplan XXX_2 will be written to local file NOTICE: executing the command locally: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_1 -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx NOTICE: executing the command locally: SELECT max(key) AS key FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2 key | value --------------------------------------------------------------------- @@ -348,8 +348,8 @@ HAVING max(value) > DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM 
locally_execute_intermediate_results.table_1 DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT max(value) AS max FROM locally_execute_intermediate_results.table_1 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM locally_execute_intermediate_results.table_2 WHERE (key OPERATOR(pg_catalog.=) 3) GROUP BY key HAVING (max(value) OPERATOR(pg_catalog.>) (SELECT cte_1.max FROM ((SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1 JOIN (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_2 USING (max)))) -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_2 will be sent to :xxxxx count --------------------------------------------------------------------- (0 rows) @@ -369,9 +369,9 @@ DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM lo DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT max(value) AS max FROM locally_execute_intermediate_results.table_1 DEBUG: generating subplan XXX_3 for CTE cte_3: SELECT key, value FROM locally_execute_intermediate_results.table_2 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (locally_execute_intermediate_results.table_2 JOIN (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_3 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3) GROUP BY table_2.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>) (SELECT cte_1.max FROM ((SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1 JOIN (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_2 USING (max)))) -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_2 will be sent to :xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx count --------------------------------------------------------------------- (0 rows) @@ -391,9 +391,9 @@ DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM lo DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT max(value) AS max FROM locally_execute_intermediate_results.table_1 DEBUG: generating subplan XXX_3 for CTE cte_3: SELECT key, value FROM locally_execute_intermediate_results.table_2 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((locally_execute_intermediate_results.table_2 JOIN (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_3 USING (key)) JOIN (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_2 ON ((table_2.key OPERATOR(pg_catalog.=) (cte_2.max)::integer))) JOIN (SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1 USING 
(max)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3) GROUP BY table_2.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>) (SELECT cte_1_1.max FROM ((SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1_1 JOIN (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_2_1 USING (max)))) -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_2 will be sent to :xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx count --------------------------------------------------------------------- (0 rows) @@ -408,10 +408,10 @@ SELECT * FROM DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM locally_execute_intermediate_results.table_1 DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT max(value) AS max FROM locally_execute_intermediate_results.table_2 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.key, bar.key FROM (SELECT table_1.key FROM locally_execute_intermediate_results.table_1 GROUP BY table_1.key HAVING (max(table_1.value) OPERATOR(pg_catalog.>) (SELECT cte_1.max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1))) foo, (SELECT table_2.key FROM locally_execute_intermediate_results.table_2 GROUP BY table_2.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>) (SELECT cte_2.max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_2))) bar WHERE (foo.key OPERATOR(pg_catalog.=) bar.key) -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_2 will be sent to :xxxxx +DEBUG: Subplan XXX_2 will be sent to :xxxxx key | key --------------------------------------------------------------------- (0 rows) @@ -428,11 +428,11 @@ DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM lo DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT max(value) AS max FROM locally_execute_intermediate_results.table_2 DEBUG: generating subplan XXX_3 for subquery SELECT key FROM locally_execute_intermediate_results.table_2 GROUP BY key HAVING (max(value) OPERATOR(pg_catalog.>) (SELECT cte_2.max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_2)) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.key, bar.key FROM (SELECT table_1.key FROM locally_execute_intermediate_results.table_1 GROUP BY table_1.key HAVING (max(table_1.value) OPERATOR(pg_catalog.>) (SELECT cte_1.max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1))) foo, (SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) bar WHERE (foo.key OPERATOR(pg_catalog.<>) bar.key) -DEBUG: Subplan XXX_1 will be sent 
to localhost:xxxxx -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx DEBUG: Subplan XXX_2 will be written to local file -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx key | key --------------------------------------------------------------------- (0 rows) @@ -504,8 +504,8 @@ DEBUG: generating subplan XXX_3 for subquery SELECT key FROM (SELECT intermedia DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM locally_execute_intermediate_results.table_2 WHERE (key OPERATOR(pg_catalog.>) (SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer))) GROUP BY key HAVING (max(value) OPERATOR(pg_catalog.>) (SELECT cte_1.max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1)) DEBUG: Subplan XXX_1 will be written to local file DEBUG: Subplan XXX_2 will be written to local file -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx count --------------------------------------------------------------------- 1 @@ -528,8 +528,8 @@ DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM lo DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT max(key) AS max FROM locally_execute_intermediate_results.table_2 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM locally_execute_intermediate_results.table_2 WHERE (key OPERATOR(pg_catalog.>) (SELECT cte_2.max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max integer)) cte_2)) GROUP BY key HAVING (max(value) OPERATOR(pg_catalog.>) (SELECT cte_1.max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1)) DEBUG: Subplan XXX_1 will be written to local file -DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to :xxxxx +DEBUG: Subplan XXX_2 will be sent to :xxxxx count --------------------------------------------------------------------- (0 rows) @@ -662,7 +662,7 @@ DEBUG: generating subplan XXX_1 for CTE a: SELECT key, value FROM locally_execu DEBUG: push down of limit count: 1 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count, a.key FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN locally_execute_intermediate_results.table_2 USING (key)) GROUP BY a.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>) (SELECT a_1.value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a_1)) DEBUG: Subplan XXX_1 will be written to local file -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx count | key 
--------------------------------------------------------------------- (0 rows) @@ -680,7 +680,7 @@ DEBUG: generating subplan XXX_1 for CTE a: SELECT key, value FROM locally_execu DEBUG: generating subplan XXX_2 for subquery SELECT max(value) AS max FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count, a.key FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN locally_execute_intermediate_results.table_2 USING (key)) GROUP BY a.key HAVING (max(table_2.value) OPERATOR(pg_catalog.=) (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text))) DEBUG: Subplan XXX_1 will be written to local file -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx DEBUG: Subplan XXX_2 will be written to local file count | key --------------------------------------------------------------------- @@ -732,7 +732,7 @@ DEBUG: generating subplan XXX_3 for CTE cte_3: SELECT max(key) AS key FROM (SEL DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte_3.key, table_2.value FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_3 JOIN locally_execute_intermediate_results.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1) DEBUG: Subplan XXX_1 will be written to local file DEBUG: Subplan XXX_2 will be written to local file -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx key | value --------------------------------------------------------------------- (0 rows) @@ -749,7 +749,7 @@ DEBUG: generating subplan XXX_3 for CTE cte_3: SELECT max(key) AS key FROM (SEL DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte_3.key, table_2.value FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_3 JOIN locally_execute_intermediate_results.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 2) DEBUG: Subplan XXX_1 will be written to local file DEBUG: Subplan XXX_2 will be written to local file -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx key | value --------------------------------------------------------------------- (0 rows) @@ -766,8 +766,8 @@ DEBUG: generating subplan XXX_3 for CTE cte_3: SELECT max(key) AS key FROM (SEL DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte_3.key, ref_table.value FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_3 JOIN locally_execute_intermediate_results.ref_table USING (key)) DEBUG: Subplan XXX_1 will be written to local file DEBUG: Subplan XXX_2 will be written to local file -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx key | value --------------------------------------------------------------------- 4 | 4 @@ 
-787,8 +787,8 @@ HAVING max(value) > DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM locally_execute_intermediate_results.table_1 DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT max(value) AS max FROM locally_execute_intermediate_results.table_1 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM locally_execute_intermediate_results.table_2 WHERE (key OPERATOR(pg_catalog.=) 3) GROUP BY key HAVING (max(value) OPERATOR(pg_catalog.>) (SELECT cte_1.max FROM ((SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1 JOIN (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_2 USING (max)))) -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_2 will be sent to :xxxxx count --------------------------------------------------------------------- (0 rows) @@ -808,9 +808,9 @@ DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM lo DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT max(value) AS max FROM locally_execute_intermediate_results.table_1 DEBUG: generating subplan XXX_3 for CTE cte_3: SELECT key, value FROM locally_execute_intermediate_results.table_2 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (locally_execute_intermediate_results.table_2 JOIN (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_3 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3) GROUP BY table_2.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>) (SELECT cte_1.max FROM ((SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1 JOIN (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_2 USING (max)))) -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_2 will be sent to :xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx count --------------------------------------------------------------------- (0 rows) @@ -829,9 +829,9 @@ DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM lo DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT max(value) AS max FROM locally_execute_intermediate_results.table_1 DEBUG: generating subplan XXX_3 for CTE cte_3: SELECT key, value FROM locally_execute_intermediate_results.table_2 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((locally_execute_intermediate_results.table_2 JOIN (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_3 USING (key)) JOIN (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_2 ON ((table_2.key OPERATOR(pg_catalog.=) (cte_2.max)::integer))) JOIN (SELECT intermediate_result.max FROM 
read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1 USING (max)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3) GROUP BY table_2.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>) (SELECT cte_1_1.max FROM ((SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1_1 JOIN (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_2_1 USING (max)))) -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_2 will be sent to :xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx count --------------------------------------------------------------------- (0 rows) @@ -846,10 +846,10 @@ SELECT * FROM DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM locally_execute_intermediate_results.table_1 DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT max(value) AS max FROM locally_execute_intermediate_results.table_2 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.key, bar.key FROM (SELECT table_1.key FROM locally_execute_intermediate_results.table_1 GROUP BY table_1.key HAVING (max(table_1.value) OPERATOR(pg_catalog.>) (SELECT cte_1.max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1))) foo, (SELECT table_2.key FROM locally_execute_intermediate_results.table_2 GROUP BY table_2.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>) (SELECT cte_2.max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_2))) bar WHERE (foo.key OPERATOR(pg_catalog.=) bar.key) -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_2 will be sent to :xxxxx +DEBUG: Subplan XXX_2 will be sent to :xxxxx key | key --------------------------------------------------------------------- (0 rows) @@ -866,11 +866,11 @@ DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM lo DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT max(value) AS max FROM locally_execute_intermediate_results.table_2 DEBUG: generating subplan XXX_3 for subquery SELECT key FROM locally_execute_intermediate_results.table_2 GROUP BY key HAVING (max(value) OPERATOR(pg_catalog.>) (SELECT cte_2.max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_2)) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.key, bar.key FROM (SELECT table_1.key FROM locally_execute_intermediate_results.table_1 GROUP BY table_1.key HAVING (max(table_1.value) OPERATOR(pg_catalog.>) (SELECT cte_1.max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1))) foo, (SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) 
intermediate_result(key integer)) bar WHERE (foo.key OPERATOR(pg_catalog.<>) bar.key) -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx DEBUG: Subplan XXX_2 will be written to local file -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx key | key --------------------------------------------------------------------- (0 rows) @@ -932,9 +932,9 @@ DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT key, value FROM locally_e DEBUG: generating subplan XXX_3 for subquery SELECT key FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2 ORDER BY key LIMIT 1 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM locally_execute_intermediate_results.table_2 WHERE (key OPERATOR(pg_catalog.>) (SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer))) GROUP BY key HAVING (max(value) OPERATOR(pg_catalog.>) (SELECT cte_1.max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1)) DEBUG: Subplan XXX_1 will be written to local file -DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to :xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx count --------------------------------------------------------------------- 1 @@ -957,8 +957,8 @@ DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM lo DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT max(key) AS max FROM locally_execute_intermediate_results.table_2 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM locally_execute_intermediate_results.table_2 WHERE (key OPERATOR(pg_catalog.>) (SELECT cte_2.max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max integer)) cte_2)) GROUP BY key HAVING (max(value) OPERATOR(pg_catalog.>) (SELECT cte_1.max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1)) DEBUG: Subplan XXX_1 will be written to local file -DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_2 will be sent to :xxxxx +DEBUG: Subplan XXX_2 will be sent to :xxxxx count --------------------------------------------------------------------- (0 rows) @@ -980,9 +980,9 @@ DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM lo DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT max(key) AS max FROM locally_execute_intermediate_results.table_2 DEBUG: generating subplan XXX_3 for CTE cte_3: SELECT key, value FROM locally_execute_intermediate_results.table_2 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.value FROM 
read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_3 WHERE (key OPERATOR(pg_catalog.>) (SELECT cte_2.max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max integer)) cte_2)) GROUP BY key HAVING (max(value) OPERATOR(pg_catalog.>) (SELECT cte_1.max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1)) -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_2 will be sent to :xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx count --------------------------------------------------------------------- (0 rows) @@ -1024,9 +1024,9 @@ DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM lo DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT max(value) AS max FROM locally_execute_intermediate_results.table_1 DEBUG: generating subplan XXX_3 for CTE cte_3: SELECT key, value FROM locally_execute_intermediate_results.table_2 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_3 GROUP BY key HAVING (max(value) OPERATOR(pg_catalog.>) (SELECT cte_1.max FROM ((SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1 JOIN (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_2 USING (max)))) -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_2 will be sent to :xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx count --------------------------------------------------------------------- 1 @@ -1047,7 +1047,7 @@ HAVING max(value) > (SELECT max(max) FROM cte_1); DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM locally_execute_intermediate_results.table_1 DEBUG: generating subplan XXX_2 for subquery SELECT max(max) AS max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM locally_execute_intermediate_results.table_2 GROUP BY key HAVING (max(value) OPERATOR(pg_catalog.>) (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text))) -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx DEBUG: Subplan XXX_2 will be written to local file count --------------------------------------------------------------------- @@ -1069,9 +1069,9 @@ DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM lo DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT key, value FROM locally_execute_intermediate_results.table_1 DEBUG: generating subplan XXX_3 for subquery SELECT max(max) AS max 
FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2 GROUP BY key HAVING (max(value) OPERATOR(pg_catalog.<) (SELECT intermediate_result.max FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(max text))) -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_2 will be sent to :xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx count --------------------------------------------------------------------- 1 @@ -1091,7 +1091,7 @@ DEBUG: generating subplan XXX_1 for CTE a: SELECT key, value FROM locally_execu DEBUG: push down of limit count: 1 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count, a.key FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN locally_execute_intermediate_results.table_2 USING (key)) GROUP BY a.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>) (SELECT a_1.value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a_1)) DEBUG: Subplan XXX_1 will be written to local file -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx count | key --------------------------------------------------------------------- (0 rows) @@ -1108,8 +1108,8 @@ HAVING (max(table_2.value) = (SELECT max(value) FROM a)); DEBUG: generating subplan XXX_1 for CTE a: SELECT key, value FROM locally_execute_intermediate_results.table_1 DEBUG: generating subplan XXX_2 for subquery SELECT max(value) AS max FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count, a.key FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN locally_execute_intermediate_results.table_2 USING (key)) GROUP BY a.key HAVING (max(table_2.value) OPERATOR(pg_catalog.=) (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text))) -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx DEBUG: Subplan XXX_2 will be written to local file count | key --------------------------------------------------------------------- @@ -1140,9 +1140,9 @@ DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT key, value FROM locally_e DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM 
read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_1 DEBUG: generating subplan XXX_3 for CTE cte_3: SELECT max(key) AS max FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(max integer)) cte_3 -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_2 will be sent to :xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx max --------------------------------------------------------------------- 4 @@ -1159,9 +1159,9 @@ DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT key, value FROM locally_e DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_1 DEBUG: generating subplan XXX_3 for CTE cte_3: SELECT max(key) AS key FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte_3.key, table_2.value FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_3 JOIN locally_execute_intermediate_results.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1) -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_2 will be sent to :xxxxx +DEBUG: Subplan XXX_3 will be sent to :xxxxx key | value --------------------------------------------------------------------- (0 rows) @@ -1176,9 +1176,9 @@ DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT key, value FROM locally_e DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_1 DEBUG: generating subplan XXX_3 for CTE cte_3: SELECT max(key) AS key FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2 DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte_3.key, table_2.value FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_3 JOIN locally_execute_intermediate_results.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 2) -DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx -DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx +DEBUG: Subplan XXX_1 will be sent to :xxxxx +DEBUG: Subplan XXX_2 will be sent to :xxxxx 
+DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
 key | value
---------------------------------------------------------------------
(0 rows)
@@ -1193,10 +1193,10 @@ DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT key, value FROM locally_e
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_1
DEBUG: generating subplan XXX_3 for CTE cte_3: SELECT max(key) AS key FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte_3.key, ref_table.value FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_3 JOIN locally_execute_intermediate_results.ref_table USING (key))
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
 key | value
---------------------------------------------------------------------
 4 | 4
@@ -1216,8 +1216,8 @@ HAVING max(value) >
DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM locally_execute_intermediate_results.table_1
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT max(value) AS max FROM locally_execute_intermediate_results.table_1
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM locally_execute_intermediate_results.table_2 WHERE (key OPERATOR(pg_catalog.=) 3) GROUP BY key HAVING (max(value) OPERATOR(pg_catalog.>) (SELECT cte_1.max FROM ((SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1 JOIN (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_2 USING (max))))
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
 count
---------------------------------------------------------------------
(0 rows)
@@ -1237,9 +1237,9 @@ DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM lo
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT max(value) AS max FROM locally_execute_intermediate_results.table_1
DEBUG: generating subplan XXX_3 for CTE cte_3: SELECT key, value FROM locally_execute_intermediate_results.table_2
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (locally_execute_intermediate_results.table_2 JOIN (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_3 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3) GROUP BY table_2.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>) (SELECT cte_1.max FROM ((SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1 JOIN (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_2 USING (max))))
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
 count
---------------------------------------------------------------------
(0 rows)
@@ -1258,9 +1258,9 @@ DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM lo
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT max(value) AS max FROM locally_execute_intermediate_results.table_1
DEBUG: generating subplan XXX_3 for CTE cte_3: SELECT key, value FROM locally_execute_intermediate_results.table_2
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (locally_execute_intermediate_results.table_2 JOIN (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_3 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3) GROUP BY table_2.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>) (SELECT cte_1.max FROM ((SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1 JOIN (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_2 USING (max))))
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
 count
---------------------------------------------------------------------
(0 rows)
@@ -1280,9 +1280,9 @@ DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM lo
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT max(value) AS max FROM locally_execute_intermediate_results.table_1
DEBUG: generating subplan XXX_3 for CTE cte_3: SELECT key, value FROM locally_execute_intermediate_results.table_2
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((locally_execute_intermediate_results.table_2 JOIN (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_3 USING (key)) JOIN (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_2 ON ((table_2.key OPERATOR(pg_catalog.=) (cte_2.max)::integer))) JOIN (SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1 USING (max)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3) GROUP BY table_2.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>) (SELECT cte_1_1.max FROM ((SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1_1 JOIN (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_2_1 USING (max))))
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
 count
---------------------------------------------------------------------
(0 rows)
@@ -1307,11 +1307,11 @@ DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT max(value) AS max FROM lo
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT max(value) AS max FROM locally_execute_intermediate_results.table_2
DEBUG: generating subplan XXX_3 for subquery SELECT key FROM locally_execute_intermediate_results.table_2 GROUP BY key HAVING (max(value) OPERATOR(pg_catalog.>) (SELECT cte_2.max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_2))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.key, bar.key FROM (SELECT table_1.key FROM locally_execute_intermediate_results.table_1 GROUP BY table_1.key HAVING (max(table_1.value) OPERATOR(pg_catalog.>) (SELECT cte_1.max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max text)) cte_1))) foo, (SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) bar WHERE (foo.key OPERATOR(pg_catalog.<>) bar.key)
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
 DEBUG: Subplan XXX_2 will be written to local file
-DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
 key | key
---------------------------------------------------------------------
(0 rows)
@@ -1330,8 +1330,8 @@ DEBUG: generating subplan XXX_4 for subquery SELECT key FROM locally_execute_in
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.key, bar.key FROM (SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) foo, (SELECT intermediate_result.key FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) bar WHERE (foo.key OPERATOR(pg_catalog.<>) bar.key)
 DEBUG: Subplan XXX_1 will be written to local file
 DEBUG: Subplan XXX_2 will be written to local file
-DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
-DEBUG: Subplan XXX_4 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_4 will be sent to localhost:xxxxx
 key | key
---------------------------------------------------------------------
(0 rows)
diff --git a/src/test/regress/expected/materialized_view.out b/src/test/regress/expected/materialized_view.out
index b8cb80529..8ce5f756e 100644
--- a/src/test/regress/expected/materialized_view.out
+++ b/src/test/regress/expected/materialized_view.out
@@ -219,7 +219,7 @@ SELECT create_distributed_table('small','tenant_id');
\copy small FROM STDIN DELIMITER ','
CREATE MATERIALIZED VIEW small_view AS SELECT * from small where id < 100;
\copy large FROM STDIN DELIMITER ','
--- running any kind of modify statements "on" materialized views is not supported by postgres
+-- running any kind of modify statements "on" materialized views is not supported by postgres
 UPDATE small_view SET id = 1;
ERROR: cannot change materialized view "small_view"
-- for now, using materialized views in modify statements' FROM / WHERE clauses is not supported
diff --git a/src/test/regress/expected/multi_703_upgrade.out b/src/test/regress/expected/multi_703_upgrade.out
index 61c8b568f..0df35d28c 100644
--- a/src/test/regress/expected/multi_703_upgrade.out
+++ b/src/test/regress/expected/multi_703_upgrade.out
@@ -5,14 +5,14 @@ SET citus.enable_version_checks TO 'false';
CREATE EXTENSION citus VERSION '7.0-2';
INSERT INTO pg_dist_shard_placement (placementid, shardid, shardstate, shardlength, nodename, nodeport) VALUES
- (1, 1, 1, 0, 'localhost', :worker_1_port);
+ (1, 1, 1, 0, 'localhost', :worker_1_port);
 -- if there are no worker nodes which match the shards this should fail
ALTER EXTENSION citus UPDATE TO '7.0-3';
-ERROR: There is no node at "localhost:xxxxx"
+ERROR: There is no node at "localhost:xxxxx"
 CONTEXT: PL/pgSQL function citus.find_groupid_for_node(text,integer) line 6 at RAISE
-- if you add a matching worker the upgrade should succeed
INSERT INTO pg_dist_node (nodename, nodeport, groupid)
- VALUES ('localhost', :worker_1_port, 1);
+ VALUES ('localhost', :worker_1_port, 1);
 ALTER EXTENSION citus UPDATE TO '7.0-3';
SELECT * FROM pg_dist_placement;
 placementid | shardid | shardstate | shardlength | groupid
diff --git a/src/test/regress/expected/multi_alter_table_add_constraints.out b/src/test/regress/expected/multi_alter_table_add_constraints.out
index da3dcccf2..e3ac66039 100644
--- a/src/test/regress/expected/multi_alter_table_add_constraints.out
+++ b/src/test/regress/expected/multi_alter_table_add_constraints.out
@@ -31,14 +31,14 @@ INSERT INTO products VALUES(1, 'product_1', 1);
INSERT INTO products VALUES(1, 'product_1', 1);
ERROR: duplicate key value violates unique constraint "p_key_1450001"
DETAIL: Key (product_no)=(1) already exists.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
 ALTER TABLE products DROP CONSTRAINT p_key;
INSERT INTO products VALUES(1, 'product_1', 1);
-- Can not create constraint since it conflicts with the existing data
ALTER TABLE products ADD CONSTRAINT p_key PRIMARY KEY(product_no);
ERROR: could not create unique index "p_key_1450001"
DETAIL: Key (product_no)=(1) is duplicated.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
 DROP TABLE products;
-- Check "PRIMARY KEY CONSTRAINT" with reference table
CREATE TABLE products_ref (
@@ -62,7 +62,7 @@ INSERT INTO products_ref VALUES(1, 'product_1', 1);
INSERT INTO products_ref VALUES(1, 'product_1', 1);
ERROR: duplicate key value violates unique constraint "p_key_1450032"
DETAIL: Key (product_no)=(1) already exists.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
 DROP TABLE products_ref;
-- Check "PRIMARY KEY CONSTRAINT" on append table
CREATE TABLE products_append (
@@ -114,7 +114,7 @@ INSERT INTO unique_test_table VALUES(1, 'Ahmet');
INSERT INTO unique_test_table VALUES(1, 'Mehmet');
ERROR: duplicate key value violates unique constraint "unn_id_1450035"
DETAIL: Key (id)=(X) already exists.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
 ALTER TABLE unique_test_table DROP CONSTRAINT unn_id;
-- Insert row which will conflict with the next unique constraint command
INSERT INTO unique_test_table VALUES(1, 'Mehmet');
@@ -122,7 +122,7 @@ INSERT INTO unique_test_table VALUES(1, 'Mehmet');
ALTER TABLE unique_test_table ADD CONSTRAINT unn_id UNIQUE(id);
ERROR: could not create unique index "unn_id_1450035"
DETAIL: Key (id)=(X) is duplicated.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
 -- Can create unique constraint over multiple columns which must include
-- distribution column
ALTER TABLE unique_test_table ADD CONSTRAINT unn_id_name UNIQUE(id, name);
@@ -130,7 +130,7 @@ ALTER TABLE unique_test_table ADD CONSTRAINT unn_id_name UNIQUE(id, name);
INSERT INTO unique_test_table VALUES(1, 'Mehmet');
ERROR: duplicate key value violates unique constraint "unn_id_name_1450035"
DETAIL: Key (id, name)=(1, Mehmet) already exists.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
 DROP TABLE unique_test_table;
-- Check "UNIQUE CONSTRAINT" with reference table
CREATE TABLE unique_test_table_ref(id int, name varchar(20));
@@ -148,7 +148,7 @@ INSERT INTO unique_test_table_ref VALUES(1, 'Ahmet');
INSERT INTO unique_test_table_ref VALUES(1, 'Mehmet');
ERROR: duplicate key value violates unique constraint "unn_id_1450066"
DETAIL: Key (id)=(X) already exists.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
 -- We can add unique constraint with multiple columns
ALTER TABLE unique_test_table_ref DROP CONSTRAINT unn_id;
ALTER TABLE unique_test_table_ref ADD CONSTRAINT unn_id_name UNIQUE(id,name);
@@ -202,12 +202,12 @@ ALTER TABLE products ADD CONSTRAINT p_multi_check CHECK(price > discounted_price
INSERT INTO products VALUES(1, 'product_1', -1, -2);
ERROR: new row for relation "products_1450069" violates check constraint "p_check_1450069"
DETAIL: Failing row contains (1, product_1, -1, -2).
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
 INSERT INTO products VALUES(1, 'product_1', 5, 3);
INSERT INTO products VALUES(1, 'product_1', 2, 3);
ERROR: new row for relation "products_1450069" violates check constraint "p_multi_check_1450069"
DETAIL: Failing row contains (1, product_1, 2, 3).
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
 DROP TABLE products;
-- Check "CHECK CONSTRAINT" with reference table
CREATE TABLE products_ref (
@@ -230,12 +230,12 @@ ALTER TABLE products_ref ADD CONSTRAINT p_multi_check CHECK(price > discounted_p
INSERT INTO products_ref VALUES(1, 'product_1', -1, -2);
ERROR: new row for relation "products_ref_1450100" violates check constraint "p_check_1450100"
DETAIL: Failing row contains (1, product_1, -1, -2).
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
 INSERT INTO products_ref VALUES(1, 'product_1', 5, 3);
INSERT INTO products_ref VALUES(1, 'product_1', 2, 3);
ERROR: new row for relation "products_ref_1450100" violates check constraint "p_multi_check_1450100"
DETAIL: Failing row contains (1, product_1, 2, 3).
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
 DROP TABLE products_ref;
-- Check "CHECK CONSTRAINT" with append table
CREATE TABLE products_append (
@@ -285,7 +285,7 @@ INSERT INTO products VALUES(2,'product_2', 5);
INSERT INTO products VALUES(2,'product_2', 5);
ERROR: conflicting key value violates exclusion constraint "exc_pno_name_1450126"
DETAIL: Key (product_no, name)=(2, product_2) conflicts with existing key (product_no, name)=(2, product_2).
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
 DROP TABLE products;
-- Check "EXCLUSION CONSTRAINT" with reference table
CREATE TABLE products_ref (
@@ -309,7 +309,7 @@ INSERT INTO products_ref VALUES(1,'product_2', 10);
INSERT INTO products_ref VALUES(2,'product_2', 5);
ERROR: conflicting key value violates exclusion constraint "exc_name_1450134"
DETAIL: Key (name)=(product_2) conflicts with existing key (name)=(product_2).
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
 DROP TABLE products_ref;
-- Check "EXCLUSION CONSTRAINT" with append table
CREATE TABLE products_append (
@@ -358,7 +358,7 @@ ALTER TABLE products ALTER COLUMN name SET NOT NULL;
INSERT INTO products VALUES(1,NULL,5);
ERROR: null value in column "name" violates not-null constraint
DETAIL: Failing row contains (1, null, 5).
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
 INSERT INTO products VALUES(NULL,'product_1', 5);
ERROR: cannot perform an INSERT with NULL in the partition column
DROP TABLE products;
@@ -379,7 +379,7 @@ ALTER TABLE products_ref ALTER COLUMN name SET NOT NULL;
INSERT INTO products_ref VALUES(1,NULL,5);
ERROR: null value in column "name" violates not-null constraint
DETAIL: Failing row contains (1, null, 5).
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
 INSERT INTO products_ref VALUES(NULL,'product_1', 5);
DROP TABLE products_ref;
-- Check "NOT NULL" with append table
@@ -518,8 +518,8 @@ ORDER BY 1,2,3,4;
 nodename | nodeport | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | alter_pk_idx_1450234
- localhost | 57638 | t | alter_pk_idx_1450234
+ localhost | xxxxx | t | alter_pk_idx_1450234
+ localhost | xxxxx | t | alter_pk_idx_1450234
(2 rows)
CREATE SCHEMA sc2;
@@ -549,8 +549,8 @@ ORDER BY 1,2,3,4;
 nodename | nodeport | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | alter_pk_idx_1450236
- localhost | 57638 | t | alter_pk_idx_1450236
+ localhost | xxxxx | t | alter_pk_idx_1450236
+ localhost | xxxxx | t | alter_pk_idx_1450236
(2 rows)
-- We are running almost the same test with a slight change on the constraint name because if the constraint has a different name than the index, Postgres renames the index.
@@ -584,8 +584,8 @@ ORDER BY 1,2,3,4;
 nodename | nodeport | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | a_constraint_1450238
- localhost | 57638 | t | a_constraint_1450238
+ localhost | xxxxx | t | a_constraint_1450238
+ localhost | xxxxx | t | a_constraint_1450238
(2 rows)
ALTER TABLE alter_add_prim_key DROP CONSTRAINT a_constraint;
@@ -605,8 +605,8 @@ ORDER BY 1,2,3,4;
 nodename | nodeport | success | result
---------------------------------------------------------------------
- localhost | 57637 | t |
- localhost | 57638 | t |
+ localhost | xxxxx | t |
+ localhost | xxxxx | t |
(2 rows)
SET search_path TO 'public';
diff --git a/src/test/regress/expected/multi_cache_invalidation.out b/src/test/regress/expected/multi_cache_invalidation.out
index 285275ae2..9c85edfde 100644
--- a/src/test/regress/expected/multi_cache_invalidation.out
+++ b/src/test/regress/expected/multi_cache_invalidation.out
@@ -22,14 +22,14 @@ INSERT INTO mci_1.test VALUES (1,2), (3,4);
SELECT run_command_on_placements('mci_1.test', 'ALTER TABLE %s SET SCHEMA mci_2');
 run_command_on_placements
---------------------------------------------------------------------
- (localhost,57637,1601000,t,"ALTER TABLE")
- (localhost,57638,1601000,t,"ALTER TABLE")
- (localhost,57637,1601001,t,"ALTER TABLE")
- (localhost,57638,1601001,t,"ALTER TABLE")
- (localhost,57637,1601002,t,"ALTER TABLE")
- (localhost,57638,1601002,t,"ALTER TABLE")
- (localhost,57637,1601003,t,"ALTER TABLE")
- (localhost,57638,1601003,t,"ALTER TABLE")
+ (localhost,xxxxx,1601000,t,"ALTER TABLE")
+ (localhost,xxxxx,1601000,t,"ALTER TABLE")
+ (localhost,xxxxx,1601001,t,"ALTER TABLE")
+ (localhost,xxxxx,1601001,t,"ALTER TABLE")
+ (localhost,xxxxx,1601002,t,"ALTER TABLE")
+ (localhost,xxxxx,1601002,t,"ALTER TABLE")
+ (localhost,xxxxx,1601003,t,"ALTER TABLE")
+ (localhost,xxxxx,1601003,t,"ALTER TABLE")
(8 rows)
UPDATE pg_dist_shard
diff --git a/src/test/regress/expected/multi_citus_tools.out b/src/test/regress/expected/multi_citus_tools.out
index a1c073530..fceb3bf74 100644
--- a/src/test/regress/expected/multi_citus_tools.out
+++ b/src/test/regress/expected/multi_citus_tools.out
@@ -11,20 +11,20 @@ SET client_min_messages to ERROR;
-- for this test only to have consistent behavior
-- b/w PG 9.6+ and PG 9.5.
\set SHOW_CONTEXT never
-SELECT * FROM master_run_on_worker(ARRAY['localhost']::text[], ARRAY['666']::int[],
+SELECT * FROM master_run_on_worker(ARRAY['localhost']::text[], ARRAY['666']::int[],
 ARRAY['select count(*) from pg_dist_shard']::text[],
 false);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 666 | f | failed to connect to localhost:xxxxx
+ localhost | 666 | f | failed to connect to localhost:xxxxx
(1 row)
-SELECT * FROM master_run_on_worker(ARRAY['localhost']::text[], ARRAY['666']::int[],
+SELECT * FROM master_run_on_worker(ARRAY['localhost']::text[], ARRAY['666']::int[],
 ARRAY['select count(*) from pg_dist_shard']::text[],
 true);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 666 | f | failed to connect to localhost:xxxxx
+ localhost | 666 | f | failed to connect to localhost:xxxxx
(1 row)
RESET client_min_messages;
@@ -39,7 +39,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 false);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | 0
+ localhost | xxxxx | t | 0
(1 row)
-- connect to the first worker and ask for shards, should fail with
@@ -49,7 +49,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 false);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | f | expected a single column in query target
+ localhost | xxxxx | f | expected a single column in query target
(1 row)
-- query result may only contain a single row
@@ -58,7 +58,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 false);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | f | expected a single row in query result
+ localhost | xxxxx | f | expected a single row in query result
(1 row)
-- send multiple queries
@@ -69,8 +69,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[],
 false);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | 1
- localhost | 57637 | t | 2
+ localhost | xxxxx | t | 1
+ localhost | xxxxx | t | 2
(2 rows)
-- send multiple queries, one fails
@@ -81,8 +81,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[],
 false);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | 1
- localhost | 57637 | f | expected a single row in query result
+ localhost | xxxxx | t | 1
+ localhost | xxxxx | f | expected a single row in query result
(2 rows)
-- send multiple queries, both fail
@@ -93,8 +93,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[],
 false);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | f | expected a single row in query result
- localhost | 57637 | f | expected a single row in query result
+ localhost | xxxxx | f | expected a single row in query result
+ localhost | xxxxx | f | expected a single row in query result
(2 rows)
-- can create tables at worker
@@ -105,8 +105,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[],
 false);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | CREATE TABLE
- localhost | 57637 | t | CREATE TABLE
+ localhost | xxxxx | t | CREATE TABLE
+ localhost | xxxxx | t | CREATE TABLE
(2 rows)
-- can insert into table
@@ -115,7 +115,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 false);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | INSERT 0 20
+ localhost | xxxxx | t | INSERT 0 20
(1 row)
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
@@ -123,7 +123,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 false);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | 20
+ localhost | xxxxx | t | 20
(1 row)
-- insert into second table twice
@@ -132,7 +132,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 false);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | INSERT 0 20
+ localhost | xxxxx | t | INSERT 0 20
(1 row)
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
@@ -140,7 +140,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 false);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | INSERT 0 20
+ localhost | xxxxx | t | INSERT 0 20
(1 row)
-- check inserted values at second table
@@ -149,7 +149,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 false);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | 40
+ localhost | xxxxx | t | 40
(1 row)
-- store worker node name and port again
@@ -164,7 +164,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 false);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | CREATE INDEX
+ localhost | xxxxx | t | CREATE INDEX
(1 row)
-- drop created tables
@@ -173,7 +173,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 false);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | DROP TABLE
+ localhost | xxxxx | t | DROP TABLE
(1 row)
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
@@ -181,7 +181,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 false);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | DROP TABLE
+ localhost | xxxxx | t | DROP TABLE
(1 row)
-- verify table is dropped
@@ -190,7 +190,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 false);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | f | ERROR: relation "second_table" does not exist
+ localhost | xxxxx | f | ERROR: relation "second_table" does not exist
(1 row)
--
@@ -202,7 +202,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 true);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | 0
+ localhost | xxxxx | t | 0
(1 row)
-- connect to the first worker and ask for shards, should fail with
@@ -212,7 +212,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 true);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | f | expected a single column in query target
+ localhost | xxxxx | f | expected a single column in query target
(1 row)
-- query result may only contain a single row
@@ -221,7 +221,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 true);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | f | expected a single row in query result
+ localhost | xxxxx | f | expected a single row in query result
(1 row)
-- send multiple queries
@@ -232,8 +232,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[],
 true);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | 1
- localhost | 57637 | t | 2
+ localhost | xxxxx | t | 1
+ localhost | xxxxx | t | 2
(2 rows)
-- send multiple queries, one fails
@@ -244,8 +244,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[],
 true);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | 1
- localhost | 57637 | f | expected a single row in query result
+ localhost | xxxxx | t | 1
+ localhost | xxxxx | f | expected a single row in query result
(2 rows)
-- send multiple queries, both fail
@@ -256,8 +256,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[],
 true);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | f | expected a single row in query result
- localhost | 57637 | f | expected a single row in query result
+ localhost | xxxxx | f | expected a single row in query result
+ localhost | xxxxx | f | expected a single row in query result
(2 rows)
-- can create tables at worker
@@ -268,8 +268,8 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[],
 true);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | CREATE TABLE
- localhost | 57637 | t | CREATE TABLE
+ localhost | xxxxx | t | CREATE TABLE
+ localhost | xxxxx | t | CREATE TABLE
(2 rows)
-- store worker node name and port again
@@ -284,7 +284,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 true);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | INSERT 0 20
+ localhost | xxxxx | t | INSERT 0 20
(1 row)
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
@@ -292,7 +292,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 true);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | 20
+ localhost | xxxxx | t | 20
(1 row)
-- insert into second table twice
@@ -301,7 +301,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 true);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | INSERT 0 20
+ localhost | xxxxx | t | INSERT 0 20
(1 row)
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
@@ -309,7 +309,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 true);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | INSERT 0 20
+ localhost | xxxxx | t | INSERT 0 20
(1 row)
-- check inserted values at second table
@@ -318,7 +318,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 true);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | 40
+ localhost | xxxxx | t | 40
(1 row)
-- create index on tables
@@ -327,7 +327,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 true);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | CREATE INDEX
+ localhost | xxxxx | t | CREATE INDEX
(1 row)
-- drop created tables
@@ -336,7 +336,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 true);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | DROP TABLE
+ localhost | xxxxx | t | DROP TABLE
(1 row)
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
@@ -344,7 +344,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 true);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | DROP TABLE
+ localhost | xxxxx | t | DROP TABLE
(1 row)
-- verify table is dropped
@@ -353,22 +353,22 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]:
 true);
 node_name | node_port | success | result
---------------------------------------------------------------------
- localhost | 57637 | f | ERROR: relation "second_table" does not exist
+ localhost | xxxxx | f | ERROR: relation "second_table" does not exist
(1 row)
-- run_command_on_XXX tests
SELECT * FROM run_command_on_workers('select 1') ORDER BY 2 ASC;
 nodename | nodeport | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | 1
- localhost | 57638 | t | 1
+ localhost | xxxxx | t | 1
+ localhost | xxxxx | t | 1
(2 rows)
SELECT * FROM run_command_on_workers('select count(*) from pg_dist_partition') ORDER BY 2 ASC;
 nodename | nodeport | success | result
---------------------------------------------------------------------
- localhost | 57637 | t | 0
- localhost | 57638 | t | 0
+ localhost | xxxxx | t | 0
+ localhost | xxxxx | t | 0
(2 rows)
-- make sure run_on_all_placements respects shardstate
@@ -383,16 +383,16 @@ SELECT create_distributed_table('check_placements', 'key', 'hash');
SELECT * FROM run_command_on_placements('check_placements', 'select 1');
 nodename | nodeport | shardid | success | result
---------------------------------------------------------------------
- localhost | 57637 | 1240000 | t | 1
- localhost | 57638 | 1240000 | t | 1
- localhost | 57637 | 1240001 | t | 1
- localhost | 57638 | 1240001 | t | 1
- localhost | 57637 | 1240002 | t | 1
- localhost | 57638 | 1240002 | t | 1
- localhost | 57637 | 1240003 | t | 1
- localhost | 57638 | 1240003 | t | 1
- localhost | 57637 | 1240004 | t | 1
- localhost | 57638 | 1240004 | t | 1
+ localhost | xxxxx | 1240000 | t | 1
+ localhost | xxxxx | 1240000 | t | 1
+ localhost | xxxxx | 1240001 | t | 1
+ localhost | xxxxx | 1240001 | t | 1
+ localhost | xxxxx | 1240002 | t | 1
+ localhost | xxxxx | 1240002 | t | 1
+ localhost | xxxxx | 1240003 | t | 1
+ localhost | xxxxx | 1240003 | t | 1
+ localhost | xxxxx | 1240004 | t | 1
+ localhost | xxxxx | 1240004 | t | 1
(10 rows)
UPDATE pg_dist_shard_placement SET shardstate = 3
@@ -400,13 +400,13 @@ UPDATE pg_dist_shard_placement SET shardstate = 3
SELECT * FROM run_command_on_placements('check_placements', 'select 1');
 nodename | nodeport | shardid | success | result
---------------------------------------------------------------------
- localhost | 57638 | 1240000 | t | 1
- localhost | 57637 | 1240001 | t | 1
- localhost | 57638 | 1240001 | t | 1
- localhost | 57638 | 1240002 | t | 1
- localhost | 57637 | 1240003 | t | 1
- localhost | 57638 | 1240003 | t | 1
- localhost | 57638 | 1240004 | t | 1
+ localhost | xxxxx | 1240000 | t | 1
+ localhost | xxxxx | 1240001 | t | 1
+ localhost | xxxxx | 1240001 | t | 1
+ localhost | xxxxx | 1240002 | t | 1
+ localhost | xxxxx | 1240003 | t | 1
+ localhost | xxxxx | 1240003 | t | 1
+ localhost | xxxxx | 1240004 | t | 1
(7 rows)
DROP TABLE check_placements CASCADE;
@@ -458,16 +458,16 @@ SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_tab
 'select 1');
 nodename | nodeport | shardid1 | shardid2 | success | result
---------------------------------------------------------------------
- localhost | 57637 | 1240005 | 1240019 | t | 1
- localhost | 57638 | 1240005 | 1240019 | t | 1
- localhost | 57637 | 1240006 | 1240020 | t | 1
- localhost | 57638 | 1240006 | 1240020 | t | 1
- localhost | 57637 | 1240007 | 1240021 | t | 1
- localhost | 57638 | 1240007 | 1240021 | t | 1
- localhost | 57637 | 1240008 | 1240022 | t | 1
- localhost | 57638 | 1240008 | 1240022 | t | 1
- localhost | 57637 | 1240009 | 1240023 | t | 1
- localhost | 57638 | 1240009 | 1240023 | t | 1
+ localhost | xxxxx | 1240005 | 1240019 | t | 1
+ localhost | xxxxx | 1240005 | 1240019 | t | 1
+ localhost | xxxxx | 1240006 | 1240020 | t | 1
+ localhost | xxxxx | 1240006 | 1240020 | t | 1
+ localhost | xxxxx | 1240007 | 1240021 | t | 1
+ localhost | xxxxx | 1240007 | 1240021 | t | 1
+ localhost | xxxxx | 1240008 | 1240022 | t | 1
+ localhost | xxxxx | 1240008 | 1240022 | t | 1
+ localhost | xxxxx | 1240009 | 1240023 | t | 1
+ localhost | xxxxx | 1240009 | 1240023 | t | 1
(10 rows)
DROP TABLE check_colocated CASCADE;
diff --git a/src/test/regress/expected/multi_cluster_management.out b/src/test/regress/expected/multi_cluster_management.out
index 67cb4b8ba..8546e850e 100644
--- a/src/test/regress/expected/multi_cluster_management.out
+++ b/src/test/regress/expected/multi_cluster_management.out
@@ -8,13 +8,13 @@ SELECT create_reference_table('test_reference_table');
ERROR: cannot create reference table "test_reference_table"
DETAIL: There are no active worker nodes.
-- add the nodes to the cluster
-SELECT 1 FROM master_add_node('localhost', :worker_1_port);
+SELECT 1 FROM master_add_node('localhost', :worker_1_port);
 ?column?
---------------------------------------------------------------------
 1
(1 row)
-SELECT 1 FROM master_add_node('localhost', :worker_2_port);
+SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 ?column?
---------------------------------------------------------------------
 1
@@ -24,12 +24,12 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------------------------------------------
- (localhost,57638)
- (localhost,57637)
+ (localhost,xxxxx)
+ (localhost,xxxxx)
(2 rows)
-- try to add a node that is already in the cluster
-SELECT * FROM master_add_node('localhost', :worker_1_port);
+SELECT * FROM master_add_node('localhost', :worker_1_port);
 master_add_node
---------------------------------------------------------------------
 1
@@ -39,12 +39,12 @@ SELECT * FROM master_add_node('localhost', :worker_1_port);
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------------------------------------------
- (localhost,57638)
- (localhost,57637)
+ (localhost,xxxxx)
+ (localhost,xxxxx)
(2 rows)
-- try to remove a node (with no placements)
-SELECT master_remove_node('localhost', :worker_2_port);
+SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------------------------------------------------------

@@ -54,17 +54,17 @@ SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------------------------------------------
- (localhost,57637)
+ (localhost,xxxxx)
(1 row)
-- try to disable a node with no placements see that node is removed
-SELECT 1 FROM master_add_node('localhost', :worker_2_port);
+SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 ?column?
---------------------------------------------------------------------
 1
(1 row)
-SELECT master_disable_node('localhost', :worker_2_port);
+SELECT master_disable_node('localhost', :worker_2_port);
 master_disable_node
---------------------------------------------------------------------

@@ -73,13 +73,13 @@ SELECT master_disable_node('localhost', :worker_2_port);
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------------------------------------------
- (localhost,57637)
+ (localhost,xxxxx)
(1 row)
-- add some shard placements to the cluster
SET citus.shard_count TO 16;
SET citus.shard_replication_factor TO 1;
-SELECT * FROM master_activate_node('localhost', :worker_2_port);
+SELECT * FROM master_activate_node('localhost', :worker_2_port);
 master_activate_node
---------------------------------------------------------------------
 3
@@ -96,32 +96,32 @@ SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');
SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port;
 shardid | shardstate | nodename | nodeport
---------------------------------------------------------------------
- 1220001 | 1 | localhost | 57638
- 1220003 | 1 | localhost | 57638
- 1220005 | 1 | localhost | 57638
- 1220007 | 1 | localhost | 57638
- 1220009 | 1 | localhost | 57638
- 1220011 | 1 | localhost | 57638
- 1220013 | 1 | localhost | 57638
- 1220015 | 1 | localhost | 57638
+ 1220001 | 1 | localhost | xxxxx
+ 1220003 | 1 | localhost | xxxxx
+ 1220005 | 1 | localhost | xxxxx
+ 1220007 | 1 | localhost | xxxxx
+ 1220009 | 1 | localhost | xxxxx
+ 1220011 | 1 | localhost | xxxxx
+ 1220013 | 1 | localhost | xxxxx
+ 1220015 | 1 | localhost | xxxxx
(8 rows)
-- try to remove a node with active placements and see that node removal is failed
-SELECT master_remove_node('localhost', :worker_2_port);
+SELECT master_remove_node('localhost', :worker_2_port);
 ERROR: you cannot remove the primary node of a node group which has shard placements
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------------------------------------------
- (localhost,57638)
- (localhost,57637)
+ (localhost,xxxxx)
+ (localhost,xxxxx)
(2 rows)
-- insert a row so that master_disable_node() exercises closing connections
INSERT INTO test_reference_table VALUES (1, '1');
-- try to disable a node with active placements see that node is removed
-- observe that a notification is displayed
-SELECT master_disable_node('localhost', :worker_2_port);
-NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 57638) to activate this node back.
+SELECT master_disable_node('localhost', :worker_2_port);
+NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', xxxxx) to activate this node back.
 master_disable_node
---------------------------------------------------------------------

@@ -130,12 +130,12 @@ NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------------------------------------------
- (localhost,57637)
+ (localhost,xxxxx)
(1 row)
-- try to disable a node which does not exist and see that an error is thrown
-SELECT master_disable_node('localhost.noexist', 2345);
-ERROR: node at "localhost.noexist:2345" does not exist
+SELECT master_disable_node('localhost.noexist', 2345);
+ERROR: node at "localhost.noexist:2345" does not exist
 CREATE USER non_super_user;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
@@ -153,60 +153,60 @@ GRANT EXECUTE ON FUNCTION master_update_node(int,text,int,bool,int) TO node_meta
DELETE FROM citus.pg_dist_object WHERE objid = 'public'::regnamespace::oid;
-- try to manipulate node metadata via non-super user
SET ROLE non_super_user;
-SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port + 1);
+SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port + 1);
 ERROR: permission denied for function master_add_inactive_node
-SELECT 1 FROM master_activate_node('localhost', :worker_2_port + 1);
+SELECT 1 FROM master_activate_node('localhost', :worker_2_port + 1);
 ERROR: permission denied for function master_activate_node
-SELECT 1 FROM master_disable_node('localhost', :worker_2_port + 1);
+SELECT 1 FROM master_disable_node('localhost', :worker_2_port + 1);
 ERROR: permission denied for function master_disable_node
-SELECT 1 FROM master_remove_node('localhost', :worker_2_port + 1);
+SELECT 1 FROM master_remove_node('localhost', :worker_2_port + 1);
 ERROR: permission denied for function master_remove_node
-SELECT 1 FROM master_add_node('localhost', :worker_2_port + 1);
+SELECT 1 FROM master_add_node('localhost', :worker_2_port + 1);
 ERROR: permission denied for function master_add_node
-SELECT 1 FROM master_add_secondary_node('localhost', :worker_2_port + 2, 'localhost', :worker_2_port);
+SELECT 1 FROM master_add_secondary_node('localhost', :worker_2_port + 2, 'localhost', :worker_2_port);
 ERROR: permission denied for function master_add_secondary_node
-SELECT master_update_node(nodeid, 'localhost', :worker_2_port + 3) FROM pg_dist_node WHERE nodeport = :worker_2_port;
+SELECT master_update_node(nodeid, 'localhost', :worker_2_port + 3) FROM pg_dist_node WHERE nodeport = :worker_2_port;
 ERROR: permission denied for function master_update_node
-- try to manipulate node metadata via privileged user
SET ROLE node_metadata_user;
BEGIN;
-SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port + 1);
+SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port + 1);
 ?column?
---------------------------------------------------------------------
 1
(1 row)
-SELECT 1 FROM master_activate_node('localhost', :worker_2_port + 1);
+SELECT 1 FROM master_activate_node('localhost', :worker_2_port + 1);
 ?column?
---------------------------------------------------------------------
 1
(1 row)
-SELECT 1 FROM master_disable_node('localhost', :worker_2_port + 1);
+SELECT 1 FROM master_disable_node('localhost', :worker_2_port + 1);
 ?column?
---------------------------------------------------------------------
 1
(1 row)
-SELECT 1 FROM master_remove_node('localhost', :worker_2_port + 1);
+SELECT 1 FROM master_remove_node('localhost', :worker_2_port + 1);
 ?column?
---------------------------------------------------------------------
 1
(1 row)
-SELECT 1 FROM master_add_node('localhost', :worker_2_port + 1);
+SELECT 1 FROM master_add_node('localhost', :worker_2_port + 1);
 ?column?
---------------------------------------------------------------------
 1
(1 row)
-SELECT 1 FROM master_add_secondary_node('localhost', :worker_2_port + 2, 'localhost', :worker_2_port);
+SELECT 1 FROM master_add_secondary_node('localhost', :worker_2_port + 2, 'localhost', :worker_2_port);
 ?column?
---------------------------------------------------------------------
 1
(1 row)
-SELECT master_update_node(nodeid, 'localhost', :worker_2_port + 3) FROM pg_dist_node WHERE nodeport = :worker_2_port;
+SELECT master_update_node(nodeid, 'localhost', :worker_2_port + 3) FROM pg_dist_node WHERE nodeport = :worker_2_port;
 master_update_node
---------------------------------------------------------------------

@@ -215,30 +215,30 @@ SELECT master_update_node(nodeid, 'localhost', :worker_2_port + 3) FROM pg_dist_
SELECT nodename, nodeport, noderole FROM pg_dist_node ORDER BY nodeport;
 nodename | nodeport | noderole
---------------------------------------------------------------------
- localhost | 57637 | primary
- localhost | 57639 | primary
- localhost | 57640 | secondary
- localhost | 57641 | primary
+ localhost | xxxxx | primary
+ localhost | xxxxx | primary
+ localhost | xxxxx | secondary
+ localhost | xxxxx | primary
(4 rows)
ABORT;
-\c - postgres - :master_port
+\c - postgres - :master_port
 SET citus.next_shard_id TO 1220016;
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------------------------------------------
- (localhost,57637)
+ (localhost,xxxxx)
(1 row)
-- restore the node for next tests
-SELECT * FROM master_activate_node('localhost', :worker_2_port);
+SELECT * FROM master_activate_node('localhost', :worker_2_port);
 master_activate_node
---------------------------------------------------------------------
 3
(1 row)
-- try to remove a node with active placements and see that node removal is failed
-SELECT master_remove_node('localhost', :worker_2_port);
+SELECT master_remove_node('localhost', :worker_2_port);
 ERROR: you cannot remove the primary node of a node group which has shard placements
-- mark all placements in the candidate node as inactive
SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
@@ -246,24 +246,24 @@ UPDATE pg_dist_placement SET shardstate=3 WHERE groupid=:worker_2_group;
SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port;
 shardid | shardstate | nodename | nodeport
---------------------------------------------------------------------
- 1220001 | 3 | localhost | 57638
- 1220003 | 3 | localhost | 57638
- 1220005 | 3 | localhost | 57638
- 1220007 | 3 | localhost | 57638
- 1220009 | 3 | localhost | 57638
- 1220011 | 3 | localhost | 57638
- 1220013 | 3 | localhost | 57638
- 1220015 | 3 | localhost | 57638
+ 1220001 | 3 | localhost | xxxxx
+ 1220003 | 3 | localhost | xxxxx
+ 1220005 | 3 | localhost | xxxxx
+ 1220007 | 3 | localhost | xxxxx
+ 1220009 | 3 | localhost | xxxxx
+ 1220011 | 3 | localhost | xxxxx
+ 1220013 | 3 | localhost | xxxxx
+ 1220015 | 3 | localhost | xxxxx
(8 rows)
-- try to remove a node with only inactive placements and see that removal still fails
-SELECT master_remove_node('localhost', :worker_2_port);
+SELECT master_remove_node('localhost', :worker_2_port);
 ERROR: you cannot remove the primary node of a node group which has shard placements
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------------------------------------------
- (localhost,57638)
- (localhost,57637)
+ (localhost,xxxxx)
+ (localhost,xxxxx)
(2 rows)
-- mark all placements in the candidate node as to be deleted
@@ -271,14 +271,14 @@ UPDATE pg_dist_placement SET shardstate=4 WHERE groupid=:worker_2_group;
SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port;
 shardid | shardstate | nodename | nodeport
---------------------------------------------------------------------
- 1220001 | 4 | localhost | 57638
- 1220003 | 4 | localhost | 57638
- 1220005 | 4 | localhost | 57638
- 1220007 | 4 | localhost | 57638
- 1220009 | 4 | localhost | 57638
- 1220011 | 4 | localhost | 57638
- 1220013 | 4 | localhost | 57638
- 1220015 | 4 | localhost | 57638
+ 1220001 | 4 | localhost | xxxxx
+ 1220003 | 4 | localhost | xxxxx
+ 1220005 | 4 | localhost | xxxxx
+ 1220007 | 4 | localhost | xxxxx
+ 1220009 | 4 | localhost | xxxxx
+ 1220011 | 4 | localhost | xxxxx
+ 1220013 | 4 | localhost | xxxxx
+ 1220015 | 4 | localhost | xxxxx
(8 rows)
CREATE TABLE cluster_management_test_colocated (col_1 text, col_2 int);
@@ -292,44 +292,44 @@ SELECT create_distributed_table('cluster_management_test_colocated', 'col_1', 'h
SELECT logicalrelid, shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard;
 logicalrelid | shardid | shardstate | nodename | nodeport
---------------------------------------------------------------------
- cluster_management_test | 1220000 | 1 | localhost | 57637
- cluster_management_test | 1220002 | 1 | localhost | 57637
- cluster_management_test | 1220004 | 1 | localhost | 57637
- cluster_management_test | 1220006 | 1 | localhost | 57637
- cluster_management_test | 1220008 | 1 | localhost | 57637
- cluster_management_test | 1220010 | 1 | localhost | 57637
- cluster_management_test | 1220012 | 1 | localhost | 57637
- cluster_management_test | 1220014 | 1 | localhost | 57637
- cluster_management_test_colocated | 1220016 | 1 | localhost | 57637
- cluster_management_test_colocated | 1220018 | 1 | localhost | 57637
- cluster_management_test_colocated | 1220020 | 1 | localhost | 57637
- cluster_management_test_colocated | 1220022 | 1 | localhost | 57637
- cluster_management_test_colocated | 1220024 | 1 | localhost | 57637
- cluster_management_test_colocated | 1220026 | 1 | localhost | 57637
- cluster_management_test_colocated | 1220028 | 1 | localhost | 57637
- cluster_management_test_colocated | 1220030 | 1 | localhost | 57637
- cluster_management_test | 1220001 | 4 | localhost | 57638
- cluster_management_test | 1220003 | 4 | localhost | 57638
- cluster_management_test | 1220005 | 4 | localhost | 57638
- cluster_management_test | 1220007 | 4 | localhost | 57638
- cluster_management_test | 1220009 | 4 | localhost | 57638
- cluster_management_test | 1220011 | 4 | localhost | 57638
- cluster_management_test | 1220013 | 4 | localhost | 57638
- cluster_management_test | 1220015 | 4 | localhost | 57638
+ cluster_management_test | 1220000 | 1 | localhost | xxxxx
+ cluster_management_test | 1220002 | 1 | localhost | xxxxx
+ cluster_management_test | 1220004 | 1 | localhost | xxxxx
+ cluster_management_test | 1220006 | 1 | localhost | xxxxx
+ cluster_management_test | 1220008 | 1 | localhost | xxxxx
+ cluster_management_test | 1220010 | 1 | localhost | xxxxx
+ cluster_management_test | 1220012 | 1 | localhost | xxxxx
+ cluster_management_test | 1220014 | 1 | localhost | xxxxx
+ cluster_management_test_colocated | 1220016 | 1 | localhost | xxxxx
+ cluster_management_test_colocated | 1220018 | 1 | localhost | xxxxx
+ cluster_management_test_colocated | 1220020 | 1 | localhost | xxxxx
+ cluster_management_test_colocated | 1220022 | 1 | localhost | xxxxx
+ cluster_management_test_colocated | 1220024 | 1 | localhost | xxxxx
+ cluster_management_test_colocated | 1220026 | 1 | localhost | xxxxx
+ cluster_management_test_colocated | 1220028 | 1 | localhost | xxxxx
+ cluster_management_test_colocated | 1220030 | 1 | localhost | xxxxx
+ cluster_management_test | 1220001 | 4 | localhost | xxxxx
+ cluster_management_test | 1220003 | 4 | localhost | xxxxx
+ cluster_management_test | 1220005 | 4 | localhost | xxxxx
+ cluster_management_test | 1220007 | 4 | localhost | xxxxx
+ cluster_management_test | 1220009 | 4 | localhost | xxxxx
+ cluster_management_test | 1220011 | 4 | localhost | xxxxx
+ cluster_management_test | 1220013 | 4 | localhost | xxxxx
+ cluster_management_test | 1220015 | 4 | localhost | xxxxx
(24 rows)
-- try to remove a node with only to be deleted placements and see that removal still fails
-SELECT master_remove_node('localhost', :worker_2_port);
+SELECT master_remove_node('localhost', :worker_2_port);
 ERROR: you cannot remove the primary node of a node group which has shard placements
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------------------------------------------
- (localhost,57638)
- (localhost,57637)
+ (localhost,xxxxx)
+ (localhost,xxxxx)
(2 rows)
-- clean-up
-SELECT 1 FROM master_add_node('localhost', :worker_2_port);
+SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 ?column?
---------------------------------------------------------------------
 1
@@ -348,19 +348,19 @@ DELETE FROM pg_dist_node WHERE nodeport=:worker_2_port;
SELECT * FROM cluster_management_test;
ERROR: there is a shard placement in node group 3 but there are no nodes in that group
-- clean-up
-SELECT master_add_node('localhost', :worker_2_port) AS new_node \gset
+SELECT master_add_node('localhost', :worker_2_port) AS new_node \gset
 SELECT groupid AS new_group FROM pg_dist_node WHERE nodeid = :new_node \gset
UPDATE pg_dist_placement SET groupid = :new_group WHERE groupid = :worker_2_group;
-- test that you are allowed to remove secondary nodes even if there are placements
-SELECT 1 FROM master_add_node('localhost', 9990, groupid => :new_group, noderole => 'secondary');
+SELECT 1 FROM master_add_node('localhost', 9990, groupid => :new_group, noderole => 'secondary');
 ?column?
---------------------------------------------------------------------
 1
(1 row)
-SELECT master_remove_node('localhost', :worker_2_port);
+SELECT master_remove_node('localhost', :worker_2_port);
 ERROR: you cannot remove the primary node of a node group which has shard placements
-SELECT master_remove_node('localhost', 9990);
+SELECT master_remove_node('localhost', 9990);
 master_remove_node
---------------------------------------------------------------------

@@ -369,60 +369,60 @@ SELECT master_remove_node('localhost', 9990);
-- clean-up
DROP TABLE cluster_management_test;
-- check that adding/removing nodes are propagated to nodes with metadata
-SELECT master_remove_node('localhost', :worker_2_port);
+SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------------------------------------------------------

(1 row)
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
+SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
 start_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)
-SELECT 1 FROM master_add_node('localhost', :worker_2_port);
+SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 ?column?
---------------------------------------------------------------------
 1
(1 row)
\c - - - :worker_1_port
-SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
+SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
 nodename | nodeport
---------------------------------------------------------------------
- localhost | 57638
+ localhost | xxxxx
(1 row)
\c - - - :master_port
-SELECT master_remove_node('localhost', :worker_2_port);
+SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------------------------------------------------------

(1 row)
\c - - - :worker_1_port
-SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
+SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
 nodename | nodeport
---------------------------------------------------------------------
(0 rows)
\c - - - :master_port
-- check that added nodes are not propagated to nodes without metadata
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
+SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
 stop_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)
-SELECT 1 FROM master_add_node('localhost', :worker_2_port);
+SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 ?column?
---------------------------------------------------------------------
 1
(1 row)
\c - - - :worker_1_port
-SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
+SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
 nodename | nodeport
---------------------------------------------------------------------
(0 rows)
@@ -430,8 +430,8 @@ SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodep
\c - - - :master_port
-- check that removing two nodes in the same transaction works
SELECT
- master_remove_node('localhost', :worker_1_port),
- master_remove_node('localhost', :worker_2_port);
+ master_remove_node('localhost', :worker_1_port),
+ master_remove_node('localhost', :worker_2_port);
 master_remove_node | master_remove_node
---------------------------------------------------------------------
 |
@@ -445,8 +445,8 @@ SELECT count(1) FROM pg_dist_node;
-- check that adding two nodes in the same transaction works
SELECT
- master_add_node('localhost', :worker_1_port),
- master_add_node('localhost', :worker_2_port);
+ master_add_node('localhost', :worker_1_port),
+ master_add_node('localhost', :worker_2_port);
 master_add_node | master_add_node
---------------------------------------------------------------------
 11 | 12
(1 row)
SELECT * FROM pg_dist_node ORDER BY nodeid;
 nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
---------------------------------------------------------------------
- 11 | 9 | localhost | 57637 | default | f | t | primary | default | f | t
- 12 | 10 | localhost | 57638 | default | f | t | primary | default | f | t
+ 11 | 9 | localhost | xxxxx | default | f | t | primary | default | f | t
+ 12 | 10 | localhost | xxxxx | default | f | t | primary | default | f | t
(2 rows)
-- check that mixed add/remove node commands work fine inside transaction
BEGIN;
-SELECT master_remove_node('localhost', :worker_2_port);
+SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------------------------------------------------------

(1 row)
-SELECT 1 FROM master_add_node('localhost', :worker_2_port);
+SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 ?column?
---------------------------------------------------------------------
 1
(1 row)
-SELECT master_remove_node('localhost', :worker_2_port);
+SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------------------------------------------------------

(1 row)
COMMIT;
-SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
+SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
 nodename | nodeport
---------------------------------------------------------------------
(0 rows)
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
+SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
 start_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)
BEGIN;
-SELECT 1 FROM master_add_node('localhost', :worker_2_port);
+SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 ?column?
--------------------------------------------------------------------- 1 (1 row) -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------------------------------------------------------- (1 row) -SELECT 1 FROM master_add_node('localhost', :worker_2_port); +SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? --------------------------------------------------------------------- 1 (1 row) COMMIT; -SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; +SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; nodename | nodeport --------------------------------------------------------------------- - localhost | 57638 + localhost | xxxxx (1 row) \c - - - :worker_1_port -SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; +SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; nodename | nodeport --------------------------------------------------------------------- - localhost | 57638 + localhost | xxxxx (1 row) \c - - - :master_port @@ -532,13 +532,13 @@ SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node; (2 rows) -SELECT 1 FROM master_add_node('localhost', :worker_1_port); +SELECT 1 FROM master_add_node('localhost', :worker_1_port); ?column? --------------------------------------------------------------------- 1 (1 row) -SELECT 1 FROM master_add_node('localhost', :worker_2_port); +SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? --------------------------------------------------------------------- 1 @@ -546,14 +546,14 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- check that a distributed table can be created after adding a node in a transaction SET citus.shard_count TO 4; -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------------------------------------------------------- (1 row) BEGIN; -SELECT 1 FROM master_add_node('localhost', :worker_2_port); +SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column?
--------------------------------------------------------------------- 1 @@ -596,66 +596,66 @@ DELETE FROM pg_dist_shard; DELETE FROM pg_dist_placement; DELETE FROM pg_dist_node; \c - - - :master_port -SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); +SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); +SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); stop_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -- check that you can't add a primary to a non-default cluster -SELECT master_add_node('localhost', 9999, nodecluster => 'olap'); +SELECT master_add_node('localhost', 9999, nodecluster => 'olap'); ERROR: primaries must be added to the default cluster -- check that you can't add more than one primary to a group SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_port \gset -SELECT master_add_node('localhost', 9999, groupid => :worker_1_group, noderole => 'primary'); +SELECT master_add_node('localhost', 9999, groupid => :worker_1_group, noderole => 'primary'); ERROR: group 14 already has a primary node -- check that you can add secondaries and unavailable nodes to a group SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset -SELECT 1 FROM master_add_node('localhost', 9998, groupid => :worker_1_group, noderole => 'secondary'); +SELECT 1 FROM master_add_node('localhost', 9998, groupid => :worker_1_group, noderole => 'secondary'); ?column? --------------------------------------------------------------------- 1 (1 row) -SELECT 1 FROM master_add_node('localhost', 9997, groupid => :worker_1_group, noderole => 'unavailable'); +SELECT 1 FROM master_add_node('localhost', 9997, groupid => :worker_1_group, noderole => 'unavailable'); ?column? --------------------------------------------------------------------- 1 (1 row) -- add_inactive_node also works with secondaries -SELECT 1 FROM master_add_inactive_node('localhost', 9996, groupid => :worker_2_group, noderole => 'secondary'); +SELECT 1 FROM master_add_inactive_node('localhost', 9996, groupid => :worker_2_group, noderole => 'secondary'); ?column?
--------------------------------------------------------------------- 1 (1 row) -- check that you can add a seconary to a non-default cluster, and activate it, and remove it -SELECT master_add_inactive_node('localhost', 9999, groupid => :worker_2_group, nodecluster => 'olap', noderole => 'secondary'); +SELECT master_add_inactive_node('localhost', 9999, groupid => :worker_2_group, nodecluster => 'olap', noderole => 'secondary'); master_add_inactive_node --------------------------------------------------------------------- 22 (1 row) -SELECT master_activate_node('localhost', 9999); +SELECT master_activate_node('localhost', 9999); master_activate_node --------------------------------------------------------------------- 22 (1 row) -SELECT master_disable_node('localhost', 9999); +SELECT master_disable_node('localhost', 9999); master_disable_node --------------------------------------------------------------------- (1 row) -SELECT master_remove_node('localhost', 9999); +SELECT master_remove_node('localhost', 9999); master_remove_node --------------------------------------------------------------------- @@ -663,7 +663,7 @@ SELECT master_remove_node('localhost', 9999); -- check that you can't manually add two primaries to a group INSERT INTO pg_dist_node (nodename, nodeport, groupid, noderole) - VALUES ('localhost', 5000, :worker_1_group, 'primary'); + VALUES ('localhost', 5000, :worker_1_group, 'primary'); ERROR: there cannot be two primary nodes in a group CONTEXT: PL/pgSQL function citus_internal.pg_dist_node_trigger_func() line 10 at RAISE UPDATE pg_dist_node SET noderole = 'primary' @@ -672,23 +672,23 @@ ERROR: there cannot be two primary nodes in a group CONTEXT: PL/pgSQL function citus_internal.pg_dist_node_trigger_func() line 18 at RAISE -- check that you can't manually add a primary to a non-default cluster INSERT INTO pg_dist_node (nodename, nodeport, groupid, noderole, nodecluster) - VALUES ('localhost', 5000, 1000, 'primary', 'olap'); + VALUES ('localhost', 5000, 1000, 'primary', 'olap'); ERROR: new row for relation "pg_dist_node" violates check constraint "primaries_are_only_allowed_in_the_default_cluster" -DETAIL: Failing row contains (24, 1000, localhost, 5000, default, f, t, primary, olap, f, t). +DETAIL: Failing row contains (24, 1000, localhost, 5000, default, f, t, primary, olap, f, t). UPDATE pg_dist_node SET nodecluster = 'olap' WHERE nodeport = :worker_1_port; ERROR: new row for relation "pg_dist_node" violates check constraint "primaries_are_only_allowed_in_the_default_cluster" -DETAIL: Failing row contains (16, 14, localhost, 57637, default, f, t, primary, olap, f, t). +DETAIL: Failing row contains (16, 14, localhost, xxxxx, default, f, t, primary, olap, f, t). -- check that you /can/ add a secondary node to a non-default cluster SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset -SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary', nodecluster=> 'olap'); +SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary', nodecluster=> 'olap'); master_add_node --------------------------------------------------------------------- 25 (1 row) -- check that super-long cluster names are truncated -SELECT master_add_node('localhost', 8887, groupid => :worker_1_group, noderole => 'secondary', nodecluster=> +SELECT master_add_node('localhost', 8887, groupid => :worker_1_group, noderole => 'secondary', nodecluster=> 'thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars.' 'thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars.'
'thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars.' @@ -703,27 +703,27 @@ SELECT master_add_node('localhost', 8887, groupid => :worker_1_group, noderole = SELECT * FROM pg_dist_node WHERE nodeport=8887; nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards --------------------------------------------------------------------- - 26 | 14 | localhost | 8887 | default | f | t | secondary | thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars. | f | t + 26 | 14 | localhost | 8887 | default | f | t | secondary | thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars. | f | t (1 row) -- don't remove the secondary and unavailable nodes, check that no commands are sent to -- them in any of the remaining tests -- master_add_secondary_node lets you skip looking up the groupid -SELECT master_add_secondary_node('localhost', 9995, 'localhost', :worker_1_port); +SELECT master_add_secondary_node('localhost', 9995, 'localhost', :worker_1_port); master_add_secondary_node --------------------------------------------------------------------- 27 (1 row) -SELECT master_add_secondary_node('localhost', 9994, primaryname => 'localhost', primaryport => :worker_2_port); +SELECT master_add_secondary_node('localhost', 9994, primaryname => 'localhost', primaryport => :worker_2_port); master_add_secondary_node --------------------------------------------------------------------- 28 (1 row) -SELECT master_add_secondary_node('localhost', 9993, 'localhost', 2000); -ERROR: node at "localhost:xxxxx" does not exist -SELECT master_add_secondary_node('localhost', 9992, 'localhost', :worker_1_port, nodecluster => 'second-cluster'); +SELECT master_add_secondary_node('localhost', 9993, 'localhost', 2000); +ERROR: node at "localhost:xxxxx" does not exist +SELECT master_add_secondary_node('localhost', 9992, 'localhost', :worker_1_port, nodecluster => 'second-cluster'); master_add_secondary_node --------------------------------------------------------------------- 29 @@ -731,10 +731,10 @@ SELECT master_add_secondary_node('localhost', 9992, 'localhost', :worker_1_port, SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset -- master_update_node checks node exists -SELECT master_update_node(100, 'localhost', 8000); +SELECT master_update_node(100, 'localhost', 8000); ERROR: node 100 not found -- master_update_node disallows aliasing existing node -SELECT master_update_node(:worker_1_node, 'localhost', :worker_2_port); +SELECT master_update_node(:worker_1_node, 'localhost', :worker_2_port); ERROR: there is already another node with the specified hostname and port -- master_update_node moves a node SELECT master_update_node(:worker_1_node, 'somehost', 9000); @@ -750,7 +750,7 @@ SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node; (1 row) -- cleanup -SELECT master_update_node(:worker_1_node, 'localhost', :worker_1_port); +SELECT master_update_node(:worker_1_node, 'localhost', :worker_1_port); master_update_node --------------------------------------------------------------------- @@ -759,7 +759,7 @@ SELECT master_update_node(:worker_1_node, 'localhost', :worker_1_port); SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node; nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards --------------------------------------------------------------------- - 16 | 14 | localhost | 57637 | default | f | t | primary | default | f | t + 16 | 14 | localhost | xxxxx | default | f | t | primary | default | f | t (1 row) SET citus.shard_replication_factor
TO 1; @@ -771,7 +771,7 @@ SELECT create_distributed_table('test_dist', 'x'); (1 row) -- testing behaviour when setting shouldhaveshards to false on partially empty node -SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false); +SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false); master_set_node_property --------------------------------------------------------------------- @@ -811,8 +811,8 @@ FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_colocated'::regclass GROUP BY nodeport ORDER BY nodeport; nodeport | count --------------------------------------------------------------------- - 57637 | 2 - 57638 | 2 + xxxxx | 2 + xxxxx | 2 (2 rows) -- non colocated tables should not be placed on shouldhaveshards false nodes anymore @@ -821,7 +821,7 @@ FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport; nodeport | count --------------------------------------------------------------------- - 57637 | 4 + xxxxx | 4 (1 row) -- this table should be colocated with the test_dist_non_colocated table @@ -831,7 +831,7 @@ FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_colocated_with_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport; nodeport | count --------------------------------------------------------------------- - 57637 | 4 + xxxxx | 4 (1 row) -- reference tables should be placed on with shouldhaveshards false @@ -840,14 +840,14 @@ FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport; nodeport | count --------------------------------------------------------------------- - 57637 | 1 - 57638 | 1 + xxxxx | 1 + xxxxx | 1 (2 rows) -- cleanup for next test DROP TABLE test_dist, test_ref, test_dist_colocated, test_dist_non_colocated, test_dist_colocated_with_non_colocated; -- testing behaviour when setting shouldhaveshards to false on fully empty node -SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false); +SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false); master_set_node_property --------------------------------------------------------------------- @@ -875,7 +875,7 @@ FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist'::regclass GROUP BY nodeport ORDER BY nodeport; nodeport | count --------------------------------------------------------------------- - 57637 | 4 + xxxxx | 4 (1 row) -- reference tables should be placed on nodes with shouldhaveshards false @@ -884,11 +884,11 @@ FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport; nodeport | count --------------------------------------------------------------------- - 57637 | 1 - 57638 | 1 + xxxxx | 1 + xxxxx | 1 (2 rows) -SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true); +SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true); master_set_node_property --------------------------------------------------------------------- @@ -901,7 +901,7 @@ FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist'::regclass GROUP BY nodeport ORDER BY nodeport; nodeport | count
--------------------------------------------------------------------- - 57637 | 4 + xxxxx | 4 (1 row) -- reference tables should still be placed on all nodes with isdatanode 'true' @@ -910,8 +910,8 @@ FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport; nodeport | count --------------------------------------------------------------------- - 57637 | 1 - 57638 | 1 + xxxxx | 1 + xxxxx | 1 (2 rows) SELECT create_distributed_table('test_dist_colocated', 'x'); @@ -933,7 +933,7 @@ FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_colocated'::regclass GROUP BY nodeport ORDER BY nodeport; nodeport | count --------------------------------------------------------------------- - 57637 | 4 + xxxxx | 4 (1 row) -- non colocated tables should be placed on nodedes that were switched to @@ -943,10 +943,10 @@ FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport; nodeport | count --------------------------------------------------------------------- - 57637 | 2 - 57638 | 2 + xxxxx | 2 + xxxxx | 2 (2 rows) -SELECT * from master_set_node_property('localhost', :worker_2_port, 'bogusproperty', false); +SELECT * from master_set_node_property('localhost', :worker_2_port, 'bogusproperty', false); ERROR: only the 'shouldhaveshards' property can be set using this function DROP TABLE test_dist, test_ref, test_dist_colocated, test_dist_non_colocated; diff --git a/src/test/regress/expected/multi_colocated_shard_transfer.out b/src/test/regress/expected/multi_colocated_shard_transfer.out index cd15be0c3..dc2a85954 100644 --- a/src/test/regress/expected/multi_colocated_shard_transfer.out +++ b/src/test/regress/expected/multi_colocated_shard_transfer.out @@ -19,26 +19,26 @@ WHERE ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate --------------------------------------------------------------------- - 1300000 | table1_group1 | 57637 | 1000 | 1 - 1300000 | table1_group1 | 57638 | 1000 | 3 - 1300001 | table1_group1 | 57637 | 1000 | 1 - 1300001 | table1_group1 | 57638 | 1000 | 1 - 1300002 | table1_group1 | 57637 | 1000 | 1 - 1300002 | table1_group1 | 57638 | 1000 | 1 - 1300003 | table1_group1 | 57637 | 1000 | 1 - 1300003 | table1_group1 | 57638 | 1000 | 1 - 1300004 | table2_group1 | 57637 | 1000 | 1 - 1300004 | table2_group1 | 57638 | 1000 | 3 - 1300005 | table2_group1 | 57637 | 1000 | 1 - 1300005 | table2_group1 | 57638 | 1000 | 1 - 1300006 | table2_group1 | 57637 | 1000 | 1 - 1300006 | table2_group1 | 57638 | 1000 | 1 - 1300007 | table2_group1 | 57637 | 1000 | 1 - 1300007 | table2_group1 | 57638 | 1000 | 1 + 1300000 | table1_group1 | xxxxx | 1000 | 1 + 1300000 | table1_group1 | xxxxx | 1000 | 3 + 1300001 | table1_group1 | xxxxx | 1000 | 1 + 1300001 | table1_group1 | xxxxx | 1000 | 1 + 1300002 | table1_group1 | xxxxx | 1000 | 1 + 1300002 | table1_group1 | xxxxx | 1000 | 1 + 1300003 | table1_group1 | xxxxx | 1000 | 1 + 1300003 | table1_group1 | xxxxx | 1000 | 1 + 1300004 | table2_group1 | xxxxx | 1000 | 1 + 1300004 | table2_group1 | xxxxx | 1000 | 3 + 1300005 | table2_group1 | xxxxx | 1000 | 1 + 1300005 | table2_group1 | xxxxx | 1000 | 1 + 1300006 | table2_group1 | xxxxx | 1000 | 1 + 1300006 | table2_group1 | xxxxx | 1000 | 1 + 1300007 | table2_group1 | xxxxx | 1000 | 1 + 1300007 | table2_group1 | xxxxx | 1000 | 1 (16 rows) -- repair colocated shards -SELECT
master_copy_shard_placement(1300000, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT master_copy_shard_placement(1300000, 'localhost', :worker_1_port, 'localhost', :worker_2_port); master_copy_shard_placement --------------------------------------------------------------------- @@ -55,22 +55,22 @@ WHERE ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate --------------------------------------------------------------------- - 1300000 | table1_group1 | 57637 | 1000 | 1 - 1300000 | table1_group1 | 57638 | 1000 | 1 - 1300001 | table1_group1 | 57637 | 1000 | 1 - 1300001 | table1_group1 | 57638 | 1000 | 1 - 1300002 | table1_group1 | 57637 | 1000 | 1 - 1300002 | table1_group1 | 57638 | 1000 | 1 - 1300003 | table1_group1 | 57637 | 1000 | 1 - 1300003 | table1_group1 | 57638 | 1000 | 1 - 1300004 | table2_group1 | 57637 | 1000 | 1 - 1300004 | table2_group1 | 57638 | 1000 | 3 - 1300005 | table2_group1 | 57637 | 1000 | 1 - 1300005 | table2_group1 | 57638 | 1000 | 1 - 1300006 | table2_group1 | 57637 | 1000 | 1 - 1300006 | table2_group1 | 57638 | 1000 | 1 - 1300007 | table2_group1 | 57637 | 1000 | 1 - 1300007 | table2_group1 | 57638 | 1000 | 1 + 1300000 | table1_group1 | xxxxx | 1000 | 1 + 1300000 | table1_group1 | xxxxx | 1000 | 1 + 1300001 | table1_group1 | xxxxx | 1000 | 1 + 1300001 | table1_group1 | xxxxx | 1000 | 1 + 1300002 | table1_group1 | xxxxx | 1000 | 1 + 1300002 | table1_group1 | xxxxx | 1000 | 1 + 1300003 | table1_group1 | xxxxx | 1000 | 1 + 1300003 | table1_group1 | xxxxx | 1000 | 1 + 1300004 | table2_group1 | xxxxx | 1000 | 1 + 1300004 | table2_group1 | xxxxx | 1000 | 3 + 1300005 | table2_group1 | xxxxx | 1000 | 1 + 1300005 | table2_group1 | xxxxx | 1000 | 1 + 1300006 | table2_group1 | xxxxx | 1000 | 1 + 1300006 | table2_group1 | xxxxx | 1000 | 1 + 1300007 | table2_group1 | xxxxx | 1000 | 1 + 1300007 | table2_group1 | xxxxx | 1000 | 1 (16 rows) -- test repairing NOT colocated shard @@ -85,18 +85,18 @@ WHERE ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate --------------------------------------------------------------------- - 1300016 | table5_groupx | 57637 | 0 | 1 - 1300016 | table5_groupx | 57638 | 0 | 3 - 1300017 | table5_groupx | 57637 | 0 | 1 - 1300017 | table5_groupx | 57638 | 0 | 1 - 1300018 | table5_groupx | 57637 | 0 | 1 - 1300018 | table5_groupx | 57638 | 0 | 1 - 1300019 | table5_groupx | 57637 | 0 | 1 - 1300019 | table5_groupx | 57638 | 0 | 1 + 1300016 | table5_groupx | xxxxx | 0 | 1 + 1300016 | table5_groupx | xxxxx | 0 | 3 + 1300017 | table5_groupx | xxxxx | 0 | 1 + 1300017 | table5_groupx | xxxxx | 0 | 1 + 1300018 | table5_groupx | xxxxx | 0 | 1 + 1300018 | table5_groupx | xxxxx | 0 | 1 + 1300019 | table5_groupx | xxxxx | 0 | 1 + 1300019 | table5_groupx | xxxxx | 0 | 1 (8 rows) -- repair NOT colocated shard -SELECT master_copy_shard_placement(1300016, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT master_copy_shard_placement(1300016, 'localhost', :worker_1_port, 'localhost', :worker_2_port); master_copy_shard_placement --------------------------------------------------------------------- @@ -113,14 +113,14 @@ WHERE ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate --------------------------------------------------------------------- - 1300016 | table5_groupx | 57637 | 0 | 1 - 1300016 | table5_groupx | 57638 | 0 | 1 - 1300017 | table5_groupx | 57637 | 0 | 1 - 1300017 | table5_groupx | 57638 | 0 | 1 - 1300018 | table5_groupx | 57637 | 0 | 1 -
1300018 | table5_groupx | 57638 | 0 | 1 - 1300019 | table5_groupx | 57637 | 0 | 1 - 1300019 | table5_groupx | 57638 | 0 | 1 + 1300016 | table5_groupx | xxxxx | 0 | 1 + 1300016 | table5_groupx | xxxxx | 0 | 1 + 1300017 | table5_groupx | xxxxx | 0 | 1 + 1300017 | table5_groupx | xxxxx | 0 | 1 + 1300018 | table5_groupx | xxxxx | 0 | 1 + 1300018 | table5_groupx | xxxxx | 0 | 1 + 1300019 | table5_groupx | xxxxx | 0 | 1 + 1300019 | table5_groupx | xxxxx | 0 | 1 (8 rows) -- test repairing shard in append distributed table @@ -135,14 +135,14 @@ WHERE ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate --------------------------------------------------------------------- - 1300020 | table6_append | 57637 | 0 | 1 - 1300020 | table6_append | 57638 | 0 | 3 - 1300021 | table6_append | 57637 | 0 | 1 - 1300021 | table6_append | 57638 | 0 | 1 + 1300020 | table6_append | xxxxx | 0 | 1 + 1300020 | table6_append | xxxxx | 0 | 3 + 1300021 | table6_append | xxxxx | 0 | 1 + 1300021 | table6_append | xxxxx | 0 | 1 (4 rows) -- repair shard in append distributed table -SELECT master_copy_shard_placement(1300020, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT master_copy_shard_placement(1300020, 'localhost', :worker_1_port, 'localhost', :worker_2_port); master_copy_shard_placement --------------------------------------------------------------------- @@ -159,10 +159,10 @@ WHERE ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate --------------------------------------------------------------------- - 1300020 | table6_append | 57637 | 0 | 1 - 1300020 | table6_append | 57638 | 0 | 1 - 1300021 | table6_append | 57637 | 0 | 1 - 1300021 | table6_append | 57638 | 0 | 1 + 1300020 | table6_append | xxxxx | 0 | 1 + 1300020 | table6_append | xxxxx | 0 | 1 + 1300021 | table6_append | xxxxx | 0 | 1 + 1300021 | table6_append | xxxxx | 0 | 1 (4 rows) -- test repair while all placements of one shard in colocation group is unhealthy @@ -179,26 +179,26 @@ WHERE ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate --------------------------------------------------------------------- - 1300000 | table1_group1 | 57637 | 1000 | 3 - 1300000 | table1_group1 | 57638 | 1000 | 3 - 1300001 | table1_group1 | 57637 | 1000 | 1 - 1300001 | table1_group1 | 57638 | 1000 | 1 - 1300002 | table1_group1 | 57637 | 1000 | 1 - 1300002 | table1_group1 | 57638 | 1000 | 1 - 1300003 | table1_group1 | 57637 | 1000 | 1 - 1300003 | table1_group1 | 57638 | 1000 | 1 - 1300004 | table2_group1 | 57637 | 1000 | 1 - 1300004 | table2_group1 | 57638 | 1000 | 3 - 1300005 | table2_group1 | 57637 | 1000 | 1 - 1300005 | table2_group1 | 57638 | 1000 | 1 - 1300006 | table2_group1 | 57637 | 1000 | 1 - 1300006 | table2_group1 | 57638 | 1000 | 1 - 1300007 | table2_group1 | 57637 | 1000 | 1 - 1300007 | table2_group1 | 57638 | 1000 | 1 + 1300000 | table1_group1 | xxxxx | 1000 | 3 + 1300000 | table1_group1 | xxxxx | 1000 | 3 + 1300001 | table1_group1 | xxxxx | 1000 | 1 + 1300001 | table1_group1 | xxxxx | 1000 | 1 + 1300002 | table1_group1 | xxxxx | 1000 | 1 + 1300002 | table1_group1 | xxxxx | 1000 | 1 + 1300003 | table1_group1 | xxxxx | 1000 | 1 + 1300003 | table1_group1 | xxxxx | 1000 | 1 + 1300004 | table2_group1 | xxxxx | 1000 | 1 + 1300004 | table2_group1 | xxxxx | 1000 | 3 + 1300005 | table2_group1 | xxxxx | 1000 | 1 + 1300005 | table2_group1 | xxxxx | 1000 | 1 + 1300006 | table2_group1 | xxxxx | 1000 | 1 + 1300006 | table2_group1 | xxxxx | 1000 | 1
+ 1300007 | table2_group1 | xxxxx | 1000 | 1 + 1300007 | table2_group1 | xxxxx | 1000 | 1 (16 rows) -- repair while all placements of one shard in colocation group is unhealthy -SELECT master_copy_shard_placement(1300000, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT master_copy_shard_placement(1300000, 'localhost', :worker_1_port, 'localhost', :worker_2_port); ERROR: source placement must be in active state -- status after shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate @@ -211,21 +211,21 @@ WHERE ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate --------------------------------------------------------------------- - 1300000 | table1_group1 | 57637 | 1000 | 3 - 1300000 | table1_group1 | 57638 | 1000 | 3 - 1300001 | table1_group1 | 57637 | 1000 | 1 - 1300001 | table1_group1 | 57638 | 1000 | 1 - 1300002 | table1_group1 | 57637 | 1000 | 1 - 1300002 | table1_group1 | 57638 | 1000 | 1 - 1300003 | table1_group1 | 57637 | 1000 | 1 - 1300003 | table1_group1 | 57638 | 1000 | 1 - 1300004 | table2_group1 | 57637 | 1000 | 1 - 1300004 | table2_group1 | 57638 | 1000 | 3 - 1300005 | table2_group1 | 57637 | 1000 | 1 - 1300005 | table2_group1 | 57638 | 1000 | 1 - 1300006 | table2_group1 | 57637 | 1000 | 1 - 1300006 | table2_group1 | 57638 | 1000 | 1 - 1300007 | table2_group1 | 57637 | 1000 | 1 - 1300007 | table2_group1 | 57638 | 1000 | 1 + 1300000 | table1_group1 | xxxxx | 1000 | 3 + 1300000 | table1_group1 | xxxxx | 1000 | 3 + 1300001 | table1_group1 | xxxxx | 1000 | 1 + 1300001 | table1_group1 | xxxxx | 1000 | 1 + 1300002 | table1_group1 | xxxxx | 1000 | 1 + 1300002 | table1_group1 | xxxxx | 1000 | 1 + 1300003 | table1_group1 | xxxxx | 1000 | 1 + 1300003 | table1_group1 | xxxxx | 1000 | 1 + 1300004 | table2_group1 | xxxxx | 1000 | 1 + 1300004 | table2_group1 | xxxxx | 1000 | 3 + 1300005 | table2_group1 | xxxxx | 1000 | 1 + 1300005 | table2_group1 | xxxxx | 1000 | 1 + 1300006 | table2_group1 | xxxxx | 1000 | 1 + 1300006 | table2_group1 | xxxxx | 1000 | 1 + 1300007 | table2_group1 | xxxxx | 1000 | 1 + 1300007 | table2_group1 | xxxxx | 1000 | 1 (16 rows) diff --git a/src/test/regress/expected/multi_colocation_utils.out b/src/test/regress/expected/multi_colocation_utils.out index 7a5120354..e8fbd236a 100644 --- a/src/test/regress/expected/multi_colocation_utils.out +++ b/src/test/regress/expected/multi_colocation_utils.out @@ -719,114 +719,114 @@ ORDER BY nodeport; logicalrelid | shardid | shardstorage | nodeport | shardminvalue | shardmaxvalue --------------------------------------------------------------------- - table1_groupb | 1300026 | t | 57637 | -2147483648 | -1 - table1_groupb | 1300027 | t | 57638 | 0 | 2147483647 - table2_groupb | 1300028 | t | 57637 | -2147483648 | -1 - table2_groupb | 1300029 | t | 57638 | 0 | 2147483647 - table1_groupc | 1300030 | t | 57637 | -2147483648 | -1 - table1_groupc | 1300030 | t | 57638 | -2147483648 | -1 - table1_groupc | 1300031 | t | 57637 | 0 | 2147483647 - table1_groupc | 1300031 | t | 57638 | 0 | 2147483647 - table2_groupc | 1300032 | t | 57637 | -2147483648 | -1 - table2_groupc | 1300032 | t | 57638 | -2147483648 | -1 - table2_groupc | 1300033 | t | 57637 | 0 | 2147483647 - table2_groupc | 1300033 | t | 57638 | 0 | 2147483647 - table1_groupd | 1300034 | t | 57637 | -2147483648 | -1610612737 - table1_groupd | 1300034 | t | 57638 | -2147483648 | -1610612737 - table1_groupd | 1300035 | t | 57637 | -1610612736 | -1073741825 - table1_groupd | 1300035 | t | 57638 |
-1610612736 | -1073741825 - table1_groupd | 1300036 | t | 57637 | -1073741824 | -536870913 - table1_groupd | 1300036 | t | 57638 | -1073741824 | -536870913 - table1_groupd | 1300037 | t | 57637 | -536870912 | -1 - table1_groupd | 1300037 | t | 57638 | -536870912 | -1 - table1_groupd | 1300038 | t | 57637 | 0 | 536870911 - table1_groupd | 1300038 | t | 57638 | 0 | 536870911 - table1_groupd | 1300039 | t | 57637 | 536870912 | 1073741823 - table1_groupd | 1300039 | t | 57638 | 536870912 | 1073741823 - table1_groupd | 1300040 | t | 57637 | 1073741824 | 1610612735 - table1_groupd | 1300040 | t | 57638 | 1073741824 | 1610612735 - table1_groupd | 1300041 | t | 57637 | 1610612736 | 2147483647 - table1_groupd | 1300041 | t | 57638 | 1610612736 | 2147483647 - table2_groupd | 1300042 | t | 57637 | -2147483648 | -1610612737 - table2_groupd | 1300042 | t | 57638 | -2147483648 | -1610612737 - table2_groupd | 1300043 | t | 57637 | -1610612736 | -1073741825 - table2_groupd | 1300043 | t | 57638 | -1610612736 | -1073741825 - table2_groupd | 1300044 | t | 57637 | -1073741824 | -536870913 - table2_groupd | 1300044 | t | 57638 | -1073741824 | -536870913 - table2_groupd | 1300045 | t | 57637 | -536870912 | -1 - table2_groupd | 1300045 | t | 57638 | -536870912 | -1 - table2_groupd | 1300046 | t | 57637 | 0 | 536870911 - table2_groupd | 1300046 | t | 57638 | 0 | 536870911 - table2_groupd | 1300047 | t | 57637 | 536870912 | 1073741823 - table2_groupd | 1300047 | t | 57638 | 536870912 | 1073741823 - table2_groupd | 1300048 | t | 57637 | 1073741824 | 1610612735 - table2_groupd | 1300048 | t | 57638 | 1073741824 | 1610612735 - table2_groupd | 1300049 | t | 57637 | 1610612736 | 2147483647 - table2_groupd | 1300049 | t | 57638 | 1610612736 | 2147483647 - table3_groupd | 1300050 | f | 57637 | -2147483648 | -1610612737 - table3_groupd | 1300050 | f | 57638 | -2147483648 | -1610612737 - table3_groupd | 1300051 | f | 57637 | -1610612736 | -1073741825 - table3_groupd | 1300051 | f | 57638 | -1610612736 | -1073741825 - table3_groupd | 1300052 | f | 57637 | -1073741824 | -536870913 - table3_groupd | 1300052 | f | 57638 | -1073741824 | -536870913 - table3_groupd | 1300053 | f | 57637 | -536870912 | -1 - table3_groupd | 1300053 | f | 57638 | -536870912 | -1 - table3_groupd | 1300054 | f | 57637 | 0 | 536870911 - table3_groupd | 1300054 | f | 57638 | 0 | 536870911 - table3_groupd | 1300055 | f | 57637 | 536870912 | 1073741823 - table3_groupd | 1300055 | f | 57638 | 536870912 | 1073741823 - table3_groupd | 1300056 | f | 57637 | 1073741824 | 1610612735 - table3_groupd | 1300056 | f | 57638 | 1073741824 | 1610612735 - table3_groupd | 1300057 | f | 57637 | 1610612736 | 2147483647 - table3_groupd | 1300057 | f | 57638 | 1610612736 | 2147483647 - table1_groupe | 1300058 | t | 57637 | -2147483648 | -1 - table1_groupe | 1300058 | t | 57638 | -2147483648 | -1 - table1_groupe | 1300059 | t | 57637 | 0 | 2147483647 - table1_groupe | 1300059 | t | 57638 | 0 | 2147483647 - table2_groupe | 1300060 | t | 57637 | -2147483648 | -1 - table2_groupe | 1300060 | t | 57638 | -2147483648 | -1 - table2_groupe | 1300061 | t | 57637 | 0 | 2147483647 - table2_groupe | 1300061 | t | 57638 | 0 | 2147483647 - table3_groupe | 1300062 | t | 57637 | -2147483648 | -1 - table3_groupe | 1300062 | t | 57638 | -2147483648 | -1 - table3_groupe | 1300063 | t | 57637 | 0 | 2147483647 - table3_groupe | 1300063 | t | 57638 | 0 | 2147483647 - schema_colocation.table4_groupe | 1300064 | t | 57637 | -2147483648 | -1 - schema_colocation.table4_groupe | 1300064 | t | 57638 | 
-2147483648 | -1 - schema_colocation.table4_groupe | 1300065 | t | 57637 | 0 | 2147483647 - schema_colocation.table4_groupe | 1300065 | t | 57638 | 0 | 2147483647 - table1_group_none_1 | 1300066 | t | 57637 | -2147483648 | -1 - table1_group_none_1 | 1300066 | t | 57638 | -2147483648 | -1 - table1_group_none_1 | 1300067 | t | 57637 | 0 | 2147483647 - table1_group_none_1 | 1300067 | t | 57638 | 0 | 2147483647 - table2_group_none_1 | 1300068 | t | 57637 | -2147483648 | -1 - table2_group_none_1 | 1300068 | t | 57638 | -2147483648 | -1 - table2_group_none_1 | 1300069 | t | 57637 | 0 | 2147483647 - table2_group_none_1 | 1300069 | t | 57638 | 0 | 2147483647 - table1_group_none_2 | 1300070 | t | 57637 | -2147483648 | -1 - table1_group_none_2 | 1300070 | t | 57638 | -2147483648 | -1 - table1_group_none_2 | 1300071 | t | 57637 | 0 | 2147483647 - table1_group_none_2 | 1300071 | t | 57638 | 0 | 2147483647 - table4_groupe | 1300072 | t | 57637 | -2147483648 | -1 - table4_groupe | 1300072 | t | 57638 | -2147483648 | -1 - table4_groupe | 1300073 | t | 57637 | 0 | 2147483647 - table4_groupe | 1300073 | t | 57638 | 0 | 2147483647 - table1_group_none_3 | 1300074 | t | 57637 | -2147483648 | -715827884 - table1_group_none_3 | 1300074 | t | 57638 | -2147483648 | -715827884 - table1_group_none_3 | 1300075 | t | 57637 | -715827883 | 715827881 - table1_group_none_3 | 1300075 | t | 57638 | -715827883 | 715827881 - table1_group_none_3 | 1300076 | t | 57637 | 715827882 | 2147483647 - table1_group_none_3 | 1300076 | t | 57638 | 715827882 | 2147483647 - table1_group_default | 1300077 | t | 57637 | -2147483648 | -715827884 - table1_group_default | 1300077 | t | 57638 | -2147483648 | -715827884 - table1_group_default | 1300078 | t | 57637 | -715827883 | 715827881 - table1_group_default | 1300078 | t | 57638 | -715827883 | 715827881 - table1_group_default | 1300079 | t | 57637 | 715827882 | 2147483647 - table1_group_default | 1300079 | t | 57638 | 715827882 | 2147483647 - table1_groupf | 1300080 | t | 57637 | | - table1_groupf | 1300080 | t | 57638 | | - table2_groupf | 1300081 | t | 57637 | | - table2_groupf | 1300081 | t | 57638 | | + table1_groupb | 1300026 | t | xxxxx | -2147483648 | -1 + table1_groupb | 1300027 | t | xxxxx | 0 | 2147483647 + table2_groupb | 1300028 | t | xxxxx | -2147483648 | -1 + table2_groupb | 1300029 | t | xxxxx | 0 | 2147483647 + table1_groupc | 1300030 | t | xxxxx | -2147483648 | -1 + table1_groupc | 1300030 | t | xxxxx | -2147483648 | -1 + table1_groupc | 1300031 | t | xxxxx | 0 | 2147483647 + table1_groupc | 1300031 | t | xxxxx | 0 | 2147483647 + table2_groupc | 1300032 | t | xxxxx | -2147483648 | -1 + table2_groupc | 1300032 | t | xxxxx | -2147483648 | -1 + table2_groupc | 1300033 | t | xxxxx | 0 | 2147483647 + table2_groupc | 1300033 | t | xxxxx | 0 | 2147483647 + table1_groupd | 1300034 | t | xxxxx | -2147483648 | -1610612737 + table1_groupd | 1300034 | t | xxxxx | -2147483648 | -1610612737 + table1_groupd | 1300035 | t | xxxxx | -1610612736 | -1073741825 + table1_groupd | 1300035 | t | xxxxx | -1610612736 | -1073741825 + table1_groupd | 1300036 | t | xxxxx | -1073741824 | -536870913 + table1_groupd | 1300036 | t | xxxxx | -1073741824 | -536870913 + table1_groupd | 1300037 | t | xxxxx | -536870912 | -1 + table1_groupd | 1300037 | t | xxxxx | -536870912 | -1 + table1_groupd | 1300038 | t | xxxxx | 0 | 536870911 + table1_groupd | 1300038 | t | xxxxx | 0 | 536870911 + table1_groupd | 1300039 | t | xxxxx | 536870912 | 1073741823 + table1_groupd | 1300039 | t | xxxxx | 536870912 | 1073741823 + 
table1_groupd | 1300040 | t | xxxxx | 1073741824 | 1610612735 + table1_groupd | 1300040 | t | xxxxx | 1073741824 | 1610612735 + table1_groupd | 1300041 | t | xxxxx | 1610612736 | 2147483647 + table1_groupd | 1300041 | t | xxxxx | 1610612736 | 2147483647 + table2_groupd | 1300042 | t | xxxxx | -2147483648 | -1610612737 + table2_groupd | 1300042 | t | xxxxx | -2147483648 | -1610612737 + table2_groupd | 1300043 | t | xxxxx | -1610612736 | -1073741825 + table2_groupd | 1300043 | t | xxxxx | -1610612736 | -1073741825 + table2_groupd | 1300044 | t | xxxxx | -1073741824 | -536870913 + table2_groupd | 1300044 | t | xxxxx | -1073741824 | -536870913 + table2_groupd | 1300045 | t | xxxxx | -536870912 | -1 + table2_groupd | 1300045 | t | xxxxx | -536870912 | -1 + table2_groupd | 1300046 | t | xxxxx | 0 | 536870911 + table2_groupd | 1300046 | t | xxxxx | 0 | 536870911 + table2_groupd | 1300047 | t | xxxxx | 536870912 | 1073741823 + table2_groupd | 1300047 | t | xxxxx | 536870912 | 1073741823 + table2_groupd | 1300048 | t | xxxxx | 1073741824 | 1610612735 + table2_groupd | 1300048 | t | xxxxx | 1073741824 | 1610612735 + table2_groupd | 1300049 | t | xxxxx | 1610612736 | 2147483647 + table2_groupd | 1300049 | t | xxxxx | 1610612736 | 2147483647 + table3_groupd | 1300050 | f | xxxxx | -2147483648 | -1610612737 + table3_groupd | 1300050 | f | xxxxx | -2147483648 | -1610612737 + table3_groupd | 1300051 | f | xxxxx | -1610612736 | -1073741825 + table3_groupd | 1300051 | f | xxxxx | -1610612736 | -1073741825 + table3_groupd | 1300052 | f | xxxxx | -1073741824 | -536870913 + table3_groupd | 1300052 | f | xxxxx | -1073741824 | -536870913 + table3_groupd | 1300053 | f | xxxxx | -536870912 | -1 + table3_groupd | 1300053 | f | xxxxx | -536870912 | -1 + table3_groupd | 1300054 | f | xxxxx | 0 | 536870911 + table3_groupd | 1300054 | f | xxxxx | 0 | 536870911 + table3_groupd | 1300055 | f | xxxxx | 536870912 | 1073741823 + table3_groupd | 1300055 | f | xxxxx | 536870912 | 1073741823 + table3_groupd | 1300056 | f | xxxxx | 1073741824 | 1610612735 + table3_groupd | 1300056 | f | xxxxx | 1073741824 | 1610612735 + table3_groupd | 1300057 | f | xxxxx | 1610612736 | 2147483647 + table3_groupd | 1300057 | f | xxxxx | 1610612736 | 2147483647 + table1_groupe | 1300058 | t | xxxxx | -2147483648 | -1 + table1_groupe | 1300058 | t | xxxxx | -2147483648 | -1 + table1_groupe | 1300059 | t | xxxxx | 0 | 2147483647 + table1_groupe | 1300059 | t | xxxxx | 0 | 2147483647 + table2_groupe | 1300060 | t | xxxxx | -2147483648 | -1 + table2_groupe | 1300060 | t | xxxxx | -2147483648 | -1 + table2_groupe | 1300061 | t | xxxxx | 0 | 2147483647 + table2_groupe | 1300061 | t | xxxxx | 0 | 2147483647 + table3_groupe | 1300062 | t | xxxxx | -2147483648 | -1 + table3_groupe | 1300062 | t | xxxxx | -2147483648 | -1 + table3_groupe | 1300063 | t | xxxxx | 0 | 2147483647 + table3_groupe | 1300063 | t | xxxxx | 0 | 2147483647 + schema_colocation.table4_groupe | 1300064 | t | xxxxx | -2147483648 | -1 + schema_colocation.table4_groupe | 1300064 | t | xxxxx | -2147483648 | -1 + schema_colocation.table4_groupe | 1300065 | t | xxxxx | 0 | 2147483647 + schema_colocation.table4_groupe | 1300065 | t | xxxxx | 0 | 2147483647 + table1_group_none_1 | 1300066 | t | xxxxx | -2147483648 | -1 + table1_group_none_1 | 1300066 | t | xxxxx | -2147483648 | -1 + table1_group_none_1 | 1300067 | t | xxxxx | 0 | 2147483647 + table1_group_none_1 | 1300067 | t | xxxxx | 0 | 2147483647 + table2_group_none_1 | 1300068 | t | xxxxx | -2147483648 | -1 + table2_group_none_1 | 
1300068 | t | xxxxx | -2147483648 | -1 + table2_group_none_1 | 1300069 | t | xxxxx | 0 | 2147483647 + table2_group_none_1 | 1300069 | t | xxxxx | 0 | 2147483647 + table1_group_none_2 | 1300070 | t | xxxxx | -2147483648 | -1 + table1_group_none_2 | 1300070 | t | xxxxx | -2147483648 | -1 + table1_group_none_2 | 1300071 | t | xxxxx | 0 | 2147483647 + table1_group_none_2 | 1300071 | t | xxxxx | 0 | 2147483647 + table4_groupe | 1300072 | t | xxxxx | -2147483648 | -1 + table4_groupe | 1300072 | t | xxxxx | -2147483648 | -1 + table4_groupe | 1300073 | t | xxxxx | 0 | 2147483647 + table4_groupe | 1300073 | t | xxxxx | 0 | 2147483647 + table1_group_none_3 | 1300074 | t | xxxxx | -2147483648 | -715827884 + table1_group_none_3 | 1300074 | t | xxxxx | -2147483648 | -715827884 + table1_group_none_3 | 1300075 | t | xxxxx | -715827883 | 715827881 + table1_group_none_3 | 1300075 | t | xxxxx | -715827883 | 715827881 + table1_group_none_3 | 1300076 | t | xxxxx | 715827882 | 2147483647 + table1_group_none_3 | 1300076 | t | xxxxx | 715827882 | 2147483647 + table1_group_default | 1300077 | t | xxxxx | -2147483648 | -715827884 + table1_group_default | 1300077 | t | xxxxx | -2147483648 | -715827884 + table1_group_default | 1300078 | t | xxxxx | -715827883 | 715827881 + table1_group_default | 1300078 | t | xxxxx | -715827883 | 715827881 + table1_group_default | 1300079 | t | xxxxx | 715827882 | 2147483647 + table1_group_default | 1300079 | t | xxxxx | 715827882 | 2147483647 + table1_groupf | 1300080 | t | xxxxx | | + table1_groupf | 1300080 | t | xxxxx | | + table2_groupf | 1300081 | t | xxxxx | | + table2_groupf | 1300081 | t | xxxxx | | (108 rows) -- reset colocation ids to test mark_tables_colocated diff --git a/src/test/regress/expected/multi_create_table.out b/src/test/regress/expected/multi_create_table.out index 7132d4e51..9a5742d09 100644 --- a/src/test/regress/expected/multi_create_table.out +++ b/src/test/regress/expected/multi_create_table.out @@ -634,7 +634,7 @@ SELECT * FROM master_get_table_ddl_events('unlogged_table'); master_get_table_ddl_events --------------------------------------------------------------------- CREATE UNLOGGED TABLE public.unlogged_table (key text, value text) - ALTER TABLE public.unlogged_table OWNER TO postgres + ALTER TABLE public.unlogged_table OWNER TO postgres (2 rows) \c - - - :worker_1_port @@ -985,8 +985,8 @@ COMMIT; SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc3' LIMIT 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,i) - (localhost,57638,t,i) + (localhost,xxxxx,t,i) + (localhost,xxxxx,t,i) (2 rows) BEGIN; @@ -1010,8 +1010,8 @@ COMMIT; SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc4' LIMIT 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,i) - (localhost,57638,t,i) + (localhost,xxxxx,t,i) + (localhost,xxxxx,t,i) (2 rows) SET search_path = 'public'; @@ -1035,8 +1035,8 @@ COMMIT; SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc5' LIMIT 1$$); run_command_on_workers
--------------------------------------------------------------------- - (localhost,57637,t,f) - (localhost,57638,t,f) + (localhost,xxxxx,t,f) + (localhost,xxxxx,t,f) (2 rows) BEGIN; @@ -1060,8 +1060,8 @@ COMMIT; SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc6' LIMIT 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,i) - (localhost,57638,t,i) + (localhost,xxxxx,t,i) + (localhost,xxxxx,t,i) (2 rows) BEGIN; @@ -1084,8 +1084,8 @@ COMMIT; SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='public' LIMIT 1$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,i) - (localhost,57638,t,i) + (localhost,xxxxx,t,i) + (localhost,xxxxx,t,i) (2 rows) DROP TABLE tt1; diff --git a/src/test/regress/expected/multi_create_table_constraints.out b/src/test/regress/expected/multi_create_table_constraints.out index a08a2f54b..d88dfa484 100644 --- a/src/test/regress/expected/multi_create_table_constraints.out +++ b/src/test/regress/expected/multi_create_table_constraints.out @@ -102,7 +102,7 @@ INSERT INTO uq_two_columns (partition_col, other_col) VALUES (1,1); INSERT INTO uq_two_columns (partition_col, other_col) VALUES (1,1); ERROR: duplicate key value violates unique constraint "uq_two_columns_partition_col_other_col_key_365008" DETAIL: Key (partition_col, other_col)=(1, 1) already exists. -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on localhost:xxxxx CREATE TABLE ex_on_part_col ( partition_col integer, @@ -119,7 +119,7 @@ INSERT INTO ex_on_part_col (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_part_col (partition_col, other_col) VALUES (1,2); ERROR: conflicting key value violates exclusion constraint "ex_on_part_col_partition_col_excl_365012" DETAIL: Key (partition_col)=(1) conflicts with existing key (partition_col)=(1). -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on localhost:xxxxx CREATE TABLE ex_on_two_columns ( partition_col integer, @@ -136,7 +136,7 @@ INSERT INTO ex_on_two_columns (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_two_columns (partition_col, other_col) VALUES (1,1); ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_partition_col_other_col_excl_365016" DETAIL: Key (partition_col, other_col)=(1, 1) conflicts with existing key (partition_col, other_col)=(1, 1). -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on localhost:xxxxx CREATE TABLE ex_on_two_columns_prt ( partition_col integer, @@ -155,7 +155,7 @@ INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,101); INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,101); ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_prt_partition_col_other_col_excl_365020" DETAIL: Key (partition_col, other_col)=(1, 101) conflicts with existing key (partition_col, other_col)=(1, 101).
-CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on localhost:xxxxx CREATE TABLE ex_wrong_operator ( partition_col tsrange, @@ -181,7 +181,7 @@ INSERT INTO ex_overlaps (partition_col, other_col) VALUES ('[2016-01-01 00:00:00 INSERT INTO ex_overlaps (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-15 00:00:00, 2016-02-01 00:00:00]'); ERROR: conflicting key value violates exclusion constraint "ex_overlaps_other_col_partition_col_excl_365027" DETAIL: Key (other_col, partition_col)=(["2016-01-15 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]) conflicts with existing key (other_col, partition_col)=(["2016-01-01 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]). -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on localhost:xxxxx -- now show that Citus can distribute unique and EXCLUDE constraints that -- include the partition column, for hash-partitioned tables. -- However, EXCLUDE constraints must include the partition column with @@ -225,7 +225,7 @@ INSERT INTO uq_two_columns_named (partition_col, other_col) VALUES (1,1); INSERT INTO uq_two_columns_named (partition_col, other_col) VALUES (1,1); ERROR: duplicate key value violates unique constraint "uq_two_columns_named_uniq_365036" DETAIL: Key (partition_col, other_col)=(1, 1) already exists. -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on localhost:xxxxx CREATE TABLE ex_on_part_col_named ( partition_col integer, @@ -242,7 +242,7 @@ INSERT INTO ex_on_part_col_named (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_part_col_named (partition_col, other_col) VALUES (1,2); ERROR: conflicting key value violates exclusion constraint "ex_on_part_col_named_exclude_365040" DETAIL: Key (partition_col)=(1) conflicts with existing key (partition_col)=(1). -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on localhost:xxxxx CREATE TABLE ex_on_two_columns_named ( partition_col integer, @@ -259,7 +259,7 @@ INSERT INTO ex_on_two_columns_named (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_two_columns_named (partition_col, other_col) VALUES (1,1); ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_named_exclude_365044" DETAIL: Key (partition_col, other_col)=(1, 1) conflicts with existing key (partition_col, other_col)=(1, 1). -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on localhost:xxxxx CREATE TABLE ex_multiple_excludes ( partition_col integer, @@ -278,11 +278,11 @@ INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VAL INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,1,2); ERROR: conflicting key value violates exclusion constraint "ex_multiple_excludes_excl1_365048" DETAIL: Key (partition_col, other_col)=(1, 1) conflicts with existing key (partition_col, other_col)=(1, 1). -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on localhost:xxxxx INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,2,1); ERROR: conflicting key value violates exclusion constraint "ex_multiple_excludes_excl2_365048" DETAIL: Key (partition_col, other_other_col)=(1, 1) conflicts with existing key (partition_col, other_other_col)=(1, 1).
-CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on localhost:xxxxx CREATE TABLE ex_wrong_operator_named ( partition_col tsrange, @@ -308,7 +308,7 @@ INSERT INTO ex_overlaps_named (partition_col, other_col) VALUES ('[2016-01-01 00 INSERT INTO ex_overlaps_named (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-15 00:00:00, 2016-02-01 00:00:00]'); ERROR: conflicting key value violates exclusion constraint "ex_overlaps_operator_named_exclude_365055" DETAIL: Key (other_col, partition_col)=(["2016-01-15 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]) conflicts with existing key (other_col, partition_col)=(["2016-01-01 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]). -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on localhost:xxxxx -- now show that Citus allows unique constraints on range-partitioned tables. CREATE TABLE uq_range_tables ( diff --git a/src/test/regress/expected/multi_deparse_function.out b/src/test/regress/expected/multi_deparse_function.out index e1448d5f1..526f1e8f5 100644 --- a/src/test/regress/expected/multi_deparse_function.out +++ b/src/test/regress/expected/multi_deparse_function.out @@ -74,8 +74,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") (2 rows) -- RETURNS NULL ON NULL INPUT and STRICT are synonyms and can be used interchangeably @@ -87,8 +87,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -98,8 +98,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -109,8 +109,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -120,8 +120,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") +
(localhost,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -131,8 +131,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -142,8 +142,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -153,8 +153,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") (2 rows) -- EXTERNAL keyword is ignored by Postgres Parser. It is allowed only for SQL conformance SELECT deparse_and_run_on_workers($cmd$ @@ -166,8 +166,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -177,8 +177,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -188,8 +188,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -199,8 +199,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") + (localhost,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -210,8 +210,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers
--------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -221,8 +221,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -232,8 +232,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) -- The COST arguments should always be numeric @@ -244,8 +244,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -255,8 +255,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -266,8 +266,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -277,8 +277,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -288,8 +288,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -299,8 +299,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION 
function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -310,8 +310,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -321,8 +321,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -332,8 +332,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -343,8 +343,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) -- This raises an error about only accepting one item, @@ -356,8 +356,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,f,"ERROR: SET citus.setting;' takes only one argument") - (localhost,57638,f,"ERROR: SET citus.setting;' takes only one argument") + (,xxxxx,f,"ERROR: SET citus.setting;' takes only one argument") + (,xxxxx,f,"ERROR: SET citus.setting;' takes only one argument") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -367,8 +367,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -378,8 +378,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers 
--------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) -- Rename the function in the workers @@ -390,8 +390,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) -- Rename the function inb the coordinator as well. @@ -405,8 +405,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.summation(integ CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) -- Rename the function back to the original name in the coordinator @@ -417,8 +417,8 @@ HINT: Connect to worker nodes directly to manually create all necessary users a SELECT run_command_on_workers('CREATE ROLE function_role'); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") + (,xxxxx,t,"CREATE ROLE") + (,xxxxx,t,"CREATE ROLE") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -428,8 +428,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -439,8 +439,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,f,"ERROR: role ""missing_role"" does not exist") - (localhost,57638,f,"ERROR: role ""missing_role"" does not exist") + (,xxxxx,f,"ERROR: role ""missing_role"" does not exist") + (,xxxxx,f,"ERROR: role ""missing_role"" does not exist") (2 rows) -- SET the schema in workers as well as the coordinator so that it remains in the same schema @@ -451,8 +451,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) ALTER FUNCTION add SET SCHEMA public; @@ -464,8 +464,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION public.add(integer, integer) S CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + 
(,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) ALTER FUNCTION public.add SET SCHEMA function_tests; @@ -476,8 +476,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) -- make sure "any" type is correctly deparsed @@ -488,8 +488,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION pg_catalog.get_shard_id_for_di CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) -- Do not run valid drop queries in the workers @@ -509,8 +509,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, in CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) -- Check that an invalid function name is still parsed correctly @@ -522,8 +522,8 @@ INFO: Propagating deparsed query: DROP FUNCTION missing_function(pg_catalog.int CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,f,"ERROR: function missing_function(integer, text) does not exist") - (localhost,57638,f,"ERROR: function missing_function(integer, text) does not exist") + (,xxxxx,f,"ERROR: function missing_function(integer, text) does not exist") + (,xxxxx,f,"ERROR: function missing_function(integer, text) does not exist") (2 rows) -- Check that an invalid function name is still parsed correctly @@ -535,8 +535,8 @@ INFO: Propagating deparsed query: DROP FUNCTION IF EXISTS missing_function(pg_c CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"DROP FUNCTION") - (localhost,57638,t,"DROP FUNCTION") + (,xxxxx,t,"DROP FUNCTION") + (,xxxxx,t,"DROP FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -546,8 +546,8 @@ INFO: Propagating deparsed query: DROP FUNCTION IF EXISTS missing_schema.missin CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"DROP FUNCTION") - (localhost,57638,t,"DROP FUNCTION") + (,xxxxx,t,"DROP FUNCTION") + (,xxxxx,t,"DROP FUNCTION") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -557,8 +557,8 @@ INFO: Propagating deparsed query: DROP FUNCTION IF EXISTS missing_func_without_ CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"DROP FUNCTION") - (localhost,57638,t,"DROP FUNCTION") + (,xxxxx,t,"DROP FUNCTION") + (,xxxxx,t,"DROP FUNCTION") (2 rows) -- create 
schema with weird names @@ -570,8 +570,8 @@ SELECT run_command_on_workers($$ $$); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"CREATE SCHEMA") - (localhost,57638,t,"CREATE SCHEMA") + (,xxxxx,t,"CREATE SCHEMA") + (,xxxxx,t,"CREATE SCHEMA") (2 rows) -- create table with weird names @@ -600,8 +600,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!? CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) -- drop 2 functions at the same time @@ -612,8 +612,8 @@ INFO: Propagating deparsed query: DROP FUNCTION "CiTUS.TEEN2"."TeeNFunCT10N.1!? CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"DROP FUNCTION") - (localhost,57638,t,"DROP FUNCTION") + (,xxxxx,t,"DROP FUNCTION") + (,xxxxx,t,"DROP FUNCTION") (2 rows) -- a function with a default parameter @@ -633,8 +633,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.func_default_pa CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) -- a function with IN and OUT parameters @@ -654,8 +654,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.func_out_param( CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) -- a function with INOUT parameter @@ -678,8 +678,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.square(numeric) CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) -- a function with variadic input. 
@@ -709,8 +709,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.sum_avg(numeric CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) -- a function with a custom type IN parameter @@ -731,8 +731,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.func_custom_par CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) -- a function that returns TABLE @@ -753,8 +753,8 @@ INFO: Propagating deparsed query: ALTER FUNCTION function_tests.func_returns_ta CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") + (,xxxxx,t,"ALTER FUNCTION") (2 rows) -- clear objects @@ -769,8 +769,8 @@ SELECT run_command_on_workers($$ $$); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") + (,xxxxx,t,"DROP SCHEMA") + (,xxxxx,t,"DROP SCHEMA") (2 rows) DROP ROLE function_role; diff --git a/src/test/regress/expected/multi_deparse_procedure.out b/src/test/regress/expected/multi_deparse_procedure.out index f31578f38..1ffce4818 100644 --- a/src/test/regress/expected/multi_deparse_procedure.out +++ b/src/test/regress/expected/multi_deparse_procedure.out @@ -60,8 +60,8 @@ ALTER PROCEDURE raise_info CALLED ON NULL INPUT $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,f,"ERROR: invalid attribute in procedure definition") - (localhost,57638,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -69,8 +69,8 @@ ALTER PROCEDURE raise_info RETURNS NULL ON NULL INPUT $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,f,"ERROR: invalid attribute in procedure definition") - (localhost,57638,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -78,8 +78,8 @@ ALTER PROCEDURE raise_info STRICT $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,f,"ERROR: invalid attribute in procedure definition") - (localhost,57638,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -87,8 +87,8 @@ ALTER PROCEDURE raise_info IMMUTABLE $cmd$); deparse_and_run_on_workers 
--------------------------------------------------------------------- - (localhost,57637,f,"ERROR: invalid attribute in procedure definition") - (localhost,57638,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -96,8 +96,8 @@ ALTER PROCEDURE raise_info STABLE $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,f,"ERROR: invalid attribute in procedure definition") - (localhost,57638,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -105,8 +105,8 @@ ALTER PROCEDURE raise_info VOLATILE $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,f,"ERROR: invalid attribute in procedure definition") - (localhost,57638,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -114,8 +114,8 @@ ALTER PROCEDURE raise_info LEAKPROOF $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,f,"ERROR: invalid attribute in procedure definition") - (localhost,57638,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -123,8 +123,8 @@ ALTER PROCEDURE raise_info NOT LEAKPROOF $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,f,"ERROR: invalid attribute in procedure definition") - (localhost,57638,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -132,8 +132,8 @@ ALTER PROCEDURE raise_info EXTERNAL SECURITY INVOKER $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER PROCEDURE") - (localhost,57638,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -141,8 +141,8 @@ ALTER PROCEDURE raise_info SECURITY INVOKER $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER PROCEDURE") - (localhost,57638,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -150,8 +150,8 @@ ALTER PROCEDURE raise_info EXTERNAL SECURITY DEFINER $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER PROCEDURE") - (localhost,57638,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -159,8 +159,8 @@ ALTER PROCEDURE raise_info SECURITY DEFINER $cmd$); deparse_and_run_on_workers 
--------------------------------------------------------------------- - (localhost,57637,t,"ALTER PROCEDURE") - (localhost,57638,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -168,8 +168,8 @@ ALTER PROCEDURE raise_info PARALLEL UNSAFE $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,f,"ERROR: invalid attribute in procedure definition") - (localhost,57638,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -177,8 +177,8 @@ ALTER PROCEDURE raise_info PARALLEL RESTRICTED $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,f,"ERROR: invalid attribute in procedure definition") - (localhost,57638,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -186,8 +186,8 @@ ALTER PROCEDURE raise_info PARALLEL SAFE $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,f,"ERROR: invalid attribute in procedure definition") - (localhost,57638,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") (2 rows) -- The COST/ROWS arguments should always be numeric @@ -196,8 +196,8 @@ ALTER PROCEDURE raise_info COST 1234 $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,f,"ERROR: invalid attribute in procedure definition") - (localhost,57638,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -205,8 +205,8 @@ ALTER PROCEDURE raise_info COST 1234.5 $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,f,"ERROR: invalid attribute in procedure definition") - (localhost,57638,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -214,8 +214,8 @@ ALTER PROCEDURE raise_info ROWS 10 $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,f,"ERROR: invalid attribute in procedure definition") - (localhost,57638,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -223,8 +223,8 @@ ALTER PROCEDURE raise_info ROWS 10.8 $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,f,"ERROR: invalid attribute in procedure definition") - (localhost,57638,f,"ERROR: invalid attribute in procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in 
procedure definition") + (,xxxxx,f,"ERROR: invalid attribute in procedure definition") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -232,8 +232,8 @@ ALTER PROCEDURE raise_info SECURITY INVOKER SET client_min_messages TO warning; $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER PROCEDURE") - (localhost,57638,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -241,8 +241,8 @@ ALTER PROCEDURE raise_info SET log_min_messages = ERROR $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER PROCEDURE") - (localhost,57638,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -250,8 +250,8 @@ ALTER PROCEDURE raise_info SET log_min_messages TO DEFAULT $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER PROCEDURE") - (localhost,57638,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -259,8 +259,8 @@ ALTER PROCEDURE raise_info SET log_min_messages FROM CURRENT $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER PROCEDURE") - (localhost,57638,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -268,8 +268,8 @@ ALTER PROCEDURE raise_info RESET log_min_messages $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER PROCEDURE") - (localhost,57638,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -277,8 +277,8 @@ ALTER PROCEDURE raise_info RESET ALL $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER PROCEDURE") - (localhost,57638,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") (2 rows) -- rename and rename back to keep the nodes in sync @@ -287,8 +287,8 @@ ALTER PROCEDURE raise_info RENAME TO summation; $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER PROCEDURE") - (localhost,57638,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") (2 rows) ALTER PROCEDURE raise_info RENAME TO summation; @@ -297,8 +297,8 @@ ALTER PROCEDURE summation RENAME TO raise_info; $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER PROCEDURE") - (localhost,57638,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") (2 rows) ALTER PROCEDURE summation RENAME TO raise_info; @@ -308,8 +308,8 @@ HINT: Connect to worker nodes directly to manually create all necessary users a SELECT run_command_on_workers($$CREATE ROLE procedure_role;$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") + (,xxxxx,t,"CREATE ROLE") + (,xxxxx,t,"CREATE ROLE") (2 rows) SELECT 
deparse_and_run_on_workers($cmd$ @@ -317,8 +317,8 @@ ALTER PROCEDURE raise_info OWNER TO procedure_role $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER PROCEDURE") - (localhost,57638,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -326,8 +326,8 @@ ALTER PROCEDURE raise_info OWNER TO missing_role $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,f,"ERROR: role ""missing_role"" does not exist") - (localhost,57638,f,"ERROR: role ""missing_role"" does not exist") + (,xxxxx,f,"ERROR: role ""missing_role"" does not exist") + (,xxxxx,f,"ERROR: role ""missing_role"" does not exist") (2 rows) -- move schema and back to keep the nodes in sync @@ -336,8 +336,8 @@ ALTER PROCEDURE raise_info SET SCHEMA public; $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER PROCEDURE") - (localhost,57638,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") (2 rows) ALTER PROCEDURE raise_info SET SCHEMA public; @@ -346,8 +346,8 @@ ALTER PROCEDURE public.raise_info SET SCHEMA procedure_tests; $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER PROCEDURE") - (localhost,57638,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") (2 rows) ALTER PROCEDURE public.raise_info SET SCHEMA procedure_tests; @@ -356,8 +356,8 @@ ALTER PROCEDURE raise_info DEPENDS ON EXTENSION citus $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"ALTER PROCEDURE") - (localhost,57638,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") + (,xxxxx,t,"ALTER PROCEDURE") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -365,8 +365,8 @@ DROP PROCEDURE raise_info(text); $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"DROP PROCEDURE") - (localhost,57638,t,"DROP PROCEDURE") + (,xxxxx,t,"DROP PROCEDURE") + (,xxxxx,t,"DROP PROCEDURE") (2 rows) -- Check that an invalid PROCEDURE name is still parsed correctly @@ -375,8 +375,8 @@ DROP PROCEDURE IF EXISTS missing_PROCEDURE(int, text); $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"DROP PROCEDURE") - (localhost,57638,t,"DROP PROCEDURE") + (,xxxxx,t,"DROP PROCEDURE") + (,xxxxx,t,"DROP PROCEDURE") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -384,8 +384,8 @@ DROP PROCEDURE IF EXISTS missing_schema.missing_PROCEDURE(int,float); $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"DROP PROCEDURE") - (localhost,57638,t,"DROP PROCEDURE") + (,xxxxx,t,"DROP PROCEDURE") + (,xxxxx,t,"DROP PROCEDURE") (2 rows) SELECT deparse_and_run_on_workers($cmd$ @@ -393,8 +393,8 @@ DROP PROCEDURE IF EXISTS missing_schema.missing_PROCEDURE(int,float) CASCADE; $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"DROP PROCEDURE") - (localhost,57638,t,"DROP PROCEDURE") + (,xxxxx,t,"DROP PROCEDURE") + (,xxxxx,t,"DROP PROCEDURE") (2 rows) -- clear objects @@ -404,7 +404,7 @@ DROP ROLE 
procedure_role; SELECT run_command_on_workers($$DROP ROLE procedure_role;$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"DROP ROLE") - (localhost,57638,t,"DROP ROLE") + (,xxxxx,t,"DROP ROLE") + (,xxxxx,t,"DROP ROLE") (2 rows) diff --git a/src/test/regress/expected/multi_distribution_metadata.out b/src/test/regress/expected/multi_distribution_metadata.out index 584af69d9..354ebe521 100644 --- a/src/test/regress/expected/multi_distribution_metadata.out +++ b/src/test/regress/expected/multi_distribution_metadata.out @@ -109,21 +109,21 @@ ERROR: could not find valid entry for shard xxxxx SELECT load_shard_placement_array(540001, false); load_shard_placement_array --------------------------------------------------------------------- - {localhost:xxxxx,localhost:xxxxx} + {:xxxxx,:xxxxx} (1 row) -- only one of which is active SELECT load_shard_placement_array(540001, true); load_shard_placement_array --------------------------------------------------------------------- - {localhost:xxxxx} + {:xxxxx} (1 row) -- should see error for non-existent shard SELECT load_shard_placement_array(540001, false); load_shard_placement_array --------------------------------------------------------------------- - {localhost:xxxxx,localhost:xxxxx} + {:xxxxx,:xxxxx} (1 row) -- should see column id of 'name' diff --git a/src/test/regress/expected/multi_drop_extension.out b/src/test/regress/expected/multi_drop_extension.out index 6419ebd1a..14e3989d5 100644 --- a/src/test/regress/expected/multi_drop_extension.out +++ b/src/test/regress/expected/multi_drop_extension.out @@ -19,13 +19,13 @@ DROP EXTENSION citus CASCADE; RESET client_min_messages; CREATE EXTENSION citus; -- re-add the nodes to the cluster -SELECT 1 FROM master_add_node('localhost', :worker_1_port); +SELECT 1 FROM master_add_node('', :worker_1_port); ?column? --------------------------------------------------------------------- 1 (1 row) -SELECT 1 FROM master_add_node('localhost', :worker_2_port); +SELECT 1 FROM master_add_node('', :worker_2_port); ?column? 
--------------------------------------------------------------------- 1 diff --git a/src/test/regress/expected/multi_explain.out b/src/test/regress/expected/multi_explain.out index ad633c38f..958e5f529 100644 --- a/src/test/regress/expected/multi_explain.out +++ b/src/test/regress/expected/multi_explain.out @@ -42,7 +42,7 @@ Sort Task Count: 2 Tasks Shown: One of 2 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_quantity -> Seq Scan on lineitem_290000 lineitem @@ -61,7 +61,7 @@ Sort Task Count: 2 Tasks Shown: One of 2 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_quantity -> Seq Scan on lineitem_290000 lineitem @@ -96,7 +96,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON) "Tasks Shown": "One of 2", "Tasks": [ { - "Node": "host=localhost port=xxxxx dbname=regression", + "Node": "host= port=xxxxx dbname=", "Remote Plan": [ [ { @@ -171,7 +171,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML) One of 2 - host=localhost port=xxxxx dbname=regression + host= port=xxxxx dbname= @@ -240,7 +240,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) Task Count: 2 Tasks Shown: "One of 2" Tasks: - - Node: "host=localhost port=xxxxx dbname=regression" + - Node: "host= port=xxxxx dbname=" Remote Plan: - Plan: Node Type: "Aggregate" @@ -268,7 +268,7 @@ Sort Task Count: 2 Tasks Shown: One of 2 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_quantity -> Seq Scan on lineitem_290000 lineitem @@ -285,7 +285,7 @@ Sort (actual rows=50 loops=1) Task Count: 2 Tasks Shown: One of 2 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate (actual rows=50 loops=1) Group Key: l_quantity -> Seq Scan on lineitem_290000 lineitem (actual rows=6000 loops=1) @@ -299,7 +299,7 @@ Aggregate Task Count: 2 Tasks Shown: One of 2 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Aggregate Output: sum(l_quantity), sum(l_quantity), count(l_quantity) -> Seq Scan on public.lineitem_290000 lineitem @@ -316,7 +316,7 @@ Limit Task Count: 2 Tasks Shown: One of 2 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Limit -> Sort Sort Key: lineitem.l_quantity @@ -333,7 +333,7 @@ Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Insert on lineitem_290000 citus_table_alias -> Values Scan on "*VALUES*" -- Test update @@ -345,7 +345,7 @@ Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Update on lineitem_290000 lineitem -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem Index Cond: (l_orderkey = 1) @@ -360,7 +360,7 @@ Custom Scan (Citus Adaptive) (actual rows=0 loops=1) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Update on lineitem_290000 lineitem (actual rows=0 loops=1) -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (actual rows=0 loops=1) Index Cond: (l_orderkey = 1) @@ -375,7 +375,7 @@ Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Delete on lineitem_290000 lineitem -> Index 
Scan using lineitem_pkey_290000 on lineitem_290000 lineitem Index Cond: (l_orderkey = 1) @@ -402,7 +402,7 @@ Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem Index Cond: (l_orderkey = 5) SELECT true AS valid FROM explain_xml($$ @@ -419,7 +419,7 @@ Custom Scan (Citus Adaptive) Task Count: 2 Tasks Shown: One of 2 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Seq Scan on lineitem_290000 lineitem -- Test having EXPLAIN (COSTS FALSE, VERBOSE TRUE) @@ -433,7 +433,7 @@ Aggregate Task Count: 2 Tasks Shown: One of 2 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Aggregate Output: sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity) -> Seq Scan on public.lineitem_290000 lineitem @@ -452,7 +452,7 @@ HashAggregate Task Count: 2 Tasks Shown: One of 2 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Output: l_quantity, l_quantity Group Key: lineitem.l_quantity @@ -489,7 +489,7 @@ Aggregate Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Aggregate -> GroupAggregate Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id) @@ -571,7 +571,7 @@ HashAggregate Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> GroupAggregate Group Key: subquery_top.hasdone -> Sort @@ -686,7 +686,7 @@ Sort Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> GroupAggregate Group Key: subquery_top.count_pay -> Sort @@ -785,7 +785,7 @@ Limit Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Limit -> Sort Sort Key: (max(users.lastseen)) DESC @@ -814,7 +814,7 @@ Aggregate Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Aggregate -> Seq Scan on lineitem_290001 lineitem Filter: (l_orderkey > 9030) @@ -832,19 +832,19 @@ Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Update on lineitem_hash_part_360041 lineitem_hash_part -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Update on lineitem_hash_part_360042 lineitem_hash_part -> Seq Scan on lineitem_hash_part_360042 lineitem_hash_part -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Update on lineitem_hash_part_360043 lineitem_hash_part -> Seq Scan on lineitem_hash_part_360043 lineitem_hash_part -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Update on lineitem_hash_part_360044 lineitem_hash_part -> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part EXPLAIN (COSTS FALSE) @@ -855,12 +855,12 @@ Custom Scan (Citus Adaptive) Task Count: 2 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Update on lineitem_hash_part_360041 lineitem_hash_part -> Seq Scan on 
lineitem_hash_part_360041 lineitem_hash_part Filter: ((l_orderkey = 1) OR (l_orderkey = 3)) -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Update on lineitem_hash_part_360042 lineitem_hash_part -> Seq Scan on lineitem_hash_part_360042 lineitem_hash_part Filter: ((l_orderkey = 1) OR (l_orderkey = 3)) @@ -871,19 +871,19 @@ Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Delete on lineitem_hash_part_360041 lineitem_hash_part -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Delete on lineitem_hash_part_360042 lineitem_hash_part -> Seq Scan on lineitem_hash_part_360042 lineitem_hash_part -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Delete on lineitem_hash_part_360043 lineitem_hash_part -> Seq Scan on lineitem_hash_part_360043 lineitem_hash_part -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Delete on lineitem_hash_part_360044 lineitem_hash_part -> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) @@ -899,12 +899,12 @@ Sort (actual rows=50 loops=1) Task Count: 2 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate (actual rows=50 loops=1) Group Key: l_quantity -> Seq Scan on lineitem_290000 lineitem (actual rows=6000 loops=1) -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate (actual rows=50 loops=1) Group Key: l_quantity -> Seq Scan on lineitem_290001 lineitem (actual rows=6000 loops=1) @@ -919,7 +919,7 @@ Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Update on lineitem_hash_part_360041 lineitem_hash_part -> Hash Join Hash Cond: (lineitem_hash_part.l_orderkey = orders_hash_part.o_orderkey) @@ -935,7 +935,7 @@ Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Delete on lineitem_hash_part_360041 lineitem_hash_part -> Hash Join Hash Cond: (lineitem_hash_part.l_orderkey = orders_hash_part.o_orderkey) @@ -951,7 +951,7 @@ Aggregate Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Aggregate -> Seq Scan on lineitem_290001 lineitem Filter: (l_orderkey > 9030) @@ -1128,7 +1128,7 @@ Aggregate Task Count: 2 Tasks Shown: One of 2 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Aggregate -> Seq Scan on lineitem_290000 lineitem -- ensure EXPLAIN EXECUTE doesn't crash @@ -1140,7 +1140,7 @@ Aggregate Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Aggregate -> Seq Scan on lineitem_290001 lineitem Filter: (l_orderkey > 9030) @@ -1151,7 +1151,7 @@ Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..13.60 rows=4 
width=5) Index Cond: (l_orderkey = 5) PREPARE real_time_executor_query AS @@ -1162,7 +1162,7 @@ Aggregate Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Aggregate -> Seq Scan on lineitem_290001 lineitem Filter: (l_orderkey > 9030) @@ -1174,7 +1174,7 @@ Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..13.60 rows=4 width=5) Index Cond: (l_orderkey = 5) -- test explain in a transaction with alter table to test we use right connections @@ -1195,7 +1195,7 @@ Custom Scan (Citus INSERT ... SELECT) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Limit -> Seq Scan on orders_hash_part_360045 orders_hash_part SELECT true AS valid FROM explain_json($$ @@ -1213,7 +1213,7 @@ Custom Scan (Citus INSERT ... SELECT) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Limit -> Seq Scan on orders_hash_part_360045 orders_hash_part EXPLAIN (COSTS OFF) @@ -1278,7 +1278,7 @@ Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Output: l_orderkey Group Key: lineitem_hash_part.l_orderkey @@ -1291,7 +1291,7 @@ Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Merge Join Output: intermediate_result_1.l_orderkey, intermediate_result.s Merge Cond: (intermediate_result.s = intermediate_result_1.l_orderkey) diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index 896bdedcf..a52d2fe62 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -32,7 +32,7 @@ SELECT datname, current_database(), FROM test.maintenance_worker(); datname | current_database | usename | extowner --------------------------------------------------------------------- - regression | regression | postgres | postgres + regression | regression | | (1 row) -- ensure no objects were created outside pg_catalog @@ -169,12 +169,12 @@ FROM pg_catalog.pg_database d ORDER BY 1; Name | Owner | Access privileges --------------------------------------------------------------------- - postgres | postgres | - regression | postgres | - template0 | postgres | =c/postgres + - | | postgres=CTc/postgres - template1 | postgres | =c/postgres + - | | postgres=CTc/postgres + | | + regression | | + template0 | | =c/ + + | | =CTc/ + template1 | | =c/ + + | | =CTc/ (4 rows) -- We should not distribute table in version mistmatch @@ -270,7 +270,7 @@ SELECT datname, current_database(), FROM test.maintenance_worker(); datname | current_database | usename | extowner --------------------------------------------------------------------- - another | another | postgres | postgres + another | another | | (1 row) -- Test that database with active worker can be dropped. @@ -329,7 +329,7 @@ HINT: You can manually create a database and its extensions on workers. 
\c - - - :master_port \c another CREATE EXTENSION citus; -SELECT FROM master_add_node('localhost', :worker_1_port); +SELECT FROM master_add_node('', :worker_1_port); -- (1 row) diff --git a/src/test/regress/expected/multi_follower_dml.out b/src/test/regress/expected/multi_follower_dml.out index 08e84a8b4..71a75e90c 100644 --- a/src/test/regress/expected/multi_follower_dml.out +++ b/src/test/regress/expected/multi_follower_dml.out @@ -96,7 +96,7 @@ HINT: Consider using CREATE TEMPORARY TABLE tmp AS SELECT ... and inserting fro -- we shouldn't be able to create local tables CREATE TEMP TABLE local_copy_of_the_table AS SELECT * FROM the_table; ERROR: cannot execute CREATE TABLE AS in a read-only transaction -\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'" +\c "port=9070 dbname= options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'" -- separate follower formations currently cannot do writes SET citus.writable_standby_coordinator TO on; INSERT INTO the_table (a, b, z) VALUES (1, 2, 3); diff --git a/src/test/regress/expected/multi_follower_select_statements.out b/src/test/regress/expected/multi_follower_select_statements.out index 3f4340d61..af537cde3 100644 --- a/src/test/regress/expected/multi_follower_select_statements.out +++ b/src/test/regress/expected/multi_follower_select_statements.out @@ -1,12 +1,12 @@ \c - - - :master_port -- do some setup -SELECT 1 FROM master_add_node('localhost', :worker_1_port); +SELECT 1 FROM master_add_node('', :worker_1_port); ?column? --------------------------------------------------------------------- 1 (1 row) -SELECT 1 FROM master_add_node('localhost', :worker_2_port); +SELECT 1 FROM master_add_node('', :worker_2_port); ?column? --------------------------------------------------------------------- 1 @@ -58,12 +58,12 @@ order by s_i_id; -- now, connect to the follower but tell it to use secondary nodes. There are no -- secondary nodes so this should fail. -- (this is :follower_master_port but substitution doesn't work here) -\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always'" +\c "port=9070 dbname= options='-c\ citus.use_secondary_nodes=always'" SELECT * FROM the_table; ERROR: node group does not have a secondary node -- add the secondary nodes and try again, the SELECT statement should work this time \c - - - :master_port -SELECT 1 FROM master_add_node('localhost', :follower_worker_1_port, +SELECT 1 FROM master_add_node('', :follower_worker_1_port, groupid => (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_1_port), noderole => 'secondary'); ?column? @@ -71,7 +71,7 @@ SELECT 1 FROM master_add_node('localhost', :follower_worker_1_port, 1 (1 row) -SELECT 1 FROM master_add_node('localhost', :follower_worker_2_port, +SELECT 1 FROM master_add_node('', :follower_worker_2_port, groupid => (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port), noderole => 'secondary'); ?column? 
@@ -79,7 +79,7 @@ SELECT 1 FROM master_add_node('localhost', :follower_worker_2_port, 1 (1 row) -\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always'" +\c "port=9070 dbname= options='-c\ citus.use_secondary_nodes=always'" -- now that we've added secondaries this should work SELECT * FROM the_table; a | b @@ -108,13 +108,13 @@ ORDER BY node_name, node_port; node_name | node_port --------------------------------------------------------------------- - localhost | 9071 - localhost | 9072 + | 9071 + | 9072 (2 rows) -- okay, now let's play with nodecluster. If we change the cluster of our follower node -- queries should stat failing again, since there are no worker nodes in the new cluster -\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'" +\c "port=9070 dbname= options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'" -- there are no secondary nodes in this cluster, so this should fail! SELECT * FROM the_table; ERROR: there is a shard placement in node group but there are no nodes in that group @@ -128,7 +128,7 @@ ERROR: there is a shard placement in node group but there are no nodes in that -- correctly configured, can run select queries involving them \c - - - :master_port UPDATE pg_dist_node SET nodecluster = 'second-cluster' WHERE noderole = 'secondary'; -\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'" +\c "port=9070 dbname= options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'" SELECT * FROM the_table; a | b --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_foreign_key.out b/src/test/regress/expected/multi_foreign_key.out index 2b612124e..bef381a4a 100644 --- a/src/test/regress/expected/multi_foreign_key.out +++ b/src/test/regress/expected/multi_foreign_key.out @@ -135,7 +135,7 @@ SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); INSERT INTO referencing_table VALUES(1, 1); ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_fkey_1350129" DETAIL: Key (ref_id)=(X) is not present in table "referenced_table_xxxxxxx". -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx -- test insert to referencing while there is corresponding value in referenced table INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); @@ -144,7 +144,7 @@ INSERT INTO referencing_table VALUES(1, 1); DELETE FROM referenced_table WHERE id = 1; ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_fkey_1350129" on table "referencing_table_xxxxxxx" DETAIL: Key (id)=(X) is still referenced from table "referencing_table_xxxxxxx". 
-CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx -- test delete from referenced table while there is NO corresponding value in referencing table DELETE FROM referencing_table WHERE ref_id = 1; DELETE FROM referenced_table WHERE id = 1; @@ -229,7 +229,7 @@ INSERT INTO referencing_table VALUES(1, 1); DELETE FROM referenced_table WHERE id = 1; ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_fkey_1350257" on table "referencing_table_xxxxxxx" DETAIL: Key (id)=(X) is still referenced from table "referencing_table_xxxxxxx". -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx BEGIN; DELETE FROM referenced_table WHERE id = 1; DELETE FROM referencing_table WHERE ref_id = 1; @@ -267,7 +267,7 @@ BEGIN; DELETE FROM referenced_table WHERE id = 1; ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_fkey_1350321" on table "referencing_table_xxxxxxx" DETAIL: Key (id)=(X) is still referenced from table "referencing_table_xxxxxxx". -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx DELETE FROM referencing_table WHERE ref_id = 1; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; @@ -305,7 +305,7 @@ INSERT INTO referencing_table VALUES(1, 1); UPDATE referenced_table SET test_column = 10 WHERE id = 1; ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_fkey_1350385" on table "referencing_table_xxxxxxx" DETAIL: Key (id, test_column)=(1, 1) is still referenced from table "referencing_table_xxxxxxx". -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx BEGIN; UPDATE referenced_table SET test_column = 10 WHERE id = 1; UPDATE referencing_table SET id = 10 WHERE ref_id = 1; @@ -345,7 +345,7 @@ BEGIN; UPDATE referenced_table SET test_column = 20 WHERE id = 1; ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_fkey_1350449" on table "referencing_table_xxxxxxx" DETAIL: Key (id, test_column)=(1, 1) is still referenced from table "referencing_table_xxxxxxx". -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx UPDATE referencing_table SET id = 20 WHERE ref_id = 1; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; @@ -406,7 +406,7 @@ SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); INSERT INTO referencing_table VALUES(null, 2); ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_fkey_1350600" DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx SELECT * FROM referencing_table; id | ref_id --------------------------------------------------------------------- @@ -523,7 +523,7 @@ INSERT INTO referencing_table VALUES(1, 1); ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id); ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "test_constraint_1350628" DETAIL: Key (ref_id)=(X) is not present in table "referenced_table_xxxxxxx". 
-CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx -- test foreign constraint with correct conditions DELETE FROM referencing_table WHERE ref_id = 1; ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id); @@ -532,7 +532,7 @@ ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) INSERT INTO referencing_table VALUES(1, 1); ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "test_constraint_1350628" DETAIL: Key (ref_id)=(X) is not present in table "referenced_table_xxxxxxx". -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx -- test insert to referencing while there is corresponding value in referenced table INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); @@ -541,7 +541,7 @@ INSERT INTO referencing_table VALUES(1, 1); DELETE FROM referenced_table WHERE id = 1; ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "test_constraint_1350628" on table "referencing_table_xxxxxxx" DETAIL: Key (id)=(X) is still referenced from table "referencing_table_xxxxxxx". -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx -- test delete from referenced table while there is NO corresponding value in referencing table DELETE FROM referencing_table WHERE ref_id = 1; DELETE FROM referenced_table WHERE id = 1; @@ -571,7 +571,7 @@ INSERT INTO referencing_table VALUES(1, 1); DELETE FROM referenced_table WHERE id = 1; ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "test_constraint_1350628" on table "referencing_table_xxxxxxx" DETAIL: Key (id)=(X) is still referenced from table "referencing_table_xxxxxxx". -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx BEGIN; DELETE FROM referenced_table WHERE id = 1; DELETE FROM referencing_table WHERE ref_id = 1; @@ -595,7 +595,7 @@ BEGIN; DELETE FROM referenced_table WHERE id = 1; ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "test_constraint_1350628" on table "referencing_table_xxxxxxx" DETAIL: Key (id)=(X) is still referenced from table "referencing_table_xxxxxxx". -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx DELETE FROM referencing_table WHERE ref_id = 1; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; @@ -617,7 +617,7 @@ ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id, UPDATE referenced_table SET test_column = 10 WHERE id = 1; ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "test_constraint_1350628" on table "referencing_table_xxxxxxx" DETAIL: Key (id, test_column)=(1, 1) is still referenced from table "referencing_table_xxxxxxx". 
-CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx BEGIN; UPDATE referenced_table SET test_column = 10 WHERE id = 1; UPDATE referencing_table SET id = 10 WHERE ref_id = 1; @@ -641,7 +641,7 @@ BEGIN; UPDATE referenced_table SET test_column = 20 WHERE id = 1; ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "test_constraint_1350628" on table "referencing_table_xxxxxxx" DETAIL: Key (id, test_column)=(1, 10) is still referenced from table "referencing_table_xxxxxxx". -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx UPDATE referencing_table SET id = 20 WHERE ref_id = 1; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; @@ -675,7 +675,7 @@ ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id, INSERT INTO referencing_table VALUES(null, 2); ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "test_constraint_1350631" DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx SELECT * FROM referencing_table; id | ref_id --------------------------------------------------------------------- @@ -707,7 +707,7 @@ ALTER TABLE cyclic_reference_table2 ADD CONSTRAINT cyclic_constraint2 FOREIGN KE INSERT INTO cyclic_reference_table1 VALUES(1, 1); ERROR: insert or update on table "cyclic_reference_table1_1350632" violates foreign key constraint "cyclic_constraint1_1350632" DETAIL: Key (id, table2_id)=(1, 1) is not present in table "cyclic_reference_table2_1350636". -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx -- proper insertion to table with cyclic dependency BEGIN; INSERT INTO cyclic_reference_table1 VALUES(1, 1); @@ -789,7 +789,7 @@ INSERT INTO self_referencing_table1 VALUES(1, 1, 1); INSERT INTO self_referencing_table1 VALUES(1, 2, 3); ERROR: insert or update on table "self_referencing_table1_1350640" violates foreign key constraint "self_referencing_table1_id_fkey_1350640" DETAIL: Key (id, other_column_ref)=(1, 3) is not present in table "self_referencing_table1_1350640". -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx -- verify that rows are actually inserted SELECT * FROM self_referencing_table1; id | other_column | other_column_ref @@ -814,7 +814,7 @@ INSERT INTO self_referencing_table2 VALUES(1, 1, 1); INSERT INTO self_referencing_table2 VALUES(1, 2, 3); ERROR: insert or update on table "self_referencing_table2_1350644" violates foreign key constraint "self_referencing_fk_constraint_1350644" DETAIL: Key (id, other_column_ref)=(1, 3) is not present in table "self_referencing_table2_1350644". 
-CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx -- verify that rows are actually inserted SELECT * FROM self_referencing_table2; id | other_column | other_column_ref diff --git a/src/test/regress/expected/multi_generate_ddl_commands.out b/src/test/regress/expected/multi_generate_ddl_commands.out index aeabd222b..e683e2705 100644 --- a/src/test/regress/expected/multi_generate_ddl_commands.out +++ b/src/test/regress/expected/multi_generate_ddl_commands.out @@ -12,7 +12,7 @@ SELECT master_get_table_ddl_events('simple_table'); master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE public.simple_table (first_name text, last_name text, id bigint) - ALTER TABLE public.simple_table OWNER TO postgres + ALTER TABLE public.simple_table OWNER TO (2 rows) -- ensure not-null constraints are propagated @@ -24,7 +24,7 @@ SELECT master_get_table_ddl_events('not_null_table'); master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE public.not_null_table (city text, id bigint NOT NULL) - ALTER TABLE public.not_null_table OWNER TO postgres + ALTER TABLE public.not_null_table OWNER TO (2 rows) -- even more complex constraints should be preserved... @@ -37,7 +37,7 @@ SELECT master_get_table_ddl_events('column_constraint_table'); master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE public.column_constraint_table (first_name text, last_name text, age integer, CONSTRAINT non_negative_age CHECK (age >= 0)) - ALTER TABLE public.column_constraint_table OWNER TO postgres + ALTER TABLE public.column_constraint_table OWNER TO (2 rows) -- including table constraints @@ -51,7 +51,7 @@ SELECT master_get_table_ddl_events('table_constraint_table'); master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE public.table_constraint_table (bid_item_id bigint, min_bid numeric NOT NULL, max_bid numeric NOT NULL, CONSTRAINT bids_ordered CHECK (min_bid > max_bid)) - ALTER TABLE public.table_constraint_table OWNER TO postgres + ALTER TABLE public.table_constraint_table OWNER TO (2 rows) -- default values are supported @@ -63,7 +63,7 @@ SELECT master_get_table_ddl_events('default_value_table'); master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE public.default_value_table (name text, price numeric DEFAULT 0.00) - ALTER TABLE public.default_value_table OWNER TO postgres + ALTER TABLE public.default_value_table OWNER TO (2 rows) -- of course primary keys work... 
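-- The multi_generate_ddl_commands hunks above and below all exercise
-- master_get_table_ddl_events(), which reconstructs the commands needed to
-- recreate a table on a worker: the CREATE TABLE (with inline defaults and
-- CHECK constraints), the OWNER TO statement, and separate events for
-- primary keys, unique constraints, indexes, and CLUSTER settings. A minimal
-- sketch of the call, assuming a Citus coordinator and a scratch table named
-- ddl_demo (hypothetical, not part of the test suite):
CREATE TABLE ddl_demo (
    id bigint PRIMARY KEY,          -- emitted as a separate ADD CONSTRAINT event
    price numeric DEFAULT 0.00,     -- defaults stay inline in the CREATE TABLE event
    CONSTRAINT non_negative_price CHECK (price >= 0)
);
SELECT master_get_table_ddl_events('ddl_demo');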
@@ -76,7 +76,7 @@ SELECT master_get_table_ddl_events('pkey_table'); master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE public.pkey_table (first_name text, last_name text, id bigint NOT NULL) - ALTER TABLE public.pkey_table OWNER TO postgres + ALTER TABLE public.pkey_table OWNER TO ALTER TABLE public.pkey_table ADD CONSTRAINT pkey_table_pkey PRIMARY KEY (id) (3 rows) @@ -89,7 +89,7 @@ SELECT master_get_table_ddl_events('unique_table'); master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE public.unique_table (user_id bigint NOT NULL, username text NOT NULL) - ALTER TABLE public.unique_table OWNER TO postgres + ALTER TABLE public.unique_table OWNER TO ALTER TABLE public.unique_table ADD CONSTRAINT unique_table_username_key UNIQUE (username) (3 rows) @@ -104,7 +104,7 @@ SELECT master_get_table_ddl_events('clustered_table'); master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE public.clustered_table (data json NOT NULL, received_at timestamp without time zone NOT NULL) - ALTER TABLE public.clustered_table OWNER TO postgres + ALTER TABLE public.clustered_table OWNER TO CREATE INDEX clustered_time_idx ON public.clustered_table USING btree (received_at) ALTER TABLE public.clustered_table CLUSTER ON clustered_time_idx (4 rows) @@ -127,7 +127,7 @@ SELECT master_get_table_ddl_events('fiddly_table'); --------------------------------------------------------------------- CREATE TABLE public.fiddly_table (hostname character(255) NOT NULL, os character(255) NOT NULL, ip_addr inet NOT NULL, traceroute text NOT NULL) ALTER TABLE ONLY public.fiddly_table ALTER COLUMN hostname SET STORAGE PLAIN, ALTER COLUMN os SET STORAGE MAIN, ALTER COLUMN ip_addr SET STORAGE EXTENDED, ALTER COLUMN ip_addr SET STATISTICS 500, ALTER COLUMN traceroute SET STORAGE EXTERNAL - ALTER TABLE public.fiddly_table OWNER TO postgres + ALTER TABLE public.fiddly_table OWNER TO (3 rows) -- test foreign tables using fake FDW @@ -165,7 +165,7 @@ NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined --------------------------------------------------------------------- CREATE SERVER fake_fdw_server FOREIGN DATA WRAPPER fake_fdw CREATE FOREIGN TABLE public.renamed_foreign_table (id bigint NOT NULL, rename_name character(8) DEFAULT ''::text NOT NULL) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true') - ALTER TABLE public.renamed_foreign_table OWNER TO postgres + ALTER TABLE public.renamed_foreign_table OWNER TO (3 rows) -- propagating views is not supported diff --git a/src/test/regress/expected/multi_having_pushdown.out b/src/test/regress/expected/multi_having_pushdown.out index 7c15c3976..68a4b0691 100644 --- a/src/test/regress/expected/multi_having_pushdown.out +++ b/src/test/regress/expected/multi_having_pushdown.out @@ -31,7 +31,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Limit -> Sort Sort Key: (sum((l_extendedprice * l_discount))) DESC, l_orderkey @@ -59,7 +59,7 @@ EXPLAIN (COSTS FALSE) Task Count: 2 Tasks Shown: One of 2 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_orderkey -> Seq Scan on lineitem_290000 lineitem @@ -83,7 +83,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx 
dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_shipmode -> Seq Scan on lineitem_hash_590000 lineitem_hash @@ -104,7 +104,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Limit -> Sort Sort Key: (sum((l_extendedprice * l_discount))) DESC, l_shipmode, l_orderkey @@ -130,7 +130,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Limit -> Sort Sort Key: (sum((lineitem_hash.l_extendedprice * lineitem_hash.l_discount))) DESC @@ -162,7 +162,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: lineitem_hash.l_shipmode, orders_hash.o_clerk -> Hash Join diff --git a/src/test/regress/expected/multi_index_statements.out b/src/test/regress/expected/multi_index_statements.out index 7d3515336..466d21b25 100644 --- a/src/test/regress/expected/multi_index_statements.out +++ b/src/test/regress/expected/multi_index_statements.out @@ -164,7 +164,7 @@ CREATE UNIQUE INDEX try_unique_append_index ON index_test_append(a); ERROR: creating unique indexes on append-partitioned tables is currently unsupported CREATE UNIQUE INDEX try_unique_append_index_a_b ON index_test_append(a,b); ERROR: creating unique indexes on append-partitioned tables is currently unsupported --- Verify that we error out in case of postgres errors on supported statement +-- Verify that we error out in case of errors on supported statement -- types. CREATE INDEX lineitem_orderkey_index ON lineitem (l_orderkey); ERROR: relation "lineitem_orderkey_index" already exists diff --git a/src/test/regress/expected/multi_insert_select.out b/src/test/regress/expected/multi_insert_select.out index c2697bbe6..7f6a04c4b 100644 --- a/src/test/regress/expected/multi_insert_select.out +++ b/src/test/regress/expected/multi_insert_select.out @@ -1191,7 +1191,7 @@ DEBUG: Router planner cannot handle multi-shard select queries DEBUG: performing repartitioned INSERT ... SELECT DEBUG: partitioning SELECT query by column index 0 with name 'user_id' ERROR: the partition column value cannot be NULL -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx INSERT INTO raw_events_second (user_id) SELECT user_id * 2 @@ -1240,7 +1240,7 @@ DEBUG: Router planner cannot handle multi-shard select queries DEBUG: performing repartitioned INSERT ... SELECT DEBUG: partitioning SELECT query by column index 0 with name 'user_id' ERROR: the partition column value cannot be NULL -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx INSERT INTO agg_events (value_3_agg, value_4_agg, @@ -1261,7 +1261,7 @@ DEBUG: Router planner cannot handle multi-shard select queries DEBUG: performing repartitioned INSERT ... 
SELECT DEBUG: partitioning SELECT query by column index 0 with name 'user_id' ERROR: the partition column value cannot be NULL -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx -- tables should be co-located INSERT INTO agg_events (user_id) SELECT diff --git a/src/test/regress/expected/multi_insert_select_conflict.out b/src/test/regress/expected/multi_insert_select_conflict.out index d069cf7d3..c15c958d2 100644 --- a/src/test/regress/expected/multi_insert_select_conflict.out +++ b/src/test/regress/expected/multi_insert_select_conflict.out @@ -322,7 +322,7 @@ NOTICE: truncate cascades to table "target_table" FROM source_table_1 ON CONFLICT (col_1) DO UPDATE SET col_2 = 55 RETURNING *; ERROR: insert or update on table "target_table_xxxxxxx" violates foreign key constraint "fkey_xxxxxxx" DETAIL: Key (col_1)=(X) is not present in table "test_ref_table_xxxxxxx". -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx ROLLBACK; BEGIN; DELETE FROM test_ref_table WHERE key > 10; diff --git a/src/test/regress/expected/multi_limit_clause.out b/src/test/regress/expected/multi_limit_clause.out index 6d891ddde..5a118bb6f 100644 --- a/src/test/regress/expected/multi_limit_clause.out +++ b/src/test/regress/expected/multi_limit_clause.out @@ -351,7 +351,7 @@ DEBUG: push down of limit count: 5 -- Don't push down limit when there is const expression in distinct on -- even if there is a group by on the expression --- This is due to fact that postgres removes (1+1) from distinct on +-- This is due to fact that removes (1+1) from distinct on -- clause but keeps it in group by list. SELECT DISTINCT ON (l_linenumber, 1+1, l_linenumber) l_orderkey, l_linenumber diff --git a/src/test/regress/expected/multi_master_protocol.out b/src/test/regress/expected/multi_master_protocol.out index d38a5865d..81ecccdbe 100644 --- a/src/test/regress/expected/multi_master_protocol.out +++ b/src/test/regress/expected/multi_master_protocol.out @@ -14,7 +14,7 @@ SELECT * FROM master_get_table_ddl_events('lineitem') order by 1; master_get_table_ddl_events --------------------------------------------------------------------- ALTER TABLE public.lineitem ADD CONSTRAINT lineitem_pkey PRIMARY KEY (l_orderkey, l_linenumber) - ALTER TABLE public.lineitem OWNER TO postgres + ALTER TABLE public.lineitem OWNER TO CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate) CREATE TABLE public.lineitem (l_orderkey bigint NOT NULL, l_partkey integer NOT NULL, l_suppkey integer NOT NULL, l_linenumber integer NOT NULL, l_quantity numeric(15,2) NOT NULL, l_extendedprice numeric(15,2) NOT NULL, l_discount numeric(15,2) NOT NULL, l_tax numeric(15,2) NOT NULL, l_returnflag character(1) NOT NULL, l_linestatus character(1) NOT NULL, l_shipdate date NOT NULL, l_commitdate date NOT NULL, l_receiptdate date NOT NULL, l_shipinstruct character(25) NOT NULL, l_shipmode character(10) NOT NULL, l_comment character varying(44) NOT NULL) (4 rows) @@ -28,7 +28,7 @@ SELECT * FROM master_get_new_shardid(); SELECT * FROM master_get_active_worker_nodes(); node_name | node_port --------------------------------------------------------------------- - localhost | 57638 - localhost | 57637 + | xxxxx + | xxxxx (2 rows) diff --git a/src/test/regress/expected/multi_metadata_attributes.out b/src/test/regress/expected/multi_metadata_attributes.out index 91d927c18..189f5866f 100644 --- a/src/test/regress/expected/multi_metadata_attributes.out +++ 
b/src/test/regress/expected/multi_metadata_attributes.out @@ -1,6 +1,6 @@ -- if the output of following query changes, we might need to change -- some heap_getattr() calls to heap_deform_tuple(). This errors out in --- postgres versions before 11. If this test fails check out +-- versions before 11. If this test fails check out -- https://github.com/citusdata/citus/pull/2464 for an explanation of what to -- do. Once you used the new code for the table you can add it to the NOT IN -- part of the query so new changes to it won't affect this test. diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out index 31cd05e43..910a12c51 100644 --- a/src/test/regress/expected/multi_metadata_sync.out +++ b/src/test/regress/expected/multi_metadata_sync.out @@ -27,7 +27,7 @@ SELECT * FROM pg_dist_partition WHERE partmethod='h' AND repmodel='s'; SELECT unnest(master_metadata_snapshot()) order by 1; unnest --------------------------------------------------------------------- - INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default') + INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster) VALUES (1, 1, '', xxxxx, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, '', xxxxx, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default') SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition TRUNCATE pg_dist_node CASCADE (3 rows) @@ -53,12 +53,12 @@ UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='mx_test_table'::re SELECT unnest(master_metadata_snapshot()) order by 1; unnest --------------------------------------------------------------------- - ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres + ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) - ALTER TABLE public.mx_test_table OWNER TO postgres - ALTER TABLE public.mx_test_table OWNER TO postgres + ALTER TABLE public.mx_test_table OWNER TO + ALTER TABLE public.mx_test_table OWNER TO CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL) - INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default') + INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster) VALUES (1, 1, '', xxxxx, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, '', xxxxx, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default') INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('public.mx_test_table'::regclass, 'h', column_name_to_column('public.mx_test_table','col_1'), 0, 's') INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 
100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007) INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('public.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('public.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('public.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('public.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('public.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('public.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('public.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('public.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647') @@ -73,13 +73,13 @@ CREATE INDEX mx_index ON mx_test_table(col_2); SELECT unnest(master_metadata_snapshot()) order by 1; unnest --------------------------------------------------------------------- - ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres + ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) - ALTER TABLE public.mx_test_table OWNER TO postgres - ALTER TABLE public.mx_test_table OWNER TO postgres + ALTER TABLE public.mx_test_table OWNER TO + ALTER TABLE public.mx_test_table OWNER TO CREATE INDEX mx_index ON public.mx_test_table USING btree (col_2) CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL) - INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default') + INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster) VALUES (1, 1, '', xxxxx, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, '', xxxxx, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default') INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('public.mx_test_table'::regclass, 'h', column_name_to_column('public.mx_test_table','col_1'), 0, 's') INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007) INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('public.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('public.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('public.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('public.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('public.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('public.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('public.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('public.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647') @@ -95,13 +95,13 
@@ ALTER TABLE mx_test_table SET SCHEMA mx_testing_schema; SELECT unnest(master_metadata_snapshot()) order by 1; unnest --------------------------------------------------------------------- - ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres + ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) - ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres - ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres + ALTER TABLE mx_testing_schema.mx_test_table OWNER TO + ALTER TABLE mx_testing_schema.mx_test_table OWNER TO CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL) - INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default') + INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster) VALUES (1, 1, '', xxxxx, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, '', xxxxx, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default') INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('mx_testing_schema.mx_test_table'::regclass, 'h', column_name_to_column('mx_testing_schema.mx_test_table','col_1'), 0, 's') INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007) INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('mx_testing_schema.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('mx_testing_schema.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('mx_testing_schema.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('mx_testing_schema.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('mx_testing_schema.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('mx_testing_schema.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('mx_testing_schema.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647') @@ -123,13 +123,13 @@ UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='non_mx_test_table' SELECT unnest(master_metadata_snapshot()) order by 1; unnest --------------------------------------------------------------------- - ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres + ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) - ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres - ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres + ALTER TABLE mx_testing_schema.mx_test_table OWNER TO + ALTER TABLE 
mx_testing_schema.mx_test_table OWNER TO CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL) - INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default') + INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster) VALUES (1, 1, '', xxxxx, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, '', xxxxx, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default') INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('mx_testing_schema.mx_test_table'::regclass, 'h', column_name_to_column('mx_testing_schema.mx_test_table','col_1'), 0, 's') INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007) INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('mx_testing_schema.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('mx_testing_schema.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('mx_testing_schema.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('mx_testing_schema.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('mx_testing_schema.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('mx_testing_schema.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('mx_testing_schema.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647') @@ -144,13 +144,13 @@ UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_tabl SELECT unnest(master_metadata_snapshot()) order by 1; unnest --------------------------------------------------------------------- - ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres + ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) - ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres - ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres + ALTER TABLE mx_testing_schema.mx_test_table OWNER TO + ALTER TABLE mx_testing_schema.mx_test_table OWNER TO CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL) - INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default') + INSERT INTO 
pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster) VALUES (1, 1, '', xxxxx, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, '', xxxxx, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default') INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('mx_testing_schema.mx_test_table'::regclass, 'h', column_name_to_column('mx_testing_schema.mx_test_table','col_1'), 0, 's') INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007) INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('mx_testing_schema.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('mx_testing_schema.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('mx_testing_schema.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('mx_testing_schema.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('mx_testing_schema.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('mx_testing_schema.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('mx_testing_schema.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647') @@ -170,13 +170,13 @@ SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true; -- Ensure it works when run on a secondary node SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_port \gset -SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary'); +SELECT master_add_node('', 8888, groupid => :worker_1_group, noderole => 'secondary'); master_add_node --------------------------------------------------------------------- 4 (1 row) -SELECT start_metadata_sync_to_node('localhost', 8888); +SELECT start_metadata_sync_to_node('', 8888); start_metadata_sync_to_node --------------------------------------------------------------------- @@ -188,7 +188,7 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888; t (1 row) -SELECT stop_metadata_sync_to_node('localhost', 8888); +SELECT stop_metadata_sync_to_node('', 8888); stop_metadata_sync_to_node --------------------------------------------------------------------- @@ -201,20 +201,20 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888; (1 row) -- Add a node to another cluster to make sure it's also synced -SELECT master_add_secondary_node('localhost', 8889, 'localhost', :worker_1_port, nodecluster => 'second-cluster'); +SELECT master_add_secondary_node('', 8889, '', :worker_1_port, nodecluster => 'second-cluster'); master_add_secondary_node --------------------------------------------------------------------- 5 (1 row) -- Run start_metadata_sync_to_node and check that it marked hasmetadata for that worker -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_1_port; +SELECT nodeid, hasmetadata FROM pg_dist_node WHERE 
nodename='' AND nodeport=:worker_1_port; nodeid | hasmetadata --------------------------------------------------------------------- 1 | t @@ -231,10 +231,10 @@ SELECT * FROM pg_dist_local_group; SELECT * FROM pg_dist_node ORDER BY nodeid; nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards --------------------------------------------------------------------- - 1 | 1 | localhost | 57637 | default | t | t | primary | default | f | t - 2 | 2 | localhost | 57638 | default | f | t | primary | default | f | t - 4 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t - 5 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t + 1 | 1 | | xxxxx | default | t | t | primary | default | f | t + 2 | 2 | | xxxxx | default | f | t | primary | default | f | t + 4 | 1 | | 8888 | default | f | t | secondary | default | f | t + 5 | 1 | | 8889 | default | f | t | secondary | second-cluster | f | t (4 rows) SELECT * FROM pg_dist_partition ORDER BY logicalrelid; @@ -259,14 +259,14 @@ SELECT * FROM pg_dist_shard ORDER BY shardid; SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; shardid | shardstate | shardlength | nodename | nodeport | placementid --------------------------------------------------------------------- - 1310000 | 1 | 0 | localhost | 57637 | 100000 - 1310001 | 1 | 0 | localhost | 57638 | 100001 - 1310002 | 1 | 0 | localhost | 57637 | 100002 - 1310003 | 1 | 0 | localhost | 57638 | 100003 - 1310004 | 1 | 0 | localhost | 57637 | 100004 - 1310005 | 1 | 0 | localhost | 57638 | 100005 - 1310006 | 1 | 0 | localhost | 57637 | 100006 - 1310007 | 1 | 0 | localhost | 57638 | 100007 + 1310000 | 1 | 0 | | xxxxx | 100000 + 1310001 | 1 | 0 | | xxxxx | 100001 + 1310002 | 1 | 0 | | xxxxx | 100002 + 1310003 | 1 | 0 | | xxxxx | 100003 + 1310004 | 1 | 0 | | xxxxx | 100004 + 1310005 | 1 | 0 | | xxxxx | 100005 + 1310006 | 1 | 0 | | xxxxx | 100006 + 1310007 | 1 | 0 | | xxxxx | 100007 (8 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass; @@ -326,7 +326,7 @@ SELECT create_distributed_table('mx_testing_schema_2.fk_test_2', 'col1'); (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- @@ -347,13 +347,13 @@ RESET citus.shard_replication_factor; RESET citus.replication_model; -- Check that repeated calls to start_metadata_sync_to_node has no side effects \c - - - :master_port -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- @@ -369,10 +369,10 @@ SELECT * FROM pg_dist_local_group; SELECT * FROM pg_dist_node ORDER BY nodeid; nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards --------------------------------------------------------------------- - 1 | 1 | localhost | 57637 | default | t | t | primary | default | t | t - 2 | 2 | localhost | 57638 | default | f | t | 
primary | default | f | t - 4 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t - 5 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t + 1 | 1 | | xxxxx | default | t | t | primary | default | t | t + 2 | 2 | | xxxxx | default | f | t | primary | default | f | t + 4 | 1 | | 8888 | default | f | t | secondary | default | f | t + 5 | 1 | | 8889 | default | f | t | secondary | second-cluster | f | t (4 rows) SELECT * FROM pg_dist_partition ORDER BY logicalrelid; @@ -397,14 +397,14 @@ SELECT * FROM pg_dist_shard ORDER BY shardid; SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; shardid | shardstate | shardlength | nodename | nodeport | placementid --------------------------------------------------------------------- - 1310000 | 1 | 0 | localhost | 57637 | 100000 - 1310001 | 1 | 0 | localhost | 57638 | 100001 - 1310002 | 1 | 0 | localhost | 57637 | 100002 - 1310003 | 1 | 0 | localhost | 57638 | 100003 - 1310004 | 1 | 0 | localhost | 57637 | 100004 - 1310005 | 1 | 0 | localhost | 57638 | 100005 - 1310006 | 1 | 0 | localhost | 57637 | 100006 - 1310007 | 1 | 0 | localhost | 57638 | 100007 + 1310000 | 1 | 0 | | xxxxx | 100000 + 1310001 | 1 | 0 | | xxxxx | 100001 + 1310002 | 1 | 0 | | xxxxx | 100002 + 1310003 | 1 | 0 | | xxxxx | 100003 + 1310004 | 1 | 0 | | xxxxx | 100004 + 1310005 | 1 | 0 | | xxxxx | 100005 + 1310006 | 1 | 0 | | xxxxx | 100006 + 1310007 | 1 | 0 | | xxxxx | 100007 (8 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass; @@ -438,7 +438,7 @@ SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table': -- Make sure that start_metadata_sync_to_node cannot be called inside a transaction \c - - - :master_port BEGIN; -SELECT start_metadata_sync_to_node('localhost', :worker_2_port); +SELECT start_metadata_sync_to_node('', :worker_2_port); ERROR: start_metadata_sync_to_node cannot run inside a transaction block ROLLBACK; SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; @@ -451,7 +451,7 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; \c - - - :master_port SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- @@ -504,7 +504,7 @@ SELECT * FROM mx_query_test ORDER BY a; DROP TABLE mx_query_test; -- Check that stop_metadata_sync_to_node function sets hasmetadata of the node to false \c - - - :master_port -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- @@ -516,7 +516,7 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_1_port; t (1 row) -SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); +SELECT stop_metadata_sync_to_node('', :worker_1_port); stop_metadata_sync_to_node --------------------------------------------------------------------- @@ -529,7 +529,7 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_1_port; (1 row) -- Test DDL propagation in MX tables -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('', :worker_1_port); start_metadata_sync_to_node 
--------------------------------------------------------------------- @@ -628,16 +628,16 @@ ORDER BY logicalrelid, shardid; logicalrelid | shardid | nodename | nodeport --------------------------------------------------------------------- - mx_test_schema_1.mx_table_1 | 1310020 | localhost | 57637 - mx_test_schema_1.mx_table_1 | 1310021 | localhost | 57638 - mx_test_schema_1.mx_table_1 | 1310022 | localhost | 57637 - mx_test_schema_1.mx_table_1 | 1310023 | localhost | 57638 - mx_test_schema_1.mx_table_1 | 1310024 | localhost | 57637 - mx_test_schema_2.mx_table_2 | 1310025 | localhost | 57637 - mx_test_schema_2.mx_table_2 | 1310026 | localhost | 57638 - mx_test_schema_2.mx_table_2 | 1310027 | localhost | 57637 - mx_test_schema_2.mx_table_2 | 1310028 | localhost | 57638 - mx_test_schema_2.mx_table_2 | 1310029 | localhost | 57637 + mx_test_schema_1.mx_table_1 | 1310020 | | xxxxx + mx_test_schema_1.mx_table_1 | 1310021 | | xxxxx + mx_test_schema_1.mx_table_1 | 1310022 | | xxxxx + mx_test_schema_1.mx_table_1 | 1310023 | | xxxxx + mx_test_schema_1.mx_table_1 | 1310024 | | xxxxx + mx_test_schema_2.mx_table_2 | 1310025 | | xxxxx + mx_test_schema_2.mx_table_2 | 1310026 | | xxxxx + mx_test_schema_2.mx_table_2 | 1310027 | | xxxxx + mx_test_schema_2.mx_table_2 | 1310028 | | xxxxx + mx_test_schema_2.mx_table_2 | 1310029 | | xxxxx (10 rows) -- Check that metadata of MX tables exist on the metadata worker @@ -647,8 +647,8 @@ ORDER BY List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- - mx_test_schema_1 | mx_table_1 | table | postgres - mx_test_schema_2 | mx_table_2 | table | postgres + mx_test_schema_1 | mx_table_1 | table | + mx_test_schema_2 | mx_table_2 | table | (2 rows) -- Check that table metadata are created @@ -677,16 +677,16 @@ ORDER BY logicalrelid, shardid; logicalrelid | shardid | nodename | nodeport --------------------------------------------------------------------- - mx_test_schema_1.mx_table_1 | 1310020 | localhost | 57637 - mx_test_schema_1.mx_table_1 | 1310021 | localhost | 57638 - mx_test_schema_1.mx_table_1 | 1310022 | localhost | 57637 - mx_test_schema_1.mx_table_1 | 1310023 | localhost | 57638 - mx_test_schema_1.mx_table_1 | 1310024 | localhost | 57637 - mx_test_schema_2.mx_table_2 | 1310025 | localhost | 57637 - mx_test_schema_2.mx_table_2 | 1310026 | localhost | 57638 - mx_test_schema_2.mx_table_2 | 1310027 | localhost | 57637 - mx_test_schema_2.mx_table_2 | 1310028 | localhost | 57638 - mx_test_schema_2.mx_table_2 | 1310029 | localhost | 57637 + mx_test_schema_1.mx_table_1 | 1310020 | | xxxxx + mx_test_schema_1.mx_table_1 | 1310021 | | xxxxx + mx_test_schema_1.mx_table_1 | 1310022 | | xxxxx + mx_test_schema_1.mx_table_1 | 1310023 | | xxxxx + mx_test_schema_1.mx_table_1 | 1310024 | | xxxxx + mx_test_schema_2.mx_table_2 | 1310025 | | xxxxx + mx_test_schema_2.mx_table_2 | 1310026 | | xxxxx + mx_test_schema_2.mx_table_2 | 1310027 | | xxxxx + mx_test_schema_2.mx_table_2 | 1310028 | | xxxxx + mx_test_schema_2.mx_table_2 | 1310029 | | xxxxx (10 rows) -- Check that metadata of MX tables don't exist on the non-metadata worker @@ -918,13 +918,13 @@ DROP TABLE mx_temp_drop_test; SET citus.shard_count TO 3; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; -SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); +SELECT stop_metadata_sync_to_node('', :worker_1_port); stop_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -SELECT 
stop_metadata_sync_to_node('localhost', :worker_2_port); +SELECT stop_metadata_sync_to_node('', :worker_2_port); stop_metadata_sync_to_node --------------------------------------------------------------------- @@ -938,7 +938,7 @@ SELECT create_distributed_table('mx_table_with_small_sequence', 'a'); (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- @@ -979,14 +979,14 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_ List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- - public | mx_table_with_sequence_b_seq | sequence | postgres + public | mx_table_with_sequence_b_seq | sequence | (1 row) \ds mx_table_with_sequence_c_seq List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- - public | mx_table_with_sequence_c_seq | sequence | postgres + public | mx_table_with_sequence_c_seq | sequence | (1 row) -- Check that the sequences created on the metadata worker as well @@ -1003,14 +1003,14 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_ List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- - public | mx_table_with_sequence_b_seq | sequence | postgres + public | mx_table_with_sequence_b_seq | sequence | (1 row) \ds mx_table_with_sequence_c_seq List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- - public | mx_table_with_sequence_c_seq | sequence | postgres + public | mx_table_with_sequence_c_seq | sequence | (1 row) -- Check that the sequences on the worker have their own space @@ -1028,7 +1028,7 @@ SELECT nextval('mx_table_with_sequence_c_seq'); -- Check that adding a new metadata node sets the sequence space correctly \c - - - :master_port -SELECT start_metadata_sync_to_node('localhost', :worker_2_port); +SELECT start_metadata_sync_to_node('', :worker_2_port); start_metadata_sync_to_node --------------------------------------------------------------------- @@ -1053,14 +1053,14 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_ List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- - public | mx_table_with_sequence_b_seq | sequence | postgres + public | mx_table_with_sequence_b_seq | sequence | (1 row) \ds mx_table_with_sequence_c_seq List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- - public | mx_table_with_sequence_c_seq | sequence | postgres + public | mx_table_with_sequence_c_seq | sequence | (1 row) SELECT nextval('mx_table_with_sequence_b_seq'); @@ -1143,7 +1143,7 @@ CREATE TABLE pg_dist_partition_temp AS SELECT * FROM pg_dist_partition; DELETE FROM pg_dist_placement; DELETE FROM pg_dist_partition; SELECT groupid AS old_worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('', :worker_2_port); master_remove_node --------------------------------------------------------------------- @@ -1172,14 +1172,14 @@ SELECT create_distributed_table('mx_table', 'a'); (1 row) -\c - postgres - :master_port -SELECT master_add_node('localhost', 
:worker_2_port); +\c - - :master_port +SELECT master_add_node('', :worker_2_port); master_add_node --------------------------------------------------------------------- 6 (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_2_port); +SELECT start_metadata_sync_to_node('', :worker_2_port); start_metadata_sync_to_node --------------------------------------------------------------------- @@ -1222,7 +1222,7 @@ SELECT * FROM mx_table ORDER BY a; \c - mx_user - :master_port DROP TABLE mx_table; -- put the metadata back into a consistent state -\c - postgres - :master_port +\c - - :master_port INSERT INTO pg_dist_placement SELECT * FROM pg_dist_placement_temp; INSERT INTO pg_dist_partition SELECT * FROM pg_dist_partition_temp; DROP TABLE pg_dist_placement_temp; @@ -1239,7 +1239,7 @@ UPDATE pg_dist_placement SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port) WHERE groupid = :old_worker_2_group; \c - - - :master_port -SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); +SELECT stop_metadata_sync_to_node('', :worker_2_port); stop_metadata_sync_to_node --------------------------------------------------------------------- @@ -1271,7 +1271,7 @@ SELECT count(*) FROM pg_dist_colocation WHERE distributioncolumntype = 0; List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- - public | mx_ref | table | postgres + public | mx_ref | table | (1 row) \c - - - :worker_1_port @@ -1279,7 +1279,7 @@ SELECT count(*) FROM pg_dist_colocation WHERE distributioncolumntype = 0; List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- - public | mx_ref | table | postgres + public | mx_ref | table | (1 row) SELECT @@ -1294,8 +1294,8 @@ ORDER BY nodeport; logicalrelid | partmethod | repmodel | shardid | placementid | nodename | nodeport --------------------------------------------------------------------- - mx_ref | n | t | 1310072 | 100072 | localhost | 57637 - mx_ref | n | t | 1310072 | 100073 | localhost | 57638 + mx_ref | n | t | 1310072 | 100072 | | xxxxx + mx_ref | n | t | 1310072 | 100073 | | xxxxx (2 rows) SELECT shardid AS ref_table_shardid FROM pg_dist_shard WHERE logicalrelid='mx_ref'::regclass \gset @@ -1362,7 +1362,7 @@ CREATE TABLE tmp_placement AS SELECT * FROM pg_dist_placement WHERE groupid = :old_worker_2_group; DELETE FROM pg_dist_placement WHERE groupid = :old_worker_2_group; -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('', :worker_2_port); master_remove_node --------------------------------------------------------------------- @@ -1380,7 +1380,7 @@ FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass; shardid | nodename | nodeport --------------------------------------------------------------------- - 1310073 | localhost | 57637 + 1310073 | | xxxxx (1 row) \c - - - :worker_1_port @@ -1389,12 +1389,12 @@ FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass; shardid | nodename | nodeport --------------------------------------------------------------------- - 1310073 | localhost | 57637 + 1310073 | | xxxxx (1 row) \c - - - :master_port -SELECT master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "mx_ref" to the node localhost:xxxxx +SELECT master_add_node('', :worker_2_port); +NOTICE: Replicating reference table "mx_ref" to the node :xxxxx master_add_node 
--------------------------------------------------------------------- 7 @@ -1406,8 +1406,8 @@ WHERE logicalrelid='mx_ref'::regclass ORDER BY shardid, nodeport; shardid | nodename | nodeport --------------------------------------------------------------------- - 1310073 | localhost | 57637 - 1310073 | localhost | 57638 + 1310073 | | xxxxx + 1310073 | | xxxxx (2 rows) \c - - - :worker_1_port @@ -1417,8 +1417,8 @@ WHERE logicalrelid='mx_ref'::regclass ORDER BY shardid, nodeport; shardid | nodename | nodeport --------------------------------------------------------------------- - 1310073 | localhost | 57637 - 1310073 | localhost | 57638 + 1310073 | | xxxxx + 1310073 | | xxxxx (2 rows) -- Get the metadata back into a consistent state @@ -1440,7 +1440,7 @@ select shouldhaveshards from pg_dist_node where nodeport = 8888; t (1 row) -\c - postgres - :worker_1_port +\c - - :worker_1_port select shouldhaveshards from pg_dist_node where nodeport = 8888; shouldhaveshards --------------------------------------------------------------------- @@ -1449,7 +1449,7 @@ select shouldhaveshards from pg_dist_node where nodeport = 8888; -- Check that setting shouldhaveshards to false is correctly transferred to other mx nodes \c - - - :master_port -SELECT * from master_set_node_property('localhost', 8888, 'shouldhaveshards', false); +SELECT * from master_set_node_property('', 8888, 'shouldhaveshards', false); master_set_node_property --------------------------------------------------------------------- @@ -1461,7 +1461,7 @@ select shouldhaveshards from pg_dist_node where nodeport = 8888; f (1 row) -\c - postgres - :worker_1_port +\c - - :worker_1_port select shouldhaveshards from pg_dist_node where nodeport = 8888; shouldhaveshards --------------------------------------------------------------------- @@ -1469,8 +1469,8 @@ select shouldhaveshards from pg_dist_node where nodeport = 8888; (1 row) -- Check that setting shouldhaveshards to true is correctly transferred to other mx nodes -\c - postgres - :master_port -SELECT * from master_set_node_property('localhost', 8888, 'shouldhaveshards', true); +\c - - :master_port +SELECT * from master_set_node_property('', 8888, 'shouldhaveshards', true); master_set_node_property --------------------------------------------------------------------- @@ -1482,7 +1482,7 @@ select shouldhaveshards from pg_dist_node where nodeport = 8888; t (1 row) -\c - postgres - :worker_1_port +\c - - :worker_1_port select shouldhaveshards from pg_dist_node where nodeport = 8888; shouldhaveshards --------------------------------------------------------------------- @@ -1520,40 +1520,40 @@ SELECT hasmetadata, metadatasynced FROM pg_dist_node WHERE nodeport=:worker_1_po CREATE TABLE dist_table_2(a int); SELECT create_distributed_table('dist_table_2', 'a'); -ERROR: localhost:xxxxx is a metadata node, but is out of sync +ERROR: :xxxxx is a metadata node, but is out of sync HINT: If the node is up, wait until metadata gets synced to it and try again. SELECT create_reference_table('dist_table_2'); -ERROR: localhost:xxxxx is a metadata node, but is out of sync +ERROR: :xxxxx is a metadata node, but is out of sync HINT: If the node is up, wait until metadata gets synced to it and try again. ALTER TABLE dist_table_1 ADD COLUMN b int; -ERROR: localhost:xxxxx is a metadata node, but is out of sync +ERROR: :xxxxx is a metadata node, but is out of sync HINT: If the node is up, wait until metadata gets synced to it and try again. 
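-- The "metadata node, but is out of sync" errors above and below are raised
-- while a worker still has hasmetadata = true but metadatasynced = false; in
-- that window Citus blocks DDL on distributed tables and most node-management
-- commands (master_update_node is the noted exception). A minimal sketch of
-- inspecting and repairing that state from the coordinator, assuming the
-- usual localhost test workers (the port below is illustrative):
SELECT nodename, nodeport, hasmetadata, metadatasynced
FROM pg_dist_node
WHERE noderole = 'primary';
-- re-running the sync marks metadatasynced true again once it succeeds
SELECT start_metadata_sync_to_node('localhost', 57637);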
-SELECT master_add_node('localhost', :master_port, groupid => 0); -ERROR: localhost:xxxxx is a metadata node, but is out of sync +SELECT master_add_node('localhost', :master_port, groupid => 0); +ERROR: localhost:xxxxx is a metadata node, but is out of sync HINT: If the node is up, wait until metadata gets synced to it and try again. -SELECT master_disable_node('localhost', :worker_1_port); -ERROR: Disabling localhost:xxxxx failed -DETAIL: localhost:xxxxx is a metadata node, but is out of sync +SELECT master_disable_node('localhost', :worker_1_port); +ERROR: Disabling localhost:xxxxx failed +DETAIL: localhost:xxxxx is a metadata node, but is out of sync HINT: If you are using MX, try stop_metadata_sync_to_node(hostname, port) for nodes that are down before disabling them. -SELECT master_disable_node('localhost', :worker_2_port); -ERROR: Disabling localhost:xxxxx failed -DETAIL: localhost:xxxxx is a metadata node, but is out of sync +SELECT master_disable_node('localhost', :worker_2_port); +ERROR: Disabling localhost:xxxxx failed +DETAIL: localhost:xxxxx is a metadata node, but is out of sync HINT: If you are using MX, try stop_metadata_sync_to_node(hostname, port) for nodes that are down before disabling them. -SELECT master_remove_node('localhost', :worker_1_port); -ERROR: localhost:xxxxx is a metadata node, but is out of sync +SELECT master_remove_node('localhost', :worker_1_port); +ERROR: localhost:xxxxx is a metadata node, but is out of sync HINT: If the node is up, wait until metadata gets synced to it and try again. -SELECT master_remove_node('localhost', :worker_2_port); -ERROR: localhost:xxxxx is a metadata node, but is out of sync +SELECT master_remove_node('localhost', :worker_2_port); +ERROR: localhost:xxxxx is a metadata node, but is out of sync HINT: If the node is up, wait until metadata gets synced to it and try again. -- master_update_node should succeed SELECT nodeid AS worker_2_nodeid FROM pg_dist_node WHERE nodeport=:worker_2_port \gset -SELECT master_update_node(:worker_2_nodeid, 'localhost', 4444); +SELECT master_update_node(:worker_2_nodeid, 'localhost', 4444); master_update_node --------------------------------------------------------------------- (1 row) -SELECT master_update_node(:worker_2_nodeid, 'localhost', :worker_2_port); +SELECT master_update_node(:worker_2_nodeid, 'localhost', :worker_2_port); master_update_node --------------------------------------------------------------------- @@ -1569,13 +1569,13 @@ SELECT pg_reload_conf(); UPDATE pg_dist_node SET metadatasynced=true WHERE nodeport=:worker_1_port; -- Cleanup -SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); +SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); +SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); stop_metadata_sync_to_node --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_modifications.out b/src/test/regress/expected/multi_modifications.out index 92b3de6f0..ccb873097 100644 --- a/src/test/regress/expected/multi_modifications.out +++ b/src/test/regress/expected/multi_modifications.out @@ -405,7 +405,7 @@ SELECT count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid -AND sp.nodename = 'localhost' +AND sp.nodename = 'localhost' AND sp.nodeport = :worker_1_port AND sp.shardstate = 1 AND s.logicalrelid = 'limit_orders'::regclass; diff --git a/src/test/regress/expected/multi_modifying_xacts.out
b/src/test/regress/expected/multi_modifying_xacts.out index 7af0de8d9..7753ac3ab 100644 --- a/src/test/regress/expected/multi_modifying_xacts.out +++ b/src/test/regress/expected/multi_modifying_xacts.out @@ -153,7 +153,7 @@ INSERT INTO labs VALUES (6, 'Bell Labs'); INSERT INTO researchers VALUES (9, 6, 'Leslie Lamport'); ERROR: duplicate key value violates unique constraint "avoid_name_confusion_idx_1200001" DETAIL: Key (lab_id, name)=(6, Leslie Lamport) already exists. -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on localhost:xxxxx ABORT; -- SELECTs may occur after a modification: First check that selecting -- from the modified node works. @@ -171,7 +171,7 @@ BEGIN; UPDATE pg_dist_shard_placement AS sp SET shardstate = 3 FROM pg_dist_shard AS s WHERE sp.shardid = s.shardid -AND sp.nodename = 'localhost' +AND sp.nodename = 'localhost' AND sp.nodeport = :worker_1_port AND s.logicalrelid = 'researchers'::regclass; INSERT INTO labs VALUES (6, 'Bell Labs'); @@ -352,8 +352,8 @@ $rli$ LANGUAGE plpgsql;') ORDER BY nodeport; nodename | nodeport | success | result --------------------------------------------------------------------- - localhost | 57637 | t | CREATE FUNCTION - localhost | 57638 | t | CREATE FUNCTION + localhost | xxxxx | t | CREATE FUNCTION + localhost | xxxxx | t | CREATE FUNCTION (2 rows) -- register after insert trigger @@ -361,10 +361,10 @@ SELECT * FROM run_command_on_placements('researchers', 'CREATE CONSTRAINT TRIGGE ORDER BY nodeport, shardid; nodename | nodeport | shardid | success | result --------------------------------------------------------------------- - localhost | 57637 | 1200000 | t | CREATE TRIGGER - localhost | 57637 | 1200001 | t | CREATE TRIGGER - localhost | 57638 | 1200000 | t | CREATE TRIGGER - localhost | 57638 | 1200001 | t | CREATE TRIGGER + localhost | xxxxx | 1200000 | t | CREATE TRIGGER + localhost | xxxxx | 1200001 | t | CREATE TRIGGER + localhost | xxxxx | 1200000 | t | CREATE TRIGGER + localhost | xxxxx | 1200001 | t | CREATE TRIGGER (4 rows) -- hide postgresql version dependend messages for next test only @@ -377,9 +377,9 @@ DELETE FROM researchers WHERE lab_id = 6; \copy researchers FROM STDIN delimiter ',' COMMIT; WARNING: illegal value -WARNING: failed to commit transaction on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: illegal value -WARNING: failed to commit transaction on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: could not commit transaction for shard xxxxx on any active node ERROR: could not commit transaction on any active node \unset VERBOSITY @@ -397,18 +397,18 @@ SELECT * from run_command_on_placements('researchers', 'drop trigger reject_larg ORDER BY nodeport, shardid; nodename | nodeport | shardid | success | result --------------------------------------------------------------------- - localhost | 57637 | 1200000 | t | DROP TRIGGER - localhost | 57637 | 1200001 | t | DROP TRIGGER - localhost | 57638 | 1200000 | t | DROP TRIGGER - localhost | 57638 | 1200001 | t | DROP TRIGGER + localhost | xxxxx | 1200000 | t | DROP TRIGGER + localhost | xxxxx | 1200001 | t | DROP TRIGGER + localhost | xxxxx | 1200000 | t | DROP TRIGGER + localhost | xxxxx | 1200001 | t | DROP TRIGGER (4 rows) SELECT * FROM run_command_on_workers('drop function reject_large_id()') ORDER BY nodeport; nodename | nodeport | success | result --------------------------------------------------------------------- - localhost | 57637 | t | DROP FUNCTION - localhost | 57638 | t | DROP FUNCTION + localhost | xxxxx | t | DROP FUNCTION + localhost | xxxxx | t | DROP FUNCTION (2 rows) -- ALTER and copy are
compatible @@ -485,7 +485,7 @@ INSERT INTO objects VALUES (1, 'apple'); INSERT INTO objects VALUES (1, 'orange'); ERROR: duplicate key value violates unique constraint "objects_pkey_1200003" DETAIL: Key (id)=(X) already exists. -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on localhost:xxxxx COMMIT; -- data shouldn't have persisted... SELECT * FROM objects WHERE id = 1; @@ -545,7 +545,7 @@ SELECT count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid -AND sp.nodename = 'localhost' +AND sp.nodename = 'localhost' AND sp.nodeport = :worker_2_port AND sp.shardstate = 3 AND s.logicalrelid = 'objects'::regclass; @@ -620,7 +620,7 @@ INSERT INTO objects VALUES (2, 'BAD'); INSERT INTO labs VALUES (9, 'Umbrella Corporation'); COMMIT; WARNING: illegal value -WARNING: failed to commit transaction on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx -- data should be persisted SELECT * FROM objects WHERE id = 2; id | name @@ -633,7 +633,7 @@ SELECT count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid -AND sp.nodename = 'localhost' +AND sp.nodename = 'localhost' AND sp.nodeport = :worker_2_port AND sp.shardstate = 3 AND s.logicalrelid = 'objects'::regclass; @@ -663,9 +663,9 @@ INSERT INTO labs VALUES (8, 'Aperture Science'); INSERT INTO labs VALUES (9, 'BAD'); COMMIT; WARNING: illegal value -WARNING: failed to commit transaction on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: illegal value -WARNING: failed to commit transaction on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: could not commit transaction for shard xxxxx on any active node WARNING: could not commit transaction for shard xxxxx on any active node ERROR: could not commit transaction on any active node @@ -704,7 +704,7 @@ INSERT INTO labs VALUES (8, 'Aperture Science'); INSERT INTO labs VALUES (9, 'BAD'); COMMIT; WARNING: illegal value -WARNING: failed to commit transaction on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: could not commit transaction for shard xxxxx on any active node \set VERBOSITY default -- data to objects should be persisted, but labs should not...
@@ -1177,15 +1177,15 @@ ALTER USER test_user RENAME TO test_user_new; \c - test_user - :master_port -- should fail since the worker doesn't have test_user anymore INSERT INTO reference_failure_test VALUES (1, '1'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: localhost:xxxxx -- the same as the above, but wrapped within a transaction BEGIN; INSERT INTO reference_failure_test VALUES (1, '1'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: localhost:xxxxx COMMIT; BEGIN; COPY reference_failure_test FROM STDIN WITH (FORMAT 'csv'); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: localhost:xxxxx COMMIT; -- show that no data go through the table and shard states are good SET client_min_messages to 'ERROR'; @@ -1210,8 +1210,8 @@ ORDER BY s.logicalrelid, sp.shardstate; BEGIN; COPY numbers_hash_failure_test FROM STDIN WITH (FORMAT 'csv'); -WARNING: connection error: localhost:xxxxx -WARNING: connection error: localhost:xxxxx +WARNING: connection error: localhost:xxxxx +WARNING: connection error: localhost:xxxxx -- some placements are invalid before abort SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) @@ -1219,21 +1219,21 @@ WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- - 1200016 | 3 | localhost | 57637 - 1200016 | 1 | localhost | 57638 - 1200017 | 1 | localhost | 57637 - 1200017 | 1 | localhost | 57638 - 1200018 | 1 | localhost | 57637 - 1200018 | 1 | localhost | 57638 - 1200019 | 3 | localhost | 57637 - 1200019 | 1 | localhost | 57638 + 1200016 | 3 | localhost | xxxxx + 1200016 | 1 | localhost | xxxxx + 1200017 | 1 | localhost | xxxxx + 1200017 | 1 | localhost | xxxxx + 1200018 | 1 | localhost | xxxxx + 1200018 | 1 | localhost | xxxxx + 1200019 | 3 | localhost | xxxxx + 1200019 | 1 | localhost | xxxxx (8 rows) ABORT; -- verify nothing is inserted SELECT count(*) FROM numbers_hash_failure_test; -WARNING: connection error: localhost:xxxxx -WARNING: connection error: localhost:xxxxx +WARNING: connection error: localhost:xxxxx +WARNING: connection error: localhost:xxxxx count --------------------------------------------------------------------- 0 @@ -1246,20 +1246,20 @@ WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- - 1200016 | 1 | localhost | 57637 - 1200016 | 1 | localhost | 57638 - 1200017 | 1 | localhost | 57637 - 1200017 | 1 | localhost | 57638 - 1200018 | 1 | localhost | 57637 - 1200018 | 1 | localhost | 57638 - 1200019 | 1 | localhost | 57637 - 1200019 | 1 | localhost | 57638 + 1200016 | 1 | localhost | xxxxx + 1200016 | 1 | localhost | xxxxx + 1200017 | 1 | localhost | xxxxx + 1200017 | 1 | localhost | xxxxx + 1200018 | 1 | localhost | xxxxx + 1200018 | 1 | localhost | xxxxx + 1200019 | 1 | localhost | xxxxx + 1200019 | 1 | localhost | xxxxx (8 rows) BEGIN; COPY numbers_hash_failure_test FROM STDIN WITH (FORMAT 'csv'); -WARNING: connection error: localhost:xxxxx -WARNING: connection error: localhost:xxxxx +WARNING: connection error: localhost:xxxxx +WARNING: connection error: localhost:xxxxx -- check shard states before commit SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) @@ -1267,14 +1267,14 @@ WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- - 1200016 | 3 | localhost | 57637 - 1200016 | 1 |
localhost | 57638 - 1200017 | 1 | localhost | 57637 - 1200017 | 1 | localhost | 57638 - 1200018 | 1 | localhost | 57637 - 1200018 | 1 | localhost | 57638 - 1200019 | 3 | localhost | 57637 - 1200019 | 1 | localhost | 57638 + 1200016 | 3 | localhost | xxxxx + 1200016 | 1 | localhost | xxxxx + 1200017 | 1 | localhost | xxxxx + 1200017 | 1 | localhost | xxxxx + 1200018 | 1 | localhost | xxxxx + 1200018 | 1 | localhost | xxxxx + 1200019 | 3 | localhost | xxxxx + 1200019 | 1 | localhost | xxxxx (8 rows) COMMIT; @@ -1285,20 +1285,20 @@ WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- - 1200016 | 3 | localhost | 57637 - 1200016 | 1 | localhost | 57638 - 1200017 | 1 | localhost | 57637 - 1200017 | 1 | localhost | 57638 - 1200018 | 1 | localhost | 57637 - 1200018 | 1 | localhost | 57638 - 1200019 | 3 | localhost | 57637 - 1200019 | 1 | localhost | 57638 + 1200016 | 3 | localhost | xxxxx + 1200016 | 1 | localhost | xxxxx + 1200017 | 1 | localhost | xxxxx + 1200017 | 1 | localhost | xxxxx + 1200018 | 1 | localhost | xxxxx + 1200018 | 1 | localhost | xxxxx + 1200019 | 3 | localhost | xxxxx + 1200019 | 1 | localhost | xxxxx (8 rows) -- verify data is inserted SELECT count(*) FROM numbers_hash_failure_test; -WARNING: connection error: localhost:xxxxx -WARNING: connection error: localhost:xxxxx +WARNING: connection error: localhost:xxxxx +WARNING: connection error: localhost:xxxxx count --------------------------------------------------------------------- 2 @@ -1310,7 +1310,7 @@ ALTER USER test_user RENAME TO test_user_new; \c - test_user - :master_port -- fails on all shard placements INSERT INTO numbers_hash_failure_test VALUES (2,2); -ERROR: connection error: localhost:xxxxx +ERROR: connection error: localhost:xxxxx -- connect back to the master with the proper user to continue the tests \c - :default_user - :master_port SET citus.next_shard_id TO 1200020; @@ -1319,8 +1319,8 @@ SET citus.next_placement_id TO 1200033; SELECT * FROM run_command_on_workers('ALTER USER test_user_new RENAME TO test_user'); nodename | nodeport | success | result --------------------------------------------------------------------- - localhost | 57637 | t | ALTER ROLE - localhost | 57638 | t | ALTER ROLE + localhost | xxxxx | t | ALTER ROLE + localhost | xxxxx | t | ALTER ROLE (2 rows) DROP TABLE reference_modifying_xacts, hash_modifying_xacts, hash_modifying_xacts_second, @@ -1328,8 +1328,8 @@ DROP TABLE reference_modifying_xacts, hash_modifying_xacts, hash_modifying_xacts SELECT * FROM run_command_on_workers('DROP USER test_user'); nodename | nodeport | success | result --------------------------------------------------------------------- - localhost | 57637 | t | DROP ROLE - localhost | 57638 | t | DROP ROLE + localhost | xxxxx | t | DROP ROLE + localhost | xxxxx | t | DROP ROLE (2 rows) DROP USER test_user; @@ -1390,16 +1390,16 @@ ORDER BY id; id | shard_name | nodename | nodeport --------------------------------------------------------------------- - 1 | users_1200022 | localhost | 57637 - 2 | users_1200025 | localhost | 57638 - 3 | users_1200023 | localhost | 57638 - 4 | users_1200023 | localhost | 57638 - 5 | users_1200022 | localhost | 57637 - 6 | users_1200024 | localhost | 57637 - 7 | users_1200023 | localhost | 57638 - 8 | users_1200022 | localhost | 57637 - 9 | users_1200025 | localhost | 57638 - 10 | users_1200022 | localhost | 57637 + 1 | users_1200022 | localhost | xxxxx + 2 | users_1200025 | localhost | xxxxx + 3 | users_1200023 | localhost | xxxxx + 4 | users_1200023 | localhost | xxxxx + 5 | users_1200022 | localhost | xxxxx + 6 | users_1200024 | localhost | xxxxx + 7 | users_1200023 | localhost | xxxxx + 8 | users_1200022 | localhost | xxxxx + 9 |
users_1200025 | localhost | xxxxx + 10 | users_1200022 | localhost | xxxxx (10 rows) END; diff --git a/src/test/regress/expected/multi_multiuser.out b/src/test/regress/expected/multi_multiuser.out index d1cee6f75..f0621327d 100644 --- a/src/test/regress/expected/multi_multiuser.out +++ b/src/test/regress/expected/multi_multiuser.out @@ -439,7 +439,7 @@ INSERT INTO full_access_user_schema.t1 VALUES (1),(2),(3); -- not allowed to create a table SELECT create_distributed_table('full_access_user_schema.t1', 'id'); ERROR: permission denied for schema full_access_user_schema -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on localhost:xxxxx RESET ROLE; SET ROLE usage_access; CREATE TYPE usage_access_type AS ENUM ('a', 'b'); @@ -470,15 +470,15 @@ SELECT proowner::regrole FROM pg_proc WHERE proname = 'usage_access_func'; SELECT run_command_on_workers($$SELECT typowner::regrole FROM pg_type WHERE typname = 'usage_access_type'$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,usage_access) - (localhost,57638,t,usage_access) + (localhost,xxxxx,t,usage_access) + (localhost,xxxxx,t,usage_access) (2 rows) SELECT run_command_on_workers($$SELECT proowner::regrole FROM pg_proc WHERE proname = 'usage_access_func'$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,usage_access) - (localhost,57638,t,usage_access) + (localhost,xxxxx,t,usage_access) + (localhost,xxxxx,t,usage_access) (2 rows) SELECT wait_until_metadata_sync(); @@ -539,19 +539,19 @@ SELECT proowner::regrole FROM pg_proc WHERE proname = 'usage_access_func_third'; SELECT run_command_on_workers($$SELECT proowner::regrole FROM pg_proc WHERE proname = 'usage_access_func_third'$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,usage_access) - (localhost,57638,t,usage_access) + (localhost,xxxxx,t,usage_access) + (localhost,xxxxx,t,usage_access) (2 rows) -- we don't want other tests to have metadata synced -- that might change the test outputs, so we're just trying to be careful -SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); +SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); +SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); stop_metadata_sync_to_node --------------------------------------------------------------------- @@ -683,19 +683,19 @@ RESET ROLE; -- all attempts for transfer are initiated from other workers \c - - - :worker_2_port -- super user should not be able to copy files created by a user -SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port); +SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port); WARNING: could not open file "base/pgsql_job_cache/job_0042/task_000001/p_00001.xxxx": No such file or directory -CONTEXT: while executing command on localhost:xxxxx -ERROR: could not receive file "base/pgsql_job_cache/job_0042/task_000001/p_00001" from localhost:xxxxx -- different user should not be able to fetch partition file SET ROLE usage_access; -SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port); WARNING: could not open
file "base/pgsql_job_cache/job_0042/task_000001/p_00001.xxxx": No such file or directory -CONTEXT: while executing command on localhost:xxxxx -ERROR: could not receive file "base/pgsql_job_cache/job_0042/task_000001/p_00001" from localhost:xxxxx +CONTEXT: while executing command on :xxxxx +ERROR: could not receive file "base/pgsql_job_cache/job_0042/task_000001/p_00001" from :xxxxx -- only the user whom created the files should be able to fetch SET ROLE full_access; -SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port); +SELECT worker_fetch_partition_file(42, 1, 1, 1, '', :worker_1_port); worker_fetch_partition_file --------------------------------------------------------------------- @@ -832,8 +832,8 @@ RESET ROLE; SELECT run_command_on_workers($$SELECT task_tracker_cleanup_job(42);$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"") - (localhost,57638,t,"") + (,xxxxx,t,"") + (,xxxxx,t,"") (2 rows) DROP SCHEMA full_access_user_schema CASCADE; diff --git a/src/test/regress/expected/multi_mx_add_coordinator.out b/src/test/regress/expected/multi_mx_add_coordinator.out index 8f2d99482..7bc40451d 100644 --- a/src/test/regress/expected/multi_mx_add_coordinator.out +++ b/src/test/regress/expected/multi_mx_add_coordinator.out @@ -10,13 +10,13 @@ CREATE USER reprefuser WITH LOGIN; SELECT run_command_on_workers('CREATE USER reprefuser WITH LOGIN'); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") + (,xxxxx,t,"CREATE ROLE") + (,xxxxx,t,"CREATE ROLE") (2 rows) SET citus.enable_alter_role_propagation TO ON; ALTER ROLE reprefuser WITH CREATEDB; -SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0); +SELECT 1 FROM master_add_node('', :master_port, groupId => 0); ?column? 
--------------------------------------------------------------------- 1 @@ -29,8 +29,8 @@ SELECT wait_until_metadata_sync(); (1 row) -SELECT verify_metadata('localhost', :worker_1_port), - verify_metadata('localhost', :worker_2_port); +SELECT verify_metadata('localhost', :worker_1_port), + verify_metadata('localhost', :worker_2_port); verify_metadata | verify_metadata --------------------------------------------------------------------- t | t @@ -163,7 +163,7 @@ SELECT count(*) FROM run_command_on_workers('SELECT recover_prepared_transaction 2 (1 row) -SELECT master_remove_node('localhost', :master_port); +SELECT master_remove_node('localhost', :master_port); master_remove_node --------------------------------------------------------------------- @@ -176,8 +176,8 @@ SELECT wait_until_metadata_sync(); (1 row) -SELECT verify_metadata('localhost', :worker_1_port), - verify_metadata('localhost', :worker_2_port); +SELECT verify_metadata('localhost', :worker_1_port), + verify_metadata('localhost', :worker_2_port); verify_metadata | verify_metadata --------------------------------------------------------------------- t | t diff --git a/src/test/regress/expected/multi_mx_call.out b/src/test/regress/expected/multi_mx_call.out index 94f2260a7..27f82545d 100644 --- a/src/test/regress/expected/multi_mx_call.out +++ b/src/test/regress/expected/multi_mx_call.out @@ -389,13 +389,13 @@ DEBUG: warning ERROR: error \set VERBOSITY default -- Test that we don't propagate to non-metadata worker nodes -select stop_metadata_sync_to_node('localhost', :worker_1_port); +select stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -select stop_metadata_sync_to_node('localhost', :worker_2_port); +select stop_metadata_sync_to_node('localhost', :worker_2_port); stop_metadata_sync_to_node --------------------------------------------------------------------- @@ -415,13 +415,13 @@ PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment (1 row) SET client_min_messages TO NOTICE; -select start_metadata_sync_to_node('localhost', :worker_1_port); +select start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -select start_metadata_sync_to_node('localhost', :worker_2_port); +select start_metadata_sync_to_node('localhost', :worker_2_port); start_metadata_sync_to_node --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_mx_create_table.out b/src/test/regress/expected/multi_mx_create_table.out index e64a49439..7fa37a4d5 100644 --- a/src/test/regress/expected/multi_mx_create_table.out +++ b/src/test/regress/expected/multi_mx_create_table.out @@ -2,13 +2,13 @@ -- MULTI_MX_CREATE_TABLE -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000; -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_2_port); +SELECT start_metadata_sync_to_node('localhost', :worker_2_port); start_metadata_sync_to_node --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_mx_explain.out b/src/test/regress/expected/multi_mx_explain.out index fe2f78a29..7154e4a37 100644 ---
b/src/test/regress/expected/multi_mx_explain.out @@ -70,7 +70,7 @@ Sort Task Count: 16 Tasks Shown: One of 16 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_quantity -> Seq Scan on lineitem_mx_1220052 lineitem_mx @@ -104,7 +104,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON) "Tasks Shown": "One of 16", "Tasks": [ { - "Node": "host=localhost port=xxxxx dbname=regression", + "Node": "host=localhost port=xxxxx dbname=regression", "Remote Plan": [ [ { @@ -180,7 +180,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML) One of 16 - host=localhost port=xxxxx dbname=regression + host=localhost port=xxxxx dbname=regression @@ -249,7 +249,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) Task Count: 16 Tasks Shown: "One of 16" Tasks: - - Node: "host=localhost port=xxxxx dbname=regression" + - Node: "host=localhost port=xxxxx dbname=regression" Remote Plan: - Plan: Node Type: "Aggregate" @@ -277,7 +277,7 @@ Sort Task Count: 16 Tasks Shown: One of 16 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: l_quantity -> Seq Scan on lineitem_mx_1220052 lineitem_mx @@ -292,7 +292,7 @@ Aggregate Task Count: 16 Tasks Shown: One of 16 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate Output: sum(l_quantity), sum(l_quantity), count(l_quantity) -> Seq Scan on public.lineitem_mx_1220052 lineitem_mx @@ -309,7 +309,7 @@ Limit Task Count: 16 Tasks Shown: One of 16 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit -> Sort Sort Key: lineitem_mx.l_quantity @@ -326,7 +326,7 @@ Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Insert on lineitem_mx_1220052 -> Result -- Test update @@ -338,7 +338,7 @@ Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Update on lineitem_mx_1220052 lineitem_mx -> Index Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx Index Cond: (l_orderkey = 1) @@ -351,7 +351,7 @@ Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Delete on lineitem_mx_1220052 lineitem_mx -> Index Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx Index Cond: (l_orderkey = 1) @@ -368,7 +368,7 @@ Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Index Scan using lineitem_mx_pkey_1220055 on lineitem_mx_1220055 lineitem_mx Index Cond: (l_orderkey = 5) SELECT true AS valid FROM explain_xml($$ @@ -385,7 +385,7 @@ Custom Scan (Citus Adaptive) Task Count: 16 Tasks Shown: One of 16 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Seq Scan on lineitem_mx_1220052 lineitem_mx -- Test all tasks output SET citus.explain_all_tasks TO on; @@ -396,82 +396,82 @@ Aggregate Task Count: 16 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220053 on
lineitem_mx_1220053 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220054 on lineitem_mx_1220054 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220055 on lineitem_mx_1220055 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220056 on lineitem_mx_1220056 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220057 on lineitem_mx_1220057 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220058 on lineitem_mx_1220058 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220059 on lineitem_mx_1220059 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220060 on lineitem_mx_1220060 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220061 on lineitem_mx_1220061 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220062 on lineitem_mx_1220062 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220063 on lineitem_mx_1220063 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220064 on lineitem_mx_1220064 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Seq Scan on lineitem_mx_1220065 lineitem_mx Filter: (l_orderkey > 9030) -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220066 on lineitem_mx_1220066 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220067 on lineitem_mx_1220067 lineitem_mx Index Cond: (l_orderkey > 9030) @@ -491,7 +491,7 @@ Aggregate Task Count: 16 Tasks Shown: One of 16 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx Index Cond: (l_orderkey > 9030) @@ -507,7 +507,7 @@ Aggregate Task Count: 16 Tasks Shown: One of 16 -> Task - Node: host=localhost port=xxxxx
dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Hash Join Hash Cond: (lineitem_mx.l_orderkey = orders_mx.o_orderkey) @@ -547,7 +547,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON) "Tasks Shown": "One of 16", "Tasks": [ { - "Node": "host=localhost port=xxxxx dbname=regression", + "Node": "host=localhost port=xxxxx dbname=regression", "Remote Plan": [ [ { @@ -683,7 +683,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML) One of 16 - host=localhost port=xxxxx dbname=regression + host=localhost port=xxxxx dbname=regression @@ -814,7 +814,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) Task Count: 16 Tasks Shown: "One of 16" Tasks: - - Node: "host=localhost port=xxxxx dbname=regression" + - Node: "host=localhost port=xxxxx dbname=regression" Remote Plan: - Plan: Node Type: "Aggregate" diff --git a/src/test/regress/expected/multi_mx_function_call_delegation.out b/src/test/regress/expected/multi_mx_function_call_delegation.out index 935a01770..df67f4ac8 100644 --- a/src/test/regress/expected/multi_mx_function_call_delegation.out +++ b/src/test/regress/expected/multi_mx_function_call_delegation.out @@ -495,13 +495,13 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT test.x, r.cou (0 rows) -- Test that we don't propagate to non-metadata worker nodes -select stop_metadata_sync_to_node('localhost', :worker_1_port); +select stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -select stop_metadata_sync_to_node('localhost', :worker_2_port); +select stop_metadata_sync_to_node('localhost', :worker_2_port); stop_metadata_sync_to_node --------------------------------------------------------------------- @@ -521,13 +521,13 @@ PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment (1 row) SET client_min_messages TO NOTICE; -select start_metadata_sync_to_node('localhost', :worker_1_port); +select start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -select start_metadata_sync_to_node('localhost', :worker_2_port); +select start_metadata_sync_to_node('localhost', :worker_2_port); start_metadata_sync_to_node --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_mx_function_table_reference.out b/src/test/regress/expected/multi_mx_function_table_reference.out index a433615cb..5e05db76f 100644 --- a/src/test/regress/expected/multi_mx_function_table_reference.out +++ b/src/test/regress/expected/multi_mx_function_table_reference.out @@ -10,13 +10,13 @@ SET search_path TO function_table_reference; SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_2_port); +SELECT start_metadata_sync_to_node('localhost', :worker_2_port); start_metadata_sync_to_node --------------------------------------------------------------------- @@ -26,7 +26,7 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_port); -- SET client_min_messages TO log; -- remove worker 2, so we can add it after we have created some functions that caused -- problems -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('localhost', :worker_2_port); master_remove_node
--------------------------------------------------------------------- @@ -61,7 +61,7 @@ SELECT create_distributed_function('zoop(int)', '$1'); (1 row) -- now add the worker back, this triggers function distribution which should not fail. -SELECT 1 FROM master_add_node('localhost', :worker_2_port); +SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? --------------------------------------------------------------------- 1 @@ -81,7 +81,7 @@ drop cascades to function zoop(integer) -- make sure the worker is added at the end irregardless of anything failing to not make -- subsequent tests fail as well. All artifacts created during this test should have been -- dropped by the drop cascade above. -SELECT 1 FROM master_add_node('localhost', :worker_2_port); +SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? --------------------------------------------------------------------- 1 diff --git a/src/test/regress/expected/multi_mx_hide_shard_names.out b/src/test/regress/expected/multi_mx_hide_shard_names.out index cbc57ef7b..025264b53 100644 --- a/src/test/regress/expected/multi_mx_hide_shard_names.out +++ b/src/test/regress/expected/multi_mx_hide_shard_names.out @@ -25,13 +25,13 @@ SET search_path TO 'mx_hide_shard_names'; SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_2_port); +SELECT start_metadata_sync_to_node('localhost', :worker_2_port); start_metadata_sync_to_node --------------------------------------------------------------------- @@ -63,8 +63,8 @@ SET search_path TO 'mx_hide_shard_names'; SELECT * FROM citus_shards_on_worker ORDER BY 2; Schema | Name | Type | Owner --------------------------------------------------------------------- - mx_hide_shard_names | test_table_1130000 | table | postgres - mx_hide_shard_names | test_table_1130002 | table | postgres + mx_hide_shard_names | test_table_1130000 | table | postgres + mx_hide_shard_names | test_table_1130002 | table | postgres (2 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; @@ -100,15 +100,15 @@ SET search_path TO 'mx_hide_shard_names'; SELECT * FROM citus_shards_on_worker ORDER BY 2; Schema | Name | Type | Owner --------------------------------------------------------------------- - mx_hide_shard_names | test_table_1130000 | table | postgres - mx_hide_shard_names | test_table_1130002 | table | postgres + mx_hide_shard_names | test_table_1130000 | table | postgres + mx_hide_shard_names | test_table_1130002 | table | postgres (2 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; Schema | Name | Type | Owner | Table --------------------------------------------------------------------- - mx_hide_shard_names | test_index_1130000 | index | postgres | test_table_1130000 - mx_hide_shard_names | test_index_1130002 | index | postgres | test_table_1130002 + mx_hide_shard_names | test_index_1130000 | index | postgres | test_table_1130000 + mx_hide_shard_names | test_index_1130002 | index | postgres | test_table_1130002 (2 rows) -- we should be able to select from the shards directly if we @@ -158,19 +158,19 @@ CREATE TABLE test_table_2_1130000(id int, time date); SELECT * FROM citus_shards_on_worker ORDER BY 2; Schema | Name | Type | Owner --------------------------------------------------------------------- -
mx_hide_shard_names | test_table_102008_1130004 | table | postgres - mx_hide_shard_names | test_table_102008_1130006 | table | postgres - mx_hide_shard_names | test_table_1130000 | table | postgres - mx_hide_shard_names | test_table_1130002 | table | postgres + mx_hide_shard_names | test_table_102008_1130004 | table | postgres + mx_hide_shard_names | test_table_102008_1130006 | table | postgres + mx_hide_shard_names | test_table_1130000 | table | postgres + mx_hide_shard_names | test_table_1130002 | table | postgres (4 rows) \d List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- - mx_hide_shard_names | test_table | table | postgres - mx_hide_shard_names | test_table_102008 | table | postgres - mx_hide_shard_names | test_table_2_1130000 | table | postgres + mx_hide_shard_names | test_table | table | postgres + mx_hide_shard_names | test_table_102008 | table | postgres + mx_hide_shard_names | test_table_2_1130000 | table | postgres (3 rows) \c - - - :master_port @@ -193,32 +193,32 @@ SET search_path TO 'mx_hide_shard_names'; SELECT * FROM citus_shards_on_worker ORDER BY 2; Schema | Name | Type | Owner --------------------------------------------------------------------- - mx_hide_shard_names | test_table_102008_1130004 | table | postgres - mx_hide_shard_names | test_table_102008_1130006 | table | postgres - mx_hide_shard_names | test_table_1130000 | table | postgres - mx_hide_shard_names | test_table_1130002 | table | postgres + mx_hide_shard_names | test_table_102008_1130004 | table | postgres + mx_hide_shard_names | test_table_102008_1130006 | table | postgres + mx_hide_shard_names | test_table_1130000 | table | postgres + mx_hide_shard_names | test_table_1130002 | table | postgres (4 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; Schema | Name | Type | Owner | Table --------------------------------------------------------------------- - mx_hide_shard_names | test_index_1130000 | index | postgres | test_table_1130000 - mx_hide_shard_names | test_index_1130002 | index | postgres | test_table_1130002 + mx_hide_shard_names | test_index_1130000 | index | postgres | test_table_1130000 + mx_hide_shard_names | test_index_1130002 | index | postgres | test_table_1130002 (2 rows) SET search_path TO 'mx_hide_shard_names_2'; SELECT * FROM citus_shards_on_worker ORDER BY 2; Schema | Name | Type | Owner --------------------------------------------------------------------- - mx_hide_shard_names_2 | test_table_1130008 | table | postgres - mx_hide_shard_names_2 | test_table_1130010 | table | postgres + mx_hide_shard_names_2 | test_table_1130008 | table | postgres + mx_hide_shard_names_2 | test_table_1130010 | table | postgres (2 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; Schema | Name | Type | Owner | Table --------------------------------------------------------------------- - mx_hide_shard_names_2 | test_index_1130008 | index | postgres | test_table_1130008 - mx_hide_shard_names_2 | test_index_1130010 | index | postgres | test_table_1130010 + mx_hide_shard_names_2 | test_index_1130008 | index | postgres | test_table_1130008 + mx_hide_shard_names_2 | test_index_1130010 | index | postgres | test_table_1130010 (2 rows) SET search_path TO 'mx_hide_shard_names_2, mx_hide_shard_names'; @@ -254,15 +254,15 @@ SET search_path TO 'mx_hide_shard_names_3'; SELECT * FROM citus_shards_on_worker ORDER BY 2; Schema | Name | Type | Owner --------------------------------------------------------------------- - mx_hide_shard_names_3 | too_long_1234567890123456789012345678901234567_e0119164_1130012 | table | postgres - mx_hide_shard_names_3 |
too_long_1234567890123456789012345678901234567_e0119164_1130014 | table | postgres + mx_hide_shard_names_3 | too_long_1234567890123456789012345678901234567_e0119164_1130012 | table | postgres + mx_hide_shard_names_3 | too_long_1234567890123456789012345678901234567_e0119164_1130014 | table | postgres (2 rows) \d List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- - mx_hide_shard_names_3 | too_long_12345678901234567890123456789012345678901234567890 | table | postgres + mx_hide_shard_names_3 | too_long_12345678901234567890123456789012345678901234567890 | table | postgres (1 row) -- now try weird schema names @@ -286,29 +286,29 @@ SET search_path TO "CiTuS.TeeN"; SELECT * FROM citus_shards_on_worker ORDER BY 2; Schema | Name | Type | Owner --------------------------------------------------------------------- - CiTuS.TeeN | TeeNTabLE.1!?!_1130016 | table | postgres - CiTuS.TeeN | TeeNTabLE.1!?!_1130018 | table | postgres + CiTuS.TeeN | TeeNTabLE.1!?!_1130016 | table | postgres + CiTuS.TeeN | TeeNTabLE.1!?!_1130018 | table | postgres (2 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; Schema | Name | Type | Owner | Table --------------------------------------------------------------------- - CiTuS.TeeN | MyTenantIndex_1130016 | index | postgres | TeeNTabLE.1!?!_1130016 - CiTuS.TeeN | MyTenantIndex_1130018 | index | postgres | TeeNTabLE.1!?!_1130018 + CiTuS.TeeN | MyTenantIndex_1130016 | index | postgres | TeeNTabLE.1!?!_1130016 + CiTuS.TeeN | MyTenantIndex_1130018 | index | postgres | TeeNTabLE.1!?!_1130018 (2 rows) \d List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- - CiTuS.TeeN | TeeNTabLE.1!?! | table | postgres + CiTuS.TeeN | TeeNTabLE.1!?! | table | postgres (1 row) \di List of relations Schema | Name | Type | Owner | Table --------------------------------------------------------------------- - CiTuS.TeeN | MyTenantIndex | index | postgres | TeeNTabLE.1!?! + CiTuS.TeeN | MyTenantIndex | index | postgres | TeeNTabLE.1!?!
(1 row) -- clean-up @@ -320,15 +320,15 @@ SET search_path TO 'mx_hide_shard_names'; List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- - mx_hide_shard_names | test_table | table | postgres - mx_hide_shard_names | test_table_102008 | table | postgres + mx_hide_shard_names | test_table | table | postgres + mx_hide_shard_names | test_table_102008 | table | postgres (2 rows) \di List of relations Schema | Name | Type | Owner | Table --------------------------------------------------------------------- - mx_hide_shard_names | test_index | index | postgres | test_table + mx_hide_shard_names | test_index | index | postgres | test_table (1 row) DROP SCHEMA mx_hide_shard_names CASCADE; diff --git a/src/test/regress/expected/multi_mx_insert_select_repartition.out b/src/test/regress/expected/multi_mx_insert_select_repartition.out index 5cbf44293..5e6f75db3 100644 --- a/src/test/regress/expected/multi_mx_insert_select_repartition.out +++ b/src/test/regress/expected/multi_mx_insert_select_repartition.out @@ -70,7 +70,7 @@ EXPLAIN (costs off) INSERT INTO target_table SELECT a, max(b) FROM source_table Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Group Key: a -> Seq Scan on source_table_4213581 source_table diff --git a/src/test/regress/expected/multi_mx_metadata.out b/src/test/regress/expected/multi_mx_metadata.out index 3198db3c4..4eb538e60 100644 --- a/src/test/regress/expected/multi_mx_metadata.out +++ b/src/test/regress/expected/multi_mx_metadata.out @@ -309,8 +309,8 @@ HINT: Connect to worker nodes directly to manually create all necessary users a SELECT run_command_on_workers($$CREATE USER no_access_mx;$$); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") + (localhost,xxxxx,t,"CREATE ROLE") + (localhost,xxxxx,t,"CREATE ROLE") (2 rows) SET ROLE no_access_mx; @@ -370,7 +370,7 @@ SELECT master_drop_sequences(NULL); (1 row) -\c - postgres - :master_port +\c - postgres - :master_port -- finally make sure that the sequence remains SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass; Column | Type | Modifiers @@ -418,7 +418,7 @@ HINT: Connect to the coordinator and run it again.
-- make sure that we can drop unrelated tables/sequences CREATE TABLE unrelated_table(key serial); DROP TABLE unrelated_table; -\c - postgres - :worker_1_port +\c - postgres - :worker_1_port -- finally make sure that the sequence remains SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass; Column | Type | Modifiers @@ -429,7 +429,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx (3 rows) -- Resume ordinary recovery -\c - postgres - :master_port +\c - postgres - :master_port ALTER SYSTEM RESET citus.recover_2pc_interval; SELECT pg_reload_conf(); pg_reload_conf diff --git a/src/test/regress/expected/multi_mx_modifications_to_reference_tables.out b/src/test/regress/expected/multi_mx_modifications_to_reference_tables.out index fb2961934..7f979124f 100644 --- a/src/test/regress/expected/multi_mx_modifications_to_reference_tables.out +++ b/src/test/regress/expected/multi_mx_modifications_to_reference_tables.out @@ -7,13 +7,13 @@ SET search_path TO 'mx_modify_reference_table'; SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_2_port); +SELECT start_metadata_sync_to_node('localhost', :worker_2_port); start_metadata_sync_to_node --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_mx_modifying_xacts.out b/src/test/regress/expected/multi_mx_modifying_xacts.out index 0466c847e..0198e1b25 100644 --- a/src/test/regress/expected/multi_mx_modifying_xacts.out +++ b/src/test/regress/expected/multi_mx_modifying_xacts.out @@ -217,7 +217,7 @@ INSERT INTO objects_mx VALUES (1, 'apple'); INSERT INTO objects_mx VALUES (1, 'orange'); ERROR: duplicate key value violates unique constraint "objects_mx_pkey_1220103" DETAIL: Key (id)=(X) already exists. -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on localhost:xxxxx COMMIT; -- data shouldn't have persisted...
SELECT * FROM objects_mx WHERE id = 1; @@ -342,7 +342,7 @@ INSERT INTO objects_mx VALUES (2, 'BAD'); INSERT INTO labs_mx VALUES (9, 'Umbrella Corporation'); COMMIT; WARNING: illegal value -WARNING: failed to commit transaction on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: could not commit transaction for shard xxxxx on any active node WARNING: could not commit transaction for shard xxxxx on any active node ERROR: could not commit transaction on any active node @@ -370,7 +370,7 @@ INSERT INTO labs_mx VALUES (8, 'Aperture Science'); INSERT INTO labs_mx VALUES (9, 'BAD'); COMMIT; WARNING: illegal value -WARNING: failed to commit transaction on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: could not commit transaction for shard xxxxx on any active node WARNING: could not commit transaction for shard xxxxx on any active node ERROR: could not commit transaction on any active node @@ -395,7 +395,7 @@ INSERT INTO labs_mx VALUES (8, 'Aperture Science'); INSERT INTO labs_mx VALUES (9, 'BAD'); COMMIT; WARNING: illegal value -WARNING: failed to commit transaction on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx WARNING: could not commit transaction for shard xxxxx on any active node WARNING: could not commit transaction for shard xxxxx on any active node ERROR: could not commit transaction on any active node diff --git a/src/test/regress/expected/multi_mx_node_metadata.out b/src/test/regress/expected/multi_mx_node_metadata.out index b44d69a84..f8fd2585c 100644 --- a/src/test/regress/expected/multi_mx_node_metadata.out +++ b/src/test/regress/expected/multi_mx_node_metadata.out @@ -20,11 +20,11 @@ CREATE FUNCTION mark_node_readonly(hostname TEXT, port INTEGER, isreadonly BOOLE ARRAY['SELECT pg_reload_conf()'], false); $$; -- add a node to the cluster -SELECT master_add_node('localhost', :worker_1_port) As nodeid_1 \gset +SELECT master_add_node('localhost', :worker_1_port) As nodeid_1 \gset SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; nodeid | nodename | nodeport | hasmetadata | metadatasynced --------------------------------------------------------------------- - 2 | localhost | 57637 | f | f + 2 | localhost | xxxxx | f | f (1 row) -- create couple of tables @@ -44,7 +44,7 @@ SELECT create_distributed_table('dist_table_1', 'a'); -- update the node SELECT 1 FROM master_update_node((SELECT nodeid FROM pg_dist_node), - 'localhost', :worker_2_port); + 'localhost', :worker_2_port); ?column? --------------------------------------------------------------------- 1 @@ -53,11 +53,11 @@ SELECT 1 FROM master_update_node((SELECT nodeid FROM pg_dist_node), SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; nodeid | nodename | nodeport | hasmetadata | metadatasynced --------------------------------------------------------------------- - 2 | localhost | 57638 | f | f + 2 | localhost | xxxxx | f | f (1 row) -- start syncing metadata to the node -SELECT 1 FROM start_metadata_sync_to_node('localhost', :worker_2_port); +SELECT 1 FROM start_metadata_sync_to_node('localhost', :worker_2_port); ?column?
--------------------------------------------------------------------- 1 @@ -66,7 +66,7 @@ SELECT 1 FROM start_metadata_sync_to_node('localhost', :worker_2_port); SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; nodeid | nodename | nodeport | hasmetadata | metadatasynced --------------------------------------------------------------------- - 2 | localhost | 57638 | t | t + 2 | localhost | xxxxx | t | t (1 row) --------------------------------------------------------------------- @@ -79,10 +79,10 @@ BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ; SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; nodeid | nodename | nodeport | hasmetadata | metadatasynced --------------------------------------------------------------------- - 2 | localhost | 57638 | t | t + 2 | localhost | xxxxx | t | t (1 row) -SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); +SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); ?column? --------------------------------------------------------------------- 1 @@ -91,7 +91,7 @@ SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; nodeid | nodename | nodeport | hasmetadata | metadatasynced --------------------------------------------------------------------- - 2 | localhost | 57637 | t | f + 2 | localhost | xxxxx | t | f (1 row) END; @@ -109,7 +109,7 @@ SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node; 2 | t | t (1 row) -SELECT verify_metadata('localhost', :worker_1_port); +SELECT verify_metadata('localhost', :worker_1_port); verify_metadata --------------------------------------------------------------------- t @@ -121,10 +121,10 @@ BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ; SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; nodeid | nodename | nodeport | hasmetadata | metadatasynced --------------------------------------------------------------------- - 2 | localhost | 57637 | t | t + 2 | localhost | xxxxx | t | t (1 row) -SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345); +SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345); ?column? --------------------------------------------------------------------- 1 @@ -133,7 +133,7 @@ SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345); SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; nodeid | nodename | nodeport | hasmetadata | metadatasynced --------------------------------------------------------------------- - 2 | localhost | 12345 | t | f + 2 | localhost | 12345 | t | f (1 row) END; @@ -151,7 +151,7 @@ SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node; (1 row) -- update it back to :worker_1_port, now metadata should be synced -SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); +SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); ?column?
--------------------------------------------------------------------- 1 @@ -172,9 +172,9 @@ SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node; --------------------------------------------------------------------- -- Test updating a node when another node is in readonly-mode --------------------------------------------------------------------- -SELECT master_add_node('localhost', :worker_2_port) AS nodeid_2 \gset -NOTICE: Replicating reference table "ref_table" to the node localhost:xxxxx -SELECT 1 FROM start_metadata_sync_to_node('localhost', :worker_2_port); +SELECT master_add_node('localhost', :worker_2_port) AS nodeid_2 \gset +NOTICE: Replicating reference table "ref_table" to the node localhost:xxxxx +SELECT 1 FROM start_metadata_sync_to_node('localhost', :worker_2_port); ?column? --------------------------------------------------------------------- 1 @@ -189,7 +189,7 @@ SELECT create_distributed_table('dist_table_2', 'a'); (1 row) INSERT INTO dist_table_2 SELECT i FROM generate_series(1, 100) i; -SELECT mark_node_readonly('localhost', :worker_2_port, TRUE); +SELECT mark_node_readonly('localhost', :worker_2_port, TRUE); mark_node_readonly --------------------------------------------------------------------- t @@ -197,7 +197,7 @@ SELECT mark_node_readonly('localhost', :worker_2_port, TRUE); -- Now updating the other node will mark worker 2 as not synced. BEGIN; -SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345); +SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345); ?column? --------------------------------------------------------------------- 1 @@ -213,7 +213,7 @@ SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; COMMIT; -- worker_2 is out of sync, so further updates aren't sent to it and -- we shouldn't see the warnings. -SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 23456); +SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 23456); ?column? --------------------------------------------------------------------- 1 @@ -227,7 +227,7 @@ SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; (2 rows) -- Make the node writeable. -SELECT mark_node_readonly('localhost', :worker_2_port, FALSE); +SELECT mark_node_readonly('localhost', :worker_2_port, FALSE); mark_node_readonly --------------------------------------------------------------------- t @@ -240,7 +240,7 @@ SELECT wait_until_metadata_sync(); (1 row) -- Mark the node readonly again, so the following master_update_node warns -SELECT mark_node_readonly('localhost', :worker_2_port, TRUE); +SELECT mark_node_readonly('localhost', :worker_2_port, TRUE); mark_node_readonly --------------------------------------------------------------------- t @@ -248,7 +248,7 @@ SELECT mark_node_readonly('localhost', :worker_2_port, TRUE); -- Revert the nodeport of worker 1. BEGIN; -SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); +SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); ?column? --------------------------------------------------------------------- 1 @@ -268,7 +268,7 @@ SELECT wait_until_metadata_sync(); (1 row) -- Make the node writeable. -SELECT mark_node_readonly('localhost', :worker_2_port, FALSE); +SELECT mark_node_readonly('localhost', :worker_2_port, FALSE); mark_node_readonly --------------------------------------------------------------------- t @@ -280,14 +280,14 @@ SELECT wait_until_metadata_sync(); (1 row) -SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); +SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); ?column?
--------------------------------------------------------------------- 1 (1 row) -SELECT verify_metadata('localhost', :worker_1_port), - verify_metadata('localhost', :worker_2_port); +SELECT verify_metadata('', :worker_1_port), + verify_metadata('', :worker_2_port); verify_metadata | verify_metadata --------------------------------------------------------------------- t | t @@ -297,15 +297,15 @@ SELECT verify_metadata('localhost', :worker_1_port), -- Test that master_update_node rolls back properly --------------------------------------------------------------------- BEGIN; -SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345); +SELECT 1 FROM master_update_node(:nodeid_1, '', 12345); ?column? --------------------------------------------------------------------- 1 (1 row) ROLLBACK; -SELECT verify_metadata('localhost', :worker_1_port), - verify_metadata('localhost', :worker_2_port); +SELECT verify_metadata('', :worker_1_port), + verify_metadata('', :worker_2_port); verify_metadata | verify_metadata --------------------------------------------------------------------- t | t @@ -315,7 +315,7 @@ SELECT verify_metadata('localhost', :worker_1_port), -- Test that master_update_node can appear in a prepared transaction. --------------------------------------------------------------------- BEGIN; -SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345); +SELECT 1 FROM master_update_node(:nodeid_1, '', 12345); ?column? --------------------------------------------------------------------- 1 @@ -337,7 +337,7 @@ SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; (2 rows) BEGIN; -SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); +SELECT 1 FROM master_update_node(:nodeid_1, '', :worker_1_port); ?column? --------------------------------------------------------------------- 1 @@ -358,8 +358,8 @@ SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; 3 | t | t (2 rows) -SELECT verify_metadata('localhost', :worker_1_port), - verify_metadata('localhost', :worker_2_port); +SELECT verify_metadata('', :worker_1_port), + verify_metadata('', :worker_2_port); verify_metadata | verify_metadata --------------------------------------------------------------------- t | t @@ -370,26 +370,26 @@ SELECT verify_metadata('localhost', :worker_1_port), --------------------------------------------------------------------- -- Don't drop the reference table so it has shards on the nodes being disabled DROP TABLE dist_table_1, dist_table_2; -SELECT 1 FROM master_disable_node('localhost', :worker_2_port); +SELECT 1 FROM master_disable_node('', :worker_2_port); ?column? --------------------------------------------------------------------- 1 (1 row) -SELECT verify_metadata('localhost', :worker_1_port); +SELECT verify_metadata('', :worker_1_port); verify_metadata --------------------------------------------------------------------- t (1 row) -SELECT 1 FROM master_activate_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "ref_table" to the node localhost:xxxxx +SELECT 1 FROM master_activate_node('', :worker_2_port); +NOTICE: Replicating reference table "ref_table" to the node :xxxxx ?column? 
--------------------------------------------------------------------- 1 (1 row) -SELECT verify_metadata('localhost', :worker_1_port); +SELECT verify_metadata('', :worker_1_port); verify_metadata --------------------------------------------------------------------- t @@ -398,7 +398,7 @@ SELECT verify_metadata('', :worker_1_port); --------------------------------------------------------------------- -- Test master_disable_node() when the node that is being disabled is actually down --------------------------------------------------------------------- -SELECT master_update_node(:nodeid_2, 'localhost', 1); +SELECT master_update_node(:nodeid_2, '', 1); master_update_node --------------------------------------------------------------------- @@ -413,30 +413,30 @@ SELECT wait_until_metadata_sync(); -- set metadatasynced so we try propagating metadata changes UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid IN (:nodeid_1, :nodeid_2); -- should error out -SELECT 1 FROM master_disable_node('localhost', 1); -ERROR: Disabling localhost:xxxxx failed -DETAIL: connection error: localhost:xxxxx +SELECT 1 FROM master_disable_node('', 1); +ERROR: Disabling :xxxxx failed +DETAIL: connection error: :xxxxx HINT: If you are using MX, try stop_metadata_sync_to_node(hostname, port) for nodes that are down before disabling them. -- try again after stopping metadata sync -SELECT stop_metadata_sync_to_node('localhost', 1); +SELECT stop_metadata_sync_to_node('', 1); stop_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -SELECT 1 FROM master_disable_node('localhost', 1); +SELECT 1 FROM master_disable_node('', 1); ?column? --------------------------------------------------------------------- 1 (1 row) -SELECT verify_metadata('localhost', :worker_1_port); +SELECT verify_metadata('', :worker_1_port); verify_metadata --------------------------------------------------------------------- t (1 row) -SELECT master_update_node(:nodeid_2, 'localhost', :worker_2_port); +SELECT master_update_node(:nodeid_2, '', :worker_2_port); master_update_node --------------------------------------------------------------------- @@ -448,14 +448,14 @@ SELECT wait_until_metadata_sync(); (1 row) -SELECT 1 FROM master_activate_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "ref_table" to the node localhost:xxxxx +SELECT 1 FROM master_activate_node('', :worker_2_port); +NOTICE: Replicating reference table "ref_table" to the node :xxxxx ?column? --------------------------------------------------------------------- 1 (1 row) -SELECT verify_metadata('localhost', :worker_1_port); +SELECT verify_metadata('', :worker_1_port); verify_metadata --------------------------------------------------------------------- t @@ -465,7 +465,7 @@ SELECT verify_metadata('', :worker_1_port); --------------------------------------------------------------------- -- Test master_disable_node() when the other node is down --------------------------------------------------------------------- -- node 1 is down.
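-- (These tests simulate a "down" node by repointing it at a port where nothing
-- listens, rather than by stopping the server. A minimal sketch of the pattern,
-- with the hostname as a placeholder since the sanitized output elides it:
--   SELECT master_update_node(:nodeid_1, '<hostname>', 1);  -- port 1: nothing listens there
--   SELECT wait_until_metadata_sync();
-- the hunk below exercises exactly this sequence.)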
-SELECT master_update_node(:nodeid_1, 'localhost', 1); +SELECT master_update_node(:nodeid_1, '', 1); master_update_node --------------------------------------------------------------------- @@ -480,25 +480,25 @@ SELECT wait_until_metadata_sync(); -- set metadatasynced so we try propagating metadata changes UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid IN (:nodeid_1, :nodeid_2); -- should error out -SELECT 1 FROM master_disable_node('localhost', :worker_2_port); -ERROR: Disabling localhost:xxxxx failed -DETAIL: connection error: localhost:xxxxx +SELECT 1 FROM master_disable_node('', :worker_2_port); +ERROR: Disabling :xxxxx failed +DETAIL: connection error: :xxxxx HINT: If you are using MX, try stop_metadata_sync_to_node(hostname, port) for nodes that are down before disabling them. -- try again after stopping metadata sync -SELECT stop_metadata_sync_to_node('localhost', 1); +SELECT stop_metadata_sync_to_node('', 1); stop_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -SELECT 1 FROM master_disable_node('localhost', :worker_2_port); +SELECT 1 FROM master_disable_node('', :worker_2_port); ?column? --------------------------------------------------------------------- 1 (1 row) -- bring up node 1 -SELECT master_update_node(:nodeid_1, 'localhost', :worker_1_port); +SELECT master_update_node(:nodeid_1, '', :worker_1_port); master_update_node --------------------------------------------------------------------- @@ -510,14 +510,14 @@ SELECT wait_until_metadata_sync(); (1 row) -SELECT 1 FROM master_activate_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "ref_table" to the node localhost:xxxxx +SELECT 1 FROM master_activate_node('', :worker_2_port); +NOTICE: Replicating reference table "ref_table" to the node :xxxxx ?column? --------------------------------------------------------------------- 1 (1 row) -SELECT verify_metadata('localhost', :worker_1_port); +SELECT verify_metadata('', :worker_1_port); verify_metadata --------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/multi_mx_partitioning.out b/src/test/regress/expected/multi_mx_partitioning.out index b985bc146..2338d9881 100644 --- a/src/test/regress/expected/multi_mx_partitioning.out +++ b/src/test/regress/expected/multi_mx_partitioning.out @@ -6,7 +6,7 @@ SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; -- make sure we can create partitioning tables in MX SET citus.replication_model TO 'streaming'; -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- @@ -246,19 +246,19 @@ ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again.
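-- (A minimal sketch of the recovery path the HINT above prescribes, with
-- hostname and port as placeholders; each of these calls appears verbatim
-- elsewhere in this patch:
--   SELECT stop_metadata_sync_to_node('<hostname>', <port>);  -- stop MX metadata sync first
--   SELECT 1 FROM master_disable_node('<hostname>', <port>);  -- now succeeds
--   SELECT 1 FROM master_activate_node('<hostname>', <port>); -- once the node is back
--   SELECT start_metadata_sync_to_node('<hostname>', <port>);
-- )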
\c - - - :master_port -- make sure we can repeatedly call start_metadata_sync_to_node -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_name_lengths.out b/src/test/regress/expected/multi_name_lengths.out index 74d74f1de..ccb45d196 100644 --- a/src/test/regress/expected/multi_name_lengths.out +++ b/src/test/regress/expected/multi_name_lengths.out @@ -25,8 +25,8 @@ SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345 List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- - public | too_long_12345678901234567890123456789012345678_e0119164_225000 | table | postgres - public | too_long_12345678901234567890123456789012345678_e0119164_225001 | table | postgres + public | too_long_12345678901234567890123456789012345678_e0119164_225000 | table | + public | too_long_12345678901234567890123456789012345678_e0119164_225001 | table | (2 rows) \c - - - :master_port @@ -169,7 +169,7 @@ CREATE TABLE sneaky_name_lengths ( List of relations Schema | Name | Type | Owner | Table --------------------------------------------------------------------- - public | sneaky_name_lengths_int_col_1234567890123456789012345678901_key | index | postgres | sneaky_name_lengths + public | sneaky_name_lengths_int_col_1234567890123456789012345678901_key | index | | sneaky_name_lengths (1 row) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_name_lengths'::regclass ORDER BY 1 DESC, 2 DESC; @@ -195,7 +195,7 @@ SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2'); List of relations Schema | Name | Type | Owner | Table --------------------------------------------------------------------- - public | sneaky_name_lengths_int_col_1234567890123456789_6402d2cd_225006 | index | postgres | sneaky_name_lengths_225006 + public | sneaky_name_lengths_int_col_1234567890123456789_6402d2cd_225006 | index | | sneaky_name_lengths_225006 (1 row) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_name_lengths_225006'::regclass ORDER BY 1 DESC, 2 DESC; @@ -226,7 +226,7 @@ SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash'); List of relations Schema | Name | Type | Owner | Table --------------------------------------------------------------------- - public | unique_1234567890123456789012345678901234567890_a5986f27_225008 | index | postgres | sneaky_name_lengths_225008 + public | unique_1234567890123456789012345678901234567890_a5986f27_225008 | index | | sneaky_name_lengths_225008 (1 row) \c - - - :master_port @@ -249,8 +249,8 @@ SELECT create_distributed_table('too_long_12345678901234567890123456789012345678 List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- - public | too_long_1234567890123456789012345678901_e0119164_2250000000000 | table | 
postgres - public | too_long_1234567890123456789012345678901_e0119164_2250000000001 | table | postgres + public | too_long_1234567890123456789012345678901_e0119164_2250000000000 | table | + public | too_long_1234567890123456789012345678901_e0119164_2250000000001 | table | (2 rows) \c - - - :master_port @@ -281,16 +281,16 @@ WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!0 List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- - public | elephant_слонслонслонсло_c8b737c2_2250000000002 | table | postgres - public | elephant_слонслонслонсло_c8b737c2_2250000000003 | table | postgres + public | elephant_слонслонслонсло_c8b737c2_2250000000002 | table | + public | elephant_слонслонслонсло_c8b737c2_2250000000003 | table | (2 rows) \di public.elephant_* List of relations Schema | Name | Type | Owner | Table --------------------------------------------------------------------- - public | elephant_слонслонслонсло_14d34928_2250000000002 | index | postgres | elephant_слонслонслонсло_c8b737c2_2250000000002 - public | elephant_слонслонслонсло_14d34928_2250000000003 | index | postgres | elephant_слонслонслонсло_c8b737c2_2250000000003 + public | elephant_слонслонслонсло_14d34928_2250000000002 | index | | elephant_слонслонслонсло_c8b737c2_2250000000002 + public | elephant_слонслонслонсло_14d34928_2250000000003 | index | | elephant_слонслонслонсло_c8b737c2_2250000000003 (2 rows) \c - - - :master_port diff --git a/src/test/regress/expected/multi_null_minmax_value_pruning.out b/src/test/regress/expected/multi_null_minmax_value_pruning.out index ea5b81844..2567e5c00 100644 --- a/src/test/regress/expected/multi_null_minmax_value_pruning.out +++ b/src/test/regress/expected/multi_null_minmax_value_pruning.out @@ -53,7 +53,7 @@ DEBUG: join prunable for intervals [8997,14947] and [1,5986] Task Count: 2 Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Aggregate -> Hash Join Hash Cond: (lineitem.l_orderkey = orders.o_orderkey) @@ -61,7 +61,7 @@ DEBUG: join prunable for intervals [8997,14947] and [1,5986] -> Hash -> Seq Scan on orders_290002 orders -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Aggregate -> Hash Join Hash Cond: (lineitem.l_orderkey = orders.o_orderkey) diff --git a/src/test/regress/expected/multi_orderby_limit_pushdown.out b/src/test/regress/expected/multi_orderby_limit_pushdown.out index 860f8501a..451167869 100644 --- a/src/test/regress/expected/multi_orderby_limit_pushdown.out +++ b/src/test/regress/expected/multi_orderby_limit_pushdown.out @@ -43,7 +43,7 @@ LIMIT 1; Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Limit (cost=1.53..1.53 rows=1 width=36) -> Sort (cost=1.53..1.53 rows=2 width=36) Sort Key: (avg(value_1)) DESC @@ -109,7 +109,7 @@ ORDER BY 2 DESC; Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate (cost=1.58..1.61 rows=2 width=36) Group Key: user_id -> Seq Scan on users_table_1400256 users_table (cost=0.00..1.33 rows=33 width=12) @@ -226,7 +226,7 @@ LIMIT 2; Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Limit -> Sort Sort Key: ((10000 / sum((value_1 + value_2)))) DESC @@ -292,7 +292,7 @@ LIMIT 2; Task Count: 4 Tasks Shown: 
One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Limit -> Sort Sort Key: (sum(value_1)) DESC @@ -332,7 +332,7 @@ LIMIT 5; Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Limit -> Sort Sort Key: (max(et."time")), (avg(ut.value_1)) @@ -389,7 +389,7 @@ LIMIT 5; Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Limit -> Sort Sort Key: (count(DISTINCT ut.value_2)), (avg(ut.value_1)), ut.user_id DESC diff --git a/src/test/regress/expected/multi_partitioning.out b/src/test/regress/expected/multi_partitioning.out index 675ee4301..b435fabd1 100644 --- a/src/test/regress/expected/multi_partitioning.out +++ b/src/test/regress/expected/multi_partitioning.out @@ -190,11 +190,11 @@ ORDER BY INSERT INTO partitioning_hash_test VALUES (8, 5); ERROR: no partition of relation "partitioning_hash_test_1660012" found for row DETAIL: Partition key of the failing row contains (subid) = (5). -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx INSERT INTO partitioning_hash_test VALUES (9, 12); ERROR: no partition of relation "partitioning_hash_test_1660015" found for row DETAIL: Partition key of the failing row contains (subid) = (12). -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx CREATE TABLE partitioning_hash_test_2 (id int, subid int); INSERT INTO partitioning_hash_test_2 VALUES (8, 5); ALTER TABLE partitioning_hash_test ATTACH PARTITION partitioning_hash_test_2 FOR VALUES WITH (MODULUS 3, REMAINDER 2); @@ -375,7 +375,7 @@ SELECT * FROM partitioning_test WHERE id = 7 OR id = 8 ORDER BY 1; UPDATE partitioning_test SET time = '2020-07-07' WHERE id = 7; ERROR: no partition of relation "partitioning_test_1660001" found for row DETAIL: Partition key of the failing row contains ("time") = (2020-07-07). -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx -- UPDATE with subqueries on partitioned table UPDATE partitioning_test @@ -445,7 +445,7 @@ SELECT * FROM partitioning_test_default ORDER BY 1, 2; -- create a new partition (will fail) CREATE TABLE partitioning_test_2014 PARTITION OF partitioning_test FOR VALUES FROM ('2014-01-01') TO ('2015-01-01'); ERROR: updated partition constraint for default partition would be violated by some row -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx BEGIN; ALTER TABLE partitioning_test DETACH PARTITION partitioning_test_default; CREATE TABLE partitioning_test_2014 PARTITION OF partitioning_test FOR VALUES FROM ('2014-01-01') TO ('2015-01-01'); @@ -519,13 +519,13 @@ SELECT * FROM partitioning_test_2009 ORDER BY 1; UPDATE partitioning_test_2009 SET time = time + INTERVAL '6 month'; ERROR: new row for relation "partitioning_test_2009_1660005" violates partition constraint DETAIL: Failing row contains (3, 2010-03-11). -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx -- -- DDL in distributed partitioned tables -- -- test CREATE INDEX -- CREATE INDEX on partitioned table - this will error out --- on earlier versions of postgres earlier than 11. +-- on earlier versions of earlier than 11. 
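-- (The "no partition ... found for row" errors above are ordinary PostgreSQL
-- partition-routing failures surfacing from a worker shard. A DEFAULT partition
-- absorbs such rows, which is what the partitioning_test_default coverage above
-- relies on; a minimal sketch using this file's table:
--   CREATE TABLE partitioning_test_default PARTITION OF partitioning_test DEFAULT;
-- )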
CREATE INDEX partitioning_index ON partitioning_test(id); -- CREATE INDEX on partition CREATE INDEX partitioning_2009_index ON partitioning_test_2009(id); @@ -1210,7 +1210,7 @@ INSERT INTO multi_column_partitioning_0_0_10_0 VALUES(5, -5); INSERT INTO multi_column_partitioning VALUES(10, 1); ERROR: no partition of relation "multi_column_partitioning_1660101" found for row DETAIL: Partition key of the failing row contains (c1, c2) = (10, 1). -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx -- test with MINVALUE/MAXVALUE CREATE TABLE multi_column_partitioning_10_max_20_min PARTITION OF multi_column_partitioning FOR VALUES FROM (10, MAXVALUE) TO (20, MINVALUE); -- test INSERT to partition with MINVALUE/MAXVALUE bounds @@ -1220,7 +1220,7 @@ INSERT INTO multi_column_partitioning_10_max_20_min VALUES(19, -19); INSERT INTO multi_column_partitioning VALUES(20, -20); ERROR: no partition of relation "multi_column_partitioning_1660101" found for row DETAIL: Partition key of the failing row contains (c1, c2) = (20, -20). -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx -- see data is loaded to multi-column partitioned table SELECT * FROM multi_column_partitioning ORDER BY 1, 2; c1 | c2 @@ -1590,7 +1590,7 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id, Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Gather Workers Planned: 2 -> Parallel Hash Join @@ -1638,7 +1638,7 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id, Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Append -> Hash Join Hash Cond: ((partitioning_hash_join_test.id = partitioning_hash_test.id) AND (partitioning_hash_join_test.subid = partitioning_hash_test.subid)) @@ -1668,7 +1668,7 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Hash Join Hash Cond: (partitioning_hash_join_test.id = partitioning_hash_test.id) -> Append @@ -1932,7 +1932,7 @@ ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2010 FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); ERROR: insert or update on table "partitioning_test_2010_1660191" violates foreign key constraint "partitioning_reference_fkey_1660179" DETAIL: Key (id)=(X) is not present in table "reference_table_1660177". 
-CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx -- Truncate, so attaching again won't fail TRUNCATE partitioning_test_2010; -- Attach a table which already has the same constraint diff --git a/src/test/regress/expected/multi_partitioning_utils.out b/src/test/regress/expected/multi_partitioning_utils.out index 68fa39791..8370e225e 100644 --- a/src/test/regress/expected/multi_partitioning_utils.out +++ b/src/test/regress/expected/multi_partitioning_utils.out @@ -91,7 +91,7 @@ SELECT master_get_table_ddl_events('date_partitioned_table'); master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE public.date_partitioned_table (id integer, "time" date) PARTITION BY RANGE ("time") - ALTER TABLE public.date_partitioned_table OWNER TO postgres + ALTER TABLE public.date_partitioned_table OWNER TO (2 rows) -- now create the partitions @@ -173,7 +173,7 @@ SELECT master_get_table_ddl_events('date_partition_2007_100'); master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE public.date_partition_2007_100 (id integer, "time" date) - ALTER TABLE public.date_partition_2007_100 OWNER TO postgres + ALTER TABLE public.date_partition_2007_100 OWNER TO (2 rows) -- now break the partitioning hierarchy @@ -318,7 +318,7 @@ SELECT master_get_table_ddl_events('multi_column_partitioned'); master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE public.multi_column_partitioned (a integer, b integer, c text) PARTITION BY RANGE (a, (((a + b) + 1)), public.some_function(upper(c))) - ALTER TABLE public.multi_column_partitioned OWNER TO postgres + ALTER TABLE public.multi_column_partitioned OWNER TO (2 rows) SELECT drop_and_recreate_partitioned_table('multi_column_partitioned'); @@ -360,7 +360,7 @@ SELECT master_get_table_ddl_events('list_partitioned'); master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE public.list_partitioned (col1 numeric, col2 numeric, col3 character varying(10)) PARTITION BY LIST (col1) - ALTER TABLE public.list_partitioned OWNER TO postgres + ALTER TABLE public.list_partitioned OWNER TO (2 rows) SELECT drop_and_recreate_partitioned_table('list_partitioned'); diff --git a/src/test/regress/expected/multi_prepare_sql.out b/src/test/regress/expected/multi_prepare_sql.out index 8338ee3f9..5ca6d70c9 100644 --- a/src/test/regress/expected/multi_prepare_sql.out +++ b/src/test/regress/expected/multi_prepare_sql.out @@ -956,8 +956,8 @@ SELECT run_command_on_workers($$ $$); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"CREATE DOMAIN") - (localhost,57638,t,"CREATE DOMAIN") + (,xxxxx,t,"CREATE DOMAIN") + (,xxxxx,t,"CREATE DOMAIN") (2 rows) CREATE TABLE domain_partition_column_table ( @@ -1088,7 +1088,7 @@ EXECUTE countsome; -- no replanning (0 rows) -- repair shards, should invalidate via master_metadata_utility.c -SELECT master_copy_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port) +SELECT master_copy_shard_placement(shardid, '', :worker_2_port, '', :worker_1_port) FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_table'::regclass) diff --git a/src/test/regress/expected/multi_read_from_secondaries.out b/src/test/regress/expected/multi_read_from_secondaries.out index cbab871e6..f1f5a7503
100644 --- a/src/test/regress/expected/multi_read_from_secondaries.out +++ b/src/test/regress/expected/multi_read_from_secondaries.out @@ -1,12 +1,12 @@ SET citus.next_shard_id TO 1600000; -\c "dbname=regression options='-c\ citus.use_secondary_nodes=always'" +\c "dbname= options='-c\ citus.use_secondary_nodes=always'" CREATE TABLE dest_table (a int, b int); CREATE TABLE source_table (a int, b int); -- attempts to change metadata should fail while reading from secondaries SELECT create_distributed_table('dest_table', 'a'); ERROR: writing to worker nodes is not currently allowed DETAIL: citus.use_secondary_nodes is set to 'always' -\c "dbname=regression options='-c\ citus.use_secondary_nodes=never'" +\c "dbname= options='-c\ citus.use_secondary_nodes=never'" SELECT create_distributed_table('dest_table', 'a'); create_distributed_table --------------------------------------------------------------------- @@ -26,12 +26,12 @@ INSERT INTO source_table (a, b) VALUES (10, 10); SELECT nodeid, groupid, nodename, nodeport, noderack, isactive, noderole, nodecluster FROM pg_dist_node; nodeid | groupid | nodename | nodeport | noderack | isactive | noderole | nodecluster --------------------------------------------------------------------- - 1 | 1 | localhost | 57637 | default | t | primary | default - 2 | 2 | localhost | 57638 | default | t | primary | default + 1 | 1 | | xxxxx | default | t | primary | default + 2 | 2 | | xxxxx | default | t | primary | default (2 rows) UPDATE pg_dist_node SET noderole = 'secondary'; -\c "dbname=regression options='-c\ citus.use_secondary_nodes=always'" +\c "dbname= options='-c\ citus.use_secondary_nodes=always'" -- inserts are disallowed INSERT INTO dest_table (a, b) VALUES (1, 2); ERROR: writing to worker nodes is not currently allowed @@ -81,6 +81,6 @@ INSERT INTO dest_table (a, b) SELECT a, b FROM source_table; ERROR: writing to worker nodes is not currently allowed DETAIL: citus.use_secondary_nodes is set to 'always' -\c "dbname=regression options='-c\ citus.use_secondary_nodes=never'" +\c "dbname= options='-c\ citus.use_secondary_nodes=never'" UPDATE pg_dist_node SET noderole = 'primary'; DROP TABLE dest_table; diff --git a/src/test/regress/expected/multi_real_time_transaction.out b/src/test/regress/expected/multi_real_time_transaction.out index 663d8c8d2..0f0f6378b 100644 --- a/src/test/regress/expected/multi_real_time_transaction.out +++ b/src/test/regress/expected/multi_real_time_transaction.out @@ -398,8 +398,8 @@ CREATE USER rls_user; SELECT run_command_on_workers('CREATE USER rls_user'); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") + (,xxxxx,t,"CREATE ROLE") + (,xxxxx,t,"CREATE ROLE") (2 rows) GRANT ALL ON SCHEMA multi_real_time_transaction TO rls_user; @@ -407,15 +407,15 @@ GRANT ALL ON ALL TABLES IN SCHEMA multi_real_time_transaction TO rls_user; SELECT run_command_on_workers('GRANT ALL ON SCHEMA multi_real_time_transaction TO rls_user'); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,GRANT) - (localhost,57638,t,GRANT) + (,xxxxx,t,GRANT) + (,xxxxx,t,GRANT) (2 rows) SELECT run_command_on_workers('GRANT ALL ON ALL TABLES IN SCHEMA multi_real_time_transaction TO rls_user'); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,GRANT) - (localhost,57638,t,GRANT) + (,xxxxx,t,GRANT) + (,xxxxx,t,GRANT) (2 rows) -- 
create trigger on one worker to reject access if GUC not diff --git a/src/test/regress/expected/multi_reference_table.out b/src/test/regress/expected/multi_reference_table.out index 8763eaad5..db032342c 100644 --- a/src/test/regress/expected/multi_reference_table.out +++ b/src/test/regress/expected/multi_reference_table.out @@ -1426,14 +1426,14 @@ SELECT master_update_shard_statistics(:a_shard_id); (1 row) CREATE TABLE append_reference_tmp_table (id INT); -SELECT master_append_table_to_shard(:a_shard_id, 'append_reference_tmp_table', 'localhost', :master_port); +SELECT master_append_table_to_shard(:a_shard_id, 'append_reference_tmp_table', '', :master_port); ERROR: cannot append to shardId 1250019 DETAIL: We currently don't support appending to shards in hash-partitioned or reference tables SELECT master_get_table_ddl_events('reference_schema.reference_table_ddl'); master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE reference_schema.reference_table_ddl (value_2 double precision DEFAULT 25.0, value_3 text NOT NULL, value_4 timestamp without time zone, value_5 double precision) - ALTER TABLE reference_schema.reference_table_ddl OWNER TO postgres + ALTER TABLE reference_schema.reference_table_ddl OWNER TO (2 rows) -- in reality, we wouldn't need to repair any reference table shard placements @@ -1441,7 +1441,7 @@ SELECT master_get_table_ddl_events('reference_schema.reference_table_ddl'); SELECT placementid AS a_placement_id FROM pg_dist_shard_placement WHERE shardid = :a_shard_id AND nodeport = :worker_1_port \gset SELECT placementid AS b_placement_id FROM pg_dist_shard_placement WHERE shardid = :a_shard_id AND nodeport = :worker_2_port \gset UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE placementid = :a_placement_id; -SELECT master_copy_shard_placement(:a_shard_id, 'localhost', :worker_2_port, 'localhost', :worker_1_port); +SELECT master_copy_shard_placement(:a_shard_id, '', :worker_2_port, '', :worker_1_port); master_copy_shard_placement --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_remove_node_reference_table.out b/src/test/regress/expected/multi_remove_node_reference_table.out index 28fd7f502..6016ea95a 100644 --- a/src/test/regress/expected/multi_remove_node_reference_table.out +++ b/src/test/regress/expected/multi_remove_node_reference_table.out @@ -10,15 +10,15 @@ ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 1380000; CREATE TABLE tmp_shard_placement AS SELECT * FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; DELETE FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; -- make worker 1 receive metadata changes -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -- remove non-existing node -SELECT master_remove_node('localhost', 55555); -ERROR: node at "localhost:xxxxx" does not exist +SELECT master_remove_node('', 55555); +ERROR: node at ":xxxxx" does not exist -- remove a node with no reference tables -- verify node exist before removal SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; @@ -27,7 +27,7 @@ SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; 1 (1 row) -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('', :worker_2_port); master_remove_node 
--------------------------------------------------------------------- @@ -41,10 +41,10 @@ SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; (1 row) -- re-add the node for next tests -SELECT master_add_node('localhost', :worker_2_port) AS worker_2_nodeid \gset +SELECT master_add_node('', :worker_2_port) AS worker_2_nodeid \gset SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeid=:worker_2_nodeid \gset -- add a secondary to check we don't attempt to replicate the table to it -SELECT 1 FROM master_add_node('localhost', 9000, groupid=>:worker_2_group, noderole=>'secondary'); +SELECT 1 FROM master_add_node('', 9000, groupid=>:worker_2_group, noderole=>'secondary'); ?column? --------------------------------------------------------------------- 1 @@ -59,7 +59,7 @@ SELECT create_reference_table('remove_node_reference_table'); (1 row) -- make sure when we add a secondary we don't attempt to add placements to it -SELECT 1 FROM master_add_node('localhost', 9001, groupid=>:worker_2_group, noderole=>'secondary'); +SELECT 1 FROM master_add_node('', 9001, groupid=>:worker_2_group, noderole=>'secondary'); ?column? --------------------------------------------------------------------- 1 @@ -72,7 +72,7 @@ SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; (1 row) -- make sure when we disable a secondary we don't remove any placements -SELECT master_disable_node('localhost', 9001); +SELECT master_disable_node('', 9001); master_disable_node --------------------------------------------------------------------- @@ -91,7 +91,7 @@ SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; (1 row) -- make sure when we activate a secondary we don't add any placements -SELECT 1 FROM master_activate_node('localhost', 9001); +SELECT 1 FROM master_activate_node('', 9001); ?column? 
--------------------------------------------------------------------- 1 @@ -104,7 +104,7 @@ SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; (1 row) -- make sure when we remove a secondary we don't remove any placements -SELECT master_remove_node('localhost', 9001); +SELECT master_remove_node('', 9001); master_remove_node --------------------------------------------------------------------- @@ -131,7 +131,7 @@ WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1380000 | 1 | 0 | localhost | 57638 + 1380000 | 1 | 0 | | xxxxx (1 row) SELECT * @@ -160,11 +160,11 @@ WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1380000 | 1 | 0 | localhost | 57638 + 1380000 | 1 | 0 | | xxxxx (1 row) \c - - - :master_port -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('', :worker_2_port); master_remove_node --------------------------------------------------------------------- @@ -217,32 +217,32 @@ WHERE \c - - - :master_port -- remove same node twice -SELECT master_remove_node('localhost', :worker_2_port); -ERROR: node at "localhost:xxxxx" does not exist +SELECT master_remove_node('', :worker_2_port); +ERROR: node at ":xxxxx" does not exist -- re-add the node for next tests -SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx +SELECT 1 FROM master_add_node('', :worker_2_port); +NOTICE: Replicating reference table "remove_node_reference_table" to the node :xxxxx ?column? --------------------------------------------------------------------- 1 (1 row) -- try to disable the node before removing it (this used to crash) -SELECT master_disable_node('localhost', :worker_2_port); +SELECT master_disable_node('', :worker_2_port); master_disable_node --------------------------------------------------------------------- (1 row) -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('', :worker_2_port); master_remove_node --------------------------------------------------------------------- (1 row) -- re-add the node for the next test -SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx +SELECT 1 FROM master_add_node('', :worker_2_port); +NOTICE: Replicating reference table "remove_node_reference_table" to the node :xxxxx ?column? 
--------------------------------------------------------------------- 1 @@ -264,7 +264,7 @@ WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1380000 | 1 | 0 | localhost | 57638 + 1380000 | 1 | 0 | | xxxxx (1 row) SELECT * @@ -293,12 +293,12 @@ WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1380000 | 1 | 0 | localhost | 57638 + 1380000 | 1 | 0 | | xxxxx (1 row) \c - - - :master_port BEGIN; -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('', :worker_2_port); master_remove_node --------------------------------------------------------------------- @@ -320,7 +320,7 @@ WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1380000 | 1 | 0 | localhost | 57638 + 1380000 | 1 | 0 | | xxxxx (1 row) SELECT * @@ -349,7 +349,7 @@ WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1380000 | 1 | 0 | localhost | 57638 + 1380000 | 1 | 0 | | xxxxx (1 row) \c - - - :master_port @@ -369,7 +369,7 @@ WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1380000 | 1 | 0 | localhost | 57638 + 1380000 | 1 | 0 | | xxxxx (1 row) SELECT * @@ -398,12 +398,12 @@ WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1380000 | 1 | 0 | localhost | 57638 + 1380000 | 1 | 0 | | xxxxx (1 row) \c - - - :master_port BEGIN; -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('', :worker_2_port); master_remove_node --------------------------------------------------------------------- @@ -457,8 +457,8 @@ WHERE \c - - - :master_port -- re-add the node for next tests -SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx +SELECT 1 FROM master_add_node('', :worker_2_port); +NOTICE: Replicating reference table "remove_node_reference_table" to the node :xxxxx ?column? 
--------------------------------------------------------------------- 1 @@ -480,7 +480,7 @@ WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1380000 | 1 | 0 | localhost | 57638 + 1380000 | 1 | 0 | | xxxxx (1 row) SELECT * @@ -509,13 +509,13 @@ WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1380000 | 1 | 0 | localhost | 57638 + 1380000 | 1 | 0 | | xxxxx (1 row) \c - - - :master_port BEGIN; INSERT INTO remove_node_reference_table VALUES(1); -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('', :worker_2_port); master_remove_node --------------------------------------------------------------------- @@ -582,8 +582,8 @@ SELECT * FROM remove_node_reference_table; \c - - - :master_port -- re-add the node for next tests -SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx +SELECT 1 FROM master_add_node('', :worker_2_port); +NOTICE: Replicating reference table "remove_node_reference_table" to the node :xxxxx ?column? --------------------------------------------------------------------- 1 @@ -605,7 +605,7 @@ WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1380000 | 1 | 0 | localhost | 57638 + 1380000 | 1 | 0 | | xxxxx (1 row) SELECT * @@ -634,13 +634,13 @@ WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1380000 | 1 | 0 | localhost | 57638 + 1380000 | 1 | 0 | | xxxxx (1 row) \c - - - :master_port BEGIN; ALTER TABLE remove_node_reference_table ADD column2 int; -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('', :worker_2_port); master_remove_node --------------------------------------------------------------------- @@ -703,8 +703,8 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.remove_ (2 rows) -- re-add the node for next tests -SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx +SELECT 1 FROM master_add_node('', :worker_2_port); +NOTICE: Replicating reference table "remove_node_reference_table" to the node :xxxxx ?column? --------------------------------------------------------------------- 1 @@ -726,7 +726,7 @@ WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1380000 | 1 | 0 | localhost | 57638 + 1380000 | 1 | 0 | | xxxxx (1 row) SELECT * @@ -741,7 +741,7 @@ WHERE colocationid IN (1 row) BEGIN; -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('', :worker_2_port); master_remove_node --------------------------------------------------------------------- @@ -772,7 +772,7 @@ SELECT * FROM pg_dist_colocation WHERE colocationid = 1380000; (0 rows) -- re-add the node for next tests -SELECT 1 FROM master_add_node('localhost', :worker_2_port); +SELECT 1 FROM master_add_node('', :worker_2_port); ?column? 
--------------------------------------------------------------------- 1 @@ -812,8 +812,8 @@ ORDER BY shardid; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1380001 | 1 | 0 | localhost | 57638 - 1380002 | 1 | 0 | localhost | 57638 + 1380001 | 1 | 0 | | xxxxx + 1380002 | 1 | 0 | | xxxxx (2 rows) SELECT * @@ -844,12 +844,12 @@ ORDER BY shardid; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1380001 | 1 | 0 | localhost | 57638 - 1380002 | 1 | 0 | localhost | 57638 + 1380001 | 1 | 0 | | xxxxx + 1380002 | 1 | 0 | | xxxxx (2 rows) \c - - - :master_port -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('', :worker_2_port); master_remove_node --------------------------------------------------------------------- @@ -902,9 +902,9 @@ WHERE \c - - - :master_port -- re-add the node for next tests -SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx -NOTICE: Replicating reference table "table1" to the node localhost:xxxxx +SELECT 1 FROM master_add_node('', :worker_2_port); +NOTICE: Replicating reference table "remove_node_reference_table" to the node :xxxxx +NOTICE: Replicating reference table "table1" to the node :xxxxx ?column? --------------------------------------------------------------------- 1 @@ -928,8 +928,8 @@ ORDER BY shardid; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1380001 | 1 | 0 | localhost | 57638 - 1380002 | 1 | 0 | localhost | 57638 + 1380001 | 1 | 0 | | xxxxx + 1380002 | 1 | 0 | | xxxxx (2 rows) SELECT * @@ -959,12 +959,12 @@ WHERE ORDER BY shardid ASC; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1380001 | 1 | 0 | localhost | 57638 - 1380002 | 1 | 0 | localhost | 57638 + 1380001 | 1 | 0 | | xxxxx + 1380002 | 1 | 0 | | xxxxx (2 rows) \c - - - :master_port -SELECT master_disable_node('localhost', :worker_2_port); +SELECT master_disable_node('', :worker_2_port); master_disable_node --------------------------------------------------------------------- @@ -1017,9 +1017,9 @@ WHERE \c - - - :master_port -- re-add the node for next tests -SELECT 1 FROM master_activate_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx -NOTICE: Replicating reference table "table1" to the node localhost:xxxxx +SELECT 1 FROM master_activate_node('', :worker_2_port); +NOTICE: Replicating reference table "remove_node_reference_table" to the node :xxxxx +NOTICE: Replicating reference table "table1" to the node :xxxxx ?column? 
--------------------------------------------------------------------- 1 @@ -1029,7 +1029,7 @@ NOTICE: Replicating reference table "table1" to the node localhost:xxxxx DROP TABLE remove_node_reference_table; DROP TABLE remove_node_reference_table_schema.table1; DROP SCHEMA remove_node_reference_table_schema CASCADE; -SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); +SELECT stop_metadata_sync_to_node('', :worker_1_port); stop_metadata_sync_to_node --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_repair_shards.out b/src/test/regress/expected/multi_repair_shards.out index 710d5a6f1..80673edca 100644 --- a/src/test/regress/expected/multi_repair_shards.out +++ b/src/test/regress/expected/multi_repair_shards.out @@ -39,17 +39,17 @@ UPDATE pg_dist_placement SET shardstate = 3 WHERE shardid = :newshardid -- cannot repair a shard after a modification (transaction still open during repair) BEGIN; ALTER TABLE customer_engagements ADD COLUMN value float; -SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT master_copy_shard_placement(:newshardid, '', :worker_1_port, '', :worker_2_port); ERROR: cannot open new connections after the first modification command within a transaction ROLLBACK; BEGIN; INSERT INTO customer_engagements VALUES (4, '04-01-2015', 'fourth event'); -SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT master_copy_shard_placement(:newshardid, '', :worker_1_port, '', :worker_2_port); ERROR: cannot open new connections after the first modification command within a transaction ROLLBACK; -- modifications after repairing a shard are fine (will use new metadata) BEGIN; -SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT master_copy_shard_placement(:newshardid, '', :worker_1_port, '', :worker_2_port); master_copy_shard_placement --------------------------------------------------------------------- @@ -58,7 +58,7 @@ SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'lo ALTER TABLE customer_engagements ADD COLUMN value float; ROLLBACK; BEGIN; -SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT master_copy_shard_placement(:newshardid, '', :worker_1_port, '', :worker_2_port); master_copy_shard_placement --------------------------------------------------------------------- @@ -68,14 +68,14 @@ INSERT INTO customer_engagements VALUES (4, '04-01-2015', 'fourth event'); ROLLBACK; -- deactivate placement UPDATE pg_dist_placement SET shardstate = 1 WHERE groupid = :worker_2_group and shardid = :newshardid; -SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT master_copy_shard_placement(:newshardid, '', :worker_1_port, '', :worker_2_port); ERROR: target placement must be in inactive state UPDATE pg_dist_placement SET shardstate = 3 WHERE groupid = :worker_2_group and shardid = :newshardid; -- also try to copy from an inactive placement -SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port); +SELECT master_copy_shard_placement(:newshardid, '', :worker_2_port, '', :worker_1_port); ERROR: source placement must be in active state -- "copy" this shard from the first placement to the second one -SELECT master_copy_shard_placement(:newshardid,
'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT master_copy_shard_placement(:newshardid, '', :worker_1_port, '', :worker_2_port); master_copy_shard_placement --------------------------------------------------------------------- @@ -115,6 +115,6 @@ SELECT shardid as remotenewshardid FROM pg_dist_shard WHERE logicalrelid = 'remo -- now, update the second placement as unhealthy UPDATE pg_dist_placement SET shardstate = 3 WHERE shardid = :remotenewshardid AND groupid = :worker_2_group; -- oops! we don't support repairing shards backed by foreign tables -SELECT master_copy_shard_placement(:remotenewshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT master_copy_shard_placement(:remotenewshardid, '', :worker_1_port, '', :worker_2_port); ERROR: cannot repair shard DETAIL: Table remote_engagements is a foreign table. Repairing shards backed by foreign tables is not supported. diff --git a/src/test/regress/expected/multi_repartition_join_planning.out b/src/test/regress/expected/multi_repartition_join_planning.out index 3f3340469..cde9543d3 100644 --- a/src/test/regress/expected/multi_repartition_join_planning.out +++ b/src/test/regress/expected/multi_repartition_join_planning.out @@ -71,8 +71,8 @@ DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290002 orders ON ((lineitem.l_orderkey OPERATOR(pg_catalog.=) orders.o_orderkey))) WHERE ((lineitem.l_partkey OPERATOR(pg_catalog.<) 1000) AND (orders.o_totalprice OPERATOR(pg_catalog.>) 10.0))" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290003 orders ON ((lineitem.l_orderkey OPERATOR(pg_catalog.=) orders.o_orderkey))) WHERE ((lineitem.l_partkey OPERATOR(pg_catalog.<) 1000) AND (orders.o_totalprice OPERATOR(pg_catalog.>) 10.0))" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [6001,7000] and [1,1000] DEBUG: generated sql query for task 2 @@ -83,8 +83,8 @@ DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 3 DEBUG: pruning merge fetch taskId 3 DETAIL: Creating dependency on merge taskId 6 -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx DEBUG: join prunable for intervals [1,1000] and [1001,2000] DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [1001,2000] and [1,1000] @@ -103,9 +103,9 @@ DEBUG: pruning merge fetch taskId 3 DETAIL: Creating dependency on merge taskId 8 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 11 -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx DEBUG: completed cleanup query for job 3 DEBUG: completed cleanup query for job 3 DEBUG: completed cleanup query for job 2 @@ -161,14 +161,14 @@ DEBUG: generated sql query for task 1 DETAIL: query 
string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity OPERATOR(pg_catalog.<) 5.0)" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity OPERATOR(pg_catalog.<) 5.0)" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE (o_totalprice OPERATOR(pg_catalog.<>) 4.0)" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE (o_totalprice OPERATOR(pg_catalog.<>) 4.0)" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -205,10 +205,10 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx DEBUG: completed cleanup query for job 6 DEBUG: completed cleanup query for job 6 DEBUG: completed cleanup query for job 4 @@ -235,14 +235,14 @@ DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT l_suppkey FROM lineitem_290000 lineitem WHERE true" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT l_suppkey FROM lineitem_290001 lineitem WHERE true" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE true" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE true" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -279,10 +279,10 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx DEBUG: completed cleanup query for job 9 DEBUG: completed cleanup query for job 9 DEBUG: completed cleanup query for job 7 @@ -311,14 +311,14 @@ DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT l_suppkey FROM lineitem_290000 lineitem WHERE true" DEBUG: 
generated sql query for task 2 DETAIL: query string: "SELECT l_suppkey FROM lineitem_290001 lineitem WHERE true" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE true" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE true" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -355,10 +355,10 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx DEBUG: completed cleanup query for job 12 DEBUG: completed cleanup query for job 12 DEBUG: completed cleanup query for job 10 @@ -385,14 +385,14 @@ DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT l_suppkey FROM lineitem_290000 lineitem WHERE true" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT l_suppkey FROM lineitem_290001 lineitem WHERE true" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE true" DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE true" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -429,10 +429,10 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx DEBUG: completed cleanup query for job 15 DEBUG: completed cleanup query for job 15 DEBUG: completed cleanup query for job 13 @@ -461,10 +461,10 @@ DEBUG: generated sql query for task 3 DETAIL: query string: "SELECT s_i_id, s_w_id, s_quantity FROM stock_690006 stock WHERE true" DEBUG: generated sql query for task 4 DETAIL: query string: "SELECT s_i_id, s_w_id, s_quantity FROM stock_690007 stock WHERE true" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: 
assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx DEBUG: generated sql query for task 1 DETAIL: query string: "SELECT ol_i_id FROM order_line_690000 order_line WHERE true" DEBUG: generated sql query for task 2 @@ -473,10 +473,10 @@ DEBUG: generated sql query for task 3 DETAIL: query string: "SELECT ol_i_id FROM order_line_690002 order_line WHERE true" DEBUG: generated sql query for task 4 DETAIL: query string: "SELECT ol_i_id FROM order_line_690003 order_line WHERE true" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -513,10 +513,10 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 20 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 20 -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx DEBUG: completed cleanup query for job 18 DEBUG: completed cleanup query for job 18 DEBUG: completed cleanup query for job 16 diff --git a/src/test/regress/expected/multi_repartition_join_task_assignment.out b/src/test/regress/expected/multi_repartition_join_task_assignment.out index d3146999d..6a42113e6 100644 --- a/src/test/regress/expected/multi_repartition_join_task_assignment.out +++ b/src/test/regress/expected/multi_repartition_join_task_assignment.out @@ -18,8 +18,8 @@ FROM WHERE o_custkey = c_custkey; DEBUG: Router planner does not support append-partitioned tables. -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx DEBUG: join prunable for intervals [1,1000] and [1001,2000] DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [1001,2000] and [1,1000] @@ -32,9 +32,9 @@ DEBUG: pruning merge fetch taskId 3 DETAIL: Creating dependency on merge taskId 6 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 9 -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx count --------------------------------------------------------------------- 2985 @@ -52,17 +52,17 @@ WHERE o_custkey = c_custkey AND o_orderkey = l_orderkey; DEBUG: Router planner does not support append-partitioned tables. 
-DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx DEBUG: join prunable for intervals [1,5986] and [8997,14947] DEBUG: join prunable for intervals [8997,14947] and [1,5986] DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 4 DEBUG: pruning merge fetch taskId 3 DETAIL: Creating dependency on merge taskId 8 -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx count --------------------------------------------------------------------- 12000 @@ -77,11 +77,11 @@ FROM WHERE l_partkey = c_nationkey; DEBUG: Router planner does not support append-partitioned tables. -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -110,10 +110,10 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 16 -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx count --------------------------------------------------------------------- 125 diff --git a/src/test/regress/expected/multi_replicate_reference_table.out b/src/test/regress/expected/multi_replicate_reference_table.out index 714e8353e..e6043ac33 100644 --- a/src/test/regress/expected/multi_replicate_reference_table.out +++ b/src/test/regress/expected/multi_replicate_reference_table.out @@ -9,7 +9,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 1370000; -- remove a node for testing purposes CREATE TABLE tmp_shard_placement AS SELECT * FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; DELETE FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('', :worker_2_port); master_remove_node --------------------------------------------------------------------- @@ -23,7 +23,7 @@ SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; 0 (1 row) -SELECT 1 FROM master_add_node('localhost', :worker_2_port); +SELECT 1 FROM master_add_node('', :worker_2_port); ?column? 
--------------------------------------------------------------------- 1 @@ -48,7 +48,7 @@ WHERE (0 rows) -- test adding new node with a reference table which does not have any healthy placement -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('', :worker_2_port); master_remove_node --------------------------------------------------------------------- @@ -69,7 +69,7 @@ SELECT create_reference_table('replicate_reference_table_unhealthy'); (1 row) UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1370000; -SELECT 1 FROM master_add_node('localhost', :worker_2_port); +SELECT 1 FROM master_add_node('', :worker_2_port); ERROR: could not find any healthy placement for shard xxxxx -- verify node is not added SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; @@ -121,8 +121,8 @@ WHERE colocationid IN 10004 | 1 | -1 | 0 | 0 (1 row) -SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "replicate_reference_table_valid" to the node localhost:xxxxx +SELECT 1 FROM master_add_node('', :worker_2_port); +NOTICE: Replicating reference table "replicate_reference_table_valid" to the node :xxxxx ?column? --------------------------------------------------------------------- 1 @@ -138,7 +138,7 @@ WHERE ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1370001 | 1 | 0 | localhost | 57638 + 1370001 | 1 | 0 | | xxxxx (1 row) SELECT * @@ -163,7 +163,7 @@ WHERE ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1370001 | 1 | 0 | localhost | 57638 + 1370001 | 1 | 0 | | xxxxx (1 row) SELECT * @@ -177,7 +177,7 @@ WHERE colocationid IN 10004 | 1 | -1 | 0 | 0 (1 row) -SELECT 1 FROM master_add_node('localhost', :worker_2_port); +SELECT 1 FROM master_add_node('', :worker_2_port); ?column? --------------------------------------------------------------------- 1 @@ -193,7 +193,7 @@ WHERE ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1370001 | 1 | 0 | localhost | 57638 + 1370001 | 1 | 0 | | xxxxx (1 row) SELECT * @@ -209,7 +209,7 @@ WHERE colocationid IN DROP TABLE replicate_reference_table_valid; -- test replicating a reference table when a new node added in TRANSACTION + ROLLBACK -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('', :worker_2_port); master_remove_node --------------------------------------------------------------------- @@ -246,8 +246,8 @@ WHERE colocationid IN (1 row) BEGIN; -SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "replicate_reference_table_rollback" to the node localhost:xxxxx +SELECT 1 FROM master_add_node('', :worker_2_port); +NOTICE: Replicating reference table "replicate_reference_table_rollback" to the node :xxxxx ?column? --------------------------------------------------------------------- 1 @@ -310,8 +310,8 @@ WHERE colocationid IN (1 row) BEGIN; -SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "replicate_reference_table_commit" to the node localhost:xxxxx +SELECT 1 FROM master_add_node('', :worker_2_port); +NOTICE: Replicating reference table "replicate_reference_table_commit" to the node :xxxxx ?column? 
--------------------------------------------------------------------- 1 @@ -328,7 +328,7 @@ WHERE ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1370003 | 1 | 0 | localhost | 57638 + 1370003 | 1 | 0 | | xxxxx (1 row) SELECT * @@ -344,7 +344,7 @@ WHERE colocationid IN DROP TABLE replicate_reference_table_commit; -- test adding new node + upgrading another hash distributed table to reference table + creating new reference table in TRANSACTION -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('', :worker_2_port); master_remove_node --------------------------------------------------------------------- @@ -408,7 +408,7 @@ ORDER BY logicalrelid; BEGIN; SET LOCAL client_min_messages TO ERROR; -SELECT 1 FROM master_add_node('localhost', :worker_2_port); +SELECT 1 FROM master_add_node('', :worker_2_port); ?column? --------------------------------------------------------------------- 1 @@ -437,9 +437,9 @@ WHERE ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1370004 | 1 | 0 | localhost | 57638 - 1370005 | 1 | 0 | localhost | 57638 - 1370006 | 1 | 0 | localhost | 57638 + 1370004 | 1 | 0 | | xxxxx + 1370005 | 1 | 0 | | xxxxx + 1370006 | 1 | 0 | | xxxxx (3 rows) SELECT * @@ -472,7 +472,7 @@ DROP TABLE replicate_reference_table_reference_one; DROP TABLE replicate_reference_table_hash; DROP TABLE replicate_reference_table_reference_two; -- test inserting a value then adding a new node in a transaction -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('', :worker_2_port); master_remove_node --------------------------------------------------------------------- @@ -487,7 +487,7 @@ SELECT create_reference_table('replicate_reference_table_insert'); BEGIN; INSERT INTO replicate_reference_table_insert VALUES(1); -SELECT 1 FROM master_add_node('localhost', :worker_2_port); +SELECT 1 FROM master_add_node('', :worker_2_port); ERROR: cannot open new connections after the first modification command within a transaction ROLLBACK; DROP TABLE replicate_reference_table_insert; @@ -501,7 +501,7 @@ SELECT create_reference_table('replicate_reference_table_copy'); BEGIN; COPY replicate_reference_table_copy FROM STDIN; -SELECT 1 FROM master_add_node('localhost', :worker_2_port); +SELECT 1 FROM master_add_node('', :worker_2_port); ERROR: cannot open new connections after the first modification command within a transaction ROLLBACK; DROP TABLE replicate_reference_table_copy; @@ -515,7 +515,7 @@ SELECT create_reference_table('replicate_reference_table_ddl'); BEGIN; ALTER TABLE replicate_reference_table_ddl ADD column2 int; -SELECT 1 FROM master_add_node('localhost', :worker_2_port); +SELECT 1 FROM master_add_node('', :worker_2_port); ERROR: cannot open new connections after the first modification command within a transaction ROLLBACK; DROP TABLE replicate_reference_table_ddl; @@ -551,8 +551,8 @@ WHERE colocationid IN (1 row) BEGIN; -SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "replicate_reference_table_drop" to the node localhost:xxxxx +SELECT 1 FROM master_add_node('', :worker_2_port); +NOTICE: Replicating reference table "replicate_reference_table_drop" to the node :xxxxx ?column? 
--------------------------------------------------------------------- 1 @@ -578,7 +578,7 @@ SELECT * FROM pg_dist_colocation WHERE colocationid = 1370009; (0 rows) -- test adding a node while there is a reference table at another schema -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('', :worker_2_port); master_remove_node --------------------------------------------------------------------- @@ -615,8 +615,8 @@ WHERE colocationid IN 10004 | 1 | -1 | 0 | 0 (1 row) -SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "table1" to the node localhost:xxxxx +SELECT 1 FROM master_add_node('', :worker_2_port); +NOTICE: Replicating reference table "table1" to the node :xxxxx ?column? --------------------------------------------------------------------- 1 @@ -632,7 +632,7 @@ WHERE ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1370011 | 1 | 0 | localhost | 57638 + 1370011 | 1 | 0 | | xxxxx (1 row) SELECT * @@ -649,7 +649,7 @@ WHERE colocationid IN DROP TABLE replicate_reference_table_schema.table1; DROP SCHEMA replicate_reference_table_schema CASCADE; -- test adding a node when there are foreign keys between reference tables -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('', :worker_2_port); master_remove_node --------------------------------------------------------------------- @@ -678,10 +678,10 @@ ORDER BY shardid, nodeport; --------------------------------------------------------------------- (0 rows) -SELECT 1 FROM master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "ref_table_1" to the node localhost:xxxxx -NOTICE: Replicating reference table "ref_table_2" to the node localhost:xxxxx -NOTICE: Replicating reference table "ref_table_3" to the node localhost:xxxxx +SELECT 1 FROM master_add_node('', :worker_2_port); +NOTICE: Replicating reference table "ref_table_1" to the node :xxxxx +NOTICE: Replicating reference table "ref_table_2" to the node :xxxxx +NOTICE: Replicating reference table "ref_table_3" to the node :xxxxx ?column? 
--------------------------------------------------------------------- 1 @@ -697,22 +697,22 @@ WHERE ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1370012 | 1 | 0 | localhost | 57638 - 1370013 | 1 | 0 | localhost | 57638 - 1370014 | 1 | 0 | localhost | 57638 + 1370012 | 1 | 0 | | xxxxx + 1370013 | 1 | 0 | | xxxxx + 1370014 | 1 | 0 | | xxxxx (3 rows) -- verify constraints have been created on the new node SELECT run_command_on_workers('select count(*) from pg_constraint where contype=''f'' AND conname like ''ref_table%'';'); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,2) - (localhost,57638,t,2) + (,xxxxx,t,2) + (,xxxxx,t,2) (2 rows) DROP TABLE ref_table_1, ref_table_2, ref_table_3; -- do some tests with inactive node -SELECT master_remove_node('localhost', :worker_2_port); +SELECT master_remove_node('', :worker_2_port); master_remove_node --------------------------------------------------------------------- @@ -725,7 +725,7 @@ SELECT create_reference_table('initially_not_replicated_reference_table'); (1 row) -SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port); +SELECT 1 FROM master_add_inactive_node('', :worker_2_port); ?column? --------------------------------------------------------------------- 1 @@ -747,12 +747,12 @@ WHERE ORDER BY 1,4,5; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1370015 | 1 | 0 | localhost | 57637 + 1370015 | 1 | 0 | | xxxxx (1 row) -- we should see the two shard placements after activation -SELECT 1 FROM master_activate_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "initially_not_replicated_reference_table" to the node localhost:xxxxx +SELECT 1 FROM master_activate_node('', :worker_2_port); +NOTICE: Replicating reference table "initially_not_replicated_reference_table" to the node :xxxxx ?column? --------------------------------------------------------------------- 1 @@ -773,12 +773,12 @@ WHERE ORDER BY 1,4,5; shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- - 1370015 | 1 | 0 | localhost | 57637 - 1370015 | 1 | 0 | localhost | 57638 + 1370015 | 1 | 0 | | xxxxx + 1370015 | 1 | 0 | | xxxxx (2 rows) -- this should have no effect -SELECT 1 FROM master_add_node('localhost', :worker_2_port); +SELECT 1 FROM master_add_node('', :worker_2_port); ?column? 
--------------------------------------------------------------------- 1 diff --git a/src/test/regress/expected/multi_router_planner.out b/src/test/regress/expected/multi_router_planner.out index b982e583e..86b1b6d98 100644 --- a/src/test/regress/expected/multi_router_planner.out +++ b/src/test/regress/expected/multi_router_planner.out @@ -1732,7 +1732,7 @@ SELECT * FROM articles_range where author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable NOTICE: issuing SELECT id, author_id, title, word_count FROM public.articles_range_840012 articles_range WHERE (author_id OPERATOR(pg_catalog.=) 1) -DETAIL: on server postgres@localhost:xxxxx connectionId: 2 +DETAIL: on server @:xxxxx connectionId: 2 id | author_id | title | word_count --------------------------------------------------------------------- (0 rows) @@ -1741,7 +1741,7 @@ SELECT * FROM articles_range where author_id = 1 or author_id = 5; DEBUG: Creating router plan DEBUG: Plan is router executable NOTICE: issuing SELECT id, author_id, title, word_count FROM public.articles_range_840012 articles_range WHERE ((author_id OPERATOR(pg_catalog.=) 1) OR (author_id OPERATOR(pg_catalog.=) 5)) -DETAIL: on server postgres@localhost:xxxxx connectionId: 2 +DETAIL: on server @:xxxxx connectionId: 2 id | author_id | title | word_count --------------------------------------------------------------------- (0 rows) @@ -1761,7 +1761,7 @@ SELECT * FROM articles_range ar join authors_range au on (ar.author_id = au.id) DEBUG: Creating router plan DEBUG: Plan is router executable NOTICE: issuing SELECT ar.id, ar.author_id, ar.title, ar.word_count, au.name, au.id FROM (public.articles_range_840012 ar JOIN public.authors_range_840008 au ON ((ar.author_id OPERATOR(pg_catalog.=) au.id))) WHERE (ar.author_id OPERATOR(pg_catalog.=) 1) -DETAIL: on server postgres@localhost:xxxxx connectionId: 2 +DETAIL: on server @:xxxxx connectionId: 2 id | author_id | title | word_count | name | id --------------------------------------------------------------------- (0 rows) @@ -2411,7 +2411,7 @@ GRANT INSERT ON ALL TABLES IN SCHEMA public TO router_user; -- we will fail to connect to worker 2, since the user does not exist BEGIN; INSERT INTO failure_test VALUES (1, 1); -WARNING: connection error: localhost:xxxxx +WARNING: connection error: :xxxxx DETAIL: FATAL: role "router_user" does not exist SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN ( @@ -2421,15 +2421,15 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement ORDER BY placementid; shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- - 840017 | 1 | localhost | 57637 - 840017 | 3 | localhost | 57638 - 840018 | 1 | localhost | 57638 - 840018 | 1 | localhost | 57637 + 840017 | 1 | | xxxxx + 840017 | 3 | | xxxxx + 840018 | 1 | | xxxxx + 840018 | 1 | | xxxxx (4 rows) ROLLBACK; INSERT INTO failure_test VALUES (2, 1); -WARNING: connection error: localhost:xxxxx +WARNING: connection error: :xxxxx DETAIL: FATAL: role "router_user" does not exist SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN ( @@ -2439,13 +2439,13 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement ORDER BY placementid; shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- - 840017 | 1 | localhost | 57637 - 840017 | 1 | localhost | 57638 - 840018 | 3 | localhost | 57638 - 840018 | 1 | 
localhost | 57637 + 840017 | 1 | | xxxxx + 840017 | 1 | | xxxxx + 840018 | 3 | | xxxxx + 840018 | 1 | | xxxxx (4 rows) -\c - postgres - :worker_1_port +\c - - :worker_1_port DROP OWNED BY router_user; DROP USER router_user; \c - - - :master_port diff --git a/src/test/regress/expected/multi_row_insert.out b/src/test/regress/expected/multi_row_insert.out index 1d976987a..3d3fdf9a3 100644 --- a/src/test/regress/expected/multi_row_insert.out +++ b/src/test/regress/expected/multi_row_insert.out @@ -21,8 +21,8 @@ SELECT * FROM pg_dist_shard WHERE logicalrelid='source_table_xyz'::regclass::oid SELECT shardid, nodename, nodeport FROM pg_dist_shard_placement WHERE EXISTS(SELECT shardid FROM pg_dist_shard WHERE shardid=pg_dist_shard_placement.shardid AND logicalrelid='source_table_xyz'::regclass::oid); shardid | nodename | nodeport --------------------------------------------------------------------- - 4213581 | localhost | 57637 - 4213582 | localhost | 57638 + 4213581 | | xxxxx + 4213582 | | xxxxx (2 rows) INSERT INTO source_table_xyz VALUES ((0, 'a'), 1, (0, 'a')), diff --git a/src/test/regress/expected/multi_schema_support.out b/src/test/regress/expected/multi_schema_support.out index dae406c50..8bd232e6d 100644 --- a/src/test/regress/expected/multi_schema_support.out +++ b/src/test/regress/expected/multi_schema_support.out @@ -32,7 +32,7 @@ SELECT master_create_empty_shard('test_schema_support.nation_append'); (1 row) -- append table to shard -SELECT master_append_table_to_shard(1190000, 'public.nation_local', 'localhost', :master_port); +SELECT master_append_table_to_shard(1190000, 'public.nation_local', '', :master_port); master_append_table_to_shard --------------------------------------------------------------------- 0.00533333 @@ -63,7 +63,7 @@ SELECT master_create_empty_shard('test_schema_support."nation._''append"'); 1190001 (1 row) -SELECT master_append_table_to_shard(1190001, 'nation_local', 'localhost', :master_port); +SELECT master_append_table_to_shard(1190001, 'nation_local', '', :master_port); master_append_table_to_shard --------------------------------------------------------------------- 0.00533333 @@ -78,7 +78,7 @@ SELECT COUNT(*) FROM test_schema_support."nation._'append"; -- test master_append_table_to_shard with schema with search_path is set SET search_path TO test_schema_support; -SELECT master_append_table_to_shard(1190000, 'public.nation_local', 'localhost', :master_port); +SELECT master_append_table_to_shard(1190000, 'public.nation_local', '', :master_port); master_append_table_to_shard --------------------------------------------------------------------- 0.00533333 @@ -92,7 +92,7 @@ SELECT COUNT(*) FROM nation_append; (1 row) -- test with search_path is set and shard name contains special characters -SELECT master_append_table_to_shard(1190001, 'nation_local', 'localhost', :master_port); +SELECT master_append_table_to_shard(1190001, 'nation_local', '', :master_port); master_append_table_to_shard --------------------------------------------------------------------- 0.00533333 @@ -445,7 +445,7 @@ CREATE TABLE test_schema_support.nation_hash_collation( SELECT master_get_table_ddl_events('test_schema_support.nation_hash_collation') ORDER BY 1; master_get_table_ddl_events --------------------------------------------------------------------- - ALTER TABLE test_schema_support.nation_hash_collation OWNER TO postgres + ALTER TABLE test_schema_support.nation_hash_collation OWNER TO CREATE TABLE test_schema_support.nation_hash_collation (n_nationkey integer NOT NULL, n_name 
character(25) NOT NULL COLLATE test_schema_support.english, n_regionkey integer NOT NULL, n_comment character varying(152)) (2 rows) @@ -727,7 +727,7 @@ DROP INDEX index1; SET search_path TO public; -- mark shard as inactive UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1190000 and nodeport = :worker_1_port; -SELECT master_copy_shard_placement(1190000, 'localhost', :worker_2_port, 'localhost', :worker_1_port); +SELECT master_copy_shard_placement(1190000, '', :worker_2_port, '', :worker_1_port); master_copy_shard_placement --------------------------------------------------------------------- @@ -737,15 +737,15 @@ SELECT master_copy_shard_placement(1190000, 'localhost', :worker_2_port, 'localh SELECT shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid = 1190000 ORDER BY nodeport; shardstate | nodename | nodeport --------------------------------------------------------------------- - 1 | localhost | 57637 - 1 | localhost | 57638 + 1 | | xxxxx + 1 | | xxxxx (2 rows) --test with search_path is set SET search_path TO test_schema_support; -- mark shard as inactive UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1190000 and nodeport = :worker_1_port; -SELECT master_copy_shard_placement(1190000, 'localhost', :worker_2_port, 'localhost', :worker_1_port); +SELECT master_copy_shard_placement(1190000, '', :worker_2_port, '', :worker_1_port); master_copy_shard_placement --------------------------------------------------------------------- @@ -755,8 +755,8 @@ SELECT master_copy_shard_placement(1190000, 'localhost', :worker_2_port, 'localh SELECT shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid = 1190000 ORDER BY nodeport; shardstate | nodename | nodeport --------------------------------------------------------------------- - 1 | localhost | 57637 - 1 | localhost | 57638 + 1 | | xxxxx + 1 | | xxxxx (2 rows) -- test master_apply_delete_command with schemas @@ -1239,7 +1239,7 @@ PL/pgSQL function run_command_on_coordinator_and_workers(text) line 3 at EXECUTE (1 row) -SELECT run_command_on_coordinator_and_workers('GRANT ALL ON DATABASE postgres to "test-user"'); +SELECT run_command_on_coordinator_and_workers('GRANT ALL ON DATABASE to "test-user"'); run_command_on_coordinator_and_workers --------------------------------------------------------------------- @@ -1269,8 +1269,8 @@ NOTICE: drop cascades to table schema_with_user.test_table SELECT run_command_on_workers('DROP OWNED BY "test-user" CASCADE'); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"DROP OWNED") - (localhost,57638,t,"DROP OWNED") + (,xxxxx,t,"DROP OWNED") + (,xxxxx,t,"DROP OWNED") (2 rows) SELECT run_command_on_coordinator_and_workers('DROP USER "test-user"'); diff --git a/src/test/regress/expected/multi_select_distinct.out b/src/test/regress/expected/multi_select_distinct.out index 433c420dc..97eea2c31 100644 --- a/src/test/regress/expected/multi_select_distinct.out +++ b/src/test/regress/expected/multi_select_distinct.out @@ -217,7 +217,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_orderkey Filter: (count(*) > 5) @@ -245,7 +245,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_orderkey Filter: (count(*) > 5) @@ -288,7 
+288,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -317,7 +317,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -365,7 +365,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -396,7 +396,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -445,7 +445,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -476,7 +476,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -523,7 +523,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -552,7 +552,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -599,7 +599,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -630,7 +630,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -677,7 +677,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -708,7 +708,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_suppkey, l_linenumber -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -755,7 +755,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx 
dbname= -> Limit -> Unique -> Group @@ -786,7 +786,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Limit -> Unique -> Group @@ -848,7 +848,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> GroupAggregate Group Key: l_orderkey -> Sort @@ -875,7 +875,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> GroupAggregate Group Key: l_orderkey -> Sort @@ -916,7 +916,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_suppkey -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -944,7 +944,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_suppkey -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -969,7 +969,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> GroupAggregate Group Key: l_orderkey -> Sort @@ -997,7 +997,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> GroupAggregate Group Key: l_orderkey -> Sort @@ -1046,7 +1046,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> HashAggregate Group Key: l_partkey -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part @@ -1117,7 +1117,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Unique -> Sort Sort Key: l_partkey, l_suppkey @@ -1158,7 +1158,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Unique -> Sort Sort Key: l_orderkey @@ -1213,7 +1213,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Limit -> Unique -> Sort @@ -1413,7 +1413,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Limit -> Sort Sort Key: l_orderkey, l_partkey @@ -1464,7 +1464,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Limit -> Sort Sort Key: lineitem_hash_part.l_orderkey, (count(*)) @@ -1518,7 +1518,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Limit -> Unique -> Sort @@ -1570,7 +1570,7 @@ EXPLAIN (COSTS FALSE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Limit -> Unique -> Sort diff --git a/src/test/regress/expected/multi_shard_update_delete.out b/src/test/regress/expected/multi_shard_update_delete.out index 
f00fe3570..ea5887549 100644 --- a/src/test/regress/expected/multi_shard_update_delete.out +++ b/src/test/regress/expected/multi_shard_update_delete.out @@ -185,7 +185,7 @@ SELECT master_create_empty_shard('test_append_table'); 1440010 (1 row) -SELECT * FROM master_append_table_to_shard(1440010, 'append_stage_table', 'localhost', :master_port); +SELECT * FROM master_append_table_to_shard(1440010, 'append_stage_table', '', :master_port); master_append_table_to_shard --------------------------------------------------------------------- 0.00533333 @@ -197,7 +197,7 @@ SELECT master_create_empty_shard('test_append_table') AS new_shard_id; 1440011 (1 row) -SELECT * FROM master_append_table_to_shard(1440011, 'append_stage_table_2', 'localhost', :master_port); +SELECT * FROM master_append_table_to_shard(1440011, 'append_stage_table_2', '', :master_port); master_append_table_to_shard --------------------------------------------------------------------- 0.00533333 @@ -613,7 +613,7 @@ UPDATE users_test_table as utt SET value_1 = 3 WHERE value_2 > (SELECT value_3 FROM events_test_table as ett WHERE utt.user_id = ett.user_id); ERROR: more than one row returned by a subquery used as an expression -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx -- We can not pushdown a query if the target relation is reference table UPDATE users_reference_copy_table SET value_2 = 5 diff --git a/src/test/regress/expected/multi_sql_function.out b/src/test/regress/expected/multi_sql_function.out index c762e827d..ca757a94c 100644 --- a/src/test/regress/expected/multi_sql_function.out +++ b/src/test/regress/expected/multi_sql_function.out @@ -327,7 +327,7 @@ ERROR: could not create distributed plan DETAIL: Possibly this is caused by the use of parameters in SQL functions, which is not supported in Citus. HINT: Consider using PL/pgSQL functions instead. CONTEXT: SQL function "test_parameterized_sql_function_in_subquery_where" statement 1 --- postgres behaves slightly differently for the following +-- behaves slightly differently for the following -- query where the target list is empty SELECT test_parameterized_sql_function(1); test_parameterized_sql_function @@ -354,7 +354,7 @@ $$ LANGUAGE SQL; SELECT insert_twice(); ERROR: duplicate key value violates unique constraint "table_with_unique_constraint_a_key_1230009" DETAIL: Key (a)=(4) already exists. 
-CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on :xxxxx SQL function "insert_twice" statement 2 SELECT * FROM table_with_unique_constraint ORDER BY a; a diff --git a/src/test/regress/expected/multi_subquery_behavioral_analytics.out b/src/test/regress/expected/multi_subquery_behavioral_analytics.out index 3a4b779d1..deda5829f 100644 --- a/src/test/regress/expected/multi_subquery_behavioral_analytics.out +++ b/src/test/regress/expected/multi_subquery_behavioral_analytics.out @@ -1573,8 +1573,8 @@ SELECT * FROM run_command_on_workers('CREATE OR REPLACE FUNCTION array_index(AN ORDER BY 1,2; nodename | nodeport | success | result --------------------------------------------------------------------- - localhost | 57637 | t | CREATE FUNCTION - localhost | 57638 | t | CREATE FUNCTION + | xxxxx | t | CREATE FUNCTION + | xxxxx | t | CREATE FUNCTION (2 rows) CREATE OR REPLACE FUNCTION array_index(ANYARRAY, ANYELEMENT) @@ -1747,8 +1747,8 @@ SELECT * FROM run_command_on_workers('DROP FUNCTION array_index(ANYARRAY, ANYELE ORDER BY 1,2; nodename | nodeport | success | result --------------------------------------------------------------------- - localhost | 57637 | t | DROP FUNCTION - localhost | 57638 | t | DROP FUNCTION + | xxxxx | t | DROP FUNCTION + | xxxxx | t | DROP FUNCTION (2 rows) DROP FUNCTION array_index(ANYARRAY, ANYELEMENT); @@ -1994,8 +1994,8 @@ CREATE FUNCTION test_join_function_2(integer, integer) RETURNS bool $f$); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"CREATE FUNCTION") - (localhost,57638,t,"CREATE FUNCTION") + (,xxxxx,t,"CREATE FUNCTION") + (,xxxxx,t,"CREATE FUNCTION") (2 rows) -- we don't support joins via functions @@ -2288,8 +2288,8 @@ SELECT run_command_on_workers($f$ $f$); run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"DROP FUNCTION") - (localhost,57638,t,"DROP FUNCTION") + (,xxxxx,t,"DROP FUNCTION") + (,xxxxx,t,"DROP FUNCTION") (2 rows) SET citus.enable_router_execution TO TRUE; diff --git a/src/test/regress/expected/multi_subquery_window_functions.out b/src/test/regress/expected/multi_subquery_window_functions.out index aabfb9068..9309ef170 100644 --- a/src/test/regress/expected/multi_subquery_window_functions.out +++ b/src/test/regress/expected/multi_subquery_window_functions.out @@ -721,7 +721,7 @@ EXPLAIN (COSTS FALSE, VERBOSE TRUE) Task Count: 4 Tasks Shown: One of 4 -> Task - Node: host=localhost port=xxxxx dbname=regression + Node: host= port=xxxxx dbname= -> Limit Output: users_table.user_id, (sum((sum(users_table.value_2) OVER (?)))) -> Sort diff --git a/src/test/regress/expected/multi_table_ddl.out b/src/test/regress/expected/multi_table_ddl.out index 826b7aad1..740e83981 100644 --- a/src/test/regress/expected/multi_table_ddl.out +++ b/src/test/regress/expected/multi_table_ddl.out @@ -76,13 +76,13 @@ SELECT * FROM pg_dist_shard_placement; DROP EXTENSION citus; CREATE EXTENSION citus; -- re-add the nodes to the cluster -SELECT 1 FROM master_add_node('localhost', :worker_1_port); +SELECT 1 FROM master_add_node('', :worker_1_port); ?column? --------------------------------------------------------------------- 1 (1 row) -SELECT 1 FROM master_add_node('localhost', :worker_2_port); +SELECT 1 FROM master_add_node('', :worker_2_port); ?column? 
--------------------------------------------------------------------- 1 diff --git a/src/test/regress/expected/multi_task_assignment_policy.out b/src/test/regress/expected/multi_task_assignment_policy.out index e44d8ee9a..0659852ce 100644 --- a/src/test/regress/expected/multi_task_assignment_policy.out +++ b/src/test/regress/expected/multi_task_assignment_policy.out @@ -70,9 +70,9 @@ SET client_min_messages TO DEBUG3; SET citus.task_assignment_policy TO 'greedy'; EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: Router planner does not support append-partitioned tables. -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx QUERY PLAN --------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) @@ -82,9 +82,9 @@ DEBUG: assigned task to node localhost:xxxxx EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: Router planner does not support append-partitioned tables. -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx QUERY PLAN --------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) @@ -96,9 +96,9 @@ DEBUG: assigned task to node localhost:xxxxx SET citus.task_assignment_policy TO 'first-replica'; EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: Router planner does not support append-partitioned tables. -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx QUERY PLAN --------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) @@ -108,9 +108,9 @@ DEBUG: assigned task to node localhost:xxxxx EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: Router planner does not support append-partitioned tables. 
-DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx +DEBUG: assigned task to node :xxxxx QUERY PLAN --------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) diff --git a/src/test/regress/expected/multi_test_helpers.out b/src/test/regress/expected/multi_test_helpers.out index c901beb5f..ad8532ae9 100644 --- a/src/test/regress/expected/multi_test_helpers.out +++ b/src/test/regress/expected/multi_test_helpers.out @@ -94,7 +94,7 @@ SELECT pg_reload_conf(); (1 row) -- Verifies pg_dist_node and pg_dist_palcement in the given worker matches the ones in coordinator -CREATE OR REPLACE FUNCTION verify_metadata(hostname TEXT, port INTEGER, master_port INTEGER DEFAULT 57636) +CREATE OR REPLACE FUNCTION verify_metadata(hostname TEXT, port INTEGER, master_port INTEGER DEFAULT xxxxx) RETURNS BOOLEAN LANGUAGE sql AS $$ @@ -104,7 +104,7 @@ WITH dist_node_summary AS ( ), dist_node_check AS ( SELECT count(distinct result) = 1 AS matches FROM dist_node_summary CROSS JOIN LATERAL - master_run_on_worker(ARRAY[hostname, 'localhost'], ARRAY[port, master_port], + master_run_on_worker(ARRAY[hostname, ''], ARRAY[port, master_port], ARRAY[dist_node_summary.query, dist_node_summary.query], false) ), dist_placement_summary AS ( @@ -112,7 +112,7 @@ WITH dist_node_summary AS ( ), dist_placement_check AS ( SELECT count(distinct result) = 1 AS matches FROM dist_placement_summary CROSS JOIN LATERAL - master_run_on_worker(ARRAY[hostname, 'localhost'], ARRAY[port, master_port], + master_run_on_worker(ARRAY[hostname, ''], ARRAY[port, master_port], ARRAY[dist_placement_summary.query, dist_placement_summary.query], false) ) diff --git a/src/test/regress/expected/multi_transaction_recovery.out b/src/test/regress/expected/multi_transaction_recovery.out index 7c5cef8e4..9a816f418 100644 --- a/src/test/regress/expected/multi_transaction_recovery.out +++ b/src/test/regress/expected/multi_transaction_recovery.out @@ -4,7 +4,7 @@ SET citus.next_shard_id TO 1220000; -- verify we recover transactions which do DML on coordinator placements -- properly. SET client_min_messages TO ERROR; -SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0); +SELECT 1 FROM master_add_node('', :master_port, groupid => 0); ?column? --------------------------------------------------------------------- 1 @@ -362,7 +362,7 @@ SELECT pg_reload_conf(); DROP TABLE test_recovery_ref; DROP TABLE test_recovery; DROP TABLE test_recovery_single; -SELECT 1 FROM master_remove_node('localhost', :master_port); +SELECT 1 FROM master_remove_node('', :master_port); ?column? 
--------------------------------------------------------------------- 1 diff --git a/src/test/regress/expected/multi_transactional_drop_shards.out b/src/test/regress/expected/multi_transactional_drop_shards.out index 31094b237..2463f68ec 100644 --- a/src/test/regress/expected/multi_transactional_drop_shards.out +++ b/src/test/regress/expected/multi_transactional_drop_shards.out @@ -35,14 +35,14 @@ ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- - 1410000 | 1 | localhost | 57637 - 1410000 | 1 | localhost | 57638 - 1410001 | 1 | localhost | 57637 - 1410001 | 1 | localhost | 57638 - 1410002 | 1 | localhost | 57637 - 1410002 | 1 | localhost | 57638 - 1410003 | 1 | localhost | 57637 - 1410003 | 1 | localhost | 57638 + 1410000 | 1 | | xxxxx + 1410000 | 1 | | xxxxx + 1410001 | 1 | | xxxxx + 1410001 | 1 | | xxxxx + 1410002 | 1 | | xxxxx + 1410002 | 1 | | xxxxx + 1410003 | 1 | | xxxxx + 1410003 | 1 | | xxxxx (8 rows) -- verify table is not dropped @@ -50,7 +50,7 @@ ORDER BY List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- - public | transactional_drop_shards | table | postgres + public | transactional_drop_shards | table | (1 row) -- verify shards are not dropped @@ -59,10 +59,10 @@ ORDER BY List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- - public | transactional_drop_shards_1410000 | table | postgres - public | transactional_drop_shards_1410001 | table | postgres - public | transactional_drop_shards_1410002 | table | postgres - public | transactional_drop_shards_1410003 | table | postgres + public | transactional_drop_shards_1410000 | table | + public | transactional_drop_shards_1410001 | table | + public | transactional_drop_shards_1410002 | table | + public | transactional_drop_shards_1410003 | table | (4 rows) \c - - - :master_port @@ -143,8 +143,8 @@ ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- - 1410004 | 1 | localhost | 57637 - 1410004 | 1 | localhost | 57638 + 1410004 | 1 | | xxxxx + 1410004 | 1 | | xxxxx (2 rows) -- verify shards are not dropped @@ -153,7 +153,7 @@ ORDER BY List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- - public | transactional_drop_shards_1410004 | table | postgres + public | transactional_drop_shards_1410004 | table | (1 row) \c - - - :master_port @@ -221,8 +221,8 @@ ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- - 1410005 | 1 | localhost | 57637 - 1410005 | 1 | localhost | 57638 + 1410005 | 1 | | xxxxx + 1410005 | 1 | | xxxxx (2 rows) -- verify table is not dropped @@ -230,7 +230,7 @@ ORDER BY List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- - public | transactional_drop_shards | table | postgres + public | transactional_drop_shards | table | (1 row) -- verify shards are not dropped @@ -239,7 +239,7 @@ ORDER BY List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- - public | transactional_drop_shards_1410005 | table | postgres + public | transactional_drop_shards_1410005 | table | (1 row) \c - - - :master_port @@ -270,8 +270,8 @@ 
ORDER BY
     shardid, nodename, nodeport;
  shardid | shardstate | nodename | nodeport
 ---------------------------------------------------------------------
- 1410005 | 1 | localhost | 57637
- 1410005 | 1 | localhost | 57638
+ 1410005 | 1 | localhost | xxxxx
+ 1410005 | 1 | localhost | xxxxx
 (2 rows)
 -- verify shards are not dropped
@@ -280,7 +280,7 @@ ORDER BY
  List of relations
  Schema | Name | Type | Owner
 ---------------------------------------------------------------------
- public | transactional_drop_shards_1410005 | table | postgres
+ public | transactional_drop_shards_1410005 | table | postgres
 (1 row)
 -- test DROP table with failing worker
@@ -312,8 +312,8 @@ ORDER BY
     shardid, nodename, nodeport;
  shardid | shardstate | nodename | nodeport
 ---------------------------------------------------------------------
- 1410005 | 1 | localhost | 57637
- 1410005 | 1 | localhost | 57638
+ 1410005 | 1 | localhost | xxxxx
+ 1410005 | 1 | localhost | xxxxx
 (2 rows)
 -- verify table is not dropped
@@ -321,7 +321,7 @@ ORDER BY
  List of relations
  Schema | Name | Type | Owner
 ---------------------------------------------------------------------
- public | transactional_drop_shards | table | postgres
+ public | transactional_drop_shards | table | postgres
 (1 row)
 -- verify shards are not dropped
@@ -330,7 +330,7 @@ ORDER BY
  List of relations
  Schema | Name | Type | Owner
 ---------------------------------------------------------------------
- public | transactional_drop_shards_1410005 | table | postgres
+ public | transactional_drop_shards_1410005 | table | postgres
 (1 row)
 \c - - - :master_port
@@ -363,8 +363,8 @@ ORDER BY
     shardid, nodename, nodeport;
  shardid | shardstate | nodename | nodeport
 ---------------------------------------------------------------------
- 1410006 | 1 | localhost | 57637
- 1410006 | 1 | localhost | 57638
+ 1410006 | 1 | localhost | xxxxx
+ 1410006 | 1 | localhost | xxxxx
 (2 rows)
 -- verify table is not dropped
@@ -372,7 +372,7 @@ ORDER BY
  List of relations
  Schema | Name | Type | Owner
 ---------------------------------------------------------------------
- public | transactional_drop_reference | table | postgres
+ public | transactional_drop_reference | table | postgres
 (1 row)
 -- verify shards are not dropped
@@ -381,7 +381,7 @@ ORDER BY
  List of relations
  Schema | Name | Type | Owner
 ---------------------------------------------------------------------
- public | transactional_drop_reference_1410006 | table | postgres
+ public | transactional_drop_reference_1410006 | table | postgres
 (1 row)
 \c - - - :master_port
@@ -407,8 +407,8 @@ ORDER BY
     shardid, nodename, nodeport;
  shardid | shardstate | nodename | nodeport
 ---------------------------------------------------------------------
- 1410005 | 1 | localhost | 57637
- 1410005 | 1 | localhost | 57638
+ 1410005 | 1 | localhost | xxxxx
+ 1410005 | 1 | localhost | xxxxx
 (2 rows)
 -- verify shards are not dropped
@@ -417,7 +417,7 @@ ORDER BY
  List of relations
  Schema | Name | Type | Owner
 ---------------------------------------------------------------------
- public | transactional_drop_shards_1410005 | table | postgres
+ public | transactional_drop_shards_1410005 | table | postgres
 (1 row)
 DROP EVENT TRIGGER fail_drop_table;
@@ -459,22 +459,22 @@ ORDER BY
     shardid, nodename, nodeport;
  shardid | shardstate | nodename | nodeport
 ---------------------------------------------------------------------
- 1410007 | 1 | localhost | 57637
- 1410007 | 1 | localhost | 57638
- 1410008 | 1 | localhost | 57637
- 1410008 | 1 | localhost | 57638
- 1410009 | 1 | localhost | 57637
- 1410009 | 1 | localhost | 57638
- 1410010 | 1 | localhost | 57637
- 1410010 | 1 | localhost | 57638
- 1410011 | 1 | localhost | 57637
- 1410011 | 1 | localhost | 57638
- 1410012 | 1 | localhost | 57637
- 1410012 | 1 | localhost | 57638
- 1410013 | 1 | localhost | 57637
- 1410013 | 1 | localhost | 57638
- 1410014 | 1 | localhost | 57637
- 1410014 | 1 | localhost | 57638
+ 1410007 | 1 | localhost | xxxxx
+ 1410007 | 1 | localhost | xxxxx
+ 1410008 | 1 | localhost | xxxxx
+ 1410008 | 1 | localhost | xxxxx
+ 1410009 | 1 | localhost | xxxxx
+ 1410009 | 1 | localhost | xxxxx
+ 1410010 | 1 | localhost | xxxxx
+ 1410010 | 1 | localhost | xxxxx
+ 1410011 | 1 | localhost | xxxxx
+ 1410011 | 1 | localhost | xxxxx
+ 1410012 | 1 | localhost | xxxxx
+ 1410012 | 1 | localhost | xxxxx
+ 1410013 | 1 | localhost | xxxxx
+ 1410013 | 1 | localhost | xxxxx
+ 1410014 | 1 | localhost | xxxxx
+ 1410014 | 1 | localhost | xxxxx
 (16 rows)
 -- verify table is not dropped
@@ -482,7 +482,7 @@ ORDER BY
  List of relations
  Schema | Name | Type | Owner
 ---------------------------------------------------------------------
- public | transactional_drop_serial | table | postgres
+ public | transactional_drop_serial | table | postgres
 (1 row)
 -- verify shards and sequence are not dropped
@@ -491,14 +491,14 @@ ORDER BY
  List of relations
  Schema | Name | Type | Owner
 ---------------------------------------------------------------------
- public | transactional_drop_serial_1410007 | table | postgres
- public | transactional_drop_serial_1410008 | table | postgres
- public | transactional_drop_serial_1410009 | table | postgres
- public | transactional_drop_serial_1410010 | table | postgres
- public | transactional_drop_serial_1410011 | table | postgres
- public | transactional_drop_serial_1410012 | table | postgres
- public | transactional_drop_serial_1410013 | table | postgres
- public | transactional_drop_serial_1410014 | table | postgres
+ public | transactional_drop_serial_1410007 | table | postgres
+ public | transactional_drop_serial_1410008 | table | postgres
+ public | transactional_drop_serial_1410009 | table | postgres
+ public | transactional_drop_serial_1410010 | table | postgres
+ public | transactional_drop_serial_1410011 | table | postgres
+ public | transactional_drop_serial_1410012 | table | postgres
+ public | transactional_drop_serial_1410013 | table | postgres
+ public | transactional_drop_serial_1410014 | table | postgres
 (8 rows)
 \ds transactional_drop_serial_column2_seq
@@ -564,7 +564,7 @@ SELECT create_distributed_table('transactional_drop_mx', 'column1');
 UPDATE pg_dist_partition SET repmodel='s'
 WHERE logicalrelid='transactional_drop_mx'::regclass;
 -- make worker 1 receive metadata changes
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
+SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
  start_metadata_sync_to_node
 ---------------------------------------------------------------------
 (1 row)
@@ -591,10 +591,10 @@ ORDER BY
     shardid, nodename, nodeport;
  shardid | shardstate | nodename | nodeport
 ---------------------------------------------------------------------
- 1410015 | 1 | localhost | 57637
- 1410016 | 1 | localhost | 57638
- 1410017 | 1 | localhost | 57637
- 1410018 | 1 | localhost | 57638
+ 1410015 | 1 | localhost | xxxxx
+ 1410016 | 1 | localhost | xxxxx
+ 1410017 | 1 | localhost | xxxxx
+ 1410018 | 1 | localhost | xxxxx
 (4 rows)
 \c - - - :master_port
@@ -622,10 +622,10 @@ ORDER BY
     shardid, nodename, nodeport;
  shardid | shardstate | nodename | nodeport
 ---------------------------------------------------------------------
- 1410015 | 1 | localhost | 57637
- 1410016 | 1 | localhost | 57638
- 1410017 | 1 | localhost | 57637
- 1410018 | 1 | localhost | 57638
+ 1410015 | 1 | localhost | xxxxx
+ 1410016 | 1 | localhost | xxxxx
+ 1410017 | 1 | localhost | xxxxx
+ 1410018 | 1 | localhost | xxxxx
 (4 rows)
 -- test with MX, DROP TABLE, then COMMIT
@@ -654,8 +654,8 @@ ORDER BY
 \c - - - :master_port
 -- try using the coordinator as a worker and then dropping the table
-SELECT 1 FROM master_add_node('localhost', :master_port);
-NOTICE: Replicating reference table "transactional_drop_reference" to the node localhost:xxxxx
+SELECT 1 FROM master_add_node('localhost', :master_port);
+NOTICE: Replicating reference table "transactional_drop_reference" to the node localhost:xxxxx
  ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -670,7 +670,7 @@ SELECT create_distributed_table('citus_local', 'id');
 INSERT INTO citus_local (k) VALUES (2);
 DROP TABLE citus_local;
-SELECT master_remove_node('localhost', :master_port);
+SELECT master_remove_node('localhost', :master_port);
  master_remove_node
 ---------------------------------------------------------------------
 (1 row)
@@ -678,7 +678,7 @@ SELECT master_remove_node('localhost', :master_port);
 -- clean the workspace
 DROP TABLE transactional_drop_shards, transactional_drop_reference;
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
+SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
  stop_metadata_sync_to_node
 ---------------------------------------------------------------------
 (1 row)
@@ -691,8 +691,8 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
 SELECT run_command_on_workers('CREATE USER try_drop_table WITH LOGIN');
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,"CREATE ROLE")
- (localhost,57638,t,"CREATE ROLE")
+ (localhost,xxxxx,t,"CREATE ROLE")
+ (localhost,xxxxx,t,"CREATE ROLE")
 (2 rows)
 GRANT ALL ON SCHEMA public TO try_drop_table;
diff --git a/src/test/regress/expected/multi_unsupported_worker_operations.out b/src/test/regress/expected/multi_unsupported_worker_operations.out
index af0b2421f..c47e49b7e 100644
--- a/src/test/regress/expected/multi_unsupported_worker_operations.out
+++ b/src/test/regress/expected/multi_unsupported_worker_operations.out
@@ -46,7 +46,7 @@ ORDER BY logicalrelid;
  mx_table_2 | s | 150000
 (2 rows)
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
+SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
  start_metadata_sync_to_node
 ---------------------------------------------------------------------
 (1 row)
@@ -113,10 +113,10 @@ DROP TABLE mx_ref_table;
 CREATE UNIQUE INDEX mx_test_uniq_index ON mx_table(col_1);
 \c - - - :worker_1_port
 -- changing isdatanode
-SELECT * from master_set_node_property('localhost', 8888, 'shouldhaveshards', false);
+SELECT * from master_set_node_property('localhost', 8888, 'shouldhaveshards', false);
 ERROR: operation is not allowed on this node
 HINT: Connect to the coordinator and run it again.
-SELECT * from master_set_node_property('localhost', 8888, 'shouldhaveshards', true);
+SELECT * from master_set_node_property('localhost', 8888, 'shouldhaveshards', true);
 ERROR: operation is not allowed on this node
 HINT: Connect to the coordinator and run it again.
 -- DDL commands
@@ -167,10 +167,10 @@ SELECT count(*) FROM mx_table;
 (1 row)
 -- master_add_inactive_node
-SELECT 1 FROM master_add_inactive_node('localhost', 5432);
+SELECT 1 FROM master_add_inactive_node('localhost', 5432);
 ERROR: operation is not allowed on this node
 HINT: Connect to the coordinator and run it again.
-SELECT count(1) FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432;
+SELECT count(1) FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432;
  count
 ---------------------------------------------------------------------
 0
 (1 row)
@@ -179,24 +179,24 @@ SELECT count(1) FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432;
 -- master_remove_node
 \c - - - :master_port
 DROP INDEX mx_test_uniq_index;
-SELECT 1 FROM master_add_inactive_node('localhost', 5432);
+SELECT 1 FROM master_add_inactive_node('localhost', 5432);
  ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
 \c - - - :worker_1_port
-SELECT master_remove_node('localhost', 5432);
+SELECT master_remove_node('localhost', 5432);
 ERROR: operation is not allowed on this node
 HINT: Connect to the coordinator and run it again.
-SELECT count(1) FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432;
+SELECT count(1) FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432;
  count
 ---------------------------------------------------------------------
 1
 (1 row)
 \c - - - :master_port
-SELECT master_remove_node('localhost', 5432);
+SELECT master_remove_node('localhost', 5432);
  master_remove_node
 ---------------------------------------------------------------------
 (1 row)
@@ -221,7 +221,7 @@ UPDATE pg_dist_partition SET colocationid = :old_colocation_id
 WHERE logicalrelid='mx_table_2'::regclass;
 -- start_metadata_sync_to_node
-SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
+SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
 ERROR: operation is not allowed on this node
 HINT: Connect to the coordinator and run it again.
 SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;
@@ -232,14 +232,14 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;
 -- stop_metadata_sync_to_node
 \c - - - :master_port
-SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
+SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
  start_metadata_sync_to_node
 ---------------------------------------------------------------------
 (1 row)
 \c - - - :worker_1_port
-SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
+SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
 ERROR: operation is not allowed on this node
 HINT: Connect to the coordinator and run it again.
 \c - - - :master_port
@@ -249,7 +249,7 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;
 t
 (1 row)
-SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
+SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
  stop_metadata_sync_to_node
 ---------------------------------------------------------------------
 (1 row)
@@ -305,7 +305,7 @@ LIMIT 1 \gset
 SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset
 INSERT INTO pg_dist_placement (groupid, shardid, shardstate, shardlength) VALUES (:worker_2_group, :testshardid, 3, 0);
-SELECT master_copy_shard_placement(:testshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
+SELECT master_copy_shard_placement(:testshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
 ERROR: operation is not allowed on this node
 HINT: Connect to the coordinator and run it again.
 SELECT shardid, nodename, nodeport, shardstate
@@ -314,8 +314,8 @@ WHERE shardid = :testshardid
 ORDER BY nodeport;
  shardid | nodename | nodeport | shardstate
 ---------------------------------------------------------------------
- 1270000 | localhost | 57637 | 1
- 1270000 | localhost | 57638 | 3
+ 1270000 | localhost | xxxxx | 1
+ 1270000 | localhost | xxxxx | 3
 (2 rows)
 DELETE FROM pg_dist_placement WHERE groupid = :worker_2_group AND shardid = :testshardid;
@@ -355,7 +355,7 @@ ROLLBACK;
 \c - - - :master_port
 DROP TABLE mx_table;
 DROP TABLE mx_table_2;
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
+SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
  stop_metadata_sync_to_node
 ---------------------------------------------------------------------
 (1 row)
diff --git a/src/test/regress/expected/multi_upgrade_reference_table.out b/src/test/regress/expected/multi_upgrade_reference_table.out
index 010bf690a..e964c3534 100644
--- a/src/test/regress/expected/multi_upgrade_reference_table.out
+++ b/src/test/regress/expected/multi_upgrade_reference_table.out
@@ -803,7 +803,7 @@ ORDER BY shardid;
  List of relations
  Schema | Name | Type | Owner
 ---------------------------------------------------------------------
- public | upgrade_reference_table_transaction_commit_1360014 | table | postgres
+ public | upgrade_reference_table_transaction_commit_1360014 | table | postgres
 (1 row)
 \c - - - :master_port
@@ -933,7 +933,7 @@ SELECT create_distributed_table('upgrade_reference_table_mx', 'column1');
 UPDATE pg_dist_shard_placement SET shardstate = 3
 WHERE nodeport = :worker_2_port AND
 shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='upgrade_reference_table_mx'::regclass);
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
+SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
  start_metadata_sync_to_node
 ---------------------------------------------------------------------
 (1 row)
@@ -1082,7 +1082,7 @@ ORDER BY shardid;
 \c - - - :master_port
 DROP TABLE upgrade_reference_table_mx;
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
+SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
  stop_metadata_sync_to_node
 ---------------------------------------------------------------------
 (1 row)
diff --git a/src/test/regress/expected/multi_upsert.out b/src/test/regress/expected/multi_upsert.out
index 08308aba0..736309684 100644
--- a/src/test/regress/expected/multi_upsert.out
+++ b/src/test/regress/expected/multi_upsert.out
@@ -153,7 +153,7 @@ INSERT INTO upsert_test_2 (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_
 -- this errors out since there is no unique constraint on partition key
 INSERT INTO upsert_test_2 (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO NOTHING;
 ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
 -- create another table
 CREATE TABLE upsert_test_3 (
@@ -172,7 +172,7 @@ SELECT create_distributed_table('upsert_test_3', 'part_key', 'hash');
 -- since there are no unique indexes, error-out
 INSERT INTO upsert_test_3 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_3.count + 1;
 ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
 -- create another table
 CREATE TABLE upsert_test_4 (
diff --git a/src/test/regress/expected/multi_utilities.out b/src/test/regress/expected/multi_utilities.out
index 240b63061..0a26c0031 100644
--- a/src/test/regress/expected/multi_utilities.out
+++ b/src/test/regress/expected/multi_utilities.out
@@ -390,22 +390,22 @@ VACUUM dustbunnies, second_dustbunnies;
 SELECT run_command_on_workers($$SELECT wait_for_stats()$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,"")
- (localhost,57638,t,"")
+ (localhost,xxxxx,t,"")
+ (localhost,xxxxx,t,"")
 (2 rows)
 SELECT run_command_on_workers($$SELECT pg_stat_get_vacuum_count(tablename::regclass) from pg_tables where tablename LIKE 'dustbunnies_%' limit 1$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,4)
- (localhost,57638,t,4)
+ (localhost,xxxxx,t,4)
+ (localhost,xxxxx,t,4)
 (2 rows)
 SELECT run_command_on_workers($$SELECT pg_stat_get_analyze_count(tablename::regclass) from pg_tables where tablename LIKE 'dustbunnies_%' limit 1$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,3)
- (localhost,57638,t,3)
+ (localhost,xxxxx,t,3)
+ (localhost,xxxxx,t,3)
 (2 rows)
 -- and warning when using targeted VACUUM without DDL propagation
@@ -421,22 +421,22 @@ SET citus.enable_ddl_propagation to DEFAULT;
 SELECT run_command_on_workers($$SELECT wait_for_stats()$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,"")
- (localhost,57638,t,"")
+ (localhost,xxxxx,t,"")
+ (localhost,xxxxx,t,"")
 (2 rows)
 SELECT run_command_on_workers($$SELECT pg_stat_get_vacuum_count(tablename::regclass) from pg_tables where tablename LIKE 'dustbunnies_%' limit 1$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,4)
- (localhost,57638,t,4)
+ (localhost,xxxxx,t,4)
+ (localhost,xxxxx,t,4)
 (2 rows)
 SELECT run_command_on_workers($$SELECT pg_stat_get_analyze_count(tablename::regclass) from pg_tables where tablename LIKE 'dustbunnies_%' limit 1$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,3)
- (localhost,57638,t,3)
+ (localhost,xxxxx,t,3)
+ (localhost,xxxxx,t,3)
 (2 rows)
 -- test worker_hash
diff --git a/src/test/regress/expected/multi_utility_warnings.out b/src/test/regress/expected/multi_utility_warnings.out
index 6a417ef96..2f71b8017 100644
--- a/src/test/regress/expected/multi_utility_warnings.out
+++ b/src/test/regress/expected/multi_utility_warnings.out
@@ -19,7 +19,7 @@ ERROR: cannot write to pg_dist_authinfo
 DETAIL: Citus Community Edition does not support the use of custom authentication options.
 HINT: To learn more about using advanced authentication schemes with Citus, please contact us at https://citusdata.com/about/contact_us
 BEGIN;
-INSERT INTO pg_dist_node VALUES (1234567890, 1234567890, 'localhost', 5432);
+INSERT INTO pg_dist_node VALUES (1234567890, 1234567890, 'localhost', 5432);
 INSERT INTO pg_dist_poolinfo VALUES (1234567890, 'port=1234');
 ERROR: cannot write to pg_dist_poolinfo
 DETAIL: Citus Community Edition does not support the use of pooler options.
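A note on the multi_utilities hunks above: they all exercise the same verification idiom in the Citus regression suite, namely run a probe query on every worker through run_command_on_workers() and compare the per-node (nodename,nodeport,success,result) tuples, with the ports normalized away in the expected output. A minimal sketch of the idiom under the same assumptions as the fixture (the dustbunnies table comes from the test above; the $cmd$ dollar-quoting tag is illustrative, the fixture itself uses plain $$):

-- Run VACUUM through the coordinator, then ask every worker for the vacuum
-- counter of one of its local dustbunnies_* shard placements; the function
-- returns one (nodename,nodeport,success,result) tuple per worker.
VACUUM dustbunnies;
SELECT run_command_on_workers($cmd$
    SELECT pg_stat_get_vacuum_count(tablename::regclass)
    FROM pg_tables
    WHERE tablename LIKE 'dustbunnies_%'
    LIMIT 1
$cmd$);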
diff --git a/src/test/regress/expected/multi_view.out b/src/test/regress/expected/multi_view.out
index 82e20b8f3..aa791b084 100644
--- a/src/test/regress/expected/multi_view.out
+++ b/src/test/regress/expected/multi_view.out
@@ -757,7 +757,7 @@ EXPLAIN (COSTS FALSE) SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER
  Task Count: 4
  Tasks Shown: One of 4
  -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host=localhost port=xxxxx dbname=regression
  -> HashAggregate
  Group Key: users_table.user_id
  -> Nested Loop
@@ -787,7 +787,7 @@ EXPLAIN (COSTS FALSE) SELECT *
  Task Count: 4
  Tasks Shown: One of 4
  -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host=localhost port=xxxxx dbname=regression
  -> Unique
  -> Sort
  Sort Key: recent_users.user_id
@@ -821,7 +821,7 @@ EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USIN
  Task Count: 4
  Tasks Shown: One of 4
  -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host=localhost port=xxxxx dbname=regression
  -> Limit
  -> Sort
  Sort Key: (max("time")) DESC
@@ -831,7 +831,7 @@ EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USIN
  Task Count: 4
  Tasks Shown: One of 4
  -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host=localhost port=xxxxx dbname=regression
  -> Limit
  -> Sort
  Sort Key: et."time" DESC
@@ -853,7 +853,7 @@ EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USIN
  Task Count: 4
  Tasks Shown: One of 4
  -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host=localhost port=xxxxx dbname=regression
  -> Limit
  -> Sort
  Sort Key: et."time" DESC
diff --git a/src/test/regress/expected/pg12.out b/src/test/regress/expected/pg12.out
index b32ef305e..0cb720ef9 100644
--- a/src/test/regress/expected/pg12.out
+++ b/src/test/regress/expected/pg12.out
@@ -257,7 +257,7 @@ CREATE TABLE collection_users
 (used_id integer, collection_id integer, key bigint);
 ALTER TABLE collection_users
 ADD CONSTRAINT collection_users_fkey FOREIGN KEY (key, collection_id) REFERENCES collections_list (key, collection_id);
--- sanity check for postgres
+-- sanity check for postgres
 INSERT INTO collections_list VALUES (1, 0, '1.1');
 INSERT INTO collection_users VALUES (1, 0, 1);
 -- should fail because of fkey
@@ -282,7 +282,7 @@ NOTICE: Copying data from local table...
 INSERT INTO collection_users VALUES (1, 1000, 1);
 ERROR: insert or update on table "collection_users_60028" violates foreign key constraint "collection_users_fkey_60028"
 DETAIL: Key (key, collection_id)=(1, 1000) is not present in table "collections_list_60016".
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
 -- whereas new record with partition should go through
 INSERT INTO collections_list VALUES (2, 1, '1.2');
 INSERT INTO collection_users VALUES (5, 1, 2);
diff --git a/src/test/regress/expected/propagate_extension_commands.out b/src/test/regress/expected/propagate_extension_commands.out
index 64166ac65..0a0e4bef9 100644
--- a/src/test/regress/expected/propagate_extension_commands.out
+++ b/src/test/regress/expected/propagate_extension_commands.out
@@ -75,8 +75,8 @@ SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_exte
 SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'isn'$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,1)
- (localhost,57638,t,1)
+ (localhost,xxxxx,t,1)
+ (localhost,xxxxx,t,1)
 (2 rows)
 CREATE TABLE ref_table (a public.issn);
@@ -96,8 +96,8 @@ RESET client_min_messages;
 SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'isn'$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,1.1)
- (localhost,57638,t,1.1)
+ (localhost,xxxxx,t,1.1)
+ (localhost,xxxxx,t,1.1)
 (2 rows)
 -- now, update to a newer version
@@ -106,16 +106,16 @@ ALTER EXTENSION isn UPDATE TO '1.2';
 SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'isn'$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,1.2)
- (localhost,57638,t,1.2)
+ (localhost,xxxxx,t,1.2)
+ (localhost,xxxxx,t,1.2)
 (2 rows)
 -- before changing the schema, ensure the current schmea
 SELECT run_command_on_workers($$SELECT nspname from pg_namespace where oid=(SELECT extnamespace FROM pg_extension WHERE extname = 'isn')$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,extension'test)
- (localhost,57638,t,extension'test)
+ (localhost,xxxxx,t,extension'test)
+ (localhost,xxxxx,t,extension'test)
 (2 rows)
 -- now change the schema
@@ -133,8 +133,8 @@ SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_exte
 SELECT run_command_on_workers($$SELECT nspname from pg_namespace where oid=(SELECT extnamespace FROM pg_extension WHERE extname = 'isn')$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,public)
- (localhost,57638,t,public)
+ (localhost,xxxxx,t,public)
+ (localhost,xxxxx,t,public)
 (2 rows)
 -- SET client_min_messages TO WARNING before executing a DROP EXTENSION statement
@@ -153,7 +153,7 @@ DROP EXTENSION seg CASCADE;
 -- before remove, first remove the existing relations (due to the other tests)
 DROP SCHEMA "extension'test" CASCADE;
 RESET client_min_messages;
-SELECT 1 from master_remove_node('localhost', :worker_2_port);
+SELECT 1 from master_remove_node('localhost', :worker_2_port);
  ?column?
 ---------------------------------------------------------------------
 1
@@ -165,13 +165,13 @@ CREATE EXTENSION seg;
 SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'seg'$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,1)
+ (localhost,xxxxx,t,1)
 (1 row)
 SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,1.3)
+ (localhost,xxxxx,t,1.3)
 (1 row)
 -- now create the reference table
@@ -183,8 +183,8 @@ SELECT create_reference_table('ref_table_2');
 (1 row)
 -- and add the other node
-SELECT 1 from master_add_node('localhost', :worker_2_port);
-NOTICE: Replicating reference table "ref_table_2" to the node localhost:xxxxx
+SELECT 1 from master_add_node('localhost', :worker_2_port);
+NOTICE: Replicating reference table "ref_table_2" to the node localhost:xxxxx
  ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -194,15 +194,15 @@ NOTICE: Replicating reference table "ref_table_2" to the node localhost:xxxxx
 SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'seg'$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,1)
- (localhost,57638,t,1)
+ (localhost,xxxxx,t,1)
+ (localhost,xxxxx,t,1)
 (2 rows)
 SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,1.3)
- (localhost,57638,t,1.3)
+ (localhost,xxxxx,t,1.3)
+ (localhost,xxxxx,t,1.3)
 (2 rows)
 -- and similarly check for the reference table
@@ -235,8 +235,8 @@ SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_exte
 SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'isn'$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,0)
- (localhost,57638,t,0)
+ (localhost,xxxxx,t,0)
+ (localhost,xxxxx,t,0)
 (2 rows)
 -- give a notice for the following commands saying that it is not
@@ -281,8 +281,8 @@ SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_exte
 SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'seg'$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,0)
- (localhost,57638,t,0)
+ (localhost,xxxxx,t,0)
+ (localhost,xxxxx,t,0)
 (2 rows)
 -- restore client_min_messages after DROP EXTENSION
@@ -320,8 +320,8 @@ ROLLBACK;
 SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,1.3)
- (localhost,57638,t,1.3)
+ (localhost,xxxxx,t,1.3)
+ (localhost,xxxxx,t,1.3)
 (2 rows)
 -- drop the schema and all the objects
@@ -333,7 +333,7 @@ CREATE SCHEMA "extension'test";
 SET search_path TO "extension'test";
 RESET client_min_messages;
 -- remove the node, we'll add back again
-SELECT 1 from master_remove_node('localhost', :worker_2_port);
+SELECT 1 from master_remove_node('localhost', :worker_2_port);
  ?column?
 ---------------------------------------------------------------------
 1
@@ -364,8 +364,8 @@ BEGIN;
 COMMIT;
 -- add the node back
-SELECT 1 from master_add_node('localhost', :worker_2_port);
-NOTICE: Replicating reference table "t3" to the node localhost:xxxxx
+SELECT 1 from master_add_node('localhost', :worker_2_port);
+NOTICE: Replicating reference table "t3" to the node localhost:xxxxx
  ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -381,8 +381,8 @@ SELECT count(*) FROM citus.pg_dist_object WHERE objid IN (SELECT oid FROM pg_ext
 SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname IN ('seg', 'isn')$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,2)
- (localhost,57638,t,2)
+ (localhost,xxxxx,t,2)
+ (localhost,xxxxx,t,2)
 (2 rows)
 -- drop the schema and all the objects
diff --git a/src/test/regress/expected/remove_coordinator.out b/src/test/regress/expected/remove_coordinator.out
index e59a1f89e..2d61e68b4 100644
--- a/src/test/regress/expected/remove_coordinator.out
+++ b/src/test/regress/expected/remove_coordinator.out
@@ -1,5 +1,5 @@
 -- removing coordinator from pg_dist_node should update pg_dist_colocation
-SELECT master_remove_node('localhost', :master_port);
+SELECT master_remove_node('localhost', :master_port);
  master_remove_node
 ---------------------------------------------------------------------
diff --git a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out
index aae9ad9cc..e8898070e 100644
--- a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out
+++ b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out
@@ -44,7 +44,7 @@ EXPLAIN INSERT INTO squares SELECT a, a*a FROM numbers;
  Task Count: 1
  Tasks Shown: All
  -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host=localhost port=xxxxx dbname=regression
  -> Insert on squares_8000000 citus_table_alias (cost=0.00..41.88 rows=2550 width=8)
  -> Seq Scan on numbers_8000001 numbers (cost=0.00..41.88 rows=2550 width=8)
 (7 rows)
@@ -66,7 +66,7 @@ EXPLAIN INSERT INTO numbers SELECT a FROM squares WHERE a < 3;
  Task Count: 1
  Tasks Shown: All
  -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host=localhost port=xxxxx dbname=regression
  -> Insert on numbers_8000001 citus_table_alias (cost=0.00..38.25 rows=753 width=4)
  -> Seq Scan on squares_8000000 squares (cost=0.00..38.25 rows=753 width=4)
  Filter: (a < 3)
diff --git a/src/test/regress/expected/replicated_partitioned_table.out b/src/test/regress/expected/replicated_partitioned_table.out
index 5ca5dc80b..a42bdcc9f 100644
--- a/src/test/regress/expected/replicated_partitioned_table.out
+++ b/src/test/regress/expected/replicated_partitioned_table.out
@@ -239,12 +239,12 @@ UPDATE pg_dist_placement SET shardstate = 3 WHERE shardid = :newshardid
 -- cannot repair a shard after a modification (transaction still open during repair)
 BEGIN;
 INSERT INTO customer_engagements VALUES (1, 1);
-SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
+SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
 ERROR: cannot open new connections after the first modification command within a transaction
 ROLLBACK;
 -- modifications after reparing a shard are fine (will use new metadata)
 BEGIN;
-SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
+SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
  master_copy_shard_placement
 ---------------------------------------------------------------------
@@ -262,7 +262,7 @@ SELECT * FROM customer_engagements ORDER BY 1,2,3;
 ROLLBACK;
 BEGIN;
-SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
+SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
  master_copy_shard_placement
 ---------------------------------------------------------------------
diff --git a/src/test/regress/expected/sql_procedure.out b/src/test/regress/expected/sql_procedure.out
index ac7f878eb..d97448efb 100644
--- a/src/test/regress/expected/sql_procedure.out
+++ b/src/test/regress/expected/sql_procedure.out
@@ -90,7 +90,7 @@ $$;
 CALL test_procedure_modify_insert(2,12);
 ERROR: duplicate key value violates unique constraint "idx_table_100503"
 DETAIL: Key (id, org_id)=(2, 12) already exists.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
 SQL statement "INSERT INTO test_table VALUES (tt_id, tt_org_id)"
 PL/pgSQL function test_procedure_modify_insert(integer,integer) line 5 at SQL statement
 SELECT * FROM test_table ORDER BY 1, 2;
@@ -110,7 +110,7 @@ $$;
 CALL test_procedure_modify_insert_commit(2,30);
 ERROR: duplicate key value violates unique constraint "idx_table_100503"
 DETAIL: Key (id, org_id)=(2, 30) already exists.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:xxxxx
 SQL statement "INSERT INTO test_table VALUES (tt_id, tt_org_id)"
 PL/pgSQL function test_procedure_modify_insert_commit(integer,integer) line 5 at SQL statement
 SELECT * FROM test_table ORDER BY 1, 2;
diff --git a/src/test/regress/expected/ssl_by_default.out b/src/test/regress/expected/ssl_by_default.out
index d75bc1b28..a39a532de 100644
--- a/src/test/regress/expected/ssl_by_default.out
+++ b/src/test/regress/expected/ssl_by_default.out
@@ -22,8 +22,8 @@ SELECT run_command_on_workers($$
 $$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,on)
- (localhost,57638,t,on)
+ (localhost,xxxxx,t,on)
+ (localhost,xxxxx,t,on)
 (2 rows)
 SHOW citus.node_conninfo;
@@ -37,8 +37,8 @@ SELECT run_command_on_workers($$
 $$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,sslmode=require)
- (localhost,57638,t,sslmode=require)
+ (localhost,xxxxx,t,sslmode=require)
+ (localhost,xxxxx,t,sslmode=require)
 (2 rows)
 SELECT run_command_on_workers($$
@@ -46,8 +46,8 @@ SELECT run_command_on_workers($$
 $$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,t)
- (localhost,57638,t,t)
+ (localhost,xxxxx,t,t)
+ (localhost,xxxxx,t,t)
 (2 rows)
 SHOW ssl_ciphers;
@@ -61,7 +61,7 @@ SELECT run_command_on_workers($$
 $$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,TLSv1.2+HIGH:!aNULL:!eNULL)
- (localhost,57638,t,TLSv1.2+HIGH:!aNULL:!eNULL)
+ (localhost,xxxxx,t,TLSv1.2+HIGH:!aNULL:!eNULL)
+ (localhost,xxxxx,t,TLSv1.2+HIGH:!aNULL:!eNULL)
 (2 rows)
diff --git a/src/test/regress/expected/subquery_basics.out b/src/test/regress/expected/subquery_basics.out
index d43a48f0b..31aafcec2 100644
--- a/src/test/regress/expected/subquery_basics.out
+++ b/src/test/regress/expected/subquery_basics.out
@@ -192,7 +192,7 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.value_2,
 -- same query with alias in the subquery
 SELECT
- DISTINCT ON (citus) citus, postgres, citus + 1 as c1, postgres-1 as p1
+ DISTINCT ON (citus) citus, postgres, citus + 1 as c1, postgres-1 as p1
 FROM
 (SELECT
 users_table.value_2
@@ -203,7 +203,7 @@ FROM
 event_type IN (1,2,3,4)
 GROUP BY users_table.value_2
 ORDER BY 1 DESC
- ) as foo(postgres),
+ ) as foo(postgres),
 (SELECT
 users_table.user_id
 FROM
@@ -213,13 +213,13 @@ FROM
 event_type IN (5,6,7,8)
 ORDER BY 1 DESC
 ) as bar (citus)
- WHERE foo.postgres = bar.citus
+ WHERE foo.postgres = bar.citus
 ORDER BY 1 DESC, 2 DESC
 LIMIT 3;
 DEBUG: generating subplan XXX_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) GROUP BY users_table.value_2 ORDER BY users_table.value_2 DESC
-DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT ON (bar.citus) bar.citus, foo.postgres, (bar.citus OPERATOR(pg_catalog.+) 1) AS c1, (foo.postgres OPERATOR(pg_catalog.-) 1) AS p1 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo(postgres), (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) ORDER BY users_table.user_id DESC) bar(citus) WHERE (foo.postgres OPERATOR(pg_catalog.=) bar.citus) ORDER BY bar.citus DESC, foo.postgres DESC LIMIT 3
+DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT ON (bar.citus) bar.citus, foo.postgres, (bar.citus OPERATOR(pg_catalog.+) 1) AS c1, (foo.postgres OPERATOR(pg_catalog.-) 1) AS p1 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo(postgres), (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) ORDER BY users_table.user_id DESC) bar(citus) WHERE (foo.postgres OPERATOR(pg_catalog.=) bar.citus) ORDER BY bar.citus DESC, foo.postgres DESC LIMIT 3
 DEBUG: push down of limit count: 3
- citus | postgres | c1 | p1
+ citus | postgres | c1 | p1
 ---------------------------------------------------------------------
 5 | 5 | 6 | 4
 4 | 4 | 5 | 3
diff --git a/src/test/regress/expected/subquery_prepared_statements.out b/src/test/regress/expected/subquery_prepared_statements.out
index 049206481..ea8687873 100644
--- a/src/test/regress/expected/subquery_prepared_statements.out
+++ b/src/test/regress/expected/subquery_prepared_statements.out
@@ -5,8 +5,8 @@ CREATE SCHEMA subquery_prepared_statements;
 SELECT run_command_on_workers('CREATE SCHEMA subquery_prepared_statements;');
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,"CREATE SCHEMA")
- (localhost,57638,t,"CREATE SCHEMA")
+ (localhost,xxxxx,t,"CREATE SCHEMA")
+ (localhost,xxxxx,t,"CREATE SCHEMA")
 (2 rows)
 SET search_path TO subquery_prepared_statements, public;
diff --git a/src/test/regress/expected/upgrade_basic_after.out b/src/test/regress/expected/upgrade_basic_after.out
index 88e60c649..dc682867b 100644
--- a/src/test/regress/expected/upgrade_basic_after.out
+++ b/src/test/regress/expected/upgrade_basic_after.out
@@ -113,7 +113,7 @@ EXPLAIN (COSTS FALSE) SELECT * from t;
  Task Count: 32
  Tasks Shown: One of 32
  -> Task
- Node: host=localhost port=xxxxx dbname=postgres
+ Node: host=localhost port=xxxxx dbname=postgres
  -> Seq Scan on t_102008 t
 (6 rows)
@@ -124,7 +124,7 @@ EXPLAIN (COSTS FALSE) SELECT * from t WHERE a = 1;
  Task Count: 1
  Tasks Shown: All
  -> Task
- Node: host=localhost port=xxxxx dbname=postgres
+ Node: host=localhost port=xxxxx dbname=postgres
  -> Bitmap Heap Scan on t_102009 t
  Recheck Cond: (a = 1)
  -> Bitmap Index Scan on t_a_idx_102009
diff --git a/src/test/regress/expected/upgrade_pg_dist_object_test_before.out b/src/test/regress/expected/upgrade_pg_dist_object_test_before.out
index 2bdcedf5c..6d25776d8 100644
--- a/src/test/regress/expected/upgrade_pg_dist_object_test_before.out
+++ b/src/test/regress/expected/upgrade_pg_dist_object_test_before.out
@@ -6,8 +6,8 @@ CREATE EXTENSION isn;
 SELECT run_command_on_workers($$CREATE EXTENSION isn;$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57636,t,"CREATE EXTENSION")
- (localhost,57637,t,"CREATE EXTENSION")
+ (localhost,xxxxx,t,"CREATE EXTENSION")
+ (localhost,xxxxx,t,"CREATE EXTENSION")
 (2 rows)
 CREATE TABLE isn_dist_table (key int, value issn);
@@ -22,8 +22,8 @@ CREATE EXTENSION seg;
 SELECT run_command_on_workers($$CREATE EXTENSION seg;$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57636,t,"CREATE EXTENSION")
- (localhost,57637,t,"CREATE EXTENSION")
+ (localhost,xxxxx,t,"CREATE EXTENSION")
+ (localhost,xxxxx,t,"CREATE EXTENSION")
 (2 rows)
 -- schema propagation --
@@ -54,15 +54,15 @@ CREATE TYPE fooschema.footype AS (x int, y int);
 SELECT run_command_on_workers($$CREATE SCHEMA fooschema;$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57636,t,"CREATE SCHEMA")
- (localhost,57637,t,"CREATE SCHEMA")
+ (localhost,xxxxx,t,"CREATE SCHEMA")
+ (localhost,xxxxx,t,"CREATE SCHEMA")
 (2 rows)
 SELECT run_command_on_workers($$CREATE TYPE fooschema.footype AS (x int, y int);$$);
  run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57636,t,"CREATE TYPE")
- (localhost,57637,t,"CREATE TYPE")
+ (localhost,xxxxx,t,"CREATE TYPE")
+ (localhost,xxxxx,t,"CREATE TYPE")
 (2 rows)
 CREATE TABLE fooschema.footable (f fooschema.footype);
diff --git a/src/test/regress/expected/upgrade_rebalance_strategy_before.out b/src/test/regress/expected/upgrade_rebalance_strategy_before.out
index 0a12b1d60..9e1f83d3f 100644
--- a/src/test/regress/expected/upgrade_rebalance_strategy_before.out
+++ b/src/test/regress/expected/upgrade_rebalance_strategy_before.out
@@ -6,13 +6,13 @@ CREATE FUNCTION shard_cost_2(bigint)
 CREATE FUNCTION capacity_high_worker_1(nodeidarg int)
     RETURNS real AS $$
     SELECT
-        (CASE WHEN nodeport = 57637 THEN 1000 ELSE 1 END)::real
+        (CASE WHEN nodeport = xxxxx THEN 1000 ELSE 1 END)::real
     FROM pg_dist_node where nodeid = nodeidarg
     $$ LANGUAGE sql;
 CREATE FUNCTION only_worker_2(shardid bigint, nodeidarg int)
     RETURNS boolean AS $$
     SELECT
-        (CASE WHEN nodeport = 57638 THEN TRUE ELSE FALSE END)
+        (CASE WHEN nodeport = xxxxx THEN TRUE ELSE FALSE END)
     FROM pg_dist_node where nodeid = nodeidarg
     $$ LANGUAGE sql;
 ALTER TABLE pg_catalog.pg_dist_rebalance_strategy DISABLE TRIGGER pg_dist_rebalance_strategy_enterprise_check_trigger;
diff --git a/src/test/regress/expected/window_functions.out b/src/test/regress/expected/window_functions.out
index 3cdfdff85..b32b1ba0c 100644
--- a/src/test/regress/expected/window_functions.out
+++ b/src/test/regress/expected/window_functions.out
@@ -949,7 +949,7 @@ ORDER BY user_id, avg(value_1) DESC;
  Task Count: 4
  Tasks Shown: One of 4
  -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host=localhost port=xxxxx dbname=regression
  -> WindowAgg
  -> Sort
  Sort Key: users_table.user_id, (('1'::numeric / ('1'::numeric + avg(users_table.value_1))))
@@ -1025,7 +1025,7 @@ LIMIT 5;
  Task Count: 4
  Tasks Shown: One of 4
  -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host=localhost port=xxxxx dbname=regression
  -> WindowAgg
  -> Sort
  Sort Key: users_table.user_id, (('1'::numeric / ('1'::numeric + avg(users_table.value_1))))
@@ -1055,7 +1055,7 @@ LIMIT 5;
  Task Count: 4
  Tasks Shown: One of 4
  -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host=localhost port=xxxxx dbname=regression
  -> WindowAgg
  -> Sort
  Sort Key: users_table.user_id, (('1'::numeric / ('1'::numeric + avg(users_table.value_1))))
@@ -1085,7 +1085,7 @@ LIMIT 5;
  Task Count: 4
  Tasks Shown: One of 4
  -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host=localhost port=xxxxx dbname=regression
  -> WindowAgg
  -> Sort
  Sort Key: users_table.user_id, ((1 / (1 + sum(users_table.value_2))))
@@ -1115,7 +1115,7 @@ LIMIT 5;
  Task Count: 4
  Tasks Shown: One of 4
  -> Task
- Node: host=localhost port=xxxxx dbname=regression
+ Node: host=localhost port=xxxxx dbname=regression
  -> WindowAgg
  -> Sort
  Sort Key: users_table.user_id, (sum(users_table.value_2))
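A closing note on the upgrade_rebalance_strategy_before hunks above: the two helpers follow the signatures that custom entries in pg_dist_rebalance_strategy expect, a capacity function taking a node id and returning real, and a shard-allowed function taking (shardid, nodeid) and returning boolean. A sketch of the capacity half, restated from the fixture with its pre-normalization port (the literal worker port 57637 comes from the removed line above and is exactly the kind of environment-specific value the normalizer rewrites to xxxxx):

-- Give the node listening on port 57637 a 1000x capacity so the rebalancer
-- prefers to place shards there; every other node gets capacity 1.
CREATE FUNCTION capacity_high_worker_1(nodeidarg int)
    RETURNS real AS $$
    SELECT
        (CASE WHEN nodeport = 57637 THEN 1000 ELSE 1 END)::real
    FROM pg_dist_node where nodeid = nodeidarg
$$ LANGUAGE sql;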