From 7f3de68b0d8334bbfc545609ffdc2cf2922a086a Mon Sep 17 00:00:00 2001
From: Jelte Fennema
Date: Fri, 3 Jan 2020 11:47:49 +0100
Subject: [PATCH] Normalize tests: header separator length

---
 src/test/regress/bin/normalize.sed | 9 +-
 .../regress/expected/adaptive_executor.out | 10 +-
 .../adaptive_executor_repartition.out | 24 +-
 src/test/regress/expected/add_coordinator.out | 4 +-
 .../regress/expected/aggregate_support.out | 34 +-
 .../expected/alter_role_propagation.out | 62 ++--
 src/test/regress/expected/base_enable_mx.out | 4 +-
 src/test/regress/expected/bool_agg.out | 14 +-
 src/test/regress/expected/ch_bench_having.out | 34 +-
 .../regress/expected/ch_bench_having_mx.out | 34 +-
 .../ch_bench_subquery_repartition.out | 18 +-
 .../expected/chbenchmark_all_queries.out | 68 ++--
 .../expected/coordinator_shouldhaveshards.out | 28 +-
 .../expected/cte_nested_modification.out | 14 +-
 .../regress/expected/cte_prepared_modify.out | 4 +-
 .../expected/custom_aggregate_support.out | 40 +-
 .../expected/custom_aggregate_support_0.out | 10 +-
 .../expected/custom_aggregate_support_1.out | 44 +--
 .../expected/disable_object_propagation.out | 18 +-
 .../expected/distributed_collations.out | 20 +-
 .../distributed_collations_conflict.out | 14 +-
 .../expected/distributed_functions.out | 146 ++++----
 .../distributed_functions_conflict.out | 18 +-
 .../expected/distributed_procedure.out | 50 +--
 .../regress/expected/distributed_types.out | 76 ++--
 .../expected/distributed_types_conflict.out | 20 +-
 .../distributed_types_xact_add_enum_value.out | 16 +-
 ...istributed_types_xact_add_enum_value_0.out | 16 +-
 src/test/regress/expected/dml_recursive.out | 16 +-
 .../ensure_no_intermediate_data_leak.out | 8 +-
 .../expected/escape_extension_name.out | 10 +-
 .../expected/escape_extension_name_0.out | 14 +-
 .../expected/expression_reference_join.out | 8 +-
 .../expected/failure_1pc_copy_append.out | 62 ++--
 .../expected/failure_1pc_copy_hash.out | 84 ++---
 .../expected/failure_add_disable_node.out | 88 ++---
 .../failure_connection_establishment.out | 50 +--
 .../regress/expected/failure_copy_on_hash.out | 92 ++---
 .../expected/failure_copy_to_reference.out | 122 +++----
 ...ure_create_distributed_table_non_empty.out | 244 ++++++------
 .../failure_create_index_concurrently.out | 38 +-
 .../failure_create_reference_table.out | 58 +--
 .../regress/expected/failure_create_table.out | 174 ++++-----
 .../regress/expected/failure_cte_subquery.out | 50 +--
 src/test/regress/expected/failure_ddl.out | 246 ++++++-------
 .../failure_insert_select_pushdown.out | 36 +-
 .../failure_insert_select_via_coordinator.out | 48 +--
 .../regress/expected/failure_multi_dml.out | 88 ++---
 .../expected/failure_multi_row_insert.out | 34 +-
 .../failure_multi_shard_update_delete.out | 148 ++++----
 .../expected/failure_mx_metadata_sync.out | 36 +-
 .../regress/expected/failure_ref_tables.out | 24 +-
 .../regress/expected/failure_savepoints.out | 54 +--
 src/test/regress/expected/failure_setup.out | 6 +-
 .../regress/expected/failure_single_mod.out | 28 +-
 .../expected/failure_single_select.out | 48 +--
 .../regress/expected/failure_test_helpers.out | 2 +-
 .../regress/expected/failure_truncate.out | 342 +++++++++---------
 src/test/regress/expected/failure_vacuum.out | 30 +-
 .../regress/expected/failure_vacuum_1.out | 30 +-
 .../expected/fast_path_router_modify.out | 32 +-
 .../foreign_key_restriction_enforcement.out | 200 +++++-----
 .../foreign_key_to_reference_table.out | 336 ++++++++---------
 src/test/regress/expected/full_join.out | 40 +-
 .../expected/intermediate_result_pruning.out | 84 ++---
 .../regress/expected/intermediate_results.out | 102 +++---
 .../expected/limit_intermediate_size.out | 12 +-
 .../expected/local_shard_execution.out | 190 +++++-----
 .../regress/expected/materialized_view.out | 58 +--
 .../regress/expected/multi_703_upgrade.out | 2 +-
 .../multi_agg_approximate_distinct.out | 38 +-
 .../multi_agg_approximate_distinct_0.out | 8 +-
 .../multi_alter_table_add_constraints.out | 56 +--
 src/test/regress/expected/multi_array_agg.out | 22 +-
 .../expected/multi_average_expression.out | 6 +-
 .../regress/expected/multi_basic_queries.out | 10 +-
 .../multi_behavioral_analytics_basics.out | 168 ++++-----
 ...avioral_analytics_single_shard_queries.out | 142 ++++----
 .../multi_binary_master_copy_format.out | 8 +-
 .../expected/multi_cache_invalidation.out | 8 +-
 .../regress/expected/multi_citus_tools.out | 94 ++---
 .../expected/multi_cluster_management.out | 206 +++++------
 .../multi_colocated_shard_transfer.out | 22 +-
 .../expected/multi_colocation_utils.out | 196 +++++-----
 .../expected/multi_complex_expressions.out | 84 ++---
 .../expected/multi_complex_expressions_0.out | 80 ++--
 .../expected/multi_count_type_conversion.out | 4 +-
 .../regress/expected/multi_create_shards.out | 24 +-
 .../regress/expected/multi_create_table.out | 206 +++++------
 .../multi_create_table_constraints.out | 48 +--
 .../regress/expected/multi_cross_shard.out | 26 +-
 .../regress/expected/multi_data_types.out | 24 +-
 .../expected/multi_deparse_function.out | 124 +++----
 .../expected/multi_deparse_procedure.out | 78 ++--
 .../expected/multi_deparse_shard_query.out | 50 +--
 .../multi_distributed_transaction_id.out | 34 +-
 .../expected/multi_distribution_metadata.out | 112 +++---
 .../regress/expected/multi_drop_extension.out | 12 +-
 .../expected/multi_dropped_column_aliases.out | 14 +-
 src/test/regress/expected/multi_extension.out | 20 +-
 .../multi_follower_configure_followers.out | 6 +-
 .../regress/expected/multi_follower_dml.out | 18 +-
 .../multi_follower_select_statements.out | 24 +-
 .../expected/multi_follower_task_tracker.out | 4 +-
 .../regress/expected/multi_foreign_key.out | 142 ++++----
 .../multi_foreign_key_relation_graph.out | 148 ++++----
 .../expected/multi_function_evaluation.out | 14 +-
 .../expected/multi_function_in_join.out | 24 +-
 .../expected/multi_generate_ddl_commands.out | 26 +-
 .../regress/expected/multi_hash_pruning.out | 88 ++---
 .../expected/multi_having_pushdown.out | 24 +-
 .../expected/multi_index_statements.out | 40 +-
 .../regress/expected/multi_insert_select.out | 124 +++----
 .../expected/multi_insert_select_conflict.out | 46 +--
 ...lti_insert_select_non_pushable_queries.out | 114 +++---
 .../expected/multi_insert_select_window.out | 52 +--
 .../expected/multi_join_order_additional.out | 22 +-
 .../multi_join_order_tpch_repartition.out | 10 +-
 .../expected/multi_join_order_tpch_small.out | 8 +-
 .../regress/expected/multi_join_pruning.out | 18 +-
 src/test/regress/expected/multi_json_agg.out | 28 +-
 .../expected/multi_json_object_agg.out | 28 +-
 src/test/regress/expected/multi_jsonb_agg.out | 28 +-
 .../expected/multi_jsonb_object_agg.out | 28 +-
 .../regress/expected/multi_limit_clause.out | 68 ++--
 .../multi_limit_clause_approximate.out | 14 +-
 .../expected/multi_master_protocol.out | 8 +-
 .../expected/multi_metadata_access.out | 2 +-
 .../expected/multi_metadata_attributes.out | 2 +-
 .../regress/expected/multi_metadata_sync.out | 314 ++++++++--------
 .../regress/expected/multi_modifications.out | 204 +++++------
 .../expected/multi_modifying_xacts.out | 234 ++++++------
 src/test/regress/expected/multi_multiuser.out | 128 +++----
 .../expected/multi_mx_add_coordinator.out | 38 +-
 src/test/regress/expected/multi_mx_call.out | 94 ++---
 .../expected/multi_mx_create_table.out | 52 +--
 src/test/regress/expected/multi_mx_ddl.out | 48 +--
 .../multi_mx_function_call_delegation.out | 132 +++----
 .../expected/multi_mx_hide_shard_names.out | 68 ++--
 .../regress/expected/multi_mx_metadata.out | 76 ++--
 .../expected/multi_mx_modifications.out | 74 ++--
 ...i_mx_modifications_to_reference_tables.out | 38 +-
 .../expected/multi_mx_modifying_xacts.out | 56 +--
 .../expected/multi_mx_node_metadata.out | 166 ++++-----
 .../expected/multi_mx_partitioning.out | 42 +--
 .../expected/multi_mx_reference_table.out | 138 +++----
 .../multi_mx_repartition_udt_prepare.out | 10 +-
 .../expected/multi_mx_repartition_udt_w1.out | 4 +-
 .../expected/multi_mx_repartition_udt_w2.out | 4 +-
 .../expected/multi_mx_router_planner.out | 160 ++++----
 .../expected/multi_mx_schema_support.out | 70 ++--
 .../regress/expected/multi_mx_tpch_query1.out | 6 +-
 .../expected/multi_mx_tpch_query10.out | 6 +-
 .../expected/multi_mx_tpch_query12.out | 6 +-
 .../expected/multi_mx_tpch_query14.out | 6 +-
 .../expected/multi_mx_tpch_query19.out | 6 +-
 .../regress/expected/multi_mx_tpch_query3.out | 6 +-
 .../regress/expected/multi_mx_tpch_query6.out | 6 +-
 .../regress/expected/multi_mx_tpch_query7.out | 6 +-
 .../expected/multi_mx_tpch_query7_nested.out | 6 +-
 .../multi_mx_transaction_recovery.out | 44 +--
 .../multi_mx_truncate_from_worker.out | 48 +--
 .../regress/expected/multi_name_lengths.out | 56 +--
 .../expected/multi_name_resolution.out | 6 +-
 .../multi_null_minmax_value_pruning.out | 20 +-
 .../expected/multi_orderby_limit_pushdown.out | 56 +--
 .../expected/multi_partition_pruning.out | 20 +-
 .../regress/expected/multi_partitioning.out | 260 ++++++-------
 .../expected/multi_partitioning_utils.out | 74 ++--
 .../regress/expected/multi_prepare_plsql.out | 266 +++++++-------
 .../regress/expected/multi_prepare_sql.out | 180 ++++-----
 .../expected/multi_prune_shard_list.out | 48 +--
 .../multi_query_directory_cleanup.out | 68 ++--
 .../multi_query_directory_cleanup_0.out | 68 ++--
 .../expected/multi_read_from_secondaries.out | 12 +-
 .../expected/multi_real_time_transaction.out | 118 +++---
 .../expected/multi_reference_table.out | 252 ++++++-------
 .../multi_remove_node_reference_table.out | 230 ++++++------
 .../regress/expected/multi_repair_shards.out | 12 +-
 .../multi_repartition_join_planning.out | 16 +-
 .../multi_repartition_join_pruning.out | 30 +-
 .../expected/multi_repartition_join_ref.out | 16 +-
 ...multi_repartition_join_task_assignment.out | 6 +-
 .../expected/multi_repartition_udt.out | 10 +-
 .../multi_repartitioned_subquery_udf.out | 2 +-
 .../multi_replicate_reference_table.out | 152 ++++----
 .../regress/expected/multi_router_planner.out | 284 +++++++--------
 .../multi_router_planner_fast_path.out | 284 +++++++--------
 .../regress/expected/multi_schema_support.out | 184 +++++-----
 .../expected/multi_select_distinct.out | 138 +++----
 .../expected/multi_select_for_update.out | 30 +-
 .../regress/expected/multi_shard_modify.out | 72 ++--
 .../expected/multi_shard_update_delete.out | 120 +++---
 .../regress/expected/multi_simple_queries.out | 84 ++---
 .../expected/multi_simple_queries_0.out | 80 ++--
 .../multi_single_relation_subquery.out | 16 +-
 .../regress/expected/multi_size_queries.out | 38 +-
 .../regress/expected/multi_sql_function.out | 74 ++--
 src/test/regress/expected/multi_subquery.out | 80 ++--
 .../multi_subquery_behavioral_analytics.out | 186 +++++-----
 .../multi_subquery_complex_queries.out | 98 ++---
 ...ulti_subquery_complex_reference_clause.out | 140 +++----
 .../multi_subquery_in_where_clause.out | 28 +-
 ...lti_subquery_in_where_reference_clause.out | 28 +-
 .../regress/expected/multi_subquery_misc.out | 48 +--
 .../regress/expected/multi_subquery_union.out | 88 ++---
 .../multi_subquery_window_functions.out | 48 +--
 .../expected/multi_subtransactions.out | 46 +--
 src/test/regress/expected/multi_table_ddl.out | 22 +-
 .../expected/multi_task_assignment_policy.out | 38 +-
 .../expected/multi_task_string_size.out | 6 +-
 .../expected/multi_test_catalog_views.out | 2 +-
 .../regress/expected/multi_test_helpers.out | 2 +-
 .../regress/expected/multi_tpch_query1.out | 2 +-
 .../regress/expected/multi_tpch_query10.out | 2 +-
 .../regress/expected/multi_tpch_query12.out | 2 +-
 .../regress/expected/multi_tpch_query14.out | 2 +-
 .../regress/expected/multi_tpch_query19.out | 2 +-
 .../regress/expected/multi_tpch_query3.out | 2 +-
 .../regress/expected/multi_tpch_query6.out | 2 +-
 .../regress/expected/multi_tpch_query7.out | 2 +-
 .../expected/multi_tpch_query7_nested.out | 2 +-
 .../expected/multi_transaction_recovery.out | 86 ++---
 .../multi_transactional_drop_shards.out | 132 +++----
 src/test/regress/expected/multi_truncate.out | 76 ++--
 .../multi_unsupported_worker_operations.out | 62 ++--
 .../multi_upgrade_reference_table.out | 186 +++++-----
 src/test/regress/expected/multi_upsert.out | 32 +-
 src/test/regress/expected/multi_utilities.out | 72 ++--
 .../expected/multi_utility_statements.out | 32 +-
 src/test/regress/expected/multi_view.out | 188 +++++-----
 .../expected/multi_working_columns.out | 6 +-
 .../mx_foreign_key_to_reference_table.out | 8 +-
 .../expected/non_colocated_join_order.out | 8 +-
 .../non_colocated_leaf_subquery_joins.out | 16 +-
 .../expected/non_colocated_subquery_joins.out | 72 ++--
 .../partitioned_intermediate_results.out | 48 +--
 src/test/regress/expected/pg12.out | 46 +--
 .../expected/propagate_extension_commands.out | 80 ++--
 .../expected/propagate_set_commands.out | 34 +-
 .../expected/recursive_dml_queries_mx.out | 10 +-
 ..._dml_with_different_planners_executors.out | 6 +-
 .../expected/relation_access_tracking.out | 188 +++++-----
 .../regress/expected/remove_coordinator.out | 2 +-
 ...licate_reference_tables_to_coordinator.out | 42 +--
 .../expected/replicated_partitioned_table.out | 26 +-
 src/test/regress/expected/row_types.out | 76 ++--
 .../expected/sequential_modifications.out | 136 +++----
 .../set_operation_and_local_tables.out | 34 +-
 src/test/regress/expected/set_operations.out | 132 +++----
 .../expected/single_hash_repartition_join.out | 6 +-
 src/test/regress/expected/sql_procedure.out | 24 +-
 src/test/regress/expected/ssl_by_default.out | 16 +-
 src/test/regress/expected/subqueries_deep.out | 8 +-
 .../regress/expected/subquery_and_cte.out | 26 +-
 src/test/regress/expected/subquery_basics.out | 28 +-
 .../expected/subquery_complex_target_list.out | 30 +-
 .../regress/expected/subquery_executors.out | 14 +-
 .../regress/expected/subquery_in_where.out | 36 +-
 .../expected/subquery_local_tables.out | 12 +-
 .../expected/subquery_partitioning.out | 18 +-
 .../expected/subquery_prepared_statements.out | 40 +-
 src/test/regress/expected/subquery_view.out | 30 +-
 .../expected/task_tracker_assign_task.out | 22 +-
 .../expected/task_tracker_cleanup_job.out | 26 +-
 .../expected/task_tracker_partition_task.out | 22 +-
 .../regress/expected/upgrade_basic_after.out | 70 ++--
 .../regress/expected/upgrade_basic_before.out | 12 +-
 .../upgrade_distributed_function_after.out | 6 +-
 .../upgrade_distributed_function_before.out | 12 +-
 .../upgrade_rebalance_strategy_after.out | 2 +-
 .../upgrade_rebalance_strategy_before.out | 4 +-
 .../expected/upgrade_ref2ref_after.out | 24 +-
 .../expected/upgrade_ref2ref_before.out | 8 +-
 .../regress/expected/upgrade_type_after.out | 2 +-
 .../regress/expected/upgrade_type_before.out | 2 +-
 .../regress/expected/validate_constraint.out | 10 +-
 .../regress/expected/window_functions.out | 64 ++--
 src/test/regress/expected/with_basics.out | 62 ++--
 src/test/regress/expected/with_dml.out | 6 +-
 src/test/regress/expected/with_executors.out | 22 +-
 src/test/regress/expected/with_join.out | 30 +-
 src/test/regress/expected/with_modifying.out | 104 +++---
 src/test/regress/expected/with_nested.out | 12 +-
 .../regress/expected/with_partitioning.out | 10 +-
 src/test/regress/expected/with_prepare.out | 84 ++---
 .../regress/expected/with_set_operations.out | 38 +-
 .../regress/expected/with_transactions.out | 20 +-
 src/test/regress/expected/with_where.out | 12 +-
 .../expected/worker_binary_data_partition.out | 18 +-
 .../worker_check_invalid_arguments.out | 6 +-
 .../expected/worker_hash_partition.out | 26 +-
 .../worker_hash_partition_complex.out | 22 +-
 .../expected/worker_merge_hash_files.out | 10 +-
 .../expected/worker_merge_range_files.out | 10 +-
 .../expected/worker_null_data_partition.out | 36 +-
 .../expected/worker_range_partition.out | 22 +-
 .../worker_range_partition_complex.out | 22 +-
 298 files changed, 8421 insertions(+), 8422 deletions(-)

diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed
index a74bd7dce..623a4f51d 100644
--- a/src/test/regress/bin/normalize.sed
+++ b/src/test/regress/bin/normalize.sed
@@ -13,11 +13,10 @@ s/placement [0-9]+/placement xxxxx/g
 s/shard [0-9]+/shard xxxxx/g
 s/assigned task [0-9]+ to node/assigned task to node/
 s/node group [12] (but|does)/node group \1/
-#
-## Differing names can have differing table column widths
-#s/(-+\|)+-+/---/g
-#s/.*-------------.*/---------------------------------------------------------------------/g
-#
+
+# Differing names can have differing table column widths
+s/^-[+-]{2,}$/---------------------------------------------------------------------/g
+
 ## In foreign_key_to_reference_table, normalize shard table names, etc in
 ## the generated plan
 #s/"(foreign_key_2_|fkey_ref_to_dist_|fkey_ref_)[0-9]+"/"\1xxxxxxx"/g
diff --git a/src/test/regress/expected/adaptive_executor.out b/src/test/regress/expected/adaptive_executor.out
index 9d3ec647f..87170b24b 100644
--- a/src/test/regress/expected/adaptive_executor.out
+++ b/src/test/regress/expected/adaptive_executor.out
@@ -6,7 +6,7 @@ SET citus.shard_replication_factor TO 1;
 SET citus.next_shard_id TO 801009000;
 SELECT create_distributed_table('test','x');
  create_distributed_table
---------------------------
+---------------------------------------------------------------------
 
 (1 row)
 
@@ -19,7 +19,7 @@ SET citus.task_executor_type TO 'adaptive';
 BEGIN;
 SELECT count(*) FROM test a JOIN (SELECT x, pg_sleep(0.1) FROM test) b USING (x);
  count
--------
+---------------------------------------------------------------------
     2
 (1 row)
 
@@ -28,7 +28,7 @@ SELECT sum(result::bigint) FROM run_command_on_workers($$
   WHERE pid <> pg_backend_pid() AND query LIKE '%8010090%'
 $$);
  sum
------
+---------------------------------------------------------------------
   2
 (1 row)
 
@@
-38,7 +38,7 @@ SET citus.executor_slow_start_interval TO '10ms'; BEGIN; SELECT count(*) FROM test a JOIN (SELECT x, pg_sleep(0.1) FROM test) b USING (x); count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -47,7 +47,7 @@ SELECT sum(result::bigint) FROM run_command_on_workers($$ WHERE pid <> pg_backend_pid() AND query LIKE '%8010090%' $$); sum ------ +--------------------------------------------------------------------- 4 (1 row) diff --git a/src/test/regress/expected/adaptive_executor_repartition.out b/src/test/regress/expected/adaptive_executor_repartition.out index ab40dd4c0..e8711493a 100644 --- a/src/test/regress/expected/adaptive_executor_repartition.out +++ b/src/test/regress/expected/adaptive_executor_repartition.out @@ -6,7 +6,7 @@ SET citus.enable_repartition_joins TO true; CREATE TABLE ab(a int, b int); SELECT create_distributed_table('ab', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -14,39 +14,39 @@ INSERT INTO ab SELECT *,* FROM generate_series(1,10); SELECT COUNT(*) FROM ab k, ab l WHERE k.a = l.b; count -------- +--------------------------------------------------------------------- 10 (1 row) SELECT COUNT(*) FROM ab k, ab l, ab m, ab t WHERE k.a = l.b AND k.a = m.b AND t.b = l.a; count -------- +--------------------------------------------------------------------- 10 (1 row) SELECT count(*) FROM (SELECT k.a FROM ab k, ab l WHERE k.a = l.b) first, (SELECT * FROM ab) second WHERE first.a = second.b; count -------- +--------------------------------------------------------------------- 10 (1 row) BEGIN; SELECT count(*) FROM (SELECT k.a FROM ab k, ab l WHERE k.a = l.b) first, (SELECT * FROM ab) second WHERE first.a = second.b; count -------- +--------------------------------------------------------------------- 10 (1 row) SELECT count(*) FROM (SELECT k.a FROM ab k, ab l WHERE k.a = l.b) first, (SELECT * FROM ab) second WHERE first.a = second.b; count -------- +--------------------------------------------------------------------- 10 (1 row) SELECT count(*) FROM (SELECT k.a FROM ab k, ab l WHERE k.a = l.b) first, (SELECT * FROM ab) second WHERE first.a = second.b; count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -63,19 +63,19 @@ CREATE TABLE single_hash_repartition_second (id int, sum int, avg float); CREATE TABLE ref_table (id int, sum int, avg float); SELECT create_distributed_table('single_hash_repartition_first', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('single_hash_repartition_second', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('ref_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -87,7 +87,7 @@ FROM WHERE r1.id = t1.id AND t2.sum = t1.id; QUERY PLAN ----------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) Task Count: 4 @@ -105,7 +105,7 @@ FROM WHERE t1.id = t2.id AND t1.sum = t3.id; QUERY PLAN 
----------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) Task Count: 4 diff --git a/src/test/regress/expected/add_coordinator.out b/src/test/regress/expected/add_coordinator.out index 8681c8b8a..14e0a1dbe 100644 --- a/src/test/regress/expected/add_coordinator.out +++ b/src/test/regress/expected/add_coordinator.out @@ -5,7 +5,7 @@ SELECT master_add_node('localhost', :master_port, groupid => 0) AS master_nodeid -- adding the same node again should return the existing nodeid SELECT master_add_node('localhost', :master_port, groupid => 0) = :master_nodeid; ?column? ----------- +--------------------------------------------------------------------- t (1 row) @@ -16,7 +16,7 @@ ERROR: group 0 already has a primary node SELECT start_metadata_sync_to_node('localhost', :master_port); NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/aggregate_support.out b/src/test/regress/expected/aggregate_support.out index c69950335..633f68bc7 100644 --- a/src/test/regress/expected/aggregate_support.out +++ b/src/test/regress/expected/aggregate_support.out @@ -40,27 +40,27 @@ create aggregate sum2_strict (int) ( ); select create_distributed_function('sum2(int)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) select create_distributed_function('sum2_strict(int)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) create table aggdata (id int, key int, val int, valf float8); select create_distributed_table('aggdata', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) insert into aggdata (id, key, val, valf) values (1, 1, 2, 11.2), (2, 1, NULL, 2.1), (3, 2, 2, 3.22), (4, 2, 3, 4.23), (5, 2, 5, 5.25), (6, 3, 4, 63.4), (7, 5, NULL, 75), (8, 6, NULL, NULL), (9, 6, NULL, 96), (10, 7, 8, 1078), (11, 9, 0, 1.19); select key, sum2(val), sum2_strict(val), stddev(valf) from aggdata group by key order by key; key | sum2 | sum2_strict | stddev ------+------+-------------+------------------ +--------------------------------------------------------------------- 1 | | 4 | 6.43467170879758 2 | 20 | 20 | 1.01500410508201 3 | 8 | 8 | @@ -73,7 +73,7 @@ select key, sum2(val), sum2_strict(val), stddev(valf) from aggdata group by key -- FILTER supported select key, sum2(val) filter (where valf < 5), sum2_strict(val) filter (where valf < 5) from aggdata group by key order by key; key | sum2 | sum2_strict ------+------+------------- +--------------------------------------------------------------------- 1 | | 2 | 10 | 10 3 | 0 | @@ -89,7 +89,7 @@ ERROR: cannot compute aggregate (distinct) DETAIL: table partitioning is unsuitable for aggregate (distinct) select id, sum2(distinct val), sum2_strict(distinct val) from aggdata group by id order by id; id | sum2 | sum2_strict -----+------+------------- +--------------------------------------------------------------------- 1 | 4 | 4 2 | | 3 | 4 | 4 @@ -109,7 +109,7 @@ ERROR: unsupported aggregate function sum2 -- Test 
handling a lack of intermediate results select sum2(val), sum2_strict(val) from aggdata where valf = 0; sum2 | sum2_strict -------+------------- +--------------------------------------------------------------------- 0 | (1 row) @@ -137,13 +137,13 @@ CREATE AGGREGATE last ( ); SELECT create_distributed_function('first(anyelement)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT create_distributed_function('last(anyelement)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -154,7 +154,7 @@ ERROR: unsupported aggregate function first SELECT id, first(val ORDER BY key), last(val ORDER BY key) FROM aggdata GROUP BY id ORDER BY id; id | first | last -----+-------+------ +--------------------------------------------------------------------- 1 | 2 | 2 2 | | 3 | 2 | 2 @@ -190,13 +190,13 @@ ERROR: function "aggregate_support.sumstring(text)" does not exist CONTEXT: while executing command on localhost:xxxxx select create_distributed_function('sumstring(text)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) select sumstring(valf::text) from aggdata where valf is not null; sumstring ------------ +--------------------------------------------------------------------- 1339.59 (1 row) @@ -214,13 +214,13 @@ create aggregate array_collect_sort(el int) ( ); select create_distributed_function('array_collect_sort(int)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) select array_collect_sort(val) from aggdata; array_collect_sort -------------------------------------- +--------------------------------------------------------------------- {0,2,2,3,4,5,8,NULL,NULL,NULL,NULL} (1 row) @@ -230,7 +230,7 @@ NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. select run_command_on_workers($$create user notsuper$$); run_command_on_workers ------------------------------------ +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -242,7 +242,7 @@ grant all on schema aggregate_support to notsuper; grant all on all tables in schema aggregate_support to notsuper; $$); run_command_on_workers ---------------------------- +--------------------------------------------------------------------- (localhost,57637,t,GRANT) (localhost,57638,t,GRANT) (2 rows) @@ -250,7 +250,7 @@ $$); set role notsuper; select array_collect_sort(val) from aggdata; array_collect_sort -------------------------------------- +--------------------------------------------------------------------- {0,2,2,3,4,5,8,NULL,NULL,NULL,NULL} (1 row) diff --git a/src/test/regress/expected/alter_role_propagation.out b/src/test/regress/expected/alter_role_propagation.out index 745a4824a..e9a308e98 100644 --- a/src/test/regress/expected/alter_role_propagation.out +++ b/src/test/regress/expected/alter_role_propagation.out @@ -5,7 +5,7 @@ NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
SELECT run_command_on_workers($$CREATE ROLE alter_role_1 WITH LOGIN;$$); run_command_on_workers ------------------------------------ +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -17,13 +17,13 @@ ERROR: conflicting or redundant options ALTER ROLE alter_role_1 WITH SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 66 VALID UNTIL '2032-05-05'; SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'; row ---------------------------------------- +--------------------------------------------------------------------- (alter_role_1,t,t,t,t,t,t,t,66,,2032) (1 row) SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$); run_command_on_workers -------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"(alter_role_1,t,t,t,t,t,t,t,66,,2032)") (localhost,57638,t,"(alter_role_1,t,t,t,t,t,t,t,66,,2032)") (2 rows) @@ -32,13 +32,13 @@ SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcr ALTER ROLE alter_role_1 WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 0 VALID UNTIL '2052-05-05'; SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'; row --------------------------------------- +--------------------------------------------------------------------- (alter_role_1,f,f,f,f,f,f,f,0,,2052) (1 row) SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$); run_command_on_workers ------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"(alter_role_1,f,f,f,f,f,f,f,0,,2052)") (localhost,57638,t,"(alter_role_1,f,f,f,f,f,f,f,0,,2052)") (2 rows) @@ -52,13 +52,13 @@ ERROR: role "alter_role_2" does not exist ALTER ROLE CURRENT_USER WITH CONNECTION LIMIT 123; SELECT rolconnlimit FROM pg_authid WHERE rolname = CURRENT_USER; rolconnlimit --------------- +--------------------------------------------------------------------- 123 (1 row) SELECT run_command_on_workers($$SELECT rolconnlimit FROM pg_authid WHERE rolname = CURRENT_USER;$$); run_command_on_workers -------------------------- +--------------------------------------------------------------------- (localhost,57637,t,123) (localhost,57638,t,123) (2 rows) @@ -67,13 +67,13 @@ SELECT run_command_on_workers($$SELECT rolconnlimit FROM pg_authid WHERE rolname ALTER ROLE SESSION_USER WITH CONNECTION LIMIT 124; SELECT rolconnlimit FROM pg_authid WHERE rolname = SESSION_USER; rolconnlimit --------------- +--------------------------------------------------------------------- 124 (1 row) SELECT run_command_on_workers($$SELECT rolconnlimit FROM pg_authid 
WHERE rolname = SESSION_USER;$$); run_command_on_workers -------------------------- +--------------------------------------------------------------------- (localhost,57637,t,124) (localhost,57638,t,124) (2 rows) @@ -82,13 +82,13 @@ SELECT run_command_on_workers($$SELECT rolconnlimit FROM pg_authid WHERE rolname ALTER ROLE alter_role_1 WITH PASSWORD NULL; SELECT rolpassword is NULL FROM pg_authid WHERE rolname = 'alter_role_1'; ?column? ----------- +--------------------------------------------------------------------- t (1 row) SELECT run_command_on_workers($$SELECT rolpassword is NULL FROM pg_authid WHERE rolname = 'alter_role_1'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,t) (localhost,57638,t,t) (2 rows) @@ -96,13 +96,13 @@ SELECT run_command_on_workers($$SELECT rolpassword is NULL FROM pg_authid WHERE ALTER ROLE alter_role_1 WITH PASSWORD 'test1'; SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'; rolpassword -------------------------------------- +--------------------------------------------------------------------- md52f9cc8d65e37edcc45c4a489bdfc699d (1 row) SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'$$); run_command_on_workers ---------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,md52f9cc8d65e37edcc45c4a489bdfc699d) (localhost,57638,t,md52f9cc8d65e37edcc45c4a489bdfc699d) (2 rows) @@ -110,13 +110,13 @@ SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname ALTER ROLE alter_role_1 WITH ENCRYPTED PASSWORD 'test2'; SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'; rolpassword -------------------------------------- +--------------------------------------------------------------------- md5e17f7818c5ec023fa87bdb97fd3e842e (1 row) SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'$$); run_command_on_workers ---------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,md5e17f7818c5ec023fa87bdb97fd3e842e) (localhost,57638,t,md5e17f7818c5ec023fa87bdb97fd3e842e) (2 rows) @@ -124,13 +124,13 @@ SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname ALTER ROLE alter_role_1 WITH ENCRYPTED PASSWORD 'md59cce240038b7b335c6aa9674a6f13e72'; SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'; rolpassword -------------------------------------- +--------------------------------------------------------------------- md59cce240038b7b335c6aa9674a6f13e72 (1 row) SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'$$); run_command_on_workers ---------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,md59cce240038b7b335c6aa9674a6f13e72) (localhost,57638,t,md59cce240038b7b335c6aa9674a6f13e72) (2 rows) @@ -141,7 +141,7 @@ NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
SELECT run_command_on_workers($$CREATE ROLE "alter_role'1" WITH LOGIN;$$); run_command_on_workers ------------------------------------ +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -149,13 +149,13 @@ SELECT run_command_on_workers($$CREATE ROLE "alter_role'1" WITH LOGIN;$$); ALTER ROLE "alter_role'1" CREATEROLE; SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role''1'; rolcreaterole ---------------- +--------------------------------------------------------------------- t (1 row) SELECT run_command_on_workers($$SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role''1'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,t) (localhost,57638,t,t) (2 rows) @@ -165,7 +165,7 @@ NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. SELECT run_command_on_workers($$CREATE ROLE "alter_role""1" WITH LOGIN;$$); run_command_on_workers ------------------------------------ +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -173,13 +173,13 @@ SELECT run_command_on_workers($$CREATE ROLE "alter_role""1" WITH LOGIN;$$); ALTER ROLE "alter_role""1" CREATEROLE; SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role"1'; rolcreaterole ---------------- +--------------------------------------------------------------------- t (1 row) SELECT run_command_on_workers($$SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role"1'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,t) (localhost,57638,t,t) (2 rows) @@ -188,51 +188,51 @@ SELECT run_command_on_workers($$SELECT rolcreaterole FROM pg_authid WHERE rolnam ALTER ROLE alter_role_1 WITH SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 66 VALID UNTIL '2032-05-05' PASSWORD 'test3'; SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'; row --------------------------------------------------------------------------- +--------------------------------------------------------------------- (alter_role_1,t,t,t,t,t,t,t,66,md5ead5c53df946838b1291bba7757f41a7,2032) (1 row) SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$); run_command_on_workers ------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"(alter_role_1,t,t,t,t,t,t,t,66,md5ead5c53df946838b1291bba7757f41a7,2032)") (localhost,57638,t,"(alter_role_1,t,t,t,t,t,t,t,66,md5ead5c53df946838b1291bba7757f41a7,2032)") (2 rows) SELECT master_remove_node('localhost', :worker_1_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) ALTER ROLE alter_role_1 WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN 
NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 0 VALID UNTIL '2052-05-05' PASSWORD 'test4'; SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'; row -------------------------------------------------------------------------- +--------------------------------------------------------------------- (alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052) (1 row) SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$); run_command_on_workers ------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- (localhost,57638,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)") (1 row) SELECT 1 FROM master_add_node('localhost', :worker_1_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'; row -------------------------------------------------------------------------- +--------------------------------------------------------------------- (alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052) (1 row) SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$); run_command_on_workers ------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- (localhost,57637,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)") (localhost,57638,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)") (2 rows) diff --git a/src/test/regress/expected/base_enable_mx.out b/src/test/regress/expected/base_enable_mx.out index d4fe70c3a..17985908f 100644 --- a/src/test/regress/expected/base_enable_mx.out +++ b/src/test/regress/expected/base_enable_mx.out @@ -3,13 +3,13 @@ -- SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/bool_agg.out b/src/test/regress/expected/bool_agg.out index a8d4316af..ca634f3c2 100644 --- a/src/test/regress/expected/bool_agg.out +++ b/src/test/regress/expected/bool_agg.out @@ -4,7 +4,7 @@ SET search_path TO bool_agg; CREATE TABLE bool_test (id int, val int, flag bool, kind int); SELECT create_distributed_table('bool_agg.bool_test','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -12,13 +12,13 @@ 
INSERT INTO bool_test VALUES (1, 1, true, 99), (2, 2, false, 99), (2, 3, true, 8 -- mix of true and false SELECT bool_and(flag), bool_or(flag), every(flag) FROM bool_test; bool_and | bool_or | every -----------+---------+------- +--------------------------------------------------------------------- f | t | f (1 row) SELECT kind, bool_and(flag), bool_or(flag), every(flag) FROM bool_test GROUP BY kind ORDER BY 2; kind | bool_and | bool_or | every -------+----------+---------+------- +--------------------------------------------------------------------- 99 | f | t | f 88 | t | t | t (2 rows) @@ -26,13 +26,13 @@ SELECT kind, bool_and(flag), bool_or(flag), every(flag) FROM bool_test GROUP BY -- expressions in aggregate SELECT bool_or(val > 2 OR id < 2), bool_and(val < 3) FROM bool_test; bool_or | bool_and ----------+---------- +--------------------------------------------------------------------- t | f (1 row) SELECT kind, bool_or(val > 2 OR id < 2), bool_and(val < 3) FROM bool_test GROUP BY kind ORDER BY 3; kind | bool_or | bool_and -------+---------+---------- +--------------------------------------------------------------------- 88 | t | f 99 | t | t (2 rows) @@ -40,13 +40,13 @@ SELECT kind, bool_or(val > 2 OR id < 2), bool_and(val < 3) FROM bool_test GROUP -- 1 & 3, 1 | 3 SELECT bit_and(val), bit_or(val) FROM bool_test WHERE flag; bit_and | bit_or ----------+-------- +--------------------------------------------------------------------- 1 | 3 (1 row) SELECT flag, bit_and(val), bit_or(val) FROM bool_test GROUP BY flag ORDER BY flag; flag | bit_and | bit_or -------+---------+-------- +--------------------------------------------------------------------- f | 2 | 2 t | 1 | 3 (2 rows) diff --git a/src/test/regress/expected/ch_bench_having.out b/src/test/regress/expected/ch_bench_having.out index 4a7baeaff..f16affede 100644 --- a/src/test/regress/expected/ch_bench_having.out +++ b/src/test/regress/expected/ch_bench_having.out @@ -8,7 +8,7 @@ CREATE TABLE stock ( ); SELECT create_distributed_table('stock','s_w_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -20,7 +20,7 @@ group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; QUERY PLAN ------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- Sort Sort Key: s_i_id InitPlan 1 (returns $0) @@ -66,7 +66,7 @@ group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; QUERY PLAN ------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- Sort Sort Key: s_i_id InitPlan 1 (returns $0) @@ -99,7 +99,7 @@ from stock group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock); QUERY PLAN ------------------------------------------------------------------------------------ +--------------------------------------------------------------------- HashAggregate Group Key: s_i_id Filter: ((pg_catalog.sum(worker_column_3))::bigint > $0) @@ -130,7 +130,7 @@ group by s_i_id having (select true) order by s_i_id; QUERY PLAN -------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Sort (cost=0.00..0.00 
rows=0 width=0) Sort Key: remote_scan.s_i_id InitPlan 1 (returns $0) @@ -153,7 +153,7 @@ from stock s group by s_i_id having (select true); QUERY PLAN -------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- HashAggregate (cost=0.00..0.00 rows=0 width=0) Group Key: remote_scan.s_i_id Filter: $0 @@ -176,7 +176,7 @@ group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- (0 rows) INSERT INTO stock SELECT c, c, c FROM generate_series(1, 5) as c; @@ -187,7 +187,7 @@ group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- 3 | 3 4 | 4 5 | 5 @@ -199,7 +199,7 @@ group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- 3 | 3 4 | 4 5 | 5 @@ -212,7 +212,7 @@ group by s_i_id having (select true) order by s_i_id; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -227,7 +227,7 @@ group by s_i_id having (select false) order by s_i_id; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- (0 rows) select s_i_id, sum(s_order_cnt) as ordercount @@ -236,7 +236,7 @@ group by s_i_id having (select true) order by s_i_id; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -250,7 +250,7 @@ group by s_i_id having (select false) order by s_i_id; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- (0 rows) select s_i_id, sum(s_order_cnt) as ordercount @@ -259,7 +259,7 @@ group by s_i_id having (select true) order by s_i_id; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -310,7 +310,7 @@ insert into stock VALUES SELECT create_distributed_table('stock','s_w_id'); NOTICE: Copying data from local table... 
create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -328,7 +328,7 @@ having sum(s_order_cnt) > and n_name = 'GERMANY') order by ordercount desc; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- 33 | 1 1 | 1 (2 rows) @@ -349,7 +349,7 @@ having sum(s_order_cnt) > and n_name = 'GERMANY') order by ordercount desc; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- 1 | 100001 (1 row) diff --git a/src/test/regress/expected/ch_bench_having_mx.out b/src/test/regress/expected/ch_bench_having_mx.out index 60456d1f1..132cabce2 100644 --- a/src/test/regress/expected/ch_bench_having_mx.out +++ b/src/test/regress/expected/ch_bench_having_mx.out @@ -11,7 +11,7 @@ CREATE TABLE stock ( ); SELECT create_distributed_table('stock','s_w_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -25,7 +25,7 @@ group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; QUERY PLAN ------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- Sort Sort Key: s_i_id InitPlan 1 (returns $0) @@ -71,7 +71,7 @@ group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; QUERY PLAN ------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- Sort Sort Key: s_i_id InitPlan 1 (returns $0) @@ -104,7 +104,7 @@ from stock group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock); QUERY PLAN ------------------------------------------------------------------------------------ +--------------------------------------------------------------------- HashAggregate Group Key: s_i_id Filter: ((pg_catalog.sum(worker_column_3))::bigint > $0) @@ -135,7 +135,7 @@ group by s_i_id having (select true) order by s_i_id; QUERY PLAN -------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: remote_scan.s_i_id InitPlan 1 (returns $0) @@ -158,7 +158,7 @@ from stock s group by s_i_id having (select true); QUERY PLAN -------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- HashAggregate (cost=0.00..0.00 rows=0 width=0) Group Key: remote_scan.s_i_id Filter: $0 @@ -181,7 +181,7 @@ group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- (0 rows) INSERT INTO stock SELECT c, c, c FROM generate_series(1, 5) as c; @@ -192,7 +192,7 @@ group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- 3 | 3 4 | 4 5 | 5 @@ -204,7 +204,7 @@ group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query 
from stock) order by s_i_id; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- 3 | 3 4 | 4 5 | 5 @@ -217,7 +217,7 @@ group by s_i_id having (select true) order by s_i_id; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -232,7 +232,7 @@ group by s_i_id having (select false) order by s_i_id; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- (0 rows) select s_i_id, sum(s_order_cnt) as ordercount @@ -241,7 +241,7 @@ group by s_i_id having (select true) order by s_i_id; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -255,7 +255,7 @@ group by s_i_id having (select false) order by s_i_id; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- (0 rows) select s_i_id, sum(s_order_cnt) as ordercount @@ -264,7 +264,7 @@ group by s_i_id having (select true) order by s_i_id; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -320,7 +320,7 @@ insert into stock VALUES SELECT create_distributed_table('stock','s_w_id'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -340,7 +340,7 @@ having sum(s_order_cnt) > and n_name = 'GERMANY') order by ordercount desc; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- 33 | 1 1 | 1 (2 rows) @@ -361,7 +361,7 @@ having sum(s_order_cnt) > and n_name = 'GERMANY') order by ordercount desc; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- 1 | 100001 (1 row) diff --git a/src/test/regress/expected/ch_bench_subquery_repartition.out b/src/test/regress/expected/ch_bench_subquery_repartition.out index f2b518dd8..d2eb8381d 100644 --- a/src/test/regress/expected/ch_bench_subquery_repartition.out +++ b/src/test/regress/expected/ch_bench_subquery_repartition.out @@ -62,31 +62,31 @@ create table supplier ( ); SELECT create_distributed_table('order_line','ol_w_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('stock','s_w_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('item'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('nation'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('supplier'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -103,7 +103,7 @@ select s_i_id AND s_i_id = ol_i_id order by s_i_id; s_i_id --------- +--------------------------------------------------------------------- 1 2 3 @@ -151,7 +151,7 @@ where su_suppkey in and n_name = 'Germany' order by su_name; su_name | su_address ----------+------------ 
+--------------------------------------------------------------------- (0 rows) -- Fallback to public tables with prefilled data @@ -185,7 +185,7 @@ where s_suppkey in and n_name = 'GERMANY' order by s_name; s_name | s_address ----------------------------+------------------------------------- +--------------------------------------------------------------------- Supplier#000000033 | gfeKpYw3400L0SDywXA6Ya1Qmq1w6YB9f3R (1 row) @@ -206,7 +206,7 @@ where s_suppkey in and n_name = 'GERMANY' order by s_name; s_name | s_address ----------------------------+------------------------------------- +--------------------------------------------------------------------- Supplier#000000033 | gfeKpYw3400L0SDywXA6Ya1Qmq1w6YB9f3R Supplier#000000044 | kERxlLDnlIZJdN66zAPHklyL (2 rows) diff --git a/src/test/regress/expected/chbenchmark_all_queries.out b/src/test/regress/expected/chbenchmark_all_queries.out index 45400e8f1..8377432fa 100644 --- a/src/test/regress/expected/chbenchmark_all_queries.out +++ b/src/test/regress/expected/chbenchmark_all_queries.out @@ -146,73 +146,73 @@ CREATE TABLE supplier ( ); SELECT create_distributed_table('order_line','ol_w_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('new_order','no_w_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('stock','s_w_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('oorder','o_w_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('history','h_w_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('customer','c_w_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('district','d_w_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('warehouse','w_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('item'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('region'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('nation'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('supplier'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -246,7 +246,7 @@ WHERE ol_delivery_d > '2007-01-02 00:00:00.000000' GROUP BY ol_number ORDER BY ol_number; ol_number | sum_qty | sum_amount | avg_qty | avg_amount | count_order ------------+---------+------------+------------------------+------------------------+------------- 
+--------------------------------------------------------------------- 0 | 0 | 0.00 | 0.00000000000000000000 | 0.00000000000000000000 | 1 1 | 1 | 1.00 | 1.00000000000000000000 | 1.00000000000000000000 | 1 2 | 2 | 2.00 | 2.0000000000000000 | 2.0000000000000000 | 1 @@ -302,7 +302,7 @@ ORDER BY su_name, i_id; su_suppkey | su_name | n_name | i_id | i_name | su_address | su_phone | su_comment -------------+---------------------------+---------------------------+------+----------+------------+-----------------+------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- 9 | abc | Germany | 3 | Keyboard | def | ghi | jkl 4 | abc | The Netherlands | 2 | Keyboard | def | ghi | jkl (2 rows) @@ -339,7 +339,7 @@ ORDER BY revenue DESC, o_entry_d; ol_o_id | ol_w_id | ol_d_id | revenue | o_entry_d ----------+---------+---------+---------+-------------------------- +--------------------------------------------------------------------- 10 | 10 | 10 | 10.00 | Fri Oct 17 00:00:00 2008 9 | 9 | 9 | 9.00 | Fri Oct 17 00:00:00 2008 8 | 8 | 8 | 8.00 | Fri Oct 17 00:00:00 2008 @@ -370,7 +370,7 @@ WHERE o_entry_d >= '2007-01-02 00:00:00.000000' GROUP BY o_ol_cnt ORDER BY o_ol_cnt; o_ol_cnt | order_count -----------+------------- +--------------------------------------------------------------------- 1 | 11 (1 row) @@ -407,7 +407,7 @@ WHERE c_id = o_c_id GROUP BY n_name ORDER BY revenue DESC; n_name | revenue ----------------------------+--------- +--------------------------------------------------------------------- Germany | 3.00 The Netherlands | 2.00 (2 rows) @@ -420,7 +420,7 @@ WHERE ol_delivery_d >= '1999-01-01 00:00:00.000000' AND ol_delivery_d < '2020-01-01 00:00:00.000000' AND ol_quantity BETWEEN 1 AND 100000; revenue ---------- +--------------------------------------------------------------------- 55.00 (1 row) @@ -463,7 +463,7 @@ ORDER BY cust_nation, l_year; supp_nation | cust_nation | l_year | revenue --------------+-------------+--------+--------- +--------------------------------------------------------------------- 9 | C | 2008 | 3.00 (1 row) @@ -502,7 +502,7 @@ WHERE i_id = s_i_id GROUP BY extract(YEAR FROM o_entry_d) ORDER BY l_year; l_year | mkt_share ---------+------------------------ +--------------------------------------------------------------------- 2008 | 0.50000000000000000000 (1 row) @@ -534,7 +534,7 @@ ORDER BY n_name, l_year DESC; n_name | l_year | sum_profit ----------------------------+--------+------------ +--------------------------------------------------------------------- Germany | 2008 | 3.00 The Netherlands | 2008 | 2.00 United States | 2008 | 1.00 @@ -570,7 +570,7 @@ GROUP BY n_name ORDER BY revenue DESC; c_id | c_last | revenue | c_city | c_phone | n_name -------+--------+---------+-----------+------------------+--------------------------- +--------------------------------------------------------------------- 10 | John | 10.00 | Some City | +1 000 0000000 | Cambodia 9 | John | 9.00 | Some City | +1 000 0000000 | Cambodia 8 | John | 8.00 | Some City | +1 000 0000000 | Cambodia @@ -607,7 +607,7 @@ HAVING sum(s_order_cnt) > AND n_name = 'Germany') ORDER BY ordercount DESC; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- 3 | 3 (1 row) @@ -627,7 +627,7 @@ WHERE ol_w_id = o_w_id GROUP BY o_ol_cnt ORDER BY o_ol_cnt; o_ol_cnt | high_line_count | low_line_count -----------+-----------------+---------------- 
+--------------------------------------------------------------------- 1 | 2 | 9 (1 row) @@ -650,7 +650,7 @@ ORDER BY custdist DESC, c_count DESC; c_count | custdist ----------+---------- +--------------------------------------------------------------------- 0 | 9 1 | 2 (2 rows) @@ -665,7 +665,7 @@ WHERE ol_i_id = i_id AND ol_delivery_d >= '2007-01-02 00:00:00.000000' AND ol_delivery_d < '2020-01-02 00:00:00.000000'; promo_revenue ------------------------- +--------------------------------------------------------------------- 0.00000000000000000000 (1 row) @@ -694,7 +694,7 @@ WHERE su_suppkey = supplier_no AND total_revenue = (SELECT max(total_revenue) FROM revenue) ORDER BY su_suppkey; su_suppkey | su_name | su_address | su_phone | total_revenue -------------+---------------------------+------------+-----------------+--------------- +--------------------------------------------------------------------- 9 | abc | def | ghi | 3.00 (1 row) @@ -719,7 +719,7 @@ GROUP BY i_price ORDER BY supplier_cnt DESC; i_name | brand | i_price | supplier_cnt -----------+-------+---------+-------------- +--------------------------------------------------------------------- Keyboard | co | 50.00 | 3 (1 row) @@ -739,7 +739,7 @@ FROM GROUP BY i_id) t WHERE ol_i_id = t.i_id; avg_yearly ---------------------- +--------------------------------------------------------------------- 27.5000000000000000 (1 row) @@ -776,7 +776,7 @@ ORDER BY sum(ol_amount) DESC, o_entry_d; c_last | o_id | o_entry_d | o_ol_cnt | sum ---------+------+--------------------------+----------+------- +--------------------------------------------------------------------- John | 10 | Fri Oct 17 00:00:00 2008 | 1 | 10.00 John | 9 | Fri Oct 17 00:00:00 2008 | 1 | 9.00 John | 8 | Fri Oct 17 00:00:00 2008 | 1 | 8.00 @@ -809,7 +809,7 @@ WHERE ( ol_i_id = i_id AND i_price BETWEEN 1 AND 400000 AND ol_w_id IN (1,5,3)); revenue ---------- +--------------------------------------------------------------------- 7.00 (1 row) @@ -838,7 +838,7 @@ WHERE su_suppkey in AND n_name = 'Germany' ORDER BY su_name; su_name | su_address ----------------------------+------------ +--------------------------------------------------------------------- abc | def (1 row) @@ -873,7 +873,7 @@ ORDER BY numwait desc, su_name; su_name | numwait ----------+--------- +--------------------------------------------------------------------- (0 rows) -- Query 22 @@ -896,7 +896,7 @@ WHERE substr(c_phone,1,1) in ('1','2','3','4','5','6','7') GROUP BY substr(c_state,1,1) ORDER BY substr(c_state,1,1); country | numcust | totacctbal ----------+---------+------------ +--------------------------------------------------------------------- (0 rows) SET client_min_messages TO WARNING; diff --git a/src/test/regress/expected/coordinator_shouldhaveshards.out b/src/test/regress/expected/coordinator_shouldhaveshards.out index bc9d0e8eb..46a06f70c 100644 --- a/src/test/regress/expected/coordinator_shouldhaveshards.out +++ b/src/test/regress/expected/coordinator_shouldhaveshards.out @@ -5,14 +5,14 @@ SET search_path TO coordinator_shouldhaveshards; SET client_min_messages TO WARNING; SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) RESET client_min_messages; SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true); ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) @@ -20,14 +20,14 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE test (x int, y int); SELECT create_distributed_table('test','x', colocate_with := 'none'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard JOIN pg_dist_placement USING (shardid) WHERE logicalrelid = 'test'::regclass AND groupid = 0; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -37,20 +37,20 @@ INSERT INTO test SELECT s,s FROM generate_series(2,100) s; INSERT INTO test VALUES (1, 1); SELECT y FROM test WHERE x = 1; y ---- +--------------------------------------------------------------------- 1 (1 row) -- multi-shard queries connect to localhost SELECT count(*) FROM test; count -------- +--------------------------------------------------------------------- 100 (1 row) WITH a AS (SELECT * FROM test) SELECT count(*) FROM test; count -------- +--------------------------------------------------------------------- 100 (1 row) @@ -58,13 +58,13 @@ WITH a AS (SELECT * FROM test) SELECT count(*) FROM test; BEGIN; SELECT y FROM test WHERE x = 1; y ---- +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM test; count -------- +--------------------------------------------------------------------- 100 (1 row) @@ -72,13 +72,13 @@ END; BEGIN; SELECT y FROM test WHERE x = 1; y ---- +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM test; count -------- +--------------------------------------------------------------------- 100 (1 row) @@ -89,7 +89,7 @@ ALTER TABLE test ADD COLUMN z int; BEGIN; SELECT y FROM test WHERE x = 1; y ---- +--------------------------------------------------------------------- 1 (1 row) @@ -102,7 +102,7 @@ BEGIN; ALTER TABLE test DROP COLUMN z; SELECT y FROM test WHERE x = 1; y ---- +--------------------------------------------------------------------- 1 (1 row) @@ -112,7 +112,7 @@ DROP TABLE test; DROP SCHEMA coordinator_shouldhaveshards CASCADE; SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', false); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) diff --git a/src/test/regress/expected/cte_nested_modification.out b/src/test/regress/expected/cte_nested_modification.out index fca88d15a..f5010a62f 100644 --- a/src/test/regress/expected/cte_nested_modification.out +++ b/src/test/regress/expected/cte_nested_modification.out @@ -5,7 +5,7 @@ INSERT INTO tt1 VALUES(1,2),(2,3),(3,4); SELECT create_distributed_table('tt1','id'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -14,7 +14,7 @@ INSERT INTO tt2 VALUES(3,3),(4,4),(5,5); SELECT create_distributed_table('tt2','id'); NOTICE: Copying data from local table... 
create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -41,7 +41,7 @@ FROM cte_1 WHERE cte_1.id = tt1.id; SELECT * FROM tt1 ORDER BY id; id | value_1 -----+--------- +--------------------------------------------------------------------- 1 | 2 2 | 6 3 | 4 @@ -65,7 +65,7 @@ UPDATE tt1 SET value_1 = (SELECT max(id) + abs(2 + 3.5) FROM cte_1); SELECT * FROM tt1 ORDER BY id; id | value_1 -----+--------- +--------------------------------------------------------------------- 1 | 9 2 | 9 3 | 9 @@ -89,7 +89,7 @@ UPDATE tt1 SET value_1 = (SELECT max(id) + abs(2 + 3.5) FROM cte_1); SELECT * FROM tt1 ORDER BY id; id | value_1 -----+--------- +--------------------------------------------------------------------- 1 | 9 2 | 9 3 | 9 @@ -115,7 +115,7 @@ USING cte_1 WHERE tt1.id < cte_1.id; SELECT * FROM tt1 ORDER BY id; id | value_1 -----+--------- +--------------------------------------------------------------------- 3 | 4 (1 row) @@ -135,7 +135,7 @@ USING cte_1 WHERE tt1.id < cte_1.id; SELECT * FROM tt1 ORDER BY id; id | value_1 -----+--------- +--------------------------------------------------------------------- (0 rows) ROLLBACK; diff --git a/src/test/regress/expected/cte_prepared_modify.out b/src/test/regress/expected/cte_prepared_modify.out index 390a8d72d..560f455b6 100644 --- a/src/test/regress/expected/cte_prepared_modify.out +++ b/src/test/regress/expected/cte_prepared_modify.out @@ -5,7 +5,7 @@ INSERT INTO tt1 VALUES(1,2),(2,3),(3,4); SELECT create_distributed_table('tt1','id'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -14,7 +14,7 @@ INSERT INTO tt2 VALUES(3,3),(4,4),(5,5); SELECT create_distributed_table('tt2','id'); NOTICE: Copying data from local table... 
create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/custom_aggregate_support.out b/src/test/regress/expected/custom_aggregate_support.out index f3d8d93cf..3a17cf8e0 100644 --- a/src/test/regress/expected/custom_aggregate_support.out +++ b/src/test/regress/expected/custom_aggregate_support.out @@ -15,13 +15,13 @@ CREATE TABLE raw_table (day date, user_id int); CREATE TABLE daily_uniques(day date, unique_users hll); SELECT create_distributed_table('raw_table', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('daily_uniques', 'day'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -39,7 +39,7 @@ FROM ( SELECT hll_add_agg(hll_hash_integer(user_id)) AS agg FROM raw_table)a; hll_cardinality ------------------ +--------------------------------------------------------------------- 19 (1 row) @@ -55,7 +55,7 @@ WHERE day >= '2018-06-20' and day <= '2018-06-30' ORDER BY 2 DESC,1 LIMIT 10; day | hll_cardinality -------------+----------------- +--------------------------------------------------------------------- 06-20-2018 | 19 06-21-2018 | 19 06-22-2018 | 19 @@ -73,7 +73,7 @@ SELECT hll_cardinality(hll_union_agg(unique_users)) FROM daily_uniques WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date; hll_cardinality ------------------ +--------------------------------------------------------------------- 19 (1 row) @@ -83,7 +83,7 @@ WHERE day >= '2018-06-23' AND day <= '2018-07-01' GROUP BY 1 ORDER BY 1; month | hll_cardinality --------+----------------- +--------------------------------------------------------------------- 6 | 19 7 | 13 (2 rows) @@ -109,7 +109,7 @@ FROM daily_uniques GROUP BY(1); QUERY PLAN ------------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All @@ -143,7 +143,7 @@ FROM daily_uniques GROUP BY(1); QUERY PLAN ------------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All @@ -178,7 +178,7 @@ FROM daily_uniques GROUP BY(1); QUERY PLAN ------------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All @@ -212,7 +212,7 @@ FROM daily_uniques GROUP BY(1); QUERY PLAN ------------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All @@ -247,7 +247,7 @@ FROM daily_uniques GROUP BY(1); QUERY PLAN ------------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All @@ -281,7 +281,7 @@ FROM daily_uniques GROUP BY(1); QUERY PLAN ------------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All @@ -316,7 +316,7 @@ FROM daily_uniques GROUP BY(1); QUERY PLAN 
------------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All @@ -351,7 +351,7 @@ FROM GROUP BY(1) HAVING hll_cardinality(hll_union_agg(unique_users)) > 1; QUERY PLAN ----------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: All @@ -396,13 +396,13 @@ CREATE TABLE customer_reviews (day date, user_id int, review int); CREATE TABLE popular_reviewer(day date, reviewers jsonb); SELECT create_distributed_table('customer_reviews', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('popular_reviewer', 'day'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -422,7 +422,7 @@ FROM ( )a ORDER BY 2 DESC, 1; item | frequency -------+----------- +--------------------------------------------------------------------- 1 | 7843 2 | 7843 3 | 6851 @@ -447,7 +447,7 @@ WHERE day >= '2018-06-20' and day <= '2018-06-30' ORDER BY 3 DESC, 1, 2 LIMIT 10; day | item | frequency -------------+------+----------- +--------------------------------------------------------------------- 06-20-2018 | 1 | 248 06-20-2018 | 2 | 248 06-21-2018 | 1 | 248 @@ -469,7 +469,7 @@ FROM ( )a ORDER BY 2 DESC, 1; item | frequency -------+----------- +--------------------------------------------------------------------- 1 | 1240 2 | 1240 0 | 992 @@ -489,7 +489,7 @@ FROM ( )a ORDER BY 1, 3 DESC, 2; month | item | frequency --------+------+----------- +--------------------------------------------------------------------- 6 | 1 | 1054 6 | 2 | 1054 6 | 3 | 992 diff --git a/src/test/regress/expected/custom_aggregate_support_0.out b/src/test/regress/expected/custom_aggregate_support_0.out index 6e65bc9a9..bdc8d2e83 100644 --- a/src/test/regress/expected/custom_aggregate_support_0.out +++ b/src/test/regress/expected/custom_aggregate_support_0.out @@ -10,7 +10,7 @@ WHERE name = 'hll' \gset :create_cmd; hll_present -------------- +--------------------------------------------------------------------- f (1 row) @@ -22,7 +22,7 @@ LINE 1: CREATE TABLE daily_uniques(day date, unique_users hll); ^ SELECT create_distributed_table('raw_table', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -191,7 +191,7 @@ WHERE name = 'topn' \gset :create_topn; topn_present --------------- +--------------------------------------------------------------------- f (1 row) @@ -199,13 +199,13 @@ CREATE TABLE customer_reviews (day date, user_id int, review int); CREATE TABLE popular_reviewer(day date, reviewers jsonb); SELECT create_distributed_table('customer_reviews', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('popular_reviewer', 'day'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/custom_aggregate_support_1.out b/src/test/regress/expected/custom_aggregate_support_1.out index 3d3a15f52..fa0c82288 100644 --- 
a/src/test/regress/expected/custom_aggregate_support_1.out +++ b/src/test/regress/expected/custom_aggregate_support_1.out @@ -15,13 +15,13 @@ CREATE TABLE raw_table (day date, user_id int); CREATE TABLE daily_uniques(day date, unique_users hll); SELECT create_distributed_table('raw_table', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('daily_uniques', 'day'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -39,7 +39,7 @@ FROM ( SELECT hll_add_agg(hll_hash_integer(user_id)) AS agg FROM raw_table)a; hll_cardinality ------------------ +--------------------------------------------------------------------- 19 (1 row) @@ -55,7 +55,7 @@ WHERE day >= '2018-06-20' and day <= '2018-06-30' ORDER BY 2 DESC,1 LIMIT 10; day | hll_cardinality -------------+----------------- +--------------------------------------------------------------------- 06-20-2018 | 19 06-21-2018 | 19 06-22-2018 | 19 @@ -73,7 +73,7 @@ SELECT hll_cardinality(hll_union_agg(unique_users)) FROM daily_uniques WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date; hll_cardinality ------------------ +--------------------------------------------------------------------- 19 (1 row) @@ -83,7 +83,7 @@ WHERE day >= '2018-06-23' AND day <= '2018-07-01' GROUP BY 1 ORDER BY 1; month | hll_cardinality --------+----------------- +--------------------------------------------------------------------- 6 | 19 7 | 13 (2 rows) @@ -109,7 +109,7 @@ FROM daily_uniques GROUP BY(1); QUERY PLAN ------------------------------------------------------------------------- +--------------------------------------------------------------------- HashAggregate Group Key: remote_scan.day -> Custom Scan (Citus Adaptive) @@ -145,7 +145,7 @@ FROM daily_uniques GROUP BY(1); QUERY PLAN ------------------------------------------------------------------------------- +--------------------------------------------------------------------- GroupAggregate Group Key: remote_scan.day -> Sort @@ -184,7 +184,7 @@ FROM daily_uniques GROUP BY(1); QUERY PLAN ------------------------------------------------------------------------- +--------------------------------------------------------------------- HashAggregate Group Key: remote_scan.day -> Custom Scan (Citus Adaptive) @@ -220,7 +220,7 @@ FROM daily_uniques GROUP BY(1); QUERY PLAN ------------------------------------------------------------------------------- +--------------------------------------------------------------------- GroupAggregate Group Key: remote_scan.day -> Sort @@ -259,7 +259,7 @@ FROM daily_uniques GROUP BY(1); QUERY PLAN ------------------------------------------------------------------------- +--------------------------------------------------------------------- HashAggregate Group Key: remote_scan.day -> Custom Scan (Citus Adaptive) @@ -295,7 +295,7 @@ FROM daily_uniques GROUP BY(1); QUERY PLAN ------------------------------------------------------------------------------- +--------------------------------------------------------------------- GroupAggregate Group Key: remote_scan.day -> Sort @@ -334,7 +334,7 @@ FROM daily_uniques GROUP BY(1); QUERY PLAN ------------------------------------------------------------------------- +--------------------------------------------------------------------- HashAggregate Group Key: remote_scan.day -> Custom Scan (Citus Adaptive) @@ -371,7 +371,7 @@ FROM 
GROUP BY(1) HAVING hll_cardinality(hll_union_agg(unique_users)) > 1; QUERY PLAN ----------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- GroupAggregate Group Key: remote_scan.day Filter: (hll_cardinality(hll_union_agg(remote_scan.worker_column_3)) > '1'::double precision) @@ -429,13 +429,13 @@ CREATE TABLE customer_reviews (day date, user_id int, review int); CREATE TABLE popular_reviewer(day date, reviewers jsonb); SELECT create_distributed_table('customer_reviews', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('popular_reviewer', 'day'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -455,7 +455,7 @@ FROM ( )a ORDER BY 2 DESC, 1; item | frequency -------+----------- +--------------------------------------------------------------------- 1 | 7843 2 | 7843 3 | 6851 @@ -480,7 +480,7 @@ WHERE day >= '2018-06-20' and day <= '2018-06-30' ORDER BY 3 DESC, 1, 2 LIMIT 10; day | item | frequency -------------+------+----------- +--------------------------------------------------------------------- 06-20-2018 | 1 | 248 06-20-2018 | 2 | 248 06-21-2018 | 1 | 248 @@ -502,7 +502,7 @@ FROM ( )a ORDER BY 2 DESC, 1; item | frequency -------+----------- +--------------------------------------------------------------------- 1 | 1240 2 | 1240 0 | 992 @@ -522,7 +522,7 @@ FROM ( )a ORDER BY 1, 3 DESC, 2; month | item | frequency --------+------+----------- +--------------------------------------------------------------------- 6 | 1 | 1054 6 | 2 | 1054 6 | 3 | 992 @@ -542,7 +542,7 @@ FROM popular_reviewer WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date ORDER BY 2 DESC, 1; item | frequency -------+----------- +--------------------------------------------------------------------- 1 | 1240 2 | 1240 0 | 992 @@ -556,7 +556,7 @@ SELECT (topn(topn_add_agg(user_id::text), 10)).* FROM customer_reviews ORDER BY 2 DESC, 1; item | frequency -------+----------- +--------------------------------------------------------------------- 1 | 7843 2 | 7843 3 | 6851 diff --git a/src/test/regress/expected/disable_object_propagation.out b/src/test/regress/expected/disable_object_propagation.out index 224666408..c654de960 100644 --- a/src/test/regress/expected/disable_object_propagation.out +++ b/src/test/regress/expected/disable_object_propagation.out @@ -10,7 +10,7 @@ SET search_path TO disabled_object_propagation; CREATE TABLE t1 (a int PRIMARY KEY , b int); SELECT create_distributed_table('t1','a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -27,14 +27,14 @@ SELECT 1 FROM run_command_on_workers($$ COMMIT; $$); ?column? ----------- +--------------------------------------------------------------------- 1 1 (2 rows) SELECT create_distributed_table('t2', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -51,14 +51,14 @@ SELECT 1 FROM run_command_on_workers($$ COMMIT; $$); ?column? 
----------- +--------------------------------------------------------------------- 1 1 (2 rows) SELECT create_distributed_table('t3', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -70,7 +70,7 @@ CREATE TYPE tt3 AS (a int, b int); CREATE TABLE t4 (a int PRIMARY KEY, b tt3); SELECT create_distributed_table('t4','a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -79,7 +79,7 @@ COMMIT; -- verify the type is distributed SELECT count(*) FROM citus.pg_dist_object WHERE objid = 'disabled_object_propagation.tt3'::regtype::oid; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -98,7 +98,7 @@ SELECT row(nspname, typname, usename) WHERE typname = 'tt3'; $$); run_command_on_workers ------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"(disabled_object_propagation,tt3,postgres)") (localhost,57638,t,"(disabled_object_propagation,tt3,postgres)") (2 rows) @@ -113,7 +113,7 @@ SELECT run_command_on_workers($$ GROUP BY pg_type.typname; $$); run_command_on_workers ------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"(tt3,""a int4, b int4"")") (localhost,57638,t,"(tt3,""a int4, b int4"")") (2 rows) diff --git a/src/test/regress/expected/distributed_collations.out b/src/test/regress/expected/distributed_collations.out index 1562f8232..0ca0361b4 100644 --- a/src/test/regress/expected/distributed_collations.out +++ b/src/test/regress/expected/distributed_collations.out @@ -4,7 +4,7 @@ NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. SELECT run_command_on_workers($$CREATE USER collationuser;$$); run_command_on_workers ------------------------------------ +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -24,7 +24,7 @@ JOIN pg_authid a ON a.oid = c.collowner WHERE collname like 'german_phonebook%' ORDER BY 1,2,3; collname | nspname | rolname -------------------+-----------------+---------- +--------------------------------------------------------------------- german_phonebook | collation_tests | postgres (1 row) @@ -36,20 +36,20 @@ INSERT INTO test_propagate VALUES (1, 'aesop', U&'\00E4sop'), (2, U&'Vo\1E9Er', SELECT create_distributed_table('test_propagate', 'id'); NOTICE: Copying data from local table... 
create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) -- Test COLLATE is pushed down SELECT * FROM collation_tests.test_propagate WHERE t2 < 'b'; id | t1 | t2 -----+-------+------ +--------------------------------------------------------------------- 1 | aesop | äsop (1 row) SELECT * FROM collation_tests.test_propagate WHERE t2 < 'b' COLLATE "C"; id | t1 | t2 -----+------+------- +--------------------------------------------------------------------- 2 | Voẞr | Vossr (1 row) @@ -57,7 +57,7 @@ SELECT * FROM collation_tests.test_propagate WHERE t2 < 'b' COLLATE "C"; CREATE TABLE test_range(key text COLLATE german_phonebook, val int); SELECT create_distributed_table('test_range', 'key', 'range'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -77,7 +77,7 @@ SELECT * FROM test_range WHERE key > 'Ab' AND key < U&'\00E4z'; DEBUG: Creating router plan DEBUG: Plan is router executable key | val -------+----- +--------------------------------------------------------------------- äsop | 1 (1 row) @@ -89,7 +89,7 @@ JOIN pg_authid a ON a.oid = c.collowner WHERE collname like 'german_phonebook%' ORDER BY 1,2,3; collname | nspname | rolname --------------------------------+-----------------+---------- +--------------------------------------------------------------------- german_phonebook | collation_tests | postgres german_phonebook_unpropagated | collation_tests | postgres (2 rows) @@ -106,7 +106,7 @@ JOIN pg_authid a ON a.oid = c.collowner WHERE collname like 'german_phonebook%' ORDER BY 1,2,3; collname | nspname | rolname --------------------------------+------------------+--------------- +--------------------------------------------------------------------- german_phonebook2 | collation_tests2 | collationuser german_phonebook_unpropagated | collation_tests | postgres (2 rows) @@ -128,7 +128,7 @@ DROP SCHEMA collation_tests2 CASCADE; DROP USER collationuser; SELECT run_command_on_workers($$DROP USER collationuser;$$); run_command_on_workers ---------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"DROP ROLE") (localhost,57638,t,"DROP ROLE") (2 rows) diff --git a/src/test/regress/expected/distributed_collations_conflict.out b/src/test/regress/expected/distributed_collations_conflict.out index 77ac86d42..dd0624558 100644 --- a/src/test/regress/expected/distributed_collations_conflict.out +++ b/src/test/regress/expected/distributed_collations_conflict.out @@ -1,7 +1,7 @@ CREATE SCHEMA collation_conflict; SELECT run_command_on_workers($$CREATE SCHEMA collation_conflict;$$); run_command_on_workers -------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"CREATE SCHEMA") (localhost,57638,t,"CREATE SCHEMA") (2 rows) @@ -21,7 +21,7 @@ CREATE COLLATION caseinsensitive ( CREATE TABLE tblcoll(val text COLLATE caseinsensitive); SELECT create_reference_table('tblcoll'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -33,7 +33,7 @@ JOIN pg_authid a ON a.oid = c.collowner WHERE collname like 'caseinsensitive%' ORDER BY 1,2,3; collname | nspname | rolname ------------------+--------------------+---------- +--------------------------------------------------------------------- caseinsensitive | collation_conflict | postgres (1 row) @@ 
-59,7 +59,7 @@ CREATE COLLATION caseinsensitive ( CREATE TABLE tblcoll(val text COLLATE caseinsensitive); SELECT create_reference_table('tblcoll'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -71,7 +71,7 @@ JOIN pg_authid a ON a.oid = c.collowner WHERE collname like 'caseinsensitive%' ORDER BY 1,2,3; collname | nspname | rolname ----------------------------------+--------------------+---------- +--------------------------------------------------------------------- caseinsensitive | collation_conflict | postgres caseinsensitive(citus_backup_0) | collation_conflict | postgres (2 rows) @@ -81,13 +81,13 @@ SET search_path TO collation_conflict; -- now test worker_create_or_replace_object directly SELECT worker_create_or_replace_object($$CREATE COLLATION collation_conflict.caseinsensitive (provider = 'icu', lc_collate = 'und-u-ks-level2', lc_ctype = 'und-u-ks-level2')$$); worker_create_or_replace_object ---------------------------------- +--------------------------------------------------------------------- f (1 row) SELECT worker_create_or_replace_object($$CREATE COLLATION collation_conflict.caseinsensitive (provider = 'icu', lc_collate = 'und-u-ks-level2', lc_ctype = 'und-u-ks-level2')$$); worker_create_or_replace_object ---------------------------------- +--------------------------------------------------------------------- f (1 row) diff --git a/src/test/regress/expected/distributed_functions.out b/src/test/regress/expected/distributed_functions.out index b537b8fb2..8c9c2b9ce 100644 --- a/src/test/regress/expected/distributed_functions.out +++ b/src/test/regress/expected/distributed_functions.out @@ -4,7 +4,7 @@ NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
SELECT run_command_on_workers($$CREATE USER functionuser;$$); run_command_on_workers ------------------------------------ +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -131,7 +131,7 @@ SET citus.replication_model TO 'statement'; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('statement_table','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -141,7 +141,7 @@ SET citus.replication_model TO 'streaming'; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('streaming_table','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -149,7 +149,7 @@ SELECT create_distributed_table('streaming_table','id'); -- at the start of the test select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary'; bool_or ---------- +--------------------------------------------------------------------- f (1 row) @@ -157,21 +157,21 @@ select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'pr -- distribution_argument_index and colocationid SELECT create_distributed_function('"add_mi''xed_param_names"(int, int)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT distribution_argument_index is NULL, colocationid is NULL from citus.pg_dist_object WHERE objid = 'add_mi''xed_param_names(int, int)'::regprocedure; ?column? | ?column? -----------+---------- +--------------------------------------------------------------------- t | t (1 row) -- also show that we can use the function SELECT * FROM run_command_on_workers('SELECT function_tests."add_mi''xed_param_names"(2,3);') ORDER BY 1,2; nodename | nodeport | success | result ------------+----------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | t | 5 localhost | 57638 | t | 5 (2 rows) @@ -180,7 +180,7 @@ SELECT * FROM run_command_on_workers('SELECT function_tests."add_mi''xed_param_n -- since the function doesn't have a parameter select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary'; bool_or ---------- +--------------------------------------------------------------------- f (1 row) @@ -203,52 +203,52 @@ END; -- try to co-locate with a table that uses streaming replication SELECT create_distributed_function('dup(int)', '$1', colocate_with := 'streaming_table'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM run_command_on_workers('SELECT function_tests.dup(42);') ORDER BY 1,2; nodename | nodeport | success | result ------------+----------+---------+------------------- +--------------------------------------------------------------------- localhost | 57637 | t | (42,"42 is text") localhost | 57638 | t | (42,"42 is text") (2 rows) SELECT create_distributed_function('add(int,int)', '$1', colocate_with := 'streaming_table'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY 1,2; nodename | nodeport | success | result 
------------+----------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | t | 5 localhost | 57638 | t | 5 (2 rows) SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) -- distribute aggregate SELECT create_distributed_function('sum2(int)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT create_distributed_function('my_rank("any")'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT create_distributed_function('agg_names(dup_result,dup_result)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -258,21 +258,21 @@ SELECT create_distributed_function('agg_names(dup_result,dup_result)'); ALTER FUNCTION add(int,int) CALLED ON NULL INPUT IMMUTABLE SECURITY INVOKER PARALLEL UNSAFE LEAKPROOF COST 5; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) RETURNS NULL ON NULL INPUT STABLE SECURITY DEFINER PARALLEL RESTRICTED; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) STRICT VOLATILE PARALLEL SAFE; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) @@ -280,49 +280,49 @@ SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); ALTER FUNCTION add(int,int) SET client_min_messages TO warning; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) SET client_min_messages TO error; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) SET client_min_messages TO debug; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) RESET client_min_messages; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) SET "citus.setting;'" TO 'hello '' world'; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); 
verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) RESET "citus.setting;'"; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) SET search_path TO 'sch'';ma', public; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) @@ -333,7 +333,7 @@ ERROR: unsupported ALTER FUNCTION ... SET ... FROM CURRENT for a distributed fu HINT: SET FROM CURRENT is not supported for distributed functions, instead use the SET ... TO ... syntax with a constant value. SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) @@ -342,7 +342,7 @@ ERROR: unsupported ALTER FUNCTION ... SET ... FROM CURRENT for a distributed fu HINT: SET FROM CURRENT is not supported for distributed functions, instead use the SET ... TO ... syntax with a constant value. SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) @@ -351,7 +351,7 @@ ERROR: unsupported ALTER FUNCTION ... SET ... FROM CURRENT for a distributed fu HINT: SET FROM CURRENT is not supported for distributed functions, instead use the SET ... TO ... syntax with a constant value. 
SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) @@ -359,20 +359,20 @@ SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); ALTER FUNCTION add(int,int) RENAME TO add2; SELECT public.verify_function_is_same_on_workers('function_tests.add2(int,int)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY 1,2; nodename | nodeport | success | result ------------+----------+---------+---------------------------------------------------------------------- +--------------------------------------------------------------------- localhost | 57637 | f | ERROR: function function_tests.add(integer, integer) does not exist localhost | 57638 | f | ERROR: function function_tests.add(integer, integer) does not exist (2 rows) SELECT * FROM run_command_on_workers('SELECT function_tests.add2(2,3);') ORDER BY 1,2; nodename | nodeport | success | result ------------+----------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | t | 5 localhost | 57638 | t | 5 (2 rows) @@ -381,7 +381,7 @@ ALTER FUNCTION add2(int,int) RENAME TO add; ALTER AGGREGATE sum2(int) RENAME TO sum27; SELECT * FROM run_command_on_workers($$SELECT 1 from pg_proc where proname = 'sum27';$$) ORDER BY 1,2; nodename | nodeport | success | result ------------+----------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | t | 1 localhost | 57638 | t | 1 (2 rows) @@ -391,7 +391,7 @@ ALTER AGGREGATE sum27(int) RENAME TO sum2; ALTER FUNCTION add(int,int) OWNER TO functionuser; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) @@ -404,7 +404,7 @@ JOIN pg_namespace ON (pg_namespace.oid = pronamespace and nspname = 'function_te WHERE proname = 'add'; $$); run_command_on_workers ---------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"(functionuser,function_tests,add)") (localhost,57638,t,"(functionuser,function_tests,add)") (2 rows) @@ -417,7 +417,7 @@ JOIN pg_namespace ON (pg_namespace.oid = pronamespace and nspname = 'function_te WHERE proname = 'sum2'; $$); run_command_on_workers ----------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"(functionuser,function_tests,sum2)") (localhost,57638,t,"(functionuser,function_tests,sum2)") (2 rows) @@ -427,20 +427,20 @@ $$); ALTER FUNCTION add(int,int) SET SCHEMA function_tests2; SELECT public.verify_function_is_same_on_workers('function_tests2.add(int,int)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY 1,2; nodename | nodeport | success | result ------------+----------+---------+---------------------------------------------------------------------- 
+--------------------------------------------------------------------- localhost | 57637 | f | ERROR: function function_tests.add(integer, integer) does not exist localhost | 57638 | f | ERROR: function function_tests.add(integer, integer) does not exist (2 rows) SELECT * FROM run_command_on_workers('SELECT function_tests2.add(2,3);') ORDER BY 1,2; nodename | nodeport | success | result ------------+----------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | t | 5 localhost | 57638 | t | 5 (2 rows) @@ -455,13 +455,13 @@ AS 'select $1 * $2;' -- I know, this is not an add, but the output will tell us RETURNS NULL ON NULL INPUT; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY 1,2; nodename | nodeport | success | result ------------+----------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | t | 6 localhost | 57638 | t | 6 (2 rows) @@ -478,7 +478,7 @@ DROP FUNCTION add(int,int); -- call should fail as function should have been dropped SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY 1,2; nodename | nodeport | success | result ------------+----------+---------+---------------------------------------------------------------------- +--------------------------------------------------------------------- localhost | 57637 | f | ERROR: function function_tests.add(integer, integer) does not exist localhost | 57638 | f | ERROR: function function_tests.add(integer, integer) does not exist (2 rows) @@ -487,7 +487,7 @@ DROP AGGREGATE function_tests2.sum2(int); -- call should fail as aggregate should have been dropped SELECT * FROM run_command_on_workers('SELECT function_tests2.sum2(id) FROM (select 1 id, 2) subq;') ORDER BY 1,2; nodename | nodeport | success | result ------------+----------+---------+--------------------------------------------------------------- +--------------------------------------------------------------------- localhost | 57637 | f | ERROR: function function_tests2.sum2(integer) does not exist localhost | 57638 | f | ERROR: function function_tests2.sum2(integer) does not exist (2 rows) @@ -541,7 +541,7 @@ HINT: Either provide a valid function argument name or a valid "$paramIndex" to BEGIN; SELECT create_distributed_function('add_with_param_names(int, int)', distribution_arg_name:='val1'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -549,7 +549,7 @@ ROLLBACK; -- make sure that none of the nodes have the function because we've rollbacked SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add_with_param_names';$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,0) (localhost,57638,t,0) (2 rows) @@ -557,28 +557,28 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add_ -- make sure that none of the active and primary nodes hasmetadata select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary'; bool_or ---------- +--------------------------------------------------------------------- t (1 row) -- valid 
distribution with distribution_arg_name SELECT create_distributed_function('add_with_param_names(int, int)', distribution_arg_name:='val1'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) -- make sure that the primary nodes are now metadata synced select bool_and(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary'; bool_and ----------- +--------------------------------------------------------------------- t (1 row) -- make sure that both of the nodes have the function because we've succeeded SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add_with_param_names';$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,1) (localhost,57638,t,1) (2 rows) @@ -586,14 +586,14 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add_ -- valid distribution with distribution_arg_name -- case insensitive SELECT create_distributed_function('add_with_param_names(int, int)', distribution_arg_name:='VaL1'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) -- valid distribution with distribution_arg_index SELECT create_distributed_function('add_with_param_names(int, int)','$1'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -603,7 +603,7 @@ CREATE TABLE replicated_table_func_test (a int); SET citus.replication_model TO "statement"; SELECT create_distributed_table('replicated_table_func_test', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -613,7 +613,7 @@ DETAIL: Citus currently only supports colocating function with distributed tabl HINT: When distributing tables make sure that citus.replication_model = 'streaming' SELECT public.wait_until_metadata_sync(); wait_until_metadata_sync --------------------------- +--------------------------------------------------------------------- (1 row) @@ -624,13 +624,13 @@ CREATE TABLE replicated_table_func_test_2 (a bigint); SET citus.replication_model TO "streaming"; SELECT create_distributed_table('replicated_table_func_test_2', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_function('add_with_param_names(int, int)', 'val1', colocate_with:='replicated_table_func_test_2'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -645,7 +645,7 @@ ERROR: relation replicated_table_func_test_3 is not distributed -- a function cannot be colocated with a reference table SELECT create_reference_table('replicated_table_func_test_3'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -657,13 +657,13 @@ CREATE TABLE replicated_table_func_test_4 (a int); SET citus.replication_model TO "streaming"; SELECT create_distributed_table('replicated_table_func_test_4', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_function('add_with_param_names(int, int)', '$1', 
colocate_with:='replicated_table_func_test_4'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -673,7 +673,7 @@ FROM pg_dist_partition, citus.pg_dist_object as objects WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass AND objects.objid = 'add_with_param_names(int, int)'::regprocedure; table_and_function_colocated ------------------------------- +--------------------------------------------------------------------- t (1 row) @@ -681,7 +681,7 @@ WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass -- group preserved, because we're using the default shard creation settings SELECT create_distributed_function('add_with_param_names(int, int)', 'val1'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -690,7 +690,7 @@ FROM pg_dist_partition, citus.pg_dist_object as objects WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass AND objects.objid = 'add_with_param_names(int, int)'::regprocedure; table_and_function_colocated ------------------------------- +--------------------------------------------------------------------- t (1 row) @@ -700,7 +700,7 @@ WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass -- to coerce the values SELECT create_distributed_function('add_numeric(numeric, numeric)', '$1', colocate_with:='replicated_table_func_test_4'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -709,13 +709,13 @@ FROM pg_dist_partition, citus.pg_dist_object as objects WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass AND objects.objid = 'add_numeric(numeric, numeric)'::regprocedure; table_and_function_colocated ------------------------------- +--------------------------------------------------------------------- t (1 row) SELECT create_distributed_function('add_text(text, text)', '$1', colocate_with:='replicated_table_func_test_4'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -724,7 +724,7 @@ FROM pg_dist_partition, citus.pg_dist_object as objects WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass AND objects.objid = 'add_text(text, text)'::regprocedure; table_and_function_colocated ------------------------------- +--------------------------------------------------------------------- t (1 row) @@ -741,7 +741,7 @@ HINT: Provide a distributed table via "colocate_with" option to create_distribu -- sync metadata to workers for consistent results when clearing objects SELECT public.wait_until_metadata_sync(); wait_until_metadata_sync --------------------------- +--------------------------------------------------------------------- (1 row) @@ -750,7 +750,7 @@ SET citus.shard_count TO 4; CREATE TABLE test (id int, name text); SELECT create_distributed_table('test','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -765,7 +765,7 @@ END; $$ LANGUAGE plpgsql; SELECT create_distributed_function('increment(int)', '$1', colocate_with := 'test'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ 
-780,13 +780,13 @@ END; $$ LANGUAGE plpgsql; SELECT test_func_calls_dist_func(); test_func_calls_dist_func ---------------------------- +--------------------------------------------------------------------- (1 row) SELECT test_func_calls_dist_func(); test_func_calls_dist_func ---------------------------- +--------------------------------------------------------------------- (1 row) @@ -794,7 +794,7 @@ SELECT test_func_calls_dist_func(); INSERT INTO test SELECT increment(3); SELECT * FROM test ORDER BY id; id | name -----+------- +--------------------------------------------------------------------- 3 | three 4 | (2 rows) @@ -806,7 +806,7 @@ DROP SCHEMA function_tests2 CASCADE; -- clear objects SELECT stop_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isactive AND noderole = 'primary'; stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (2 rows) @@ -829,7 +829,7 @@ DROP SCHEMA function_tests2 CASCADE; DROP USER functionuser; SELECT run_command_on_workers($$DROP USER functionuser$$); run_command_on_workers ---------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"DROP ROLE") (localhost,57638,t,"DROP ROLE") (2 rows) diff --git a/src/test/regress/expected/distributed_functions_conflict.out b/src/test/regress/expected/distributed_functions_conflict.out index 18823379f..73c503c0b 100644 --- a/src/test/regress/expected/distributed_functions_conflict.out +++ b/src/test/regress/expected/distributed_functions_conflict.out @@ -3,7 +3,7 @@ CREATE SCHEMA proc_conflict; SELECT run_command_on_workers($$CREATE SCHEMA proc_conflict;$$); run_command_on_workers -------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"CREATE SCHEMA") (localhost,57638,t,"CREATE SCHEMA") (2 rows) @@ -32,7 +32,7 @@ CREATE AGGREGATE existing_agg(int) ( ); SELECT create_distributed_function('existing_agg(int)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -45,7 +45,7 @@ WITH data (val) AS ( ) SELECT existing_agg(val) FROM data; existing_agg --------------- +--------------------------------------------------------------------- 78 (1 row) @@ -58,7 +58,7 @@ WITH data (val) AS ( ) SELECT existing_agg(val) FROM data; existing_agg --------------- +--------------------------------------------------------------------- 78 (1 row) @@ -91,7 +91,7 @@ CREATE AGGREGATE existing_agg(int) ( ); SELECT create_distributed_function('existing_agg(int)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -104,7 +104,7 @@ WITH data (val) AS ( ) SELECT existing_agg(val) FROM data; existing_agg --------------- +--------------------------------------------------------------------- 76 (1 row) @@ -117,7 +117,7 @@ WITH data (val) AS ( ) SELECT existing_agg(val) FROM data; existing_agg --------------- +--------------------------------------------------------------------- 76 (1 row) @@ -129,13 +129,13 @@ END; $$ LANGUAGE plpgsql STRICT IMMUTABLE; SELECT worker_create_or_replace_object('CREATE AGGREGATE proc_conflict.existing_agg(integer) (STYPE = integer,SFUNC = proc_conflict.existing_func2)'); worker_create_or_replace_object ---------------------------------- +--------------------------------------------------------------------- t (1 row) 
SELECT worker_create_or_replace_object('CREATE AGGREGATE proc_conflict.existing_agg(integer) (STYPE = integer,SFUNC = proc_conflict.existing_func2)'); worker_create_or_replace_object ---------------------------------- +--------------------------------------------------------------------- f (1 row) diff --git a/src/test/regress/expected/distributed_procedure.out b/src/test/regress/expected/distributed_procedure.out index 31258530c..6e0f572d0 100644 --- a/src/test/regress/expected/distributed_procedure.out +++ b/src/test/regress/expected/distributed_procedure.out @@ -4,7 +4,7 @@ NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. SELECT run_command_on_workers($$CREATE USER procedureuser;$$); run_command_on_workers ------------------------------------ +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -25,7 +25,7 @@ ALTER SYSTEM SET citus.metadata_sync_interval TO 3000; ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) @@ -39,32 +39,32 @@ SET citus.replication_model TO 'streaming'; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('colocation_table','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_function('raise_info(text)', '$1', colocate_with := 'colocation_table'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT wait_until_metadata_sync(); wait_until_metadata_sync --------------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2; nodename | nodeport | success | result ------------+----------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | t | CALL localhost | 57638 | t | CALL (2 rows) SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) @@ -74,14 +74,14 @@ SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(tex ALTER PROCEDURE raise_info(text) SECURITY INVOKER; SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) ALTER PROCEDURE raise_info(text) SECURITY DEFINER; SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) @@ -89,28 +89,28 @@ SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(tex ALTER PROCEDURE raise_info(text) SET client_min_messages TO warning; SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)'); verify_function_is_same_on_workers ------------------------------------- 
+--------------------------------------------------------------------- t (1 row) ALTER PROCEDURE raise_info(text) SET client_min_messages TO error; SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) ALTER PROCEDURE raise_info(text) SET client_min_messages TO debug; SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) ALTER PROCEDURE raise_info(text) RESET client_min_messages; SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) @@ -118,20 +118,20 @@ SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(tex ALTER PROCEDURE raise_info(text) RENAME TO raise_info2; SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info2(text)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2; nodename | nodeport | success | result ------------+----------+---------+---------------------------------------------------------------------- +--------------------------------------------------------------------- localhost | 57637 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist localhost | 57638 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist (2 rows) SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info2('hello');$$) ORDER BY 1,2; nodename | nodeport | success | result ------------+----------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | t | CALL localhost | 57638 | t | CALL (2 rows) @@ -141,7 +141,7 @@ ALTER PROCEDURE raise_info2(text) RENAME TO raise_info; ALTER PROCEDURE raise_info(text) OWNER TO procedureuser; SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) @@ -153,7 +153,7 @@ JOIN pg_namespace ON (pg_namespace.oid = pronamespace) WHERE proname = 'raise_info'; $$); run_command_on_workers ------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"(procedureuser,procedure_tests,raise_info)") (localhost,57638,t,"(procedureuser,procedure_tests,raise_info)") (2 rows) @@ -163,20 +163,20 @@ $$); ALTER PROCEDURE raise_info(text) SET SCHEMA procedure_tests2; SELECT public.verify_function_is_same_on_workers('procedure_tests2.raise_info(text)'); verify_function_is_same_on_workers ------------------------------------- +--------------------------------------------------------------------- t (1 row) SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2; nodename | nodeport | success | result 
------------+----------+---------+---------------------------------------------------------------------- +--------------------------------------------------------------------- localhost | 57637 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist localhost | 57638 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist (2 rows) SELECT * FROM run_command_on_workers($$CALL procedure_tests2.raise_info('hello');$$) ORDER BY 1,2; nodename | nodeport | success | result ------------+----------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | t | CALL localhost | 57638 | t | CALL (2 rows) @@ -186,7 +186,7 @@ DROP PROCEDURE raise_info(text); -- call should fail as procedure should have been dropped SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2; nodename | nodeport | success | result ------------+----------+---------+---------------------------------------------------------------------- +--------------------------------------------------------------------- localhost | 57637 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist localhost | 57638 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist (2 rows) @@ -195,7 +195,7 @@ SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA procedure_tests CASCADE; SELECT run_command_on_workers($$DROP SCHEMA procedure_tests CASCADE;$$); run_command_on_workers ------------------------------------ +--------------------------------------------------------------------- (localhost,57637,t,"DROP SCHEMA") (localhost,57638,t,"DROP SCHEMA") (2 rows) @@ -203,7 +203,7 @@ SELECT run_command_on_workers($$DROP SCHEMA procedure_tests CASCADE;$$); DROP SCHEMA procedure_tests2 CASCADE; SELECT run_command_on_workers($$DROP SCHEMA procedure_tests2 CASCADE;$$); run_command_on_workers ------------------------------------ +--------------------------------------------------------------------- (localhost,57637,t,"DROP SCHEMA") (localhost,57638,t,"DROP SCHEMA") (2 rows) @@ -211,7 +211,7 @@ SELECT run_command_on_workers($$DROP SCHEMA procedure_tests2 CASCADE;$$); DROP USER procedureuser; SELECT run_command_on_workers($$DROP USER procedureuser;$$); run_command_on_workers ---------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"DROP ROLE") (localhost,57638,t,"DROP ROLE") (2 rows) diff --git a/src/test/regress/expected/distributed_types.out b/src/test/regress/expected/distributed_types.out index 571e40645..ccdc05328 100644 --- a/src/test/regress/expected/distributed_types.out +++ b/src/test/regress/expected/distributed_types.out @@ -4,7 +4,7 @@ NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
SELECT run_command_on_workers($$CREATE USER typeuser;$$); run_command_on_workers ------------------------------------ +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -18,14 +18,14 @@ CREATE TYPE tc1 AS (a int, b int); CREATE TABLE t1 (a int PRIMARY KEY, b tc1); SELECT create_distributed_table('t1','a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) INSERT INTO t1 VALUES (1, (2,3)::tc1); SELECT * FROM t1; a | b ----+------- +--------------------------------------------------------------------- 1 | (2,3) (1 row) @@ -38,14 +38,14 @@ CREATE TYPE te1 AS ENUM ('one', 'two', 'three'); CREATE TABLE t2 (a int PRIMARY KEY, b te1); SELECT create_distributed_table('t2','a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) INSERT INTO t2 VALUES (1, 'two'); SELECT * FROM t2; a | b ----+----- +--------------------------------------------------------------------- 1 | two (1 row) @@ -56,7 +56,7 @@ ALTER TYPE te1_newname ADD VALUE 'four'; UPDATE t2 SET b = 'four'; SELECT * FROM t2; a | b ----+------ +--------------------------------------------------------------------- 1 | four (1 row) @@ -69,14 +69,14 @@ CREATE TYPE tc2 AS (a int, b int); CREATE TABLE t3 (a int PRIMARY KEY, b tc2); SELECT create_distributed_table('t3','a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) INSERT INTO t3 VALUES (4, (5,6)::tc2); SELECT * FROM t3; a | b ----+------- +--------------------------------------------------------------------- 4 | (5,6) (1 row) @@ -87,14 +87,14 @@ CREATE TYPE te2 AS ENUM ('yes', 'no'); CREATE TABLE t4 (a int PRIMARY KEY, b te2); SELECT create_distributed_table('t4','a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) INSERT INTO t4 VALUES (1, 'yes'); SELECT * FROM t4; a | b ----+----- +--------------------------------------------------------------------- 1 | yes (1 row) @@ -103,13 +103,13 @@ COMMIT; -- verify order of enum labels SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'type_tests.te2'::regtype; string_agg ------------- +--------------------------------------------------------------------- yes,no (1 row) SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'type_tests.te2'::regtype;$$); run_command_on_workers ------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"yes,no") (localhost,57638,t,"yes,no") (2 rows) @@ -125,7 +125,7 @@ RESET citus.enable_ddl_propagation; CREATE TABLE t5 (a int PRIMARY KEY, b tc5[], c te3); SELECT create_distributed_table('t5','a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -145,7 +145,7 @@ INSERT INTO t5 VALUES (1, NULL, 'a', 'd', (1,2,(4,5)::tc6c)::tc6); ALTER TYPE tc6 RENAME ATTRIBUTE b TO d; SELECT (e::tc6).d FROM t5 ORDER BY 1; d ---- +--------------------------------------------------------------------- 2 (1 row) @@ -153,13 +153,13 @@ SELECT (e::tc6).d FROM t5 ORDER BY 1; ALTER TYPE te4 OWNER TO typeuser; SELECT typname, usename FROM pg_type, pg_user where 
typname = 'te4' and typowner = usesysid; typname | usename ----------+---------- +--------------------------------------------------------------------- te4 | typeuser (1 row) SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'te4' and typowner = usesysid;$$); run_command_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"(te4,typeuser)") (localhost,57638,t,"(te4,typeuser)") (2 rows) @@ -167,13 +167,13 @@ SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_us ALTER TYPE tc6 OWNER TO typeuser; SELECT typname, usename FROM pg_type, pg_user where typname = 'tc6' and typowner = usesysid; typname | usename ----------+---------- +--------------------------------------------------------------------- tc6 | typeuser (1 row) SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'tc6' and typowner = usesysid;$$); run_command_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"(tc6,typeuser)") (localhost,57638,t,"(tc6,typeuser)") (2 rows) @@ -191,7 +191,7 @@ RESET citus.enable_ddl_propagation; CREATE TABLE t6 (a int, b tc8, c te6); SELECT create_distributed_table('t6', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -199,52 +199,52 @@ RESET ROLE; -- test ownership of all types SELECT typname, usename FROM pg_type, pg_user where typname = 'tc7' and typowner = usesysid; typname | usename ----------+---------- +--------------------------------------------------------------------- tc7 | typeuser (1 row) SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'tc7' and typowner = usesysid;$$); run_command_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"(tc7,typeuser)") (localhost,57638,t,"(tc7,typeuser)") (2 rows) SELECT typname, usename FROM pg_type, pg_user where typname = 'te5' and typowner = usesysid; typname | usename ----------+---------- +--------------------------------------------------------------------- te5 | typeuser (1 row) SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'te5' and typowner = usesysid;$$); run_command_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"(te5,typeuser)") (localhost,57638,t,"(te5,typeuser)") (2 rows) SELECT typname, usename FROM pg_type, pg_user where typname = 'tc8' and typowner = usesysid; typname | usename ----------+---------- +--------------------------------------------------------------------- tc8 | typeuser (1 row) SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'tc8' and typowner = usesysid;$$); run_command_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"(tc8,typeuser)") (localhost,57638,t,"(tc8,typeuser)") (2 rows) SELECT typname, usename FROM pg_type, pg_user where typname = 'te6' and typowner = usesysid; typname | usename ----------+---------- +--------------------------------------------------------------------- te6 | typeuser (1 row) SELECT 
run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'te6' and typowner = usesysid;$$); run_command_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"(te6,typeuser)") (localhost,57638,t,"(te6,typeuser)") (2 rows) @@ -258,12 +258,12 @@ NOTICE: drop cascades to column b of table t5 -- test if the types are deleted SELECT typname FROM pg_type, pg_user where typname IN ('te3','tc3','tc4','tc5') and typowner = usesysid ORDER BY typname; typname ---------- +--------------------------------------------------------------------- (0 rows) SELECT run_command_on_workers($$SELECT typname FROM pg_type, pg_user where typname IN ('te3','tc3','tc4','tc5') and typowner = usesysid ORDER BY typname;$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"") (localhost,57638,t,"") (2 rows) @@ -302,7 +302,7 @@ CREATE TYPE distributed_enum_type AS ENUM ('a', 'c'); CREATE TABLE type_proc (a int, b distributed_composite_type, c distributed_enum_type); SELECT create_distributed_table('type_proc','a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -331,13 +331,13 @@ CREATE TYPE feature_flag_enum_type AS ENUM ('a', 'b'); -- verify types do not exist on workers SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type'); count -------- +--------------------------------------------------------------------- 2 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type');$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,0) (localhost,57638,t,0) (2 rows) @@ -346,19 +346,19 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname IN (' CREATE TABLE feature_flag_table (a int PRIMARY KEY, b feature_flag_composite_type, c feature_flag_enum_type); SELECT create_distributed_table('feature_flag_table','a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type'); count -------- +--------------------------------------------------------------------- 2 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type');$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,2) (localhost,57638,t,2) (2 rows) @@ -369,7 +369,7 @@ SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA type_tests CASCADE; SELECT run_command_on_workers($$DROP SCHEMA type_tests CASCADE;$$); run_command_on_workers ------------------------------------ +--------------------------------------------------------------------- (localhost,57637,t,"DROP SCHEMA") (localhost,57638,t,"DROP SCHEMA") (2 rows) @@ -377,7 +377,7 @@ SELECT run_command_on_workers($$DROP SCHEMA type_tests CASCADE;$$); DROP SCHEMA type_tests2 CASCADE; SELECT run_command_on_workers($$DROP SCHEMA type_tests2 CASCADE;$$); run_command_on_workers ------------------------------------ 
+--------------------------------------------------------------------- (localhost,57637,t,"DROP SCHEMA") (localhost,57638,t,"DROP SCHEMA") (2 rows) @@ -385,7 +385,7 @@ SELECT run_command_on_workers($$DROP SCHEMA type_tests2 CASCADE;$$); DROP USER typeuser; SELECT run_command_on_workers($$DROP USER typeuser;$$); run_command_on_workers ---------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"DROP ROLE") (localhost,57638,t,"DROP ROLE") (2 rows) diff --git a/src/test/regress/expected/distributed_types_conflict.out b/src/test/regress/expected/distributed_types_conflict.out index 8b0459d1d..1750ef2a0 100644 --- a/src/test/regress/expected/distributed_types_conflict.out +++ b/src/test/regress/expected/distributed_types_conflict.out @@ -2,7 +2,7 @@ SET citus.next_shard_id TO 20020000; CREATE SCHEMA type_conflict; SELECT run_command_on_workers($$CREATE SCHEMA type_conflict;$$); run_command_on_workers -------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"CREATE SCHEMA") (localhost,57638,t,"CREATE SCHEMA") (2 rows) @@ -34,14 +34,14 @@ SET search_path TO type_conflict; AND attnum > 0 ORDER BY attnum; relname | attname | typname --------------+---------+---------------------------------- +--------------------------------------------------------------------- local_table | a | int4 local_table | b | my_precious_type(citus_backup_0) (2 rows) SELECT * FROM local_table; a | b -----+---------------------------- +--------------------------------------------------------------------- 42 | ("always bring a towel",t) (1 row) @@ -50,37 +50,37 @@ SET search_path TO type_conflict; -- make sure worker_create_or_replace correctly generates new names while types are existing SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type AS (a int, b int);'); worker_create_or_replace_object ---------------------------------- +--------------------------------------------------------------------- t (1 row) SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type AS (a int, b int, c int);'); worker_create_or_replace_object ---------------------------------- +--------------------------------------------------------------------- t (1 row) SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type AS (a int, b int, c int, d int);'); worker_create_or_replace_object ---------------------------------- +--------------------------------------------------------------------- t (1 row) SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type_with_a_really_long_name_that_truncates AS (a int, b int);'); worker_create_or_replace_object ---------------------------------- +--------------------------------------------------------------------- t (1 row) SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type_with_a_really_long_name_that_truncates AS (a int, b int, c int);'); worker_create_or_replace_object ---------------------------------- +--------------------------------------------------------------------- t (1 row) SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type_with_a_really_long_name_that_truncates AS (a int, b int, c int, d int);'); worker_create_or_replace_object ---------------------------------- +--------------------------------------------------------------------- t (1 row) @@ -94,7 +94,7 @@ FROM 
pg_attribute WHERE pg_type.typname LIKE 'multi_conflicting_type%' GROUP BY pg_type.typname; typname | fields ------------------------------------------------------------------+-------------------------------- +--------------------------------------------------------------------- multi_conflicting_type | a int4, b int4, c int4, d int4 multi_conflicting_type(citus_backup_0) | a int4, b int4 multi_conflicting_type(citus_backup_1) | a int4, b int4, c int4 diff --git a/src/test/regress/expected/distributed_types_xact_add_enum_value.out b/src/test/regress/expected/distributed_types_xact_add_enum_value.out index c5e818c36..2a84e67de 100644 --- a/src/test/regress/expected/distributed_types_xact_add_enum_value.out +++ b/src/test/regress/expected/distributed_types_xact_add_enum_value.out @@ -1,7 +1,7 @@ SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int > 11 AS version_above_eleven; version_above_eleven ----------------------- +--------------------------------------------------------------------- t (1 row) @@ -15,14 +15,14 @@ CREATE TYPE xact_enum_edit AS ENUM ('yes', 'no'); CREATE TABLE t1 (a int PRIMARY KEY, b xact_enum_edit); SELECT create_distributed_table('t1','a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) INSERT INTO t1 VALUES (1, 'yes'); SELECT * FROM t1; a | b ----+----- +--------------------------------------------------------------------- 1 | yes (1 row) @@ -33,13 +33,13 @@ ABORT; -- maybe should not be on the workers SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype; string_agg ------------- +--------------------------------------------------------------------- yes,no (1 row) SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$); run_command_on_workers ------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"yes,no") (localhost,57638,t,"yes,no") (2 rows) @@ -50,13 +50,13 @@ COMMIT; -- maybe should be on the workers (pg12 and above) SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype; string_agg --------------- +--------------------------------------------------------------------- yes,no,maybe (1 row) SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$); run_command_on_workers ------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"yes,no,maybe") (localhost,57638,t,"yes,no,maybe") (2 rows) @@ -66,7 +66,7 @@ SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA xact_enum_type CASCADE; SELECT run_command_on_workers($$DROP SCHEMA xact_enum_type CASCADE;$$); run_command_on_workers ------------------------------------ +--------------------------------------------------------------------- (localhost,57637,t,"DROP SCHEMA") (localhost,57638,t,"DROP SCHEMA") (2 rows) diff --git a/src/test/regress/expected/distributed_types_xact_add_enum_value_0.out b/src/test/regress/expected/distributed_types_xact_add_enum_value_0.out index 934dcaf06..0a9db443e 100644 --- a/src/test/regress/expected/distributed_types_xact_add_enum_value_0.out +++ 
b/src/test/regress/expected/distributed_types_xact_add_enum_value_0.out @@ -1,7 +1,7 @@ SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int > 11 AS version_above_eleven; version_above_eleven ----------------------- +--------------------------------------------------------------------- f (1 row) @@ -15,14 +15,14 @@ CREATE TYPE xact_enum_edit AS ENUM ('yes', 'no'); CREATE TABLE t1 (a int PRIMARY KEY, b xact_enum_edit); SELECT create_distributed_table('t1','a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) INSERT INTO t1 VALUES (1, 'yes'); SELECT * FROM t1; a | b ----+----- +--------------------------------------------------------------------- 1 | yes (1 row) @@ -34,13 +34,13 @@ ABORT; -- maybe should not be on the workers SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype; string_agg ------------- +--------------------------------------------------------------------- yes,no (1 row) SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$); run_command_on_workers ------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"yes,no") (localhost,57638,t,"yes,no") (2 rows) @@ -52,13 +52,13 @@ COMMIT; -- maybe should be on the workers (pg12 and above) SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype; string_agg ------------- +--------------------------------------------------------------------- yes,no (1 row) SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$); run_command_on_workers ------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"yes,no") (localhost,57638,t,"yes,no") (2 rows) @@ -68,7 +68,7 @@ SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA xact_enum_type CASCADE; SELECT run_command_on_workers($$DROP SCHEMA xact_enum_type CASCADE;$$); run_command_on_workers ------------------------------------ +--------------------------------------------------------------------- (localhost,57637,t,"DROP SCHEMA") (localhost,57638,t,"DROP SCHEMA") (2 rows) diff --git a/src/test/regress/expected/dml_recursive.out b/src/test/regress/expected/dml_recursive.out index 3b0b322a4..0196aa2c9 100644 --- a/src/test/regress/expected/dml_recursive.out +++ b/src/test/regress/expected/dml_recursive.out @@ -4,21 +4,21 @@ SET citus.next_shard_id TO 2370000; CREATE TABLE recursive_dml_queries.distributed_table (tenant_id text, dept int, info jsonb); SELECT create_distributed_table('distributed_table', 'tenant_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE recursive_dml_queries.second_distributed_table (tenant_id text, dept int, info jsonb); SELECT create_distributed_table('second_distributed_table', 'tenant_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE recursive_dml_queries.reference_table (id text, name text); SELECT create_reference_table('reference_table'); 
create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -55,7 +55,7 @@ RETURNING DEBUG: generating subplan 4_1 for subquery SELECT avg((tenant_id)::integer) AS avg_tenant_id FROM recursive_dml_queries.second_distributed_table DEBUG: Plan 4 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.reference_table SET name = ('new_'::text OPERATOR(pg_catalog.||) reference_table.name) FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) reference_table.id) RETURNING reference_table.name name -------------- +--------------------------------------------------------------------- new_user_50 (1 row) @@ -88,7 +88,7 @@ RETURNING DEBUG: generating subplan 6_1 for subquery SELECT DISTINCT ON (tenant_id) tenant_id, max(dept) AS max_dept FROM (SELECT second_distributed_table.dept, second_distributed_table.tenant_id FROM recursive_dml_queries.second_distributed_table, recursive_dml_queries.distributed_table WHERE (distributed_table.tenant_id OPERATOR(pg_catalog.=) second_distributed_table.tenant_id)) foo_inner GROUP BY tenant_id ORDER BY tenant_id DESC DEBUG: Plan 6 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.second_distributed_table SET dept = (foo.max_dept OPERATOR(pg_catalog.*) 2) FROM (SELECT intermediate_result.tenant_id, intermediate_result.max_dept FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, max_dept integer)) foo WHERE ((foo.tenant_id OPERATOR(pg_catalog.<>) second_distributed_table.tenant_id) AND (second_distributed_table.dept OPERATOR(pg_catalog.=) 2)) RETURNING second_distributed_table.tenant_id, second_distributed_table.dept tenant_id | dept ------------+------ +--------------------------------------------------------------------- 12 | 18 2 | 18 22 | 18 @@ -157,7 +157,7 @@ RETURNING DEBUG: generating subplan 11_1 for subquery SELECT avg((id)::integer) AS avg_tenant_id FROM recursive_dml_queries.local_table DEBUG: Plan 11 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.distributed_table SET dept = (foo.avg_tenant_id)::integer FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) distributed_table.tenant_id) RETURNING distributed_table.tenant_id, distributed_table.dept, distributed_table.info tenant_id | dept | info ------------+------+------------------------ +--------------------------------------------------------------------- 50 | 50 | {"f1": 50, "f2": 2500} (1 row) @@ -180,7 +180,7 @@ RETURNING DEBUG: generating subplan 12_1 for subquery SELECT avg((tenant_id)::integer) AS avg_tenant_id FROM (SELECT distributed_table.tenant_id, reference_table.name FROM recursive_dml_queries.distributed_table, recursive_dml_queries.reference_table WHERE ((distributed_table.dept)::text OPERATOR(pg_catalog.=) reference_table.id) ORDER BY reference_table.name DESC, distributed_table.tenant_id DESC) tenant_ids DEBUG: Plan 12 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.distributed_table SET dept = (foo.avg_tenant_id)::integer FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('12_1'::text, 
'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) distributed_table.tenant_id) RETURNING distributed_table.tenant_id, distributed_table.dept, distributed_table.info tenant_id | dept | info ------------+------+------------------------ +--------------------------------------------------------------------- 50 | 50 | {"f1": 50, "f2": 2500} (1 row) @@ -213,7 +213,7 @@ foo_inner_1 JOIN LATERAL ON (foo_inner_2.tenant_id != foo_inner_1.tenant_id) ORDER BY foo_inner_1.tenant_id; tenant_id ------------ +--------------------------------------------------------------------- 14 24 34 diff --git a/src/test/regress/expected/ensure_no_intermediate_data_leak.out b/src/test/regress/expected/ensure_no_intermediate_data_leak.out index 4b9fb7a3a..b4ed5abd5 100644 --- a/src/test/regress/expected/ensure_no_intermediate_data_leak.out +++ b/src/test/regress/expected/ensure_no_intermediate_data_leak.out @@ -1,19 +1,19 @@ ------- +--------------------------------------------------------------------- -- THIS TEST SHOULD IDEALLY BE EXECUTED AT THE END OF -- THE REGRESSION TEST SUITE TO MAKE SURE THAT WE -- CLEAR ALL INTERMEDIATE RESULTS ON BOTH THE COORDINATOR -- AND ON THE WORKERS. HOWEVER, WE HAVE SOME ISSUES AROUND -- WINDOWS SUPPORT, FAILURES IN TASK-TRACKER EXECUTOR -- SO WE DISABLE THIS TEST ON WINDOWS ------- +--------------------------------------------------------------------- SELECT pg_ls_dir('base/pgsql_job_cache') WHERE citus_version() NOT ILIKE '%windows%'; pg_ls_dir ------------ +--------------------------------------------------------------------- (0 rows) SELECT run_command_on_workers($$SELECT pg_ls_dir('base/pgsql_job_cache') WHERE citus_version() NOT ILIKE '%windows%'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"") (localhost,57638,t,"") (2 rows) diff --git a/src/test/regress/expected/escape_extension_name.out b/src/test/regress/expected/escape_extension_name.out index 1ecd9bb0c..4968d02cd 100644 --- a/src/test/regress/expected/escape_extension_name.out +++ b/src/test/regress/expected/escape_extension_name.out @@ -15,7 +15,7 @@ WHERE name = 'uuid-ossp' -- show that the extension is created on both nodes SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,1) (localhost,57638,t,1) (2 rows) @@ -26,7 +26,7 @@ RESET client_min_messages; -- show that the extension is dropped from both nodes SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,0) (localhost,57638,t,0) (2 rows) @@ -34,7 +34,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname -- show that extension recreation on new nodes works also fine with extension names that require escaping SELECT 1 from master_remove_node('localhost', :worker_2_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -51,14 +51,14 @@ WHERE name = 'uuid-ossp' -- and add the other node SELECT 1 from master_add_node('localhost', :worker_2_port); ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) -- show that the extension exists on both nodes SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,1) (localhost,57638,t,1) (2 rows) diff --git a/src/test/regress/expected/escape_extension_name_0.out b/src/test/regress/expected/escape_extension_name_0.out index 343520a9a..e6c43039e 100644 --- a/src/test/regress/expected/escape_extension_name_0.out +++ b/src/test/regress/expected/escape_extension_name_0.out @@ -13,14 +13,14 @@ WHERE name = 'uuid-ossp' \gset :uuid_present_command; uuid_ossp_present -------------------- +--------------------------------------------------------------------- f (1 row) -- show that the extension is created on both nodes SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,0) (localhost,57638,t,0) (2 rows) @@ -32,7 +32,7 @@ RESET client_min_messages; -- show that the extension is dropped from both nodes SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,0) (localhost,57638,t,0) (2 rows) @@ -40,7 +40,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname -- show that extension recreation on new nodes works also fine with extension names that require escaping SELECT 1 from master_remove_node('localhost', :worker_2_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -55,21 +55,21 @@ WHERE name = 'uuid-ossp' \gset :uuid_present_command; uuid_ossp_present -------------------- +--------------------------------------------------------------------- f (1 row) -- and add the other node SELECT 1 from master_add_node('localhost', :worker_2_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) -- show that the extension exists on both nodes SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,0) (localhost,57638,t,0) (2 rows) diff --git a/src/test/regress/expected/expression_reference_join.out b/src/test/regress/expected/expression_reference_join.out index d290a27f7..794a0e2ad 100644 --- a/src/test/regress/expected/expression_reference_join.out +++ b/src/test/regress/expected/expression_reference_join.out @@ -14,14 +14,14 @@ INSERT INTO test VALUES SELECT create_reference_table('ref'); NOTICE: Copying data from local table... create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test', 'x'); NOTICE: Copying data from local table... 
create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -34,7 +34,7 @@ FROM WHERE t2.y * 2 = a.a ORDER BY 1,2,3; y | x | x | a | b ----+---+---+---+--- +--------------------------------------------------------------------- 2 | 1 | 1 | 4 | 4 2 | 1 | 2 | 4 | 4 2 | 2 | 1 | 4 | 4 @@ -54,7 +54,7 @@ FROM WHERE t2.y - a.a - b.b = 0 ORDER BY 1,2,3; y | x | x | a | b | a | b ----+---+---+---+---+---+--- +--------------------------------------------------------------------- (0 rows) -- The join clause is wider than it used to be, causing this query to be recognized by the LogicalPlanner as a repartition join. diff --git a/src/test/regress/expected/failure_1pc_copy_append.out b/src/test/regress/expected/failure_1pc_copy_append.out index c631809d7..5d4150341 100644 --- a/src/test/regress/expected/failure_1pc_copy_append.out +++ b/src/test/regress/expected/failure_1pc_copy_append.out @@ -1,6 +1,6 @@ SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -14,26 +14,26 @@ ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100; CREATE TABLE copy_test (key int, value int); SELECT create_distributed_table('copy_test', 'key', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT citus.clear_network_traffic(); clear_network_traffic ------------------------ +--------------------------------------------------------------------- (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; SELECT count(1) FROM copy_test; count -------- +--------------------------------------------------------------------- 4 (1 row) SELECT citus.dump_network_traffic(); dump_network_traffic ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- (0,coordinator,"[initial message]") (0,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=postgres)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=postgres)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']") (0,coordinator,"[""Query(query=SELECT worker_apply_shard_ddl_command (100400, 'CREATE TABLE public.copy_test (key integer, value integer)'))""]") @@ -59,14 +59,14 @@ SELECT citus.dump_network_traffic(); ---- all of the 
following tests test behavior with 2 shard placements ---- SHOW citus.shard_replication_factor; citus.shard_replication_factor --------------------------------- +--------------------------------------------------------------------- 2 (1 row) ---- kill the connection when we try to create the shard ---- SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -79,21 +79,21 @@ SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid ---------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+------------- +--------------------------------------------------------------------- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101 (2 rows) SELECT count(1) FROM copy_test; count -------- +--------------------------------------------------------------------- 4 (1 row) ---- kill the connection when we try to start a transaction ---- SELECT citus.mitmproxy('conn.onQuery(query="assign_distributed_transaction_id").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -105,21 +105,21 @@ SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid ---------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+------------- +--------------------------------------------------------------------- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101 (2 rows) SELECT count(1) FROM copy_test; count -------- +--------------------------------------------------------------------- 4 (1 row) ---- kill the connection when we start the COPY ---- SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -132,21 +132,21 @@ SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid ---------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+------------- +--------------------------------------------------------------------- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101 (2 rows) SELECT count(1) FROM copy_test; count -------- +--------------------------------------------------------------------- 4 (1 row) ---- kill the connection when we send the data ---- SELECT citus.mitmproxy('conn.onCopyData().kill()'); 
mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -156,14 +156,14 @@ SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid ---------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+------------- +--------------------------------------------------------------------- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101 (2 rows) SELECT citus.mitmproxy('conn.onQuery(query="SELECT|COPY").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -173,14 +173,14 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. count -------- +--------------------------------------------------------------------- 4 (1 row) ---- cancel the connection when we send the data ---- SELECT citus.mitmproxy('conn.onQuery(query="SELECT|COPY").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -190,7 +190,7 @@ SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid ---------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+------------- +--------------------------------------------------------------------- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101 (2 rows) @@ -200,7 +200,7 @@ ERROR: canceling statement due to user request ---- kill the connection when we try to get the size of the table ---- SELECT citus.mitmproxy('conn.onQuery(query="pg_table_size").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -216,21 +216,21 @@ SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid ---------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+------------- +--------------------------------------------------------------------- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101 (2 rows) SELECT count(1) FROM copy_test; count -------- +--------------------------------------------------------------------- 4 (1 row) ---- kill the connection when we try to get the min, max of the table ---- SELECT citus.mitmproxy('conn.onQuery(query="SELECT min\(key\), max\(key\)").kill()'); mitmproxy ------------ 
+--------------------------------------------------------------------- (1 row) @@ -246,21 +246,21 @@ SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid ---------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+------------- +--------------------------------------------------------------------- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101 (2 rows) SELECT count(1) FROM copy_test; count -------- +--------------------------------------------------------------------- 4 (1 row) ---- kill the connection when we try to COMMIT ---- SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -274,7 +274,7 @@ SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid ---------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+------------- +--------------------------------------------------------------------- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101 copy_test | 100408 | t | 0 | 3 | 100408 | 1 | 8192 | localhost | 57637 | 112 @@ -283,14 +283,14 @@ SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p SELECT count(1) FROM copy_test; count -------- +--------------------------------------------------------------------- 8 (1 row) -- ==== Clean up, we're done here ==== SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/failure_1pc_copy_hash.out b/src/test/regress/expected/failure_1pc_copy_hash.out index 4d7bc558d..8e672cdc6 100644 --- a/src/test/regress/expected/failure_1pc_copy_hash.out +++ b/src/test/regress/expected/failure_1pc_copy_hash.out @@ -1,6 +1,6 @@ SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -15,26 +15,26 @@ ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100; CREATE TABLE copy_test (key int, value int); SELECT create_distributed_table('copy_test', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT citus.clear_network_traffic(); clear_network_traffic ------------------------ +--------------------------------------------------------------------- (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; SELECT count(1) FROM copy_test; count -------- +--------------------------------------------------------------------- 4 (1 row) SELECT citus.dump_network_traffic(); dump_network_traffic 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- (0,coordinator,"[initial message]") (0,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=postgres)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=postgres)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']") (0,coordinator,"[""Query(query=BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(0, XX, 'XXXX-XX-XX XX:XX:XX.XXXXXX-XX');)""]") @@ -55,7 +55,7 @@ SELECT citus.dump_network_traffic(); -- the query should abort SELECT citus.mitmproxy('conn.onQuery(query="assign_distributed_transaction").killall()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -69,7 +69,7 @@ CONTEXT: COPY copy_test, line 1: "0, 0" -- the query should abort SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -83,7 +83,7 @@ COPY copy_test, line 1: "0, 0" -- the query should abort SELECT citus.mitmproxy('conn.onCopyData().killall()'); -- raw rows from the client mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -93,7 +93,7 @@ ERROR: failed to COPY to shard xxxxx on localhost:xxxxx -- the query should abort SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").killall()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -103,7 +103,7 @@ ERROR: failed to COPY to shard xxxxx on localhost:xxxxx -- the query should succeed, and the placement should be marked inactive SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -111,19 +111,19 @@ SELECT count(1) FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'copy_test'::regclass ) AND shardstate = 3; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(1) FROM copy_test; count -------- +--------------------------------------------------------------------- 4 (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT$").killall()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ 
-136,7 +136,7 @@ CONTEXT: while executing command on localhost:xxxxx -- the shard is marked invalid SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -144,13 +144,13 @@ SELECT count(1) FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'copy_test'::regclass ) AND shardstate = 3; count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT count(1) FROM copy_test; count -------- +--------------------------------------------------------------------- 8 (1 row) @@ -170,7 +170,7 @@ CONTEXT: COPY copy_test, line 5: "10" -- kill the connection if the coordinator sends COMMIT. It doesn't, so nothing changes SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT$").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -179,13 +179,13 @@ ERROR: missing data for column "value" CONTEXT: COPY copy_test, line 5: "10" SELECT * FROM copy_test ORDER BY key, value; key | value ------+------- +--------------------------------------------------------------------- (0 rows) -- ==== clean up some more to prepare for tests with only one replica ==== SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -195,7 +195,7 @@ SELECT * FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'copy_test'::regclass ) ORDER BY nodeport, placementid; shardid | shardstate | shardlength | nodename | nodeport | placementid ----------+------------+-------------+-----------+----------+------------- +--------------------------------------------------------------------- 100400 | 1 | 0 | localhost | 9060 | 100 100400 | 3 | 0 | localhost | 57637 | 101 (2 rows) @@ -204,7 +204,7 @@ SELECT * FROM pg_dist_shard_placement WHERE shardid IN ( COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; SELECT * FROM copy_test; key | value ------+------- +--------------------------------------------------------------------- 0 | 0 1 | 1 2 | 4 @@ -214,7 +214,7 @@ SELECT * FROM copy_test; -- the worker is unreachable SELECT citus.mitmproxy('conn.killall()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -228,13 +228,13 @@ ERROR: could not connect to any active placements CONTEXT: COPY copy_test, line 1: "0, 0" SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM copy_test; key | value ------+------- +--------------------------------------------------------------------- 0 | 0 1 | 1 2 | 4 @@ -244,7 +244,7 @@ SELECT * FROM copy_test; -- the first message fails SELECT citus.mitmproxy('conn.onQuery(query="assign_distributed_transaction_id").killall()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -256,13 +256,13 @@ ERROR: failure on connection marked as essential: localhost:xxxxx CONTEXT: COPY copy_test, line 1: "0, 0" SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM copy_test; key | value ------+------- +--------------------------------------------------------------------- 0 | 0 1 | 1 2 | 4 @@ -272,7 +272,7 @@ SELECT * 
FROM copy_test; -- the COPY message fails SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -284,13 +284,13 @@ CONTEXT: while executing command on localhost:xxxxx COPY copy_test, line 1: "0, 0" SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM copy_test; key | value ------+------- +--------------------------------------------------------------------- 0 | 0 1 | 1 2 | 4 @@ -300,7 +300,7 @@ SELECT * FROM copy_test; -- the COPY data fails SELECT citus.mitmproxy('conn.onCopyData().killall()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -308,13 +308,13 @@ COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' W ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM copy_test; key | value ------+------- +--------------------------------------------------------------------- 0 | 0 1 | 1 2 | 4 @@ -324,7 +324,7 @@ SELECT * FROM copy_test; -- the COMMIT fails SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT$").killall()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -338,13 +338,13 @@ WARNING: could not commit transaction for shard xxxxx on any active node ERROR: could not commit transaction on any active node SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM copy_test; key | value ------+------- +--------------------------------------------------------------------- 0 | 0 1 | 1 2 | 4 @@ -356,7 +356,7 @@ SELECT * FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'copy_test'::regclass ) ORDER BY nodeport, placementid; shardid | shardstate | shardlength | nodename | nodeport | placementid ----------+------------+-------------+-----------+----------+------------- +--------------------------------------------------------------------- 100400 | 1 | 0 | localhost | 9060 | 100 100400 | 3 | 0 | localhost | 57637 | 101 (2 rows) @@ -364,7 +364,7 @@ SELECT * FROM pg_dist_shard_placement WHERE shardid IN ( -- the COMMIT makes it through but the connection dies before we get a response SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT").killall()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -378,7 +378,7 @@ WARNING: could not commit transaction for shard xxxxx on any active node ERROR: could not commit transaction on any active node SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -386,14 +386,14 @@ SELECT * FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'copy_test'::regclass ) ORDER BY nodeport, placementid; shardid | shardstate | shardlength | nodename | nodeport | placementid ----------+------------+-------------+-----------+----------+------------- +--------------------------------------------------------------------- 100400 | 1 | 0 | localhost | 9060 | 100 100400 | 3 | 0 | localhost | 57637 | 101 (2 
rows) SELECT * FROM copy_test; key | value ------+------- +--------------------------------------------------------------------- 0 | 0 1 | 1 2 | 4 @@ -407,7 +407,7 @@ SELECT * FROM copy_test; -- ==== Clean up, we're done here ==== SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/failure_add_disable_node.out b/src/test/regress/expected/failure_add_disable_node.out index c19df311c..8ca5e31da 100644 --- a/src/test/regress/expected/failure_add_disable_node.out +++ b/src/test/regress/expected/failure_add_disable_node.out @@ -6,7 +6,7 @@ -- SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -15,7 +15,7 @@ SET citus.next_shard_id TO 200000; SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; node_name | node_port ------------+----------- +--------------------------------------------------------------------- localhost | 9060 localhost | 57637 (2 rows) @@ -23,7 +23,7 @@ ORDER BY 1, 2; -- verify there are no tables that could prevent add/remove node operations SELECT * FROM pg_dist_partition; logicalrelid | partmethod | partkey | colocationid | repmodel ---------------+------------+---------+--------------+---------- +--------------------------------------------------------------------- (0 rows) CREATE SCHEMA add_remove_node; @@ -31,14 +31,14 @@ SET SEARCH_PATH=add_remove_node; CREATE TABLE user_table(user_id int, user_name text); SELECT create_reference_table('user_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE event_table(user_id int, event_id int, event_name text); SELECT create_distributed_table('event_table', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -47,7 +47,7 @@ FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; shardid | shardstate ----------+------------ +--------------------------------------------------------------------- 200000 | 1 200000 | 1 (2 rows) @@ -55,14 +55,14 @@ ORDER BY placementid; SELECT master_disable_node('localhost', :worker_2_proxy_port); NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 9060) to activate this node back. 
master_disable_node ---------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; node_name | node_port ------------+----------- +--------------------------------------------------------------------- localhost | 57637 (1 row) @@ -71,14 +71,14 @@ FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; shardid | shardstate ----------+------------ +--------------------------------------------------------------------- 200000 | 1 (1 row) -- fail activate node by failing reference table creation SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -90,7 +90,7 @@ ERROR: server closed the connection unexpectedly CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -98,7 +98,7 @@ SELECT citus.mitmproxy('conn.allow()'); SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; node_name | node_port ------------+----------- +--------------------------------------------------------------------- localhost | 57637 (1 row) @@ -107,14 +107,14 @@ FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; shardid | shardstate ----------+------------ +--------------------------------------------------------------------- 200000 | 1 (1 row) -- fail create schema command SELECT citus.mitmproxy('conn.onQuery(query="CREATE SCHEMA").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -127,7 +127,7 @@ CONTEXT: while executing command on localhost:xxxxx SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; node_name | node_port ------------+----------- +--------------------------------------------------------------------- localhost | 57637 (1 row) @@ -136,14 +136,14 @@ FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; shardid | shardstate ----------+------------ +--------------------------------------------------------------------- 200000 | 1 (1 row) -- fail activate node by failing reference table creation SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -154,7 +154,7 @@ ERROR: canceling statement due to user request SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; node_name | node_port ------------+----------- +--------------------------------------------------------------------- localhost | 57637 (1 row) @@ -163,13 +163,13 @@ FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; shardid | shardstate ----------+------------ +--------------------------------------------------------------------- 200000 | 1 (1 row) SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -180,7 +180,7 @@ ERROR: you cannot remove the primary node of a node group which has shard place DROP TABLE event_table; SELECT master_remove_node('localhost', 
:worker_2_proxy_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) @@ -188,7 +188,7 @@ SELECT master_remove_node('localhost', :worker_2_proxy_port); SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; node_name | node_port ------------+----------- +--------------------------------------------------------------------- localhost | 57637 (1 row) @@ -197,7 +197,7 @@ FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; shardid | shardstate ----------+------------ +--------------------------------------------------------------------- 200000 | 1 (1 row) @@ -206,13 +206,13 @@ ORDER BY placementid; -- be injected failure through network SELECT master_add_inactive_node('localhost', :worker_2_proxy_port); master_add_inactive_node --------------------------- +--------------------------------------------------------------------- 3 (1 row) SELECT master_remove_node('localhost', :worker_2_proxy_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) @@ -221,7 +221,7 @@ FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; shardid | shardstate ----------+------------ +--------------------------------------------------------------------- 200000 | 1 (1 row) @@ -229,7 +229,7 @@ ORDER BY placementid; -- to newly added node. SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -243,7 +243,7 @@ CONTEXT: while executing command on localhost:xxxxx SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; node_name | node_port ------------+----------- +--------------------------------------------------------------------- localhost | 57637 (1 row) @@ -252,13 +252,13 @@ FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; shardid | shardstate ----------+------------ +--------------------------------------------------------------------- 200000 | 1 (1 row) SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -269,7 +269,7 @@ ERROR: canceling statement due to user request SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; node_name | node_port ------------+----------- +--------------------------------------------------------------------- localhost | 57637 (1 row) @@ -278,21 +278,21 @@ FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; shardid | shardstate ----------+------------ +--------------------------------------------------------------------- 200000 | 1 (1 row) -- reset cluster to original state SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT master_add_node('localhost', :worker_2_proxy_port); NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx master_add_node ------------------ +--------------------------------------------------------------------- 6 (1 row) @@ -300,7 +300,7 @@ NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx 
SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; node_name | node_port ------------+----------- +--------------------------------------------------------------------- localhost | 9060 localhost | 57637 (2 rows) @@ -310,7 +310,7 @@ FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; shardid | shardstate ----------+------------ +--------------------------------------------------------------------- 200000 | 1 200000 | 1 (2 rows) @@ -318,13 +318,13 @@ ORDER BY placementid; -- fail master_add_node by failing copy out operation SELECT master_remove_node('localhost', :worker_1_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) SELECT citus.mitmproxy('conn.onQuery(query="COPY").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -336,20 +336,20 @@ CONTEXT: while executing command on localhost:xxxxx SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; node_name | node_port ------------+----------- +--------------------------------------------------------------------- localhost | 9060 (1 row) SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT master_add_node('localhost', :worker_1_port); NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx master_add_node ------------------ +--------------------------------------------------------------------- 8 (1 row) @@ -357,7 +357,7 @@ NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; node_name | node_port ------------+----------- +--------------------------------------------------------------------- localhost | 9060 localhost | 57637 (2 rows) @@ -367,7 +367,7 @@ FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; shardid | shardstate ----------+------------ +--------------------------------------------------------------------- 200000 | 1 200000 | 1 (2 rows) @@ -378,7 +378,7 @@ NOTICE: drop cascades to table add_remove_node.user_table SELECT * FROM run_command_on_workers('DROP SCHEMA IF EXISTS add_remove_node CASCADE') ORDER BY nodeport; nodename | nodeport | success | result ------------+----------+---------+------------- +--------------------------------------------------------------------- localhost | 9060 | t | DROP SCHEMA localhost | 57637 | t | DROP SCHEMA (2 rows) diff --git a/src/test/regress/expected/failure_connection_establishment.out b/src/test/regress/expected/failure_connection_establishment.out index 75d6d13c2..573f85811 100644 --- a/src/test/regress/expected/failure_connection_establishment.out +++ b/src/test/regress/expected/failure_connection_establishment.out @@ -7,7 +7,7 @@ -- SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -24,7 +24,7 @@ CREATE TABLE products ( ); SELECT create_distributed_table('products', 'product_no'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -39,7 +39,7 @@ DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY const SET citus.node_connection_timeout TO 400; SELECT 
citus.mitmproxy('conn.delay(500)'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -47,7 +47,7 @@ ALTER TABLE products ADD CONSTRAINT p_key PRIMARY KEY(product_no); ERROR: could not establish any connections to the node localhost:xxxxx after 400 ms SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -62,19 +62,19 @@ INSERT INTO r1 (id, name) VALUES SELECT create_reference_table('r1'); NOTICE: Copying data from local table... create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT citus.clear_network_traffic(); clear_network_traffic ------------------------ +--------------------------------------------------------------------- (1 row) SELECT citus.mitmproxy('conn.delay(500)'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -89,13 +89,13 @@ SET citus.task_assignment_policy TO 'round-robin'; -- test would be if one of the queries does not return the result but an error. SELECT name FROM r1 WHERE id = 2; name ------- +--------------------------------------------------------------------- bar (1 row) SELECT name FROM r1 WHERE id = 2; name ------- +--------------------------------------------------------------------- bar (1 row) @@ -103,13 +103,13 @@ SELECT name FROM r1 WHERE id = 2; -- connection to have been delayed and thus caused a timeout SELECT citus.dump_network_traffic(); dump_network_traffic -------------------------------------- +--------------------------------------------------------------------- (0,coordinator,"[initial message]") (1 row) SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -119,7 +119,7 @@ SELECT citus.mitmproxy('conn.allow()'); SET citus.force_max_query_parallelization TO ON; SELECT citus.mitmproxy('conn.delay(500)'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -127,13 +127,13 @@ SELECT citus.mitmproxy('conn.delay(500)'); -- test would be if one of the queries does not return the result but an error. 
SELECT count(*) FROM products; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM products; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -141,13 +141,13 @@ SELECT count(*) FROM products; -- is the worker SELECT citus.dump_network_traffic() ORDER BY 1 OFFSET 1; dump_network_traffic -------------------------------------- +--------------------------------------------------------------------- (1,coordinator,"[initial message]") (1 row) SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -155,7 +155,7 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE single_replicatated(key int); SELECT create_distributed_table('single_replicatated', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -164,7 +164,7 @@ SELECT create_distributed_table('single_replicatated', 'key'); SET citus.force_max_query_parallelization TO ON; SELECT citus.mitmproxy('conn.delay(500)'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -176,7 +176,7 @@ SET citus.force_max_query_parallelization TO OFF; -- mark placement INVALID SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -189,13 +189,13 @@ WHERE shardstate = 3 AND shardid IN (SELECT shardid from pg_dist_shard where logicalrelid = 'products'::regclass); invalid_placement_count -------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT citus.mitmproxy('conn.delay(500)'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -209,14 +209,14 @@ WHERE shardstate = 3 AND shardid IN (SELECT shardid from pg_dist_shard where logicalrelid = 'products'::regclass); invalid_placement_count -------------------------- +--------------------------------------------------------------------- 1 (1 row) -- show that INSERT went through SELECT count(*) FROM products WHERE product_no = 100; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -226,12 +226,12 @@ SELECT get_global_active_transactions(); WARNING: could not establish connection after 400 ms WARNING: connection error: localhost:xxxxx get_global_active_transactions --------------------------------- +--------------------------------------------------------------------- (0 rows) SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/failure_copy_on_hash.out b/src/test/regress/expected/failure_copy_on_hash.out index f7fec2242..0e2fce4c3 100644 --- a/src/test/regress/expected/failure_copy_on_hash.out +++ b/src/test/regress/expected/failure_copy_on_hash.out @@ -6,7 +6,7 @@ SET search_path TO 'copy_distributed_table'; SET citus.next_shard_id TO 1710000; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -17,7 +17,7 @@ SET citus.max_cached_conns_per_worker to 0; CREATE TABLE test_table(id int, value_1 int); SELECT create_distributed_table('test_table','id'); create_distributed_table --------------------------- 
+--------------------------------------------------------------------- (1 row) @@ -31,7 +31,7 @@ CREATE VIEW unhealthy_shard_count AS -- Just kill the connection after sending the first query to the worker. SELECT citus.mitmproxy('conn.kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -45,26 +45,26 @@ ERROR: could not connect to any active placements CONTEXT: COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) -- Now, kill the connection while copying the data SELECT citus.mitmproxy('conn.onCopyData().kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -72,19 +72,19 @@ SELECT citus.mitmproxy('conn.onCopyData().kill()'); ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -92,7 +92,7 @@ SELECT count(*) FROM test_table; -- instead of killing it. SELECT citus.mitmproxy('conn.onCopyData().cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -100,26 +100,26 @@ SELECT citus.mitmproxy('conn.onCopyData().cancel(' || pg_backend_pid() || ')'); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) -- kill the connection after worker sends command complete message SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 1").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -127,26 +127,26 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 1").kill()'); ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) -- similar to above one, but cancel the connection on command complete SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 1").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -154,26 +154,26 @@ SELECT 
citus.mitmproxy('conn.onCommandComplete(command="COPY 1").cancel(' || pg_ ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) -- kill the connection on PREPARE TRANSACTION SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -182,19 +182,19 @@ ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -203,7 +203,7 @@ SET client_min_messages TO ERROR; -- kill on command complete on COMMIT PREPARE, command should succeed SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT PREPARED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -211,19 +211,19 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT PREPARED").kill() SET client_min_messages TO NOTICE; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 4 (1 row) @@ -231,7 +231,7 @@ TRUNCATE TABLE test_table; -- kill on ROLLBACK, command could be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="ROLLBACK").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -242,19 +242,19 @@ WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -265,13 +265,13 @@ SET citus.shard_replication_factor TO 2; CREATE TABLE test_table_2(id int, value_1 int); SELECT create_distributed_table('test_table_2','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT citus.mitmproxy('conn.kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -298,7 +298,7 @@ DETAIL: server closed the connection unexpectedly CONTEXT: COPY test_table_2, line 5: "9,10" SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ 
+--------------------------------------------------------------------- (1 row) @@ -309,7 +309,7 @@ SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate WHERE pds.logicalrelid = 'test_table_2'::regclass ORDER BY shardid, nodeport; logicalrelid | shardid | shardstate ---------------+---------+------------ +--------------------------------------------------------------------- test_table_2 | 1710004 | 3 test_table_2 | 1710004 | 1 test_table_2 | 1710005 | 3 @@ -325,7 +325,7 @@ DROP TABLE test_table_2; CREATE TABLE test_table_2(id int, value_1 int); SELECT create_distributed_table('test_table_2','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -333,7 +333,7 @@ SELECT create_distributed_table('test_table_2','id'); -- The query should abort SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -345,7 +345,7 @@ CONTEXT: while executing command on localhost:xxxxx COPY test_table_2, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -356,7 +356,7 @@ SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate WHERE pds.logicalrelid = 'test_table_2'::regclass ORDER BY shardid, nodeport; logicalrelid | shardid | shardstate ---------------+---------+------------ +--------------------------------------------------------------------- test_table_2 | 1710008 | 1 test_table_2 | 1710008 | 1 test_table_2 | 1710009 | 1 @@ -372,7 +372,7 @@ DROP TABLE test_table_2; CREATE TABLE test_table_2(id int, value_1 int); SELECT create_distributed_table('test_table_2','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -381,7 +381,7 @@ SELECT create_distributed_table('test_table_2','id'); -- You can check the issue about this behaviour: https://github.com/citusdata/citus/issues/1933 SELECT citus.mitmproxy('conn.onCopyData().kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -389,7 +389,7 @@ SELECT citus.mitmproxy('conn.onCopyData().kill()'); ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -400,7 +400,7 @@ SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate WHERE pds.logicalrelid = 'test_table_2'::regclass ORDER BY shardid, nodeport; logicalrelid | shardid | shardstate ---------------+---------+------------ +--------------------------------------------------------------------- test_table_2 | 1710012 | 1 test_table_2 | 1710012 | 1 test_table_2 | 1710013 | 1 diff --git a/src/test/regress/expected/failure_copy_to_reference.out b/src/test/regress/expected/failure_copy_to_reference.out index fb74b6b6e..a253b446d 100644 --- a/src/test/regress/expected/failure_copy_to_reference.out +++ b/src/test/regress/expected/failure_copy_to_reference.out @@ -8,14 +8,14 @@ SET citus.next_shard_id TO 130000; SET client_min_messages TO ERROR; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) CREATE TABLE test_table(id int, value_1 int); SELECT create_reference_table('test_table'); create_reference_table ------------------------- 
+--------------------------------------------------------------------- (1 row) @@ -30,7 +30,7 @@ CREATE VIEW unhealthy_shard_count AS -- response we get from the worker SELECT citus.mitmproxy('conn.kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -39,26 +39,26 @@ ERROR: failure on connection marked as essential: localhost:xxxxx CONTEXT: COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -67,26 +67,26 @@ ERROR: failure on connection marked as essential: localhost:xxxxx CONTEXT: COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -95,26 +95,26 @@ ERROR: canceling statement due to user request CONTEXT: COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) -- kill as soon as the coordinator sends COPY command SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -126,26 +126,26 @@ CONTEXT: while executing command on localhost:xxxxx COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) -- cancel as soon as the coordinator sends COPY command SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -154,26 +154,26 @@ ERROR: canceling statement due to user request CONTEXT: COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); mitmproxy 
------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) -- kill as soon as the worker sends CopyComplete SELECT citus.mitmproxy('conn.onCommandComplete(command="^COPY 3").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -181,26 +181,26 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="^COPY 3").kill()'); ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) -- cancel as soon as the coordinator sends CopyData SELECT citus.mitmproxy('conn.onCommandComplete(command="^COPY 3").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -208,19 +208,19 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="^COPY 3").cancel(' || p ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -228,7 +228,7 @@ SELECT count(*) FROM test_table; -- the query should abort SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -240,26 +240,26 @@ CONTEXT: while executing command on localhost:xxxxx COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) -- killing on PREPARE should be fine, everything should be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -268,26 +268,26 @@ ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) -- cancelling on PREPARE should be fine, everything should be rollbacked SELECT 
citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -295,19 +295,19 @@ SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").cancel(' || ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -315,33 +315,33 @@ SELECT count(*) FROM test_table; -- and all the workers committed SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT PREPARED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) \copy test_table FROM STDIN DELIMITER ',' SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) -- we shouldn't have any prepared transactions in the workers SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 3 (1 row) @@ -349,14 +349,14 @@ TRUNCATE test_table; -- kill as soon as the coordinator sends COMMIT SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) \copy test_table FROM STDIN DELIMITER ',' SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -366,19 +366,19 @@ SELECT citus.mitmproxy('conn.allow()'); -- we expect to see 1 recovered prepared transactions. 
SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 1 (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 3 (1 row) @@ -387,7 +387,7 @@ TRUNCATE test_table; -- sends the ROLLBACK so the command can be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -399,19 +399,19 @@ WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -420,7 +420,7 @@ SELECT count(*) FROM test_table; -- both on the distributed table and the placements SELECT citus.mitmproxy('conn.onCommandComplete(command="^ROLLBACK").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -432,25 +432,25 @@ WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/failure_create_distributed_table_non_empty.out b/src/test/regress/expected/failure_create_distributed_table_non_empty.out index 7901308d9..e6649c5ac 100644 --- a/src/test/regress/expected/failure_create_distributed_table_non_empty.out +++ b/src/test/regress/expected/failure_create_distributed_table_non_empty.out @@ -9,7 +9,7 @@ SET search_path TO 'create_distributed_table_non_empty_failure'; SET citus.next_shard_id TO 11000000; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -21,7 +21,7 @@ INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4); -- in the first test, kill the first connection we sent from the coordinator SELECT citus.mitmproxy('conn.kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -32,14 +32,14 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) -- in the first test, cancel the first connection we sent from the coordinator SELECT citus.mitmproxy('conn.cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -47,14 +47,14 @@ SELECT create_distributed_table('test_table', 'id'); ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) -- kill as soon as the coordinator sends CREATE SCHEMA SELECT citus.mitmproxy('conn.onQuery(query="^CREATE SCHEMA").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -65,13 +65,13 @@ ERROR: server closed the connection unexpectedly CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,1) (2 rows) @@ -82,7 +82,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata -- Since we already sent the command at this stage, the schemas get created in workers SELECT citus.mitmproxy('conn.onQuery(query="^CREATE SCHEMA").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -90,20 +90,20 @@ SELECT create_distributed_table('test_table', 'id'); ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,1) (2 rows) SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS create_distributed_table_non_empty_failure$$); run_command_on_workers ------------------------------------ +--------------------------------------------------------------------- (localhost,9060,t,"DROP SCHEMA") (localhost,57637,t,"DROP SCHEMA") (2 rows) @@ -111,7 +111,7 @@ SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS create_distributed_table_n -- this triggers a schema creation which prevents further transactions around dependency propagation SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -120,7 +120,7 @@ DROP TYPE schema_proc; -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION 
LEVEL READ COMMITTED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -131,19 +131,19 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,1) (localhost,57637,t,1) (2 rows) @@ -154,7 +154,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata -- Interrupts are hold in CreateShardsWithRoundRobinPolicy SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -162,19 +162,19 @@ SELECT create_distributed_table('test_table', 'id'); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,1) (localhost,57637,t,1) (2 rows) @@ -185,7 +185,7 @@ INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4); -- kill as soon as the coordinator sends CREATE TABLE SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -196,14 +196,14 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) -- kill as soon as the coordinator sends COPY SELECT citus.mitmproxy('conn.onQuery(query="COPY").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -214,14 +214,14 @@ ERROR: server closed the connection unexpectedly CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) -- kill when the COPY is completed, it should be rollbacked properly SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -230,7 +230,7 @@ NOTICE: Copying data from local table... ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -238,7 +238,7 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ -- should not be created and rollbacked properly SELECT citus.mitmproxy('conn.onQuery(query="COPY").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -246,14 +246,14 @@ SELECT create_distributed_table('test_table', 'id'); ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) -- cancel when the COPY is completed, it should be rollbacked properly SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -262,7 +262,7 @@ NOTICE: Copying data from local table... 
ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -272,7 +272,7 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ SET client_min_messages TO ERROR; SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -281,7 +281,7 @@ ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -289,7 +289,7 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ -- successfully rollbacked the created shards SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -297,19 +297,19 @@ SELECT create_distributed_table('test_table', 'id'); ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 1 (1 row) @@ -317,31 +317,31 @@ SELECT recover_prepared_transactions(); -- shards should be created and kill should not affect SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 4 (1 row) SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 2 (1 row) @@ -356,25 +356,25 @@ INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4); -- shards should be created and kill should not affect SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ 
+--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 4 (1 row) @@ -385,21 +385,21 @@ INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4); -- the command can be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) BEGIN; SELECT create_distributed_table('test_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) ROLLBACK; SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -407,27 +407,27 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ -- should be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) BEGIN; SELECT create_distributed_table('test_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -436,7 +436,7 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ CREATE TABLE colocated_table(id int, value_1 int); SELECT create_distributed_table('colocated_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -445,7 +445,7 @@ SELECT create_distributed_table('colocated_table', 'id'); -- are not held and we can cancel in the middle of the execution SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -453,7 +453,7 @@ SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_ ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -462,7 +462,7 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ -- are not held and we can cancel in the middle of the execution SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -473,13 +473,13 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -489,7 +489,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- are not held and we can cancel in the middle of the execution SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -497,7 +497,7 @@ SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_ ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -506,7 +506,7 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ -- are not held and we can cancel in the middle of the execution SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -517,13 +517,13 @@ ERROR: server closed the connection unexpectedly CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -533,7 +533,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- are not held and we can cancel in the middle of the execution SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_apply_shard_ddl_command").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -541,7 +541,7 @@ SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_ ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -550,7 +550,7 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ -- are not held and we can cancel in the middle of the execution SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_apply_shard_ddl_command").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -561,13 +561,13 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -575,7 +575,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- Now run the same tests with 1pc SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -588,7 +588,7 @@ INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4); SET citus.multi_shard_commit_protocol TO '1pc'; SELECT citus.mitmproxy('conn.kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -599,19 +599,19 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -619,7 +619,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- in the first test, cancel the first connection we sent from the coordinator SELECT citus.mitmproxy('conn.cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -627,19 +627,19 @@ SELECT create_distributed_table('test_table', 'id'); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -647,7 +647,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- this triggers a schema creation which prevents further transactions around dependency propagation SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -656,7 +656,7 @@ DROP TYPE schema_proc; -- kill as soon as the coordinator sends 
begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -667,19 +667,19 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,1) (localhost,57637,t,1) (2 rows) @@ -690,7 +690,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata -- Interrupts are hold in CreateShardsWithRoundRobinPolicy SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -698,19 +698,19 @@ SELECT create_distributed_table('test_table', 'id'); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,1) (localhost,57637,t,1) (2 rows) @@ -721,7 +721,7 @@ INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4); -- kill as soon as the coordinator sends CREATE TABLE SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -732,14 +732,14 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) -- kill as soon as the coordinator sends COPY SELECT citus.mitmproxy('conn.onQuery(query="COPY").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -750,14 +750,14 @@ ERROR: server closed the connection unexpectedly CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) -- kill when the COPY is completed, it should be rollbacked properly SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -765,7 +765,7 @@ SELECT create_distributed_table('test_table', 'id'); ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -773,7 +773,7 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ -- should not be created and rollbacked properly SELECT citus.mitmproxy('conn.onQuery(query="COPY").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -781,14 +781,14 @@ SELECT create_distributed_table('test_table', 'id'); ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) -- cancel when the COPY is completed, it should be rollbacked properly SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -796,7 +796,7 @@ SELECT create_distributed_table('test_table', 'id'); ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -804,21 +804,21 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ -- the command can be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) BEGIN; SELECT create_distributed_table('test_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) ROLLBACK; SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -826,27 +826,27 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ -- should be rollbacked SELECT 
citus.mitmproxy('conn.onQuery(query="^ROLLBACK").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) BEGIN; SELECT create_distributed_table('test_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -854,21 +854,21 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ -- the command can be COMMITed SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) BEGIN; SELECT create_distributed_table('test_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) COMMIT; SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 4 (1 row) @@ -879,27 +879,27 @@ INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4); -- should be COMMITed SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) BEGIN; SELECT create_distributed_table('test_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) COMMIT; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 4 (1 row) @@ -909,7 +909,7 @@ INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4); CREATE TABLE colocated_table(id int, value_1 int); SELECT create_distributed_table('colocated_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -918,7 +918,7 @@ SELECT create_distributed_table('colocated_table', 'id'); -- are not held and we can cancel in the middle of the execution SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -926,7 +926,7 @@ SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_ ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -935,7 +935,7 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ -- are not held and we can cancel in the middle of the execution SELECT 
citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -946,7 +946,7 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -955,7 +955,7 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ -- are not held and we can cancel in the middle of the execution SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -963,7 +963,7 @@ SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_ ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -972,7 +972,7 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_ -- are not held and we can cancel in the middle of the execution SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -983,20 +983,20 @@ ERROR: server closed the connection unexpectedly CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/failure_create_index_concurrently.out b/src/test/regress/expected/failure_create_index_concurrently.out index a93d6e784..0e1f0968d 100644 --- a/src/test/regress/expected/failure_create_index_concurrently.out +++ b/src/test/regress/expected/failure_create_index_concurrently.out @@ -4,7 +4,7 @@ -- failure. 
SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -14,14 +14,14 @@ SET SEARCH_PATH=index_schema; CREATE TABLE index_test(id int, value_1 int, value_2 int); SELECT create_distributed_table('index_test', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) -- kill the connection when create command is issued SELECT citus.mitmproxy('conn.onQuery(query="CREATE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -31,7 +31,7 @@ DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command. SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -39,7 +39,7 @@ SELECT citus.mitmproxy('conn.allow()'); SELECT * FROM run_command_on_workers($$SELECT count(*) FROM pg_indexes WHERE indexname LIKE 'idx_index_test%' $$) WHERE nodeport = :worker_2_proxy_port; nodename | nodeport | success | result ------------+----------+---------+-------- +--------------------------------------------------------------------- localhost | 9060 | t | 0 (1 row) @@ -47,14 +47,14 @@ DROP TABLE index_test; CREATE TABLE index_test(id int, value_1 int, value_2 int); SELECT create_reference_table('index_test'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) -- kill the connection when create command is issued SELECT citus.mitmproxy('conn.onQuery(query="CREATE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -64,7 +64,7 @@ DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command. SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -72,7 +72,7 @@ DROP TABLE index_test; CREATE TABLE index_test(id int, value_1 int, value_2 int); SELECT create_distributed_table('index_test', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -81,7 +81,7 @@ SELECT create_distributed_table('index_test', 'id'); -- therefore dump_network_traffic() calls are not made SELECT citus.mitmproxy('conn.onQuery(query="CREATE").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -91,7 +91,7 @@ DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command. 
SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -99,14 +99,14 @@ DROP TABLE index_test; CREATE TABLE index_test(id int, value_1 int, value_2 int); SELECT create_reference_table('index_test'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) -- cancel the connection when create command is issued SELECT citus.mitmproxy('conn.onQuery(query="CREATE").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -116,7 +116,7 @@ DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command. SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -124,7 +124,7 @@ DROP TABLE index_test; CREATE TABLE index_test(id int, value_1 int, value_2 int); SELECT create_distributed_table('index_test', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -132,7 +132,7 @@ CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); -- kill the connection when create command is issued SELECT citus.mitmproxy('conn.onQuery(query="DROP INDEX CONCURRENTLY").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -142,7 +142,7 @@ DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command. 
SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -150,7 +150,7 @@ SELECT citus.mitmproxy('conn.allow()'); SELECT * FROM run_command_on_workers($$SELECT count(*) FROM pg_indexes WHERE indexname LIKE 'idx_index_test%' $$) WHERE nodeport = :worker_2_proxy_port; nodename | nodeport | success | result ------------+----------+---------+-------- +--------------------------------------------------------------------- localhost | 9060 | t | 4 (1 row) @@ -161,7 +161,7 @@ NOTICE: drop cascades to table index_schema.index_test SELECT * FROM run_command_on_workers($$SELECT count(*) FROM pg_indexes WHERE indexname LIKE 'idx_index_test%' $$) WHERE nodeport = :worker_2_proxy_port; nodename | nodeport | success | result ------------+----------+---------+-------- +--------------------------------------------------------------------- localhost | 9060 | t | 0 (1 row) diff --git a/src/test/regress/expected/failure_create_reference_table.out b/src/test/regress/expected/failure_create_reference_table.out index 9944a1cdb..7ced28939 100644 --- a/src/test/regress/expected/failure_create_reference_table.out +++ b/src/test/regress/expected/failure_create_reference_table.out @@ -6,7 +6,7 @@ SET search_path TO 'failure_reference_table'; SET citus.next_shard_id TO 10000000; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -20,7 +20,7 @@ INSERT INTO ref_table VALUES(1),(2),(3); -- out and not create any placement SELECT citus.mitmproxy('conn.onQuery().kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -31,14 +31,14 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT count(*) FROM pg_dist_shard_placement; count -------- +--------------------------------------------------------------------- 0 (1 row) -- Kill after creating transaction on worker node SELECT citus.mitmproxy('conn.onCommandComplete(command="BEGIN").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -49,14 +49,14 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT count(*) FROM pg_dist_shard_placement; count -------- +--------------------------------------------------------------------- 0 (1 row) -- Cancel after creating transaction on worker node SELECT citus.mitmproxy('conn.onCommandComplete(command="BEGIN").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -64,14 +64,14 @@ SELECT create_reference_table('ref_table'); ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard_placement; count -------- +--------------------------------------------------------------------- 0 (1 row) -- Kill after copying data to worker node SELECT citus.mitmproxy('conn.onCommandComplete(command="SELECT 1").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -82,14 +82,14 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT count(*) FROM pg_dist_shard_placement; count -------- +--------------------------------------------------------------------- 0 (1 row) -- Cancel after copying data to worker node SELECT citus.mitmproxy('conn.onCommandComplete(command="SELECT 1").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -97,14 +97,14 @@ SELECT create_reference_table('ref_table'); ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard_placement; count -------- +--------------------------------------------------------------------- 0 (1 row) -- Kill after copying data to worker node SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 3").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -113,14 +113,14 @@ NOTICE: Copying data from local table... ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT count(*) FROM pg_dist_shard_placement; count -------- +--------------------------------------------------------------------- 0 (1 row) -- Cancel after copying data to worker node SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 3").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -129,7 +129,7 @@ NOTICE: Copying data from local table... ERROR: canceling statement due to user request SELECT count(*) FROM pg_dist_shard_placement; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -139,7 +139,7 @@ SET client_min_messages TO ERROR; -- prepared transaction afterwards. SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -148,32 +148,32 @@ ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard_placement; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 1 (1 row) -- Kill after commiting prepared, this should succeed SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT PREPARED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('ref_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT shardid, nodeport, shardstate FROM pg_dist_shard_placement ORDER BY shardid, nodeport; shardid | nodeport | shardstate -----------+----------+------------ +--------------------------------------------------------------------- 10000008 | 9060 | 1 10000008 | 57637 | 1 (2 rows) @@ -181,7 +181,7 @@ SELECT shardid, nodeport, shardstate FROM pg_dist_shard_placement ORDER BY shard SET client_min_messages TO NOTICE; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -193,7 +193,7 @@ INSERT INTO ref_table VALUES(1),(2),(3); -- Test in transaction SELECT citus.mitmproxy('conn.onQuery().kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -208,7 
+208,7 @@ COMMIT; -- kill on ROLLBACK, should be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -216,7 +216,7 @@ BEGIN; SELECT create_reference_table('ref_table'); NOTICE: Copying data from local table... create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -225,14 +225,14 @@ WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport | placementid ----------+------------+-------------+----------+----------+------------- +--------------------------------------------------------------------- (0 rows) -- cancel when the coordinator send ROLLBACK, should be rollbacked. We ignore cancellations -- during the ROLLBACK. SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -240,14 +240,14 @@ BEGIN; SELECT create_reference_table('ref_table'); NOTICE: Copying data from local table... create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) ROLLBACK; SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport | placementid ----------+------------+-------------+----------+----------+------------- +--------------------------------------------------------------------- (0 rows) DROP SCHEMA failure_reference_table CASCADE; diff --git a/src/test/regress/expected/failure_create_table.out b/src/test/regress/expected/failure_create_table.out index 7fc3c1b2e..c3a81f03e 100644 --- a/src/test/regress/expected/failure_create_table.out +++ b/src/test/regress/expected/failure_create_table.out @@ -5,7 +5,7 @@ CREATE SCHEMA failure_create_table; SET search_path TO 'failure_create_table'; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -15,7 +15,7 @@ CREATE TABLE test_table(id int, value_1 int); -- Kill connection before sending query to the worker SELECT citus.mitmproxy('conn.kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -26,19 +26,19 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -49,7 +49,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- https://github.com/citusdata/citus/pull/1652 SELECT citus.mitmproxy('conn.onQuery(query="^CREATE SCHEMA").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -60,19 +60,19 @@ ERROR: server closed the connection unexpectedly CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'failure_create_table'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,1) (2 rows) @@ -84,7 +84,7 @@ DROP TYPE schema_proc; -- Now, kill the connection while opening transaction on workers. SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -95,19 +95,19 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -115,7 +115,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- Now, kill the connection after sending create table command with worker_apply_shard_ddl_command UDF SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_shard_ddl_command").after(1).kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -126,19 +126,19 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -149,7 +149,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_shard_ddl_command").after(1).kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -161,19 +161,19 @@ DETAIL: server closed the connection unexpectedly COMMIT; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -183,7 +183,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- shard creation. SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -191,19 +191,19 @@ SELECT create_distributed_table('test_table','id'); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -214,13 +214,13 @@ CREATE TABLE test_table(id int, value_1 int); CREATE TABLE temp_table(id int, value_1 int); SELECT create_distributed_table('temp_table','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -231,26 +231,26 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard; count -------- +--------------------------------------------------------------------- 4 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -258,19 +258,19 @@ SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table'); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard; count -------- +--------------------------------------------------------------------- 4 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -278,7 +278,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- Kill and cancel the connection after worker sends "PREPARE TRANSACTION" ack with colocate_with option SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -287,26 +287,26 @@ ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard; count -------- +--------------------------------------------------------------------- 4 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -314,19 +314,19 @@ SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table'); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard; count -------- +--------------------------------------------------------------------- 4 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers 
------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -341,7 +341,7 @@ CREATE TABLE test_table(id int, value_1 int); -- Kill connection before sending query to the worker SELECT citus.mitmproxy('conn.kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -354,19 +354,19 @@ DETAIL: server closed the connection unexpectedly ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -378,7 +378,7 @@ DROP TYPE schema_proc; -- Now, kill the connection while creating transaction on workers in transaction. SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -391,19 +391,19 @@ DETAIL: server closed the connection unexpectedly ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -414,7 +414,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- executor. So, we'll have two output files SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -424,25 +424,25 @@ ERROR: canceling statement due to user request COMMIT; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 1 (1 row) SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -457,7 +457,7 @@ SET citus.multi_shard_commit_protocol TO "1pc"; -- Kill connection before sending query to the worker with 1pc. 
SELECT citus.mitmproxy('conn.kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -470,19 +470,19 @@ DETAIL: server closed the connection unexpectedly ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -490,7 +490,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- Kill connection while sending create table command with 1pc. SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -503,19 +503,19 @@ DETAIL: server closed the connection unexpectedly ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -527,7 +527,7 @@ DROP TYPE schema_proc; -- Now, kill the connection while opening transactions on workers with 1pc. Transaction will be opened due to BEGIN. SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -540,19 +540,19 @@ DETAIL: server closed the connection unexpectedly ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -562,7 +562,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- shard creation unless the executor is used. 
So, we'll have two output files SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -572,25 +572,25 @@ ERROR: canceling statement due to user request COMMIT; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -603,14 +603,14 @@ SET citus.multi_shard_commit_protocol TO "2pc"; CREATE TABLE test_table_2(id int, value_1 int); SELECT master_create_distributed_table('test_table_2', 'id', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) -- Kill connection before sending query to the worker SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -621,25 +621,25 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT count(*) FROM pg_dist_shard; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -647,7 +647,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- Kill the connection after worker sends "PREPARE TRANSACTION" ack SELECT citus.mitmproxy('conn.onCommandComplete(command="^PREPARE TRANSACTION").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -656,19 +656,19 @@ ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) @@ -676,7 +676,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- Cancel the connection after sending prepare transaction in master_create_worker_shards SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -685,25 +685,25 @@ ERROR: canceling statement due to user request -- Show that there is no pending transaction SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 1 (1 row) SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_shard; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) diff --git a/src/test/regress/expected/failure_cte_subquery.out b/src/test/regress/expected/failure_cte_subquery.out index e59d1c3d7..5c962dbcb 100644 --- a/src/test/regress/expected/failure_cte_subquery.out +++ b/src/test/regress/expected/failure_cte_subquery.out @@ -8,13 +8,13 @@ CREATE TABLE users_table (user_id int, user_name text); CREATE TABLE events_table(user_id int, event_id int, 
event_type int); SELECT create_distributed_table('users_table', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('events_table', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -22,7 +22,7 @@ CREATE TABLE users_table_local AS SELECT * FROM users_table; -- kill at the first copy (push) SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -56,7 +56,7 @@ CONTEXT: while executing command on localhost:xxxxx -- kill at the second copy (pull) SELECT citus.mitmproxy('conn.onQuery(query="SELECT user_id FROM cte_failure.events_table_16000002").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -90,7 +90,7 @@ DETAIL: server closed the connection unexpectedly -- kill at the third copy (pull) SELECT citus.mitmproxy('conn.onQuery(query="SELECT DISTINCT users_table.user").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -124,7 +124,7 @@ DETAIL: server closed the connection unexpectedly -- cancel at the first copy (push) SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -155,7 +155,7 @@ ERROR: canceling statement due to user request -- cancel at the second copy (pull) SELECT citus.mitmproxy('conn.onQuery(query="SELECT user_id FROM").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -186,7 +186,7 @@ ERROR: canceling statement due to user request -- cancel at the third copy (pull) SELECT citus.mitmproxy('conn.onQuery(query="SELECT DISTINCT users_table.user").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -217,7 +217,7 @@ ERROR: canceling statement due to user request -- distributed update tests SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -226,7 +226,7 @@ INSERT INTO users_table VALUES (1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E'); INSERT INTO events_table VALUES (1,1,1), (1,2,1), (1,3,1), (2,1, 4), (3, 4,1), (5, 1, 2), (5, 2, 1), (5, 2,2); SELECT * FROM users_table ORDER BY 1, 2; user_id | user_name ----------+----------- +--------------------------------------------------------------------- 1 | A 2 | B 3 | C @@ -240,7 +240,7 @@ INSERT INTO users_table SELECT * FROM cte_delete; -- verify contents are the same SELECT * FROM users_table ORDER BY 1, 2; user_id | user_name ----------+----------- +--------------------------------------------------------------------- 1 | A 2 | B 3 | C @@ -251,7 +251,7 @@ SELECT * FROM users_table ORDER BY 1, 2; -- kill connection during deletion SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -264,13 +264,13 @@ DETAIL: server closed the connection unexpectedly -- verify contents are the same SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ 
+--------------------------------------------------------------------- (1 row) SELECT * FROM users_table ORDER BY 1, 2; user_id | user_name ----------+----------- +--------------------------------------------------------------------- 1 | A 2 | B 3 | C @@ -281,7 +281,7 @@ SELECT * FROM users_table ORDER BY 1, 2; -- kill connection during insert SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -294,13 +294,13 @@ CONTEXT: while executing command on localhost:xxxxx -- verify contents are the same SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM users_table ORDER BY 1, 2; user_id | user_name ----------+----------- +--------------------------------------------------------------------- 1 | A 2 | B 3 | C @@ -311,7 +311,7 @@ SELECT * FROM users_table ORDER BY 1, 2; -- cancel during deletion SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -321,13 +321,13 @@ ERROR: canceling statement due to user request -- verify contents are the same SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM users_table ORDER BY 1, 2; user_id | user_name ----------+----------- +--------------------------------------------------------------------- 1 | A 2 | B 3 | C @@ -338,7 +338,7 @@ SELECT * FROM users_table ORDER BY 1, 2; -- cancel during insert SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -348,13 +348,13 @@ ERROR: canceling statement due to user request -- verify contents are the same SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM users_table ORDER BY 1, 2; user_id | user_name ----------+----------- +--------------------------------------------------------------------- 1 | A 2 | B 3 | C @@ -365,7 +365,7 @@ SELECT * FROM users_table ORDER BY 1, 2; -- test sequential delete/insert SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -381,7 +381,7 @@ END; RESET SEARCH_PATH; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/failure_ddl.out b/src/test/regress/expected/failure_ddl.out index 4a9f27140..e3f442665 100644 --- a/src/test/regress/expected/failure_ddl.out +++ b/src/test/regress/expected/failure_ddl.out @@ -12,7 +12,7 @@ SET citus.max_cached_conns_per_worker TO 0; SET client_min_messages TO ERROR; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -24,7 +24,7 @@ SET citus.shard_replication_factor = 1; CREATE TABLE test_table (key int, value int); SELECT create_distributed_table('test_table', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -32,7 +32,7 @@ SELECT 
create_distributed_table('test_table', 'key'); -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -43,7 +43,7 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg -------------- +--------------------------------------------------------------------- {key,value} (1 row) @@ -51,7 +51,7 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -59,14 +59,14 @@ ALTER TABLE test_table ADD COLUMN new_column INT; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg -------------- +--------------------------------------------------------------------- {key,value} (1 row) -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -77,14 +77,14 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg -------------- +--------------------------------------------------------------------- {key,value} (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -92,14 +92,14 @@ ALTER TABLE test_table ADD COLUMN new_column INT; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg -------------- +--------------------------------------------------------------------- {key,value} (1 row) -- kill as soon as the coordinator sends worker_apply_shard_ddl_command SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -111,14 +111,14 @@ DETAIL: server closed the connection unexpectedly -- show that we've never commited the changes SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg -------------- +--------------------------------------------------------------------- {key,value} (1 row) -- cancel as soon as the coordinator sends worker_apply_shard_ddl_command SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -127,21 +127,21 @@ ERROR: canceling statement due to user request -- show that we've never commited the changes SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs 
where relid = 'test_table'::regclass; array_agg -------------- +--------------------------------------------------------------------- {key,value} (1 row) -- kill as soon as the coordinator sends COMMIT SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -152,13 +152,13 @@ SELECT citus.mitmproxy('conn.allow()'); -- that live in the failed worker, since we're running 1PC SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg ------------------------- +--------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; run_command_on_placements ------------------------------------------------------ +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,value}") (localhost,9060,100802,t,"{key,value}") (localhost,57637,100801,t,"{key,new_column,value}") @@ -174,21 +174,21 @@ SET citus.shard_replication_factor = 1; CREATE TABLE test_table (key int, value int); SELECT create_distributed_table('test_table', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) -- cancel as soon as the coordinator sends COMMIT SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -196,13 +196,13 @@ SELECT citus.mitmproxy('conn.allow()'); -- should have been applied without any issues since cancel is ignored SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg ------------------------- +--------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; run_command_on_placements ------------------------------------------------------ +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") (localhost,57637,100801,t,"{key,new_column,value}") @@ -217,7 +217,7 @@ ALTER TABLE test_table DROP COLUMN new_column; SET client_min_messages TO WARNING; SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -236,20 +236,20 @@ WARNING: could not commit transaction for shard xxxxx on any active node WARNING: could not commit transaction for shard xxxxx on any active node SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 
row) SET client_min_messages TO ERROR; SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg ------------------------- +--------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; run_command_on_placements ------------------------------------------------------ +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") (localhost,57637,100801,t,"{key,new_column,value}") @@ -262,14 +262,14 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- case Citus messes up this behaviour SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) ALTER TABLE test_table DROP COLUMN new_column; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -280,7 +280,7 @@ ALTER TABLE test_table ADD COLUMN new_column INT; -- so the command can be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="ROLLBACK").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -298,7 +298,7 @@ CONTEXT: while executing command on localhost:xxxxx -- case Citus messes up this behaviour SELECT citus.mitmproxy('conn.onQuery(query="ROLLBACK").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -310,7 +310,7 @@ ROLLBACK; -- should have been rollbacked both on the distributed table and the placements SELECT citus.mitmproxy('conn.onCommandComplete(command="ROLLBACK").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -319,19 +319,19 @@ ALTER TABLE test_table DROP COLUMN new_column; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg ------------------------- +--------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; run_command_on_placements ------------------------------------------------------ +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") (localhost,57637,100801,t,"{key,new_column,value}") @@ -344,7 +344,7 @@ SET citus.multi_shard_commit_protocol TO '2pc'; -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -355,7 +355,7 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg ------------------------- +--------------------------------------------------------------------- {key,new_column,value} (1 row) @@ -363,7 +363,7 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -371,14 +371,14 @@ ALTER TABLE test_table DROP COLUMN new_column; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg ------------------------- +--------------------------------------------------------------------- {key,new_column,value} (1 row) -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -389,14 +389,14 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg ------------------------- +--------------------------------------------------------------------- {key,new_column,value} (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -404,14 +404,14 @@ ALTER TABLE test_table DROP COLUMN new_column; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg ------------------------- +--------------------------------------------------------------------- {key,new_column,value} (1 row) -- kill as soon as the coordinator sends worker_apply_shard_ddl_command SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -422,14 +422,14 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg ------------------------- +--------------------------------------------------------------------- {key,new_column,value} (1 row) -- cancel as soon as the coordinator sends worker_apply_shard_ddl_command SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -437,14 +437,14 @@ ALTER TABLE test_table DROP COLUMN new_column; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg ------------------------- +--------------------------------------------------------------------- {key,new_column,value} (1 row) -- killing on PREPARE should be fine, everything should be rollbacked SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -453,19 +453,19 @@ ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg ------------------------- +--------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; run_command_on_placements ------------------------------------------------------ +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") (localhost,57637,100801,t,"{key,new_column,value}") @@ -476,13 +476,13 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- see that the command is rollbacked SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 2 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; run_command_on_placements ------------------------------------------------------ +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") (localhost,57637,100801,t,"{key,new_column,value}") @@ -492,7 +492,7 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- cancelling on PREPARE should be fine, everything should be rollbacked SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -500,19 +500,19 @@ ALTER TABLE test_table DROP COLUMN new_column; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ 
+--------------------------------------------------------------------- (1 row) SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg ------------------------- +--------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; run_command_on_placements ------------------------------------------------------ +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") (localhost,57637,100801,t,"{key,new_column,value}") @@ -523,13 +523,13 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- see that the command is rollbacked SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 1 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; run_command_on_placements ------------------------------------------------------ +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") (localhost,57637,100801,t,"{key,new_column,value}") @@ -540,26 +540,26 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- and all the workers committed SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT PREPARED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) ALTER TABLE test_table DROP COLUMN new_column; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg -------------- +--------------------------------------------------------------------- {key,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; run_command_on_placements ------------------------------------------- +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,value}") (localhost,9060,100802,t,"{key,value}") (localhost,57637,100801,t,"{key,value}") @@ -569,13 +569,13 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- we shouldn't have any prepared transactions in the workers SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; run_command_on_placements ------------------------------------------- +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,value}") (localhost,9060,100802,t,"{key,value}") (localhost,57637,100801,t,"{key,value}") @@ -585,14 
+585,14 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- kill as soon as the coordinator sends COMMIT SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -600,13 +600,13 @@ SELECT citus.mitmproxy('conn.allow()'); -- since we've not commited the prepared transactions SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg ------------------------- +--------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; run_command_on_placements ------------------------------------------------------ +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,value}") (localhost,9060,100802,t,"{key,value}") (localhost,57637,100801,t,"{key,new_column,value}") @@ -617,13 +617,13 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- see that the command is committed SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 2 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; run_command_on_placements ------------------------------------------------------ +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") (localhost,57637,100801,t,"{key,new_column,value}") @@ -635,7 +635,7 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- so the command can be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="ROLLBACK").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -644,20 +644,20 @@ ALTER TABLE test_table DROP COLUMN new_column; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) -- ROLLBACK should have failed on the distributed table and the placements SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg ------------------------- +--------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; run_command_on_placements ------------------------------------------------------ +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") (localhost,57637,100801,t,"{key,new_column,value}") @@ -669,7 +669,7 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- should 
have been rollbacked both on the distributed table and the placements SELECT citus.mitmproxy('conn.onCommandComplete(command="ROLLBACK").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -678,20 +678,20 @@ ALTER TABLE test_table DROP COLUMN new_column; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) -- make sure that the transaction is rollbacked SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; run_command_on_placements ------------------------------------------------------ +--------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") (localhost,57637,100801,t,"{key,new_column,value}") @@ -707,7 +707,7 @@ DROP TABLE test_table; CREATE TABLE test_table (key int, value int); SELECT create_distributed_table('test_table', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -715,7 +715,7 @@ SELECT create_distributed_table('test_table', 'key'); -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -726,7 +726,7 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg -------------- +--------------------------------------------------------------------- {key,value} (1 row) @@ -734,7 +734,7 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -742,14 +742,14 @@ ALTER TABLE test_table ADD COLUMN new_column INT; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg -------------- +--------------------------------------------------------------------- {key,value} (1 row) -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -760,14 +760,14 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg -------------- +--------------------------------------------------------------------- {key,value} (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -775,14 +775,14 @@ ALTER TABLE test_table ADD COLUMN new_column INT; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg -------------- +--------------------------------------------------------------------- {key,value} (1 row) -- kill as soon as the coordinator sends worker_apply_shard_ddl_command SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -793,14 +793,14 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg -------------- +--------------------------------------------------------------------- {key,value} (1 row) -- cancel as soon as the coordinator sends worker_apply_shard_ddl_command SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -808,14 +808,14 @@ ALTER TABLE test_table ADD COLUMN new_column INT; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg -------------- +--------------------------------------------------------------------- {key,value} (1 row) -- killing on PREPARE should be fine, everything should be rollbacked SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -824,7 +824,7 @@ ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -835,13 +835,13 @@ SELECT citus.mitmproxy('conn.allow()'); -- the transaction has not been commited on any placement yet SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 4 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; run_command_on_placements ------------------------------------------- +--------------------------------------------------------------------- (localhost,9060,100804,t,"{key,value}") (localhost,9060,100805,t,"{key,value}") (localhost,9060,100806,t,"{key,value}") @@ -856,26 +856,26 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- and all the workers committed SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT PREPARED").kill()'); 
mitmproxy ------------ +--------------------------------------------------------------------- (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg ------------------------- +--------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; run_command_on_placements ------------------------------------------------------ +--------------------------------------------------------------------- (localhost,9060,100804,t,"{key,new_column,value}") (localhost,9060,100805,t,"{key,new_column,value}") (localhost,9060,100806,t,"{key,new_column,value}") @@ -889,13 +889,13 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- we shouldn't have any prepared transactions in the workers SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; run_command_on_placements ------------------------------------------------------ +--------------------------------------------------------------------- (localhost,9060,100804,t,"{key,new_column,value}") (localhost,9060,100805,t,"{key,new_column,value}") (localhost,9060,100806,t,"{key,new_column,value}") @@ -909,14 +909,14 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- kill as soon as the coordinator sends COMMIT SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) ALTER TABLE test_table DROP COLUMN new_column; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -924,13 +924,13 @@ SELECT citus.mitmproxy('conn.allow()'); -- since we've not commited the prepared transactions SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg -------------- +--------------------------------------------------------------------- {key,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; run_command_on_placements ----------------------------------------------------- +--------------------------------------------------------------------- (localhost,9060,100804,t,"{key,new_column,value}") (localhost,9060,100805,t,"{key,new_column,value}") (localhost,9060,100806,t,"{key,new_column,value}") @@ -945,13 +945,13 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- see that the command is committed SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 4 (1 row) SELECT run_command_on_placements('test_table', $$SELECT 
array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; run_command_on_placements ------------------------------------------- +--------------------------------------------------------------------- (localhost,9060,100804,t,"{key,value}") (localhost,9060,100805,t,"{key,value}") (localhost,9060,100806,t,"{key,value}") @@ -967,7 +967,7 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- so the command can be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="ROLLBACK").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -976,20 +976,20 @@ ALTER TABLE test_table ADD COLUMN new_column INT; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) -- ROLLBACK should have failed on the distributed table and the placements SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg -------------- +--------------------------------------------------------------------- {key,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; run_command_on_placements ------------------------------------------- +--------------------------------------------------------------------- (localhost,9060,100804,t,"{key,value}") (localhost,9060,100805,t,"{key,value}") (localhost,9060,100806,t,"{key,value}") @@ -1005,7 +1005,7 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- should have been rollbacked both on the distributed table and the placements SELECT citus.mitmproxy('conn.onCommandComplete(command="ROLLBACK").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -1014,20 +1014,20 @@ ALTER TABLE test_table ADD COLUMN new_column INT; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) -- make sure that the transaction is rollbacked SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; run_command_on_placements ------------------------------------------- +--------------------------------------------------------------------- (localhost,9060,100804,t,"{key,value}") (localhost,9060,100805,t,"{key,value}") (localhost,9060,100806,t,"{key,value}") @@ -1043,7 +1043,7 @@ SET citus.multi_shard_modify_mode TO 'sequential'; -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -1054,14 +1054,14 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg -------------- +--------------------------------------------------------------------- {key,value} (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -1069,14 +1069,14 @@ ALTER TABLE test_table ADD COLUMN new_column INT; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg -------------- +--------------------------------------------------------------------- {key,value} (1 row) -- kill as soon as the coordinator sends worker_apply_shard_ddl_command SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -1088,7 +1088,7 @@ DETAIL: server closed the connection unexpectedly -- kill as soon as the coordinator after it sends worker_apply_shard_ddl_command 2nd time SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").after(2).kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -1100,7 +1100,7 @@ DETAIL: server closed the connection unexpectedly -- cancel as soon as the coordinator after it sends worker_apply_shard_ddl_command 2nd time SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").after(2).cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/failure_insert_select_pushdown.out b/src/test/regress/expected/failure_insert_select_pushdown.out index 3dac1df2e..ce83c58de 100644 --- a/src/test/regress/expected/failure_insert_select_pushdown.out +++ b/src/test/regress/expected/failure_insert_select_pushdown.out @@ -5,7 +5,7 @@ -- SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -18,20 +18,20 @@ CREATE TABLE events_table(user_id int, event_id int, event_type int); CREATE TABLE events_summary(user_id int, event_id int, event_count int); SELECT create_distributed_table('events_table', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('events_summary', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) INSERT INTO events_table VALUES (1, 1, 3 ), (1, 2, 1), (1, 3, 2), (2, 4, 3), (3, 5, 1), (4, 7, 1), (4, 1, 9), (4, 3, 2); SELECT count(*) FROM events_summary; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -39,7 +39,7 @@ SELECT count(*) FROM events_summary; -- kill worker query SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -51,20 +51,20 @@ DETAIL: server closed the connection unexpectedly --verify nothing is modified SELECT citus.mitmproxy('conn.allow()'); 
mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM events_summary; count -------- +--------------------------------------------------------------------- 0 (1 row) -- cancel worker query SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -73,27 +73,27 @@ ERROR: canceling statement due to user request --verify nothing is modified SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM events_summary; count -------- +--------------------------------------------------------------------- 0 (1 row) -- test self insert/select SELECT count(*) FROM events_table; count -------- +--------------------------------------------------------------------- 8 (1 row) -- kill worker query SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -105,20 +105,20 @@ DETAIL: server closed the connection unexpectedly --verify nothing is modified SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM events_table; count -------- +--------------------------------------------------------------------- 8 (1 row) -- cancel worker query SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -127,20 +127,20 @@ ERROR: canceling statement due to user request --verify nothing is modified SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM events_table; count -------- +--------------------------------------------------------------------- 8 (1 row) RESET SEARCH_PATH; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/failure_insert_select_via_coordinator.out b/src/test/regress/expected/failure_insert_select_via_coordinator.out index ab90cfe40..85a357ea5 100644 --- a/src/test/regress/expected/failure_insert_select_via_coordinator.out +++ b/src/test/regress/expected/failure_insert_select_via_coordinator.out @@ -15,32 +15,32 @@ CREATE TABLE events_reference(event_type int, event_count int); CREATE TABLE events_reference_distributed(event_type int, event_count int); SELECT create_distributed_table('events_table', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('events_summary', 'event_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('events_reference'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('events_reference_distributed', 'event_type'); create_distributed_table --------------------------- 
+--------------------------------------------------------------------- (1 row) INSERT INTO events_table VALUES (1, 1, 3 ), (1, 2, 1), (1, 3, 2), (2, 4, 3), (3, 5, 1), (4, 7, 1), (4, 1, 9), (4, 3, 2); SELECT count(*) FROM events_summary; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -48,7 +48,7 @@ SELECT count(*) FROM events_summary; -- kill coordinator pull query SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -60,7 +60,7 @@ CONTEXT: while executing command on localhost:xxxxx -- kill data push SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -72,7 +72,7 @@ CONTEXT: while executing command on localhost:xxxxx -- cancel coordinator pull query SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -81,7 +81,7 @@ ERROR: canceling statement due to user request -- cancel data push SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -90,13 +90,13 @@ ERROR: canceling statement due to user request --verify nothing is modified SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM events_summary; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -104,7 +104,7 @@ SELECT count(*) FROM events_summary; -- kill coordinator pull query SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -116,7 +116,7 @@ CONTEXT: while executing command on localhost:xxxxx -- kill data push SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -128,7 +128,7 @@ CONTEXT: while executing command on localhost:xxxxx -- cancel coordinator pull query SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -137,7 +137,7 @@ ERROR: canceling statement due to user request -- cancel data push SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -146,13 +146,13 @@ ERROR: canceling statement due to user request --verify nothing is modified SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM events_reference; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -162,7 +162,7 @@ INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP -- kill coordinator pull query SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); mitmproxy ------------ 
+--------------------------------------------------------------------- (1 row) @@ -174,7 +174,7 @@ CONTEXT: while executing command on localhost:xxxxx -- kill data push SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -186,7 +186,7 @@ CONTEXT: while executing command on localhost:xxxxx -- cancel coordinator pull query SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -195,7 +195,7 @@ ERROR: canceling statement due to user request -- cancel data push SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -204,20 +204,20 @@ ERROR: canceling statement due to user request --verify nothing is modified SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM events_reference_distributed; count -------- +--------------------------------------------------------------------- 0 (1 row) RESET SEARCH_PATH; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/failure_multi_dml.out b/src/test/regress/expected/failure_multi_dml.out index 85609126d..abc9c7896 100644 --- a/src/test/regress/expected/failure_multi_dml.out +++ b/src/test/regress/expected/failure_multi_dml.out @@ -1,6 +1,6 @@ SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -11,14 +11,14 @@ ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100; CREATE TABLE dml_test (id integer, name text); SELECT create_distributed_table('dml_test', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) COPY dml_test FROM STDIN WITH CSV; SELECT citus.clear_network_traffic(); clear_network_traffic ------------------------ +--------------------------------------------------------------------- (1 row) @@ -27,7 +27,7 @@ SELECT citus.clear_network_traffic(); -- fail at DELETE SELECT citus.mitmproxy('conn.onQuery(query="^DELETE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -49,7 +49,7 @@ COMMIT; --- shouldn't see any changes performed in failed transaction SELECT * FROM dml_test ORDER BY id ASC; id | name -----+------- +--------------------------------------------------------------------- 1 | Alpha 2 | Beta 3 | Gamma @@ -59,7 +59,7 @@ SELECT * FROM dml_test ORDER BY id ASC; -- cancel at DELETE SELECT citus.mitmproxy('conn.onQuery(query="^DELETE").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -78,7 +78,7 @@ COMMIT; --- shouldn't see any changes performed in failed transaction SELECT * FROM dml_test ORDER BY id ASC; id | name -----+------- +--------------------------------------------------------------------- 1 | Alpha 2 | Beta 3 | Gamma @@ -88,7 +88,7 @@ SELECT * FROM dml_test ORDER BY id ASC; -- fail at INSERT SELECT 
citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -108,7 +108,7 @@ COMMIT; --- shouldn't see any changes before failed INSERT SELECT * FROM dml_test ORDER BY id ASC; id | name -----+------- +--------------------------------------------------------------------- 1 | Alpha 2 | Beta 3 | Gamma @@ -118,7 +118,7 @@ SELECT * FROM dml_test ORDER BY id ASC; -- cancel at INSERT SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -135,7 +135,7 @@ COMMIT; --- shouldn't see any changes before failed INSERT SELECT * FROM dml_test ORDER BY id ASC; id | name -----+------- +--------------------------------------------------------------------- 1 | Alpha 2 | Beta 3 | Gamma @@ -145,7 +145,7 @@ SELECT * FROM dml_test ORDER BY id ASC; -- fail at UPDATE SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -164,7 +164,7 @@ COMMIT; --- shouldn't see any changes after failed UPDATE SELECT * FROM dml_test ORDER BY id ASC; id | name -----+------- +--------------------------------------------------------------------- 1 | Alpha 2 | Beta 3 | Gamma @@ -174,7 +174,7 @@ SELECT * FROM dml_test ORDER BY id ASC; -- cancel at UPDATE SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -190,7 +190,7 @@ COMMIT; --- shouldn't see any changes after failed UPDATE SELECT * FROM dml_test ORDER BY id ASC; id | name -----+------- +--------------------------------------------------------------------- 1 | Alpha 2 | Beta 3 | Gamma @@ -200,7 +200,7 @@ SELECT * FROM dml_test ORDER BY id ASC; -- fail at PREPARE TRANSACTION SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -222,31 +222,31 @@ COMMIT; false ); master_run_on_worker ---------------------------- +--------------------------------------------------------------------- (localhost,57636,t,BEGIN) (1 row) SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT shardid FROM pg_dist_shard_placement WHERE shardstate = 3; shardid ---------- +--------------------------------------------------------------------- (0 rows) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) -- shouldn't see any changes after failed PREPARE SELECT * FROM dml_test ORDER BY id ASC; id | name -----+------- +--------------------------------------------------------------------- 1 | Alpha 2 | Beta 3 | Gamma @@ -256,7 +256,7 @@ SELECT * FROM dml_test ORDER BY id ASC; -- cancel at PREPARE TRANSACTION SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -271,25 +271,25 @@ COMMIT; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ 
+--------------------------------------------------------------------- (1 row) SELECT shardid FROM pg_dist_shard_placement WHERE shardstate = 3; shardid ---------- +--------------------------------------------------------------------- (0 rows) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) -- shouldn't see any changes after failed PREPARE SELECT * FROM dml_test ORDER BY id ASC; id | name -----+------- +--------------------------------------------------------------------- 1 | Alpha 2 | Beta 3 | Gamma @@ -299,7 +299,7 @@ SELECT * FROM dml_test ORDER BY id ASC; -- fail at COMMIT SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -316,25 +316,25 @@ COMMIT; SET client_min_messages TO DEFAULT; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT shardid FROM pg_dist_shard_placement WHERE shardstate = 3; shardid ---------- +--------------------------------------------------------------------- (0 rows) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 1 (1 row) -- should see changes, because of txn recovery SELECT * FROM dml_test ORDER BY id ASC; id | name -----+--------- +--------------------------------------------------------------------- 3 | gamma 4 | Delta 5 | Epsilon @@ -343,7 +343,7 @@ SELECT * FROM dml_test ORDER BY id ASC; -- cancel at COMMITs are ignored by Postgres SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -357,7 +357,7 @@ COMMIT; -- should see changes, because cancellation is ignored SELECT * FROM dml_test ORDER BY id ASC; id | name -----+--------- +--------------------------------------------------------------------- 3 | gamma 4 | Delta 5 | Epsilon @@ -371,7 +371,7 @@ SET citus.shard_replication_factor = 2; -- two placements CREATE TABLE dml_test (id integer, name text); SELECT create_distributed_table('dml_test', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -380,7 +380,7 @@ COPY dml_test FROM STDIN WITH CSV; -- fail at COMMIT (actually COMMIT this time, as no 2pc in use) SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -399,7 +399,7 @@ CONTEXT: while executing command on localhost:xxxxx --- should see all changes, but they only went to one placement (other is unhealthy) SELECT * FROM dml_test ORDER BY id ASC; id | name -----+--------- +--------------------------------------------------------------------- 3 | gamma 4 | Delta 5 | Epsilon @@ -407,13 +407,13 @@ SELECT * FROM dml_test ORDER BY id ASC; SELECT shardid FROM pg_dist_shard_placement WHERE shardstate = 3; shardid ---------- +--------------------------------------------------------------------- 103402 (1 row) SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -424,7 +424,7 @@ SET 
citus.shard_replication_factor = 1; CREATE TABLE dml_test (id integer, name text); SELECT create_reference_table('dml_test'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -432,7 +432,7 @@ COPY dml_test FROM STDIN WITH CSV; -- fail at COMMIT (by failing to PREPARE) SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -448,7 +448,7 @@ CONTEXT: while executing command on localhost:xxxxx --- shouldn't see any changes after failed COMMIT SELECT * FROM dml_test ORDER BY id ASC; id | name -----+------- +--------------------------------------------------------------------- 1 | Alpha 2 | Beta 3 | Gamma @@ -458,7 +458,7 @@ SELECT * FROM dml_test ORDER BY id ASC; -- cancel at COMMIT (by cancelling on PREPARE) SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -473,7 +473,7 @@ ERROR: canceling statement due to user request --- shouldn't see any changes after cancelled PREPARE SELECT * FROM dml_test ORDER BY id ASC; id | name -----+------- +--------------------------------------------------------------------- 1 | Alpha 2 | Beta 3 | Gamma @@ -483,7 +483,7 @@ SELECT * FROM dml_test ORDER BY id ASC; -- allow connection to allow DROP SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/failure_multi_row_insert.out b/src/test/regress/expected/failure_multi_row_insert.out index 2eac35bfe..f13fe7bfe 100644 --- a/src/test/regress/expected/failure_multi_row_insert.out +++ b/src/test/regress/expected/failure_multi_row_insert.out @@ -11,7 +11,7 @@ SET citus.shard_replication_factor TO 1; SELECT pg_backend_pid() as pid \gset SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -19,13 +19,13 @@ CREATE TABLE distributed_table(key int, value int); CREATE TABLE reference_table(value int); SELECT create_distributed_table('distributed_table', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('reference_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -38,7 +38,7 @@ SELECT create_reference_table('reference_table'); -- Failure and cancellation on multi-row INSERT that hits the same shard with the same value SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -53,7 +53,7 @@ DETAIL: server closed the connection unexpectedly -- Failure and cancellation on multi-row INSERT that hits the same shard with different values SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -68,7 +68,7 @@ DETAIL: server closed the connection unexpectedly -- Failure and cancellation multi-row INSERT that hits multiple shards in a single worker SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); mitmproxy ------------ 
+--------------------------------------------------------------------- (1 row) @@ -79,7 +79,7 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -88,7 +88,7 @@ ERROR: canceling statement due to user request -- Failure and cancellation multi-row INSERT that hits multiple shards in a single worker, happening on the second query SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -99,7 +99,7 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -108,7 +108,7 @@ ERROR: canceling statement due to user request -- Failure and cancellation multi-row INSERT that hits multiple shards in multiple workers SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -119,7 +119,7 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -128,7 +128,7 @@ ERROR: canceling statement due to user request -- one test for the reference tables for completeness SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -136,7 +136,7 @@ INSERT INTO reference_table VALUES (1), (2), (3), (4); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -145,7 +145,7 @@ ERROR: canceling statement due to user request -- cancel the second insert over the same connection SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -154,17 +154,17 @@ ERROR: canceling statement due to user request -- we've either failed or cancelled all queries, so should be empty SELECT * FROM distributed_table; key | value ------+------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM reference_table; value -------- +--------------------------------------------------------------------- (0 rows) SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/failure_multi_shard_update_delete.out b/src/test/regress/expected/failure_multi_shard_update_delete.out index 9cf343283..eaefb686e 100644 --- a/src/test/regress/expected/failure_multi_shard_update_delete.out +++ b/src/test/regress/expected/failure_multi_shard_update_delete.out @@ -10,7 +10,7 @@ SET citus.shard_replication_factor TO 1; SET citus.max_cached_conns_per_worker TO 0; SELECT 
citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -19,19 +19,19 @@ CREATE TABLE r1(a int, b int PRIMARY KEY); CREATE TABLE t2(a int REFERENCES t1(a) ON DELETE CASCADE, b int REFERENCES r1(b) ON DELETE CASCADE, c int); SELECT create_distributed_table('t1', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('r1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('t2', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -42,13 +42,13 @@ INSERT INTO t2 VALUES (1, 1, 1), (1, 2, 1), (2, 1, 2), (2, 2, 4), (3, 1, 3), (3, SELECT pg_backend_pid() as pid \gset SELECT count(*) FROM t2; count -------- +--------------------------------------------------------------------- 7 (1 row) SHOW citus.multi_shard_commit_protocol ; citus.multi_shard_commit_protocol ------------------------------------ +--------------------------------------------------------------------- 2pc (1 row) @@ -57,7 +57,7 @@ SHOW citus.multi_shard_commit_protocol ; -- test both kill and cancellation SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -70,14 +70,14 @@ DETAIL: server closed the connection unexpectedly -- verify nothing is deleted SELECT count(*) FROM t2; count -------- +--------------------------------------------------------------------- 7 (1 row) -- kill just one connection SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -89,14 +89,14 @@ DETAIL: server closed the connection unexpectedly -- verify nothing is deleted SELECT count(*) FROM t2; count -------- +--------------------------------------------------------------------- 7 (1 row) -- cancellation SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -106,14 +106,14 @@ ERROR: canceling statement due to user request -- verify nothing is deleted SELECT count(*) FROM t2; count -------- +--------------------------------------------------------------------- 7 (1 row) -- cancel just one connection SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -122,7 +122,7 @@ ERROR: canceling statement due to user request -- verify nothing is deleted SELECT count(*) FROM t2; count -------- +--------------------------------------------------------------------- 7 (1 row) @@ -133,13 +133,13 @@ SELECT count(*) FROM t2; -- test both kill and cancellation SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 -----+---- +--------------------------------------------------------------------- 3 | 1 (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -152,14 +152,14 @@ DETAIL: server 
closed the connection unexpectedly -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 -----+---- +--------------------------------------------------------------------- 3 | 1 (1 row) -- kill just one connection SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -171,14 +171,14 @@ DETAIL: server closed the connection unexpectedly -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 -----+---- +--------------------------------------------------------------------- 3 | 1 (1 row) -- cancellation SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -188,14 +188,14 @@ ERROR: canceling statement due to user request -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 -----+---- +--------------------------------------------------------------------- 3 | 1 (1 row) -- cancel just one connection SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -204,7 +204,7 @@ ERROR: canceling statement due to user request -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 -----+---- +--------------------------------------------------------------------- 3 | 1 (1 row) @@ -215,7 +215,7 @@ SET citus.multi_shard_commit_protocol TO '1PC'; -- test both kill and cancellation SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -228,14 +228,14 @@ DETAIL: server closed the connection unexpectedly -- verify nothing is deleted SELECT count(*) FROM t2; count -------- +--------------------------------------------------------------------- 7 (1 row) -- kill just one connection SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -247,14 +247,14 @@ DETAIL: server closed the connection unexpectedly -- verify nothing is deleted SELECT count(*) FROM t2; count -------- +--------------------------------------------------------------------- 7 (1 row) -- cancellation SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -264,14 +264,14 @@ ERROR: canceling statement due to user request -- verify nothing is deleted SELECT count(*) FROM t2; count -------- +--------------------------------------------------------------------- 7 (1 row) -- cancel just one connection SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -280,7 +280,7 @@ ERROR: canceling statement due to user request -- verify nothing is deleted SELECT count(*) FROM t2; count -------- 
+--------------------------------------------------------------------- 7 (1 row) @@ -291,13 +291,13 @@ SELECT count(*) FROM t2; -- test both kill and cancellation SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 -----+---- +--------------------------------------------------------------------- 3 | 1 (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -310,14 +310,14 @@ DETAIL: server closed the connection unexpectedly -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 -----+---- +--------------------------------------------------------------------- 3 | 1 (1 row) -- kill just one connection SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -329,14 +329,14 @@ DETAIL: server closed the connection unexpectedly -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 -----+---- +--------------------------------------------------------------------- 3 | 1 (1 row) -- cancellation SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -346,14 +346,14 @@ ERROR: canceling statement due to user request -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 -----+---- +--------------------------------------------------------------------- 3 | 1 (1 row) -- cancel just one connection SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -362,7 +362,7 @@ ERROR: canceling statement due to user request -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 -----+---- +--------------------------------------------------------------------- 3 | 1 (1 row) @@ -378,20 +378,20 @@ RESET citus.multi_shard_commit_protocol; -- test coverage SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) -- check counts before delete SELECT count(*) FILTER (WHERE b = 2) AS b2 FROM t2; b2 ----- +--------------------------------------------------------------------- 3 (1 row) SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -403,13 +403,13 @@ DETAIL: server closed the connection unexpectedly -- verify nothing is deleted SELECT count(*) FILTER (WHERE b = 2) AS b2 FROM t2; b2 ----- +--------------------------------------------------------------------- 3 (1 row) SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -421,14 +421,14 @@ DETAIL: server closed the connection unexpectedly -- verify nothing is deleted SELECT count(*) FILTER (WHERE b = 2) AS b2 FROM t2; b2 ----- +--------------------------------------------------------------------- 3 (1 
row) -- test update with subquery pull SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -436,13 +436,13 @@ CREATE TABLE t3 AS SELECT * FROM t2; SELECT create_distributed_table('t3', 'a'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM t3 ORDER BY 1, 2, 3; a | b | c ----+---+--- +--------------------------------------------------------------------- 1 | 1 | 1 1 | 2 | 1 2 | 1 | 2 @@ -454,7 +454,7 @@ SELECT * FROM t3 ORDER BY 1, 2, 3; SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -469,13 +469,13 @@ CONTEXT: while executing command on localhost:xxxxx --- verify nothing is updated SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM t3 ORDER BY 1, 2, 3; a | b | c ----+---+--- +--------------------------------------------------------------------- 1 | 1 | 1 1 | 2 | 1 2 | 1 | 2 @@ -488,7 +488,7 @@ SELECT * FROM t3 ORDER BY 1, 2, 3; -- kill update part SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE multi_shard.t3_201009").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -503,13 +503,13 @@ DETAIL: server closed the connection unexpectedly --- verify nothing is updated SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM t3 ORDER BY 1, 2, 3; a | b | c ----+---+--- +--------------------------------------------------------------------- 1 | 1 | 1 1 | 2 | 1 2 | 1 | 2 @@ -525,7 +525,7 @@ SELECT * FROM t3 ORDER BY 1, 2, 3; SET citus.shard_replication_factor to 2; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -534,20 +534,20 @@ CREATE TABLE t3 AS SELECT * FROM t2; SELECT create_distributed_table('t3', 'a'); NOTICE: Copying data from local table... 
create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; b1 | b2 -----+---- +--------------------------------------------------------------------- 3 | 3 (1 row) -- prevent update of one replica of one shard SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t3_201013").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -559,7 +559,7 @@ DETAIL: server closed the connection unexpectedly -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; b1 | b2 -----+---- +--------------------------------------------------------------------- 3 | 3 (1 row) @@ -567,13 +567,13 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO BEGIN; SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; b1 | b2 -----+---- +--------------------------------------------------------------------- 3 | 3 (1 row) SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; b1 | b2 -----+---- +--------------------------------------------------------------------- 3 | 3 (1 row) @@ -581,7 +581,7 @@ UPDATE t2 SET b = 2 WHERE b = 1; -- verify update is performed on t2 SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; b1 | b2 -----+---- +--------------------------------------------------------------------- 0 | 6 (1 row) @@ -595,13 +595,13 @@ END; -- verify everything is rolled back SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; b1 | b2 -----+---- +--------------------------------------------------------------------- 3 | 3 (1 row) SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; b1 | b2 -----+---- +--------------------------------------------------------------------- 3 | 3 (1 row) @@ -613,7 +613,7 @@ DETAIL: server closed the connection unexpectedly -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; b1 | b2 -----+---- +--------------------------------------------------------------------- 3 | 3 (1 row) @@ -621,7 +621,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO SET citus.multi_shard_commit_protocol TO '1PC'; SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; b1 | b2 -----+---- +--------------------------------------------------------------------- 3 | 3 (1 row) @@ -633,7 +633,7 @@ DETAIL: server closed the connection unexpectedly -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; b1 | b2 -----+---- +--------------------------------------------------------------------- 3 | 3 (1 row) @@ -641,13 +641,13 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO BEGIN; SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; b1 | b2 -----+---- +--------------------------------------------------------------------- 3 | 3 (1 row) SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; b1 | b2 -----+---- +--------------------------------------------------------------------- 3 | 3 (1 row) @@ -655,7 +655,7 @@ UPDATE t2 SET b = 2 WHERE b = 1; -- verify update is performed on t2 
SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; b1 | b2 -----+---- +--------------------------------------------------------------------- 0 | 6 (1 row) @@ -669,19 +669,19 @@ END; -- verify everything is rolled back SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; b1 | b2 -----+---- +--------------------------------------------------------------------- 3 | 3 (1 row) SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; b1 | b2 -----+---- +--------------------------------------------------------------------- 3 | 3 (1 row) SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/failure_mx_metadata_sync.out b/src/test/regress/expected/failure_mx_metadata_sync.out index 3e781aaa1..54046c3c1 100644 --- a/src/test/regress/expected/failure_mx_metadata_sync.out +++ b/src/test/regress/expected/failure_mx_metadata_sync.out @@ -10,14 +10,14 @@ SET citus.replication_model TO 'streaming'; SELECT pg_backend_pid() as pid \gset SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) CREATE TABLE t1 (id int PRIMARY KEY); SELECT create_distributed_table('t1', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -25,14 +25,14 @@ INSERT INTO t1 SELECT x FROM generate_series(1,100) AS f(x); -- Initial metadata status SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; hasmetadata -------------- +--------------------------------------------------------------------- f (1 row) -- Failure to set groupid in the worker SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET groupid").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -40,7 +40,7 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET groupid").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -52,7 +52,7 @@ CONTEXT: while executing command on localhost:xxxxx -- Failure to drop all tables in pg_dist_partition SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_table").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -60,7 +60,7 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_table").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -72,7 +72,7 @@ CONTEXT: while executing command on localhost:xxxxx -- Failure to truncate pg_dist_node in the worker SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -80,7 +80,7 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request SELECT 
citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -92,7 +92,7 @@ CONTEXT: while executing command on localhost:xxxxx -- Failure to populate pg_dist_node in the worker SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -100,7 +100,7 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -112,26 +112,26 @@ CONTEXT: while executing command on localhost:xxxxx -- Verify that coordinator knows worker does not have valid metadata SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; hasmetadata -------------- +--------------------------------------------------------------------- f (1 row) -- Verify we can sync metadata after unsuccessful attempts SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; hasmetadata -------------- +--------------------------------------------------------------------- t (1 row) @@ -139,7 +139,7 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; CREATE TABLE t2 (id int PRIMARY KEY); SELECT citus.mitmproxy('conn.onParse(query="^INSERT INTO pg_dist_placement").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -150,7 +150,7 @@ ERROR: server closed the connection unexpectedly CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.onParse(query="^INSERT INTO pg_dist_shard").cancel(' || :pid || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -161,7 +161,7 @@ SELECT count(*) > 0 AS is_table_distributed FROM pg_dist_partition WHERE logicalrelid='t2'::regclass; is_table_distributed ----------------------- +--------------------------------------------------------------------- f (1 row) diff --git a/src/test/regress/expected/failure_ref_tables.out b/src/test/regress/expected/failure_ref_tables.out index 2bb66d36c..3f9d39ee4 100644 --- a/src/test/regress/expected/failure_ref_tables.out +++ b/src/test/regress/expected/failure_ref_tables.out @@ -1,34 +1,34 @@ SET citus.next_shard_id TO 100500; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) CREATE TABLE ref_table (key int, value int); SELECT create_reference_table('ref_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) \copy ref_table FROM stdin delimiter ','; SELECT citus.clear_network_traffic(); clear_network_traffic ------------------------ +--------------------------------------------------------------------- (1 row) SELECT COUNT(*) FROM ref_table; count -------- 
+--------------------------------------------------------------------- 4 (1 row) -- verify behavior of single INSERT; should fail to execute SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -39,14 +39,14 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT COUNT(*) FROM ref_table WHERE key=5; count -------- +--------------------------------------------------------------------- 0 (1 row) -- verify behavior of UPDATE ... RETURNING; should not execute SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -57,14 +57,14 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT COUNT(*) FROM ref_table WHERE key=7; count -------- +--------------------------------------------------------------------- 0 (1 row) -- verify fix to #2214; should raise error and fail to execute SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -78,21 +78,21 @@ DETAIL: server closed the connection unexpectedly COMMIT; SELECT COUNT(*) FROM ref_table WHERE key=value; count -------- +--------------------------------------------------------------------- 0 (1 row) -- all shards should still be healthy SELECT COUNT(*) FROM pg_dist_shard_placement WHERE shardstate = 3; count -------- +--------------------------------------------------------------------- 0 (1 row) -- ==== Clean up, we're done here ==== SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/failure_savepoints.out b/src/test/regress/expected/failure_savepoints.out index 58f4ceca0..c43dc832c 100644 --- a/src/test/regress/expected/failure_savepoints.out +++ b/src/test/regress/expected/failure_savepoints.out @@ -5,7 +5,7 @@ -- as invalid and continue with a WARNING. 
SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -19,7 +19,7 @@ CREATE TABLE artists ( ); SELECT create_distributed_table('artists', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -31,7 +31,7 @@ INSERT INTO artists VALUES (4, 'William Kurelek'); -- simply fail at SAVEPOINT SELECT citus.mitmproxy('conn.onQuery(query="^SAVEPOINT").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -55,14 +55,14 @@ ERROR: current transaction is aborted, commands ignored until end of transactio COMMIT; SELECT * FROM artists WHERE id IN (4, 5); id | name -----+----------------- +--------------------------------------------------------------------- 4 | William Kurelek (1 row) -- fail at RELEASE SELECT citus.mitmproxy('conn.onQuery(query="^RELEASE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -87,14 +87,14 @@ CONTEXT: while executing command on localhost:xxxxx ROLLBACK; SELECT * FROM artists WHERE id IN (4, 5); id | name -----+----------------- +--------------------------------------------------------------------- 4 | William Kurelek (1 row) -- fail at ROLLBACK SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -111,14 +111,14 @@ COMMIT; ERROR: could not make changes to shard xxxxx on any node SELECT * FROM artists WHERE id IN (4, 5); id | name -----+----------------- +--------------------------------------------------------------------- 4 | William Kurelek (1 row) -- fail at second RELEASE SELECT citus.mitmproxy('conn.onQuery(query="^RELEASE").after(1).kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -143,14 +143,14 @@ CONTEXT: while executing command on localhost:xxxxx COMMIT; SELECT * FROM artists WHERE id IN (4, 5); id | name -----+----------------- +--------------------------------------------------------------------- 4 | William Kurelek (1 row) -- fail at second ROLLBACK SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").after(1).kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -169,13 +169,13 @@ COMMIT; ERROR: could not make changes to shard xxxxx on any node SELECT * FROM artists WHERE id IN (4, 5); id | name -----+----------------- +--------------------------------------------------------------------- 4 | William Kurelek (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^RELEASE").after(1).kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -192,12 +192,12 @@ RELEASE SAVEPOINT s2; COMMIT; SELECT * FROM artists WHERE id=7; id | name -----+------ +--------------------------------------------------------------------- (0 rows) SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -220,7 +220,7 @@ COMMIT; ERROR: could not make changes to shard xxxxx on any node SELECT * FROM artists WHERE id=6; id | name -----+------ +--------------------------------------------------------------------- (0 rows) -- replication factor > 1 @@ -233,14 +233,14 @@ SET 
citus.shard_count = 1; SET citus.shard_replication_factor = 2; -- single shard, on both workers SELECT create_distributed_table('researchers', 'lab_id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) -- simply fail at SAVEPOINT SELECT citus.mitmproxy('conn.onQuery(query="^SAVEPOINT").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -262,7 +262,7 @@ COMMIT; -- should see correct results from healthy placement and one bad placement SELECT * FROM researchers WHERE lab_id = 4; id | lab_id | name -----+--------+------ +--------------------------------------------------------------------- (0 rows) UPDATE pg_dist_shard_placement SET shardstate = 1 @@ -270,14 +270,14 @@ WHERE shardstate = 3 AND shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'researchers'::regclass ) RETURNING placementid; placementid -------------- +--------------------------------------------------------------------- (0 rows) TRUNCATE researchers; -- fail at rollback SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -294,7 +294,7 @@ ERROR: failure on connection marked as essential: localhost:xxxxx -- should see correct results from healthy placement and one bad placement SELECT * FROM researchers WHERE lab_id = 4; id | lab_id | name -----+--------+------ +--------------------------------------------------------------------- (0 rows) UPDATE pg_dist_shard_placement SET shardstate = 1 @@ -302,14 +302,14 @@ WHERE shardstate = 3 AND shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'researchers'::regclass ) RETURNING placementid; placementid -------------- +--------------------------------------------------------------------- (0 rows) TRUNCATE researchers; -- fail at release SELECT citus.mitmproxy('conn.onQuery(query="^RELEASE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -330,7 +330,7 @@ COMMIT; -- should see correct results from healthy placement and one bad placement SELECT * FROM researchers WHERE lab_id = 4; id | lab_id | name -----+--------+------ +--------------------------------------------------------------------- (0 rows) UPDATE pg_dist_shard_placement SET shardstate = 1 @@ -338,14 +338,14 @@ WHERE shardstate = 3 AND shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'researchers'::regclass ) RETURNING placementid; placementid -------------- +--------------------------------------------------------------------- (0 rows) TRUNCATE researchers; -- clean up SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/failure_setup.out b/src/test/regress/expected/failure_setup.out index 195a738b1..b9e2a708d 100644 --- a/src/test/regress/expected/failure_setup.out +++ b/src/test/regress/expected/failure_setup.out @@ -1,19 +1,19 @@ SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) -- add the workers SELECT master_add_node('localhost', :worker_1_port); master_add_node ------------------ +--------------------------------------------------------------------- 1 (1 row) SELECT master_add_node('localhost', :worker_2_proxy_port); 
-- an mitmproxy which forwards to the second worker master_add_node ------------------ +--------------------------------------------------------------------- 2 (1 row) diff --git a/src/test/regress/expected/failure_single_mod.out b/src/test/regress/expected/failure_single_mod.out index cbdb49c2b..254dad7fa 100644 --- a/src/test/regress/expected/failure_single_mod.out +++ b/src/test/regress/expected/failure_single_mod.out @@ -1,12 +1,12 @@ SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT citus.clear_network_traffic(); clear_network_traffic ------------------------ +--------------------------------------------------------------------- (1 row) @@ -15,14 +15,14 @@ SET citus.shard_replication_factor = 2; CREATE TABLE mod_test (key int, value text); SELECT create_distributed_table('mod_test', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) -- verify behavior of single INSERT; should mark shard as failed SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -33,7 +33,7 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT COUNT(*) FROM mod_test WHERE key=2; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -43,7 +43,7 @@ WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'mod_test'::regclass ) AND shardstate = 3 RETURNING placementid; placementid -------------- +--------------------------------------------------------------------- 125 (1 row) @@ -51,14 +51,14 @@ TRUNCATE mod_test; -- verify behavior of UPDATE ... RETURNING; should mark as failed SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) INSERT INTO mod_test VALUES (2, 6); SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -68,13 +68,13 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
key ------ +--------------------------------------------------------------------- 2 (1 row) SELECT COUNT(*) FROM mod_test WHERE value='ok'; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -84,7 +84,7 @@ WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'mod_test'::regclass ) AND shardstate = 3 RETURNING placementid; placementid -------------- +--------------------------------------------------------------------- 125 (1 row) @@ -93,7 +93,7 @@ TRUNCATE mod_test; -- should succeed but mark a placement as failed SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -109,7 +109,7 @@ DETAIL: server closed the connection unexpectedly COMMIT; SELECT COUNT(*) FROM mod_test WHERE key=2; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -119,7 +119,7 @@ WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'mod_test'::regclass ) AND shardstate = 3 RETURNING placementid; placementid -------------- +--------------------------------------------------------------------- 125 (1 row) diff --git a/src/test/regress/expected/failure_single_select.out b/src/test/regress/expected/failure_single_select.out index 3a8553338..c0519d91e 100644 --- a/src/test/regress/expected/failure_single_select.out +++ b/src/test/regress/expected/failure_single_select.out @@ -1,12 +1,12 @@ SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT citus.clear_network_traffic(); clear_network_traffic ------------------------ +--------------------------------------------------------------------- (1 row) @@ -15,7 +15,7 @@ SET citus.shard_replication_factor = 2; CREATE TABLE select_test (key int, value text); SELECT create_distributed_table('select_test', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -23,7 +23,7 @@ SELECT create_distributed_table('select_test', 'key'); INSERT INTO select_test VALUES (3, 'test data'); SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -33,7 +33,7 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. key | value ------+----------- +--------------------------------------------------------------------- 3 | test data (1 row) @@ -43,14 +43,14 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. key | value ------+----------- +--------------------------------------------------------------------- 3 | test data (1 row) -- kill after first SELECT; txn should work (though placement marked bad) SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -62,7 +62,7 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
key | value ------+----------- +--------------------------------------------------------------------- 3 | test data 3 | more data (2 rows) @@ -74,7 +74,7 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. key | value ------+---------------- +--------------------------------------------------------------------- 3 | test data 3 | more data 3 | even more data @@ -92,7 +92,7 @@ TRUNCATE select_test; INSERT INTO select_test VALUES (3, 'test data'); SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -103,7 +103,7 @@ ERROR: canceling statement due to user request -- cancel after first SELECT; txn should fail and nothing should be marked as invalid SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -118,7 +118,7 @@ WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'select_test'::regclass ); shardstate ------------- +--------------------------------------------------------------------- 1 (1 row) @@ -127,7 +127,7 @@ TRUNCATE select_test; -- error after second SELECT; txn should fail SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -135,7 +135,7 @@ BEGIN; INSERT INTO select_test VALUES (3, 'more data'); SELECT * FROM select_test WHERE key = 3; key | value ------+----------- +--------------------------------------------------------------------- 3 | more data (1 row) @@ -146,7 +146,7 @@ COMMIT; -- error after second SELECT; txn should work (though placement marked bad) SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).reset()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -154,7 +154,7 @@ BEGIN; INSERT INTO select_test VALUES (3, 'more data'); SELECT * FROM select_test WHERE key = 3; key | value ------+----------- +--------------------------------------------------------------------- 3 | more data (1 row) @@ -165,7 +165,7 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
key | value ------+---------------- +--------------------------------------------------------------------- 3 | more data 3 | even more data (2 rows) @@ -173,13 +173,13 @@ DETAIL: server closed the connection unexpectedly COMMIT; SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(2).kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -196,7 +196,7 @@ SET citus.shard_replication_factor = 1; CREATE TABLE select_test (key int, value text); SELECT create_distributed_table('select_test', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -204,13 +204,13 @@ SET citus.max_cached_conns_per_worker TO 1; -- allow connection to be cached INSERT INTO select_test VALUES (1, 'test data'); SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM select_test WHERE key = 1; key | value ------+----------- +--------------------------------------------------------------------- 1 | test data (1 row) @@ -222,13 +222,13 @@ DETAIL: server closed the connection unexpectedly -- now the same test with query cancellation SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM select_test WHERE key = 1; key | value ------+----------- +--------------------------------------------------------------------- 1 | test data (1 row) diff --git a/src/test/regress/expected/failure_test_helpers.out b/src/test/regress/expected/failure_test_helpers.out index 5362412a1..e25562a67 100644 --- a/src/test/regress/expected/failure_test_helpers.out +++ b/src/test/regress/expected/failure_test_helpers.out @@ -6,7 +6,7 @@ ALTER SYSTEM SET citus.recover_2pc_interval TO -1; ALTER SYSTEM set citus.enable_statistics_collection TO false; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/failure_truncate.out b/src/test/regress/expected/failure_truncate.out index 6d79600cc..c1b55392b 100644 --- a/src/test/regress/expected/failure_truncate.out +++ b/src/test/regress/expected/failure_truncate.out @@ -12,7 +12,7 @@ SET citus.max_cached_conns_per_worker TO 0; SET citus.force_max_query_parallelization TO on; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -23,7 +23,7 @@ SET citus.shard_replication_factor = 1; CREATE TABLE test_table (key int, value int); SELECT create_distributed_table('test_table', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -39,7 +39,7 @@ CREATE VIEW unhealthy_shard_count AS -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -50,19 +50,19 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) @@ -70,7 +70,7 @@ SELECT count(*) FROM test_table; -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -78,26 +78,26 @@ TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -108,26 +108,26 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -135,26 +135,26 @@ TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) -- kill as soon as the coordinator sends TRUNCATE TABLE command SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test_table").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -165,26 +165,26 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) -- cancel as soon as the coordinator sends TRUNCATE TABLE command SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test_table").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -192,19 +192,19 @@ TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) @@ -215,26 +215,26 @@ SELECT count(*) FROM test_table; -- Note: This is the result of using 1pc and there is no way to recover from it SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) TRUNCATE test_table; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 8 (1 row) @@ -246,26 +246,26 @@ INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); -- should have been applied without any issues since cancel is ignored SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) TRUNCATE test_table; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -278,7 +278,7 @@ SET client_min_messages TO WARNING; -- should have been committed both on the distributed table and the placements SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -297,19 +297,19 @@ WARNING: could not commit transaction for shard xxxxx on any active node WARNING: could not commit transaction for shard xxxxx on any active node SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- 
+--------------------------------------------------------------------- 0 (1 row) @@ -321,26 +321,26 @@ INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); -- case Citus messes up this behaviour SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) TRUNCATE test_table; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -350,7 +350,7 @@ CREATE TABLE reference_table(i int UNIQUE); INSERT INTO reference_table SELECT x FROM generate_series(1,20) as f(x); SELECT create_reference_table('reference_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -359,7 +359,7 @@ ALTER TABLE test_table ADD CONSTRAINT foreign_key FOREIGN KEY (value) REFERENCES -- still cascaded to referencing table or failed successfuly SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -368,25 +368,25 @@ ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) SELECT count(*) FROM reference_table; count -------- +--------------------------------------------------------------------- 20 (1 row) @@ -394,7 +394,7 @@ SELECT count(*) FROM reference_table; -- still cascaded to referencing table or failed successfuly SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -402,25 +402,25 @@ TRUNCATE reference_table CASCADE; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) SELECT count(*) FROM reference_table; count -------- +--------------------------------------------------------------------- 20 (1 row) @@ -428,7 +428,7 @@ SELECT count(*) FROM reference_table; -- rollbacked properly SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE").after(2).kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -439,25 +439,25 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) SELECT count(*) FROM reference_table; count -------- +--------------------------------------------------------------------- 20 (1 row) @@ -465,7 +465,7 @@ SELECT count(*) FROM reference_table; -- if the command still cascaded to referencing table or failed successfuly SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE").after(2).cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -473,25 +473,25 @@ TRUNCATE reference_table CASCADE; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) SELECT count(*) FROM reference_table; count -------- +--------------------------------------------------------------------- 20 (1 row) @@ -500,7 +500,7 @@ SELECT count(*) FROM reference_table; -- failed successfuly SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -509,25 +509,25 @@ ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 1 (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) @@ -536,7 +536,7 @@ SELECT count(*) FROM test_table; -- failed successfuly SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -544,25 +544,25 @@ TRUNCATE reference_table CASCADE; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) @@ -572,7 +572,7 @@ SET citus.multi_shard_commit_protocol TO '2pc'; -- response 
we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -583,19 +583,19 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) @@ -603,7 +603,7 @@ SELECT count(*) FROM test_table; -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -611,26 +611,26 @@ TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -641,26 +641,26 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -668,26 +668,26 @@ TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) -- kill as soon as the coordinator sends TRUNCATE TABLE command SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE truncate_failure.test_table").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -698,26 +698,26 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) -- cancel as soon as the coordinator sends TRUNCATE TABLE command SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE truncate_failure.test_table").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -725,26 +725,26 @@ TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) -- killing on PREPARE should be fine, everything should be rollbacked SELECT citus.mitmproxy('conn.onCommandComplete(command="^PREPARE TRANSACTION").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -753,13 +753,13 @@ ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -767,20 +767,20 @@ SELECT * FROM unhealthy_shard_count; -- see that the command is rollbacked SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 2 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) -- cancelling on PREPARE should be fine, everything should be rollbacked SELECT citus.mitmproxy('conn.onCommandComplete(command="^PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -788,13 +788,13 @@ TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -802,13 +802,13 @@ SELECT * FROM unhealthy_shard_count; -- see that the command is rollbacked SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) @@ -816,27 +816,27 @@ SELECT count(*) FROM test_table; -- and all the workers committed SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT PREPARED").kill()'); mitmproxy ------------ 
+--------------------------------------------------------------------- (1 row) TRUNCATE test_table; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) -- we shouldn't have any prepared transactions in the workers SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -844,14 +844,14 @@ INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); -- kill as soon as the coordinator sends COMMIT SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) TRUNCATE test_table; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -861,13 +861,13 @@ SELECT citus.mitmproxy('conn.allow()'); -- we expect to see 2 recovered prepared transactions. SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 2 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -877,7 +877,7 @@ INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); -- so the command can be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -886,13 +886,13 @@ TRUNCATE test_table; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) @@ -901,7 +901,7 @@ SELECT count(*) FROM test_table; -- should have been rollbacked both on the distributed table and the placements SELECT citus.mitmproxy('conn.onCommandComplete(command="^ROLLBACK").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -910,19 +910,19 @@ TRUNCATE test_table; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) @@ -935,7 +935,7 @@ DROP TABLE test_table CASCADE; CREATE TABLE test_table (key int, value int); SELECT create_distributed_table('test_table', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -951,7 +951,7 @@ CREATE VIEW unhealthy_shard_count AS -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -962,19 +962,19 @@ DETAIL: server closed the 
connection unexpectedly before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) @@ -982,7 +982,7 @@ SELECT count(*) FROM test_table; -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -990,26 +990,26 @@ TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -1020,26 +1020,26 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -1047,26 +1047,26 @@ TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) -- kill as soon as the coordinator sends TRUNCATE TABLE command SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test_table").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -1077,26 +1077,26 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) -- cancel as soon as the coordinator sends TRUNCATE TABLE command SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test_table").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -1104,26 +1104,26 @@ TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) -- killing on PREPARE should be fine, everything should be rollbacked SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -1132,13 +1132,13 @@ ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -1146,13 +1146,13 @@ SELECT * FROM unhealthy_shard_count; -- see that the command is rollbacked SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 4 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) @@ -1160,33 +1160,33 @@ SELECT count(*) FROM test_table; -- and all the workers committed SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT PREPARED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) TRUNCATE test_table; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) -- we shouldn't have any prepared transactions in the workers SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -1194,20 +1194,20 @@ INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); -- kill as soon as the coordinator sends COMMIT SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) TRUNCATE test_table; SELECT 
citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -1218,13 +1218,13 @@ SELECT * FROM unhealthy_shard_count; -- transactions. SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 4 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -1234,7 +1234,7 @@ INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); -- so the command can be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -1243,19 +1243,19 @@ TRUNCATE test_table; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) @@ -1264,7 +1264,7 @@ SELECT count(*) FROM test_table; -- should have been rollbacked both on the distributed table and the placements SELECT citus.mitmproxy('conn.onCommandComplete(command="^ROLLBACK").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -1273,25 +1273,25 @@ TRUNCATE test_table; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) SELECT * FROM unhealthy_shard_count; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 20 (1 row) diff --git a/src/test/regress/expected/failure_vacuum.out b/src/test/regress/expected/failure_vacuum.out index e7961d4e2..cb32dab8c 100644 --- a/src/test/regress/expected/failure_vacuum.out +++ b/src/test/regress/expected/failure_vacuum.out @@ -4,7 +4,7 @@ SET citus.next_shard_id TO 12000000; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -14,19 +14,19 @@ SET citus.multi_shard_commit_protocol TO '1pc'; CREATE TABLE vacuum_test (key int, value int); SELECT create_distributed_table('vacuum_test', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT citus.clear_network_traffic(); clear_network_traffic ------------------------ +--------------------------------------------------------------------- (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -37,7 +37,7 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -48,7 +48,7 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -58,7 +58,7 @@ ANALYZE vacuum_test; SELECT shardid, shardstate FROM pg_dist_shard_placement where shardstate != 1 AND shardid in ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'vacuum_test'::regclass); shardid | shardstate -----------+------------ +--------------------------------------------------------------------- 12000000 | 3 (1 row) @@ -69,7 +69,7 @@ WHERE shardid IN ( -- the same tests with cancel SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -77,7 +77,7 @@ VACUUM vacuum_test; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -86,27 +86,27 @@ ERROR: canceling statement due to user request -- cancel during COMMIT should be ignored SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) ANALYZE vacuum_test; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) CREATE TABLE other_vacuum_test (key int, value int); SELECT create_distributed_table('other_vacuum_test', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -117,7 +117,7 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -126,7 +126,7 @@ ERROR: canceling statement due to user request -- ==== Clean up, we're done here ==== SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/failure_vacuum_1.out b/src/test/regress/expected/failure_vacuum_1.out index 5b153efa8..8a51e8b51 100644 --- a/src/test/regress/expected/failure_vacuum_1.out +++ b/src/test/regress/expected/failure_vacuum_1.out @@ -4,7 +4,7 @@ SET citus.next_shard_id TO 12000000; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -14,19 +14,19 @@ SET citus.multi_shard_commit_protocol TO '1pc'; CREATE TABLE vacuum_test (key int, value int); SELECT create_distributed_table('vacuum_test', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT citus.clear_network_traffic(); clear_network_traffic ------------------------ +--------------------------------------------------------------------- (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -37,7 +37,7 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -48,7 +48,7 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -58,7 +58,7 @@ ANALYZE vacuum_test; SELECT shardid, shardstate FROM pg_dist_shard_placement where shardstate != 1 AND shardid in ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'vacuum_test'::regclass); shardid | shardstate -----------+------------ +--------------------------------------------------------------------- 12000000 | 3 (1 row) @@ -69,7 +69,7 @@ WHERE shardid IN ( -- the same tests with cancel SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -77,7 +77,7 @@ VACUUM vacuum_test; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -86,27 +86,27 @@ ERROR: canceling statement due to user request -- cancel during COMMIT should be ignored SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) ANALYZE vacuum_test; SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) CREATE TABLE other_vacuum_test (key int, value int); SELECT create_distributed_table('other_vacuum_test', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").kill()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -116,7 +116,7 @@ LINE 1: VACUUM vacuum_test, other_vacuum_test; ^ SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").cancel(' || pg_backend_pid() || ')'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) @@ -127,7 +127,7 @@ LINE 1: VACUUM vacuum_test, other_vacuum_test; -- ==== Clean up, we're done here ==== SELECT citus.mitmproxy('conn.allow()'); mitmproxy ------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/fast_path_router_modify.out b/src/test/regress/expected/fast_path_router_modify.out index 964c49629..f46f63d85 100644 --- a/src/test/regress/expected/fast_path_router_modify.out +++ b/src/test/regress/expected/fast_path_router_modify.out @@ -10,7 +10,7 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE modify_fast_path(key int, value_1 int, value_2 text); SELECT create_distributed_table('modify_fast_path', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -18,14 +18,14 @@ SET citus.shard_replication_factor TO 2; CREATE TABLE modify_fast_path_replication_2(key int, value_1 int, value_2 text); SELECT create_distributed_table('modify_fast_path_replication_2', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE modify_fast_path_reference(key int, value_1 int, value_2 text); SELECT create_reference_table('modify_fast_path_reference'); 
create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -111,7 +111,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 key | value_1 | value_2 ------+---------+--------- +--------------------------------------------------------------------- (0 rows) -- modifying ctes are not supported via fast-path @@ -128,7 +128,7 @@ DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT key, value_1, DEBUG: Creating router plan DEBUG: Plan is router executable key | value_1 | value_2 ------+---------+--------- +--------------------------------------------------------------------- (0 rows) -- for update/share is supported via fast-path when replication factor = 1 or reference table @@ -138,7 +138,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 key | value_1 | value_2 ------+---------+--------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM modify_fast_path WHERE key = 1 FOR SHARE; @@ -147,7 +147,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 key | value_1 | value_2 ------+---------+--------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM modify_fast_path_reference WHERE key = 1 FOR UPDATE; @@ -155,7 +155,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable key | value_1 | value_2 ------+---------+--------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM modify_fast_path_reference WHERE key = 1 FOR SHARE; @@ -163,7 +163,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable key | value_1 | value_2 ------+---------+--------- +--------------------------------------------------------------------- (0 rows) -- for update/share is not supported via fast-path wen replication factor > 1 @@ -253,7 +253,7 @@ DETAIL: distribution column value: 1 CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement modify_fast_path_plpsql -------------------------- +--------------------------------------------------------------------- (1 row) @@ -269,7 +269,7 @@ DETAIL: distribution column value: 2 CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement modify_fast_path_plpsql -------------------------- +--------------------------------------------------------------------- (1 row) @@ -285,7 +285,7 @@ DETAIL: distribution column value: 3 CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement modify_fast_path_plpsql -------------------------- +--------------------------------------------------------------------- (1 row) @@ -301,7 +301,7 @@ DETAIL: distribution column value: 4 CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement modify_fast_path_plpsql -------------------------- +--------------------------------------------------------------------- (1 row) @@ -317,7 
+317,7 @@ DETAIL: distribution column value: 5 CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement modify_fast_path_plpsql -------------------------- +--------------------------------------------------------------------- (1 row) @@ -336,7 +336,7 @@ DETAIL: distribution column value: 6 CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement modify_fast_path_plpsql -------------------------- +--------------------------------------------------------------------- (1 row) @@ -352,7 +352,7 @@ DETAIL: distribution column value: 6 CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement modify_fast_path_plpsql -------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/foreign_key_restriction_enforcement.out b/src/test/regress/expected/foreign_key_restriction_enforcement.out index 2769a83a9..fa8af16a7 100644 --- a/src/test/regress/expected/foreign_key_restriction_enforcement.out +++ b/src/test/regress/expected/foreign_key_restriction_enforcement.out @@ -11,28 +11,28 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE transitive_reference_table(id int PRIMARY KEY); SELECT create_reference_table('transitive_reference_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE reference_table(id int PRIMARY KEY, value_1 int); SELECT create_reference_table('reference_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE on_update_fkey_table(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('on_update_fkey_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE unrelated_dist_table(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('unrelated_dist_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -48,13 +48,13 @@ SET client_min_messages TO DEBUG1; BEGIN; SELECT count(*) FROM reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) SELECT count(*) FROM on_update_fkey_table; count -------- +--------------------------------------------------------------------- 1001 (1 row) @@ -62,13 +62,13 @@ ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) SELECT count(*) FROM on_update_fkey_table; count -------- +--------------------------------------------------------------------- 1001 (1 row) @@ -77,31 +77,31 @@ ROLLBACK; BEGIN; SELECT count(*) FROM reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 15; count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 16; count -------- 
+--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 17; count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 18; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -109,31 +109,31 @@ ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 15; count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 16; count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 17; count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 18; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -142,7 +142,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -151,7 +151,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -161,7 +161,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -173,7 +173,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -186,7 +186,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -198,7 +198,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -211,7 +211,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -222,7 +222,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -235,7 +235,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -246,7 +246,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -259,13 +259,13 @@ ROLLBACK; BEGIN; SELECT count(*) FROM unrelated_dist_table; count -------- +--------------------------------------------------------------------- 1001 (1 row) SELECT count(*) FROM reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -277,13 +277,13 @@ ROLLBACK; BEGIN; SELECT count(*) FROM unrelated_dist_table; count -------- +--------------------------------------------------------------------- 1001 (1 row) SELECT count(*) FROM transitive_reference_table; count -------- 
+--------------------------------------------------------------------- 101 (1 row) @@ -297,13 +297,13 @@ ROLLBACK; BEGIN; SELECT count(*) FROM unrelated_dist_table; count -------- +--------------------------------------------------------------------- 1001 (1 row) SELECT count(*) FROM reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -315,13 +315,13 @@ ROLLBACK; BEGIN; SELECT count(*) FROM unrelated_dist_table; count -------- +--------------------------------------------------------------------- 1001 (1 row) SELECT count(*) FROM transitive_reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -334,7 +334,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -343,7 +343,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -356,13 +356,13 @@ DEBUG: switching to sequential query execution mode DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101; count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -373,13 +373,13 @@ DEBUG: switching to sequential query execution mode DETAIL: Reference relation "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; count -------- +--------------------------------------------------------------------- 10 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -391,25 +391,25 @@ DEBUG: switching to sequential query execution mode DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. 
Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 99; count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 199; count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 299; count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 399; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -420,25 +420,25 @@ DEBUG: switching to sequential query execution mode DETAIL: Reference relation "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 99; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 199; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 299; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 399; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -550,7 +550,7 @@ DEBUG: switching to sequential query execution mode DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode SELECT count(*) FROM on_update_fkey_table; count -------- +--------------------------------------------------------------------- 1001 (1 row) @@ -561,7 +561,7 @@ DEBUG: switching to sequential query execution mode DETAIL: Reference relation "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. 
Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode SELECT count(*) FROM on_update_fkey_table; count -------- +--------------------------------------------------------------------- 1001 (1 row) @@ -571,7 +571,7 @@ BEGIN; ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE int; SELECT count(*) FROM on_update_fkey_table; count -------- +--------------------------------------------------------------------- 1001 (1 row) @@ -580,7 +580,7 @@ BEGIN; ALTER TABLE transitive_reference_table ALTER COLUMN id SET DATA TYPE int; SELECT count(*) FROM on_update_fkey_table; count -------- +--------------------------------------------------------------------- 1001 (1 row) @@ -700,20 +700,20 @@ DEBUG: validating foreign key constraint "fkey" TRUNCATE on_update_fkey_table; DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially ROLLBACK; ------ +--------------------------------------------------------------------- --- Now, start testing the other way araound ------ +--------------------------------------------------------------------- -- case 4.1: SELECT to a dist table is follwed by a SELECT to a reference table BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; count -------- +--------------------------------------------------------------------- 10 (1 row) SELECT count(*) FROM reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -721,13 +721,13 @@ ROLLBACK; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; count -------- +--------------------------------------------------------------------- 10 (1 row) SELECT count(*) FROM transitive_reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -736,7 +736,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -748,7 +748,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -761,7 +761,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -772,7 +772,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -784,7 +784,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -798,7 +798,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -815,7 +815,7 @@ SET client_min_messages to LOG; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -826,7 +826,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -839,7 +839,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM 
on_update_fkey_table WHERE id = 9; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -849,7 +849,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE id = 9; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -864,7 +864,7 @@ BEGIN; UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15; SELECT count(*) FROM reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -873,7 +873,7 @@ BEGIN; UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15; SELECT count(*) FROM transitive_reference_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -1034,14 +1034,14 @@ BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1055,14 +1055,14 @@ BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id)); SELECT create_distributed_table('tt4', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1084,21 +1084,21 @@ BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id)); SELECT create_distributed_table('tt4', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES tt4(id)); SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1113,14 +1113,14 @@ BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1140,14 +1140,14 @@ BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); SELECT 
create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1163,14 +1163,14 @@ BEGIN; CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -1190,14 +1190,14 @@ BEGIN; CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -1212,7 +1212,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM on_update_fkey_table; count -------- +--------------------------------------------------------------------- 1001 (1 row) @@ -1244,7 +1244,7 @@ BEGIN; SELECT create_reference_table('test_table_1'); NOTICE: Copying data from local table... create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -1268,7 +1268,7 @@ BEGIN; SELECT create_reference_table('test_table_1'); NOTICE: Copying data from local table... create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -1288,13 +1288,13 @@ BEGIN; CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1303,13 +1303,13 @@ BEGIN; ALTER TABLE test_table_2 ADD CONSTRAINT check_val CHECK (id > 0); SELECT count(*) FROM test_table_2; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table_1; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -1328,7 +1328,7 @@ DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "reference_table_p DEBUG: building index "reference_table_pkey" on table "reference_table" serially SELECT create_reference_table('reference_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -1337,7 +1337,7 @@ DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "distributed_table DEBUG: building index "distributed_table_pkey" on table "distributed_table" serially SELECT create_distributed_table('distributed_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1364,7 +1364,7 @@ DEBUG: Plan 170 query after replacing subqueries and CTEs: DELETE FROM test_fke 
DEBUG: switching to sequential query execution mode DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode id | value_1 | id -----+---------+---- +--------------------------------------------------------------------- (0 rows) -- load some more data for one more test with real-time selects @@ -1385,7 +1385,7 @@ DEBUG: Plan 174 query after replacing subqueries and CTEs: SELECT count(*) AS c DEBUG: switching to sequential query execution mode DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -1418,7 +1418,7 @@ DEBUG: generating subplan 181_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx. DEBUG: generating subplan 181_2 for CTE t2: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id DEBUG: Plan 181 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('181_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1, (SELECT intermediate_result.id FROM read_intermediate_result('181_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t2 WHERE ((distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) AND (distributed_table.value_1 OPERATOR(pg_catalog.=) t2.id)) count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -1430,7 +1430,7 @@ BEGIN; DEBUG: generating subplan 184_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id DEBUG: Plan 184 query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id id ----- +--------------------------------------------------------------------- (0 rows) ROLLBACK; diff --git a/src/test/regress/expected/foreign_key_to_reference_table.out b/src/test/regress/expected/foreign_key_to_reference_table.out index eefe93e32..02f32e6d8 100644 --- a/src/test/regress/expected/foreign_key_to_reference_table.out +++ b/src/test/regress/expected/foreign_key_to_reference_table.out @@ -29,7 +29,7 @@ SELECT CREATE TABLE referenced_table(id int UNIQUE, test_column int); SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -38,7 +38,7 @@ SELECT create_reference_table('referenced_table'); CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -54,7 +54,7 @@ DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) 
@@ -70,7 +70,7 @@ DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -89,14 +89,14 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int, test_column int, PRIMARY KEY(id, test_column)); SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -112,7 +112,7 @@ DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -131,21 +131,21 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int, test_column int, PRIMARY KEY(id)); SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON DELETE SET NULL; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; name | relid | refd_relid -------------------+------------------------------------------------+----------------------------------------------- +--------------------------------------------------------------------- fkey_ref_7000043 | fkey_reference_table.referencing_table_7000043 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000044 | fkey_reference_table.referencing_table_7000044 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000045 | fkey_reference_table.referencing_table_7000045 | fkey_reference_table.referenced_table_7000042 @@ -160,13 +160,13 @@ DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(id) REFERENCES referenced_table(id) ON DELETE SET NULL); SELECT create_distributed_table('referencing_table', 'ref_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; name | relid | refd_relid ------------------------------------+------------------------------------------------+----------------------------------------------- +--------------------------------------------------------------------- referencing_table_id_fkey_7000051 | fkey_reference_table.referencing_table_7000051 | fkey_reference_table.referenced_table_7000042 referencing_table_id_fkey_7000052 | fkey_reference_table.referencing_table_7000052 | fkey_reference_table.referenced_table_7000042 
referencing_table_id_fkey_7000053 | fkey_reference_table.referencing_table_7000053 | fkey_reference_table.referenced_table_7000042 @@ -181,14 +181,14 @@ DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON DELETE SET DEFAULT; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; name | relid | refd_relid -------------------+------------------------------------------------+----------------------------------------------- +--------------------------------------------------------------------- fkey_ref_7000059 | fkey_reference_table.referencing_table_7000059 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000060 | fkey_reference_table.referencing_table_7000060 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000061 | fkey_reference_table.referencing_table_7000061 | fkey_reference_table.referenced_table_7000042 @@ -204,14 +204,14 @@ BEGIN; CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(id) REFERENCES referenced_table(id) ON DELETE SET DEFAULT); SELECT create_distributed_table('referencing_table', 'ref_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) COMMIT; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; name | relid | refd_relid ------------------------------------+------------------------------------------------+----------------------------------------------- +--------------------------------------------------------------------- referencing_table_id_fkey_7000067 | fkey_reference_table.referencing_table_7000067 | fkey_reference_table.referenced_table_7000042 referencing_table_id_fkey_7000068 | fkey_reference_table.referencing_table_7000068 | fkey_reference_table.referenced_table_7000042 referencing_table_id_fkey_7000069 | fkey_reference_table.referencing_table_7000069 | fkey_reference_table.referenced_table_7000042 @@ -226,14 +226,14 @@ DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON UPDATE SET NULL; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; name | relid | refd_relid -------------------+------------------------------------------------+----------------------------------------------- +--------------------------------------------------------------------- fkey_ref_7000075 | fkey_reference_table.referencing_table_7000075 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000076 | fkey_reference_table.referencing_table_7000076 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000077 | fkey_reference_table.referencing_table_7000077 | fkey_reference_table.referenced_table_7000042 @@ -248,14 +248,14 @@ DROP TABLE referencing_table; CREATE TABLE 
referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON UPDATE SET DEFAULT; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; name | relid | refd_relid -------------------+------------------------------------------------+----------------------------------------------- +--------------------------------------------------------------------- fkey_ref_7000083 | fkey_reference_table.referencing_table_7000083 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000084 | fkey_reference_table.referencing_table_7000084 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000085 | fkey_reference_table.referencing_table_7000085 | fkey_reference_table.referenced_table_7000042 @@ -270,14 +270,14 @@ DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON UPDATE CASCADE; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; name | relid | refd_relid -------------------+------------------------------------------------+----------------------------------------------- +--------------------------------------------------------------------- fkey_ref_7000091 | fkey_reference_table.referencing_table_7000091 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000092 | fkey_reference_table.referencing_table_7000092 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000093 | fkey_reference_table.referencing_table_7000093 | fkey_reference_table.referenced_table_7000042 @@ -293,7 +293,7 @@ DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -303,7 +303,7 @@ DETAIL: Adding a column with a constraint in one command is not supported becau HINT: You can issue each command separately such as ALTER TABLE referencing_table ADD COLUMN referencing data_type; ALTER TABLE referencing_table ADD CONSTRAINT constraint_name FOREIGN KEY (referencing) REFERENCES referenced_table(id) ON UPDATE CASCADE; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; name | relid | refd_relid -------+-------+------------ +--------------------------------------------------------------------- (0 rows) DROP TABLE referencing_table; @@ -312,7 +312,7 @@ SET citus.shard_replication_factor TO 2; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -322,7 +322,7 @@ DETAIL: Citus Community Edition currently supports foreign key constraints only HINT: Please 
change "citus.shard_replication_factor to 1". To learn more about using foreign keys with other replication factors, please contact us at https://citusdata.com/about/contact_us. SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; name | relid | refd_relid -------+-------+------------ +--------------------------------------------------------------------- (0 rows) DROP TABLE referencing_table; @@ -330,7 +330,7 @@ DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -340,7 +340,7 @@ DETAIL: Citus Community Edition currently supports foreign key constraints only HINT: Please change "citus.shard_replication_factor to 1". To learn more about using foreign keys with other replication factors, please contact us at https://citusdata.com/about/contact_us. SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; name | relid | refd_relid -------+-------+------------ +--------------------------------------------------------------------- (0 rows) DROP TABLE referencing_table; @@ -349,13 +349,13 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY (id) REFERENCES referenced_table(id)); SELECT create_distributed_table('referencing_table', 'ref_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; name | relid | refd_relid ------------------------------------+------------------------------------------------+----------------------------------------------- +--------------------------------------------------------------------- referencing_table_id_fkey_7000123 | fkey_reference_table.referencing_table_7000123 | fkey_reference_table.referenced_table_7000042 referencing_table_id_fkey_7000124 | fkey_reference_table.referencing_table_7000124 | fkey_reference_table.referenced_table_7000042 referencing_table_id_fkey_7000125 | fkey_reference_table.referencing_table_7000125 | fkey_reference_table.referenced_table_7000042 @@ -372,21 +372,21 @@ BEGIN; CREATE TABLE referenced_table(id int, test_column int, PRIMARY KEY(id)); SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY (id) REFERENCES referenced_table(id)); SELECT create_distributed_table('referencing_table', 'ref_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) COMMIT; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; name | relid | refd_relid ------------------------------------+------------------------------------------------+----------------------------------------------- +--------------------------------------------------------------------- referencing_table_id_fkey_7000132 | fkey_reference_table.referencing_table_7000132 | 
fkey_reference_table.referenced_table_7000131 referencing_table_id_fkey_7000133 | fkey_reference_table.referencing_table_7000133 | fkey_reference_table.referenced_table_7000131 referencing_table_id_fkey_7000134 | fkey_reference_table.referencing_table_7000134 | fkey_reference_table.referenced_table_7000131 @@ -403,7 +403,7 @@ DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -412,14 +412,14 @@ ERROR: cannot create foreign key constraint since relations are not colocated o DETAIL: A distributed table can only have foreign keys if it is referencing another colocated hash distributed table or a reference table SELECT * FROM table_fkeys_in_workers WHERE name LIKE 'fkey_ref%' ORDER BY 1,2,3; name | relid | refd_relid -------+-------+------------ +--------------------------------------------------------------------- (0 rows) DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id', 'range'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -428,7 +428,7 @@ ERROR: cannot create foreign key constraint since relations are not colocated o DETAIL: A distributed table can only have foreign keys if it is referencing another colocated hash distributed table or a reference table SELECT * FROM table_fkeys_in_workers WHERE name LIKE 'fkey_ref%' ORDER BY 1,2,3; name | relid | refd_relid -------+-------+------------ +--------------------------------------------------------------------- (0 rows) DROP TABLE referencing_table; @@ -438,13 +438,13 @@ CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, te CREATE TABLE referencing_table(id int, ref_id int); SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -471,7 +471,7 @@ TRUNCATE referenced_table CASCADE; NOTICE: truncate cascades to table "referencing_table" SELECT count(*) FROM referencing_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -484,13 +484,13 @@ CREATE TABLE referenced_table(id int, test_column int, PRIMARY KEY(id)); CREATE TABLE referencing_table(id int, ref_id int); SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('referencing_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -507,13 +507,13 @@ CREATE TABLE referenced_schema.referenced_table(id int UNIQUE, test_column int, CREATE TABLE referencing_schema.referencing_table(id int, ref_id int); SELECT create_reference_table('referenced_schema.referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT 
create_distributed_table('referencing_schema.referencing_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -523,7 +523,7 @@ INSERT INTO referencing_schema.referencing_table SELECT x, x from generate_serie DELETE FROM referenced_schema.referenced_table WHERE id > 800; SELECT count(*) FROM referencing_schema.referencing_table; count -------- +--------------------------------------------------------------------- 800 (1 row) @@ -536,13 +536,13 @@ CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(tes CREATE TABLE referencing_table(id int, ref_id int DEFAULT 1); SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -552,7 +552,7 @@ INSERT INTO referencing_table SELECT x, x FROM generate_series(1,1000) AS f(x); DELETE FROM referenced_table WHERE test_column > 800; SELECT count(*) FROM referencing_table WHERE ref_id = 1; count -------- +--------------------------------------------------------------------- 201 (1 row) @@ -564,13 +564,13 @@ CREATE TABLE referenced_table(test_column composite, PRIMARY KEY(test_column)); CREATE TABLE referencing_table(id int, referencing_composite composite); SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -580,7 +580,7 @@ INSERT INTO referencing_table SELECT x, (x+1, x+1)::composite FROM generate_seri DELETE FROM referenced_table WHERE (test_column).key1 > 900; SELECT count(*) FROM referencing_table; count -------- +--------------------------------------------------------------------- 899 (1 row) @@ -596,13 +596,13 @@ CREATE TABLE referenced_table(test_column SERIAL PRIMARY KEY, test_column2 int); CREATE TABLE referencing_table(id int, ref_id int); SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -612,7 +612,7 @@ INSERT INTO referencing_table SELECT x, x FROM generate_series(1,1000) AS f(x); DELETE FROM referenced_table WHERE test_column2 > 10; SELECT count(*) FROM referencing_table; count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -628,13 +628,13 @@ CREATE TABLE referenced_table(test_column int PRIMARY KEY, test_column2 int); CREATE TABLE referencing_table(id int, ref_id SERIAL); SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 
row) @@ -659,13 +659,13 @@ CREATE TABLE referenced_table(test_column SERIAL PRIMARY KEY, test_column2 int); CREATE TABLE referencing_table(id int, ref_id SERIAL); SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -688,13 +688,13 @@ CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(tes CREATE TABLE referencing_table(id int, ref_id int DEFAULT -1); SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -710,13 +710,13 @@ CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(tes CREATE TABLE referencing_table(id int, ref_id int DEFAULT -1); SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -729,7 +729,7 @@ DO UPDATE SET test_column = -1 * EXCLUDED.test_column; SELECT * FROM referencing_table WHERE ref_id < 0 ORDER BY 1; id | ref_id -----+-------- +--------------------------------------------------------------------- 1 | -1 2 | -2 3 | -3 @@ -747,7 +747,7 @@ INSERT INTO referencing_table VALUES (1,1), (2,2), (3,3); SELECT create_reference_table('referenced_table'); NOTICE: Copying data from local table... 
create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -774,19 +774,19 @@ CREATE TABLE referenced_table2(test_column int, test_column2 int, PRIMARY KEY(te CREATE TABLE referencing_table(id int, ref_id int); SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('referenced_table2'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -794,7 +794,7 @@ ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (id) REFERENCE ALTER TABLE referencing_table ADD CONSTRAINT foreign_key_2 FOREIGN KEY (id) REFERENCES referenced_table2(test_column2) ON DELETE CASCADE; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; name | relid | refd_relid ------------------------+------------------------------------------------+------------------------------------------------ +--------------------------------------------------------------------- fkey_ref_7000226 | fkey_reference_table.referencing_table_7000226 | fkey_reference_table.referenced_table_7000224 fkey_ref_7000227 | fkey_reference_table.referencing_table_7000227 | fkey_reference_table.referenced_table_7000224 fkey_ref_7000228 | fkey_reference_table.referencing_table_7000228 | fkey_reference_table.referenced_table_7000224 @@ -831,21 +831,21 @@ DETAIL: Key (id)=(1015) is not present in table "referenced_table_7000224". 
INSERT INTO referencing_table SELECT x, x+1 FROM generate_series(600,900) AS f(x); SELECT count(*) FROM referencing_table; count -------- +--------------------------------------------------------------------- 301 (1 row) DELETE FROM referenced_table WHERE test_column < 700; SELECT count(*) FROM referencing_table; count -------- +--------------------------------------------------------------------- 201 (1 row) DELETE FROM referenced_table2 WHERE test_column2 > 800; SELECT count(*) FROM referencing_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -860,25 +860,25 @@ CREATE TABLE referenced_table2(test_column int, test_column2 int, PRIMARY KEY(te CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY (id) REFERENCES referenced_table(test_column) ON DELETE CASCADE, FOREIGN KEY (id) REFERENCES referenced_table2(test_column2) ON DELETE CASCADE); SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('referenced_table2'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; count -------- +--------------------------------------------------------------------- 16 (1 row) @@ -900,19 +900,19 @@ CREATE TABLE referenced_table2(test_column int, test_column2 int, PRIMARY KEY(te CREATE TABLE referencing_table(id int, ref_id int); SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('referenced_table2'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -922,7 +922,7 @@ BEGIN; COMMIT; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; name | relid | refd_relid ------------------------+------------------------------------------------+------------------------------------------------ +--------------------------------------------------------------------- fkey_ref_7000246 | fkey_reference_table.referencing_table_7000246 | fkey_reference_table.referenced_table_7000244 fkey_ref_7000247 | fkey_reference_table.referencing_table_7000247 | fkey_reference_table.referenced_table_7000244 fkey_ref_7000248 | fkey_reference_table.referencing_table_7000248 | fkey_reference_table.referenced_table_7000244 @@ -956,21 +956,21 @@ ERROR: insert or update on table "referencing_table_7000248" violates foreign k INSERT INTO referencing_table SELECT x, x+501 FROM generate_series(0,1000) AS f(x); SELECT count(*) FROM referencing_table; count -------- +--------------------------------------------------------------------- 1001 (1 row) DELETE FROM referenced_table WHERE test_column < 700; SELECT count(*) FROM 
referencing_table; count -------- +--------------------------------------------------------------------- 301 (1 row) DELETE FROM referenced_table2 WHERE test_column2 > 800; SELECT count(*) FROM referencing_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -986,19 +986,19 @@ CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY (id) REFERENCES r BEGIN; SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('referenced_table2'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1006,7 +1006,7 @@ BEGIN; COMMIT; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; count -------- +--------------------------------------------------------------------- 16 (1 row) @@ -1029,19 +1029,19 @@ CREATE TABLE referencing_table(id int PRIMARY KEY, ref_id int); CREATE TABLE referencing_table2(id int, ref_id int); SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1053,7 +1053,7 @@ ALTER TABLE referencing_table2 ADD CONSTRAINT fkey_ref_to_dist FOREIGN KEY (id) COMMIT; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; name | relid | refd_relid ---------------------------+-------------------------------------------------+------------------------------------------------ +--------------------------------------------------------------------- fkey_ref_7000265 | fkey_reference_table.referencing_table_7000265 | fkey_reference_table.referenced_table_7000264 fkey_ref_7000266 | fkey_reference_table.referencing_table_7000266 | fkey_reference_table.referenced_table_7000264 fkey_ref_7000267 | fkey_reference_table.referencing_table_7000267 | fkey_reference_table.referenced_table_7000264 @@ -1096,20 +1096,20 @@ INSERT INTO referencing_table2 SELECT x, x+1 FROM generate_series(0,300) AS f(x) DELETE FROM referenced_table WHERE test_column < 200; SELECT count(*) FROM referencing_table; count -------- +--------------------------------------------------------------------- 201 (1 row) SELECT count(*) FROM referencing_table2; count -------- +--------------------------------------------------------------------- 101 (1 row) DELETE FROM referencing_table WHERE id > 200; SELECT count(*) FROM referencing_table2; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -1126,7 +1126,7 @@ CREATE TABLE referencing_table(id int PRIMARY KEY, ref_id int, FOREIGN KEY (id) CREATE TABLE referencing_table2(id int, ref_id int, FOREIGN KEY (ref_id) 
REFERENCES referenced_table(test_column2) ON DELETE CASCADE, FOREIGN KEY (id) REFERENCES referencing_table(id) ON DELETE CASCADE); SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -1134,20 +1134,20 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT create_distributed_table('referencing_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) COMMIT; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; count -------- +--------------------------------------------------------------------- 24 (1 row) @@ -1168,26 +1168,26 @@ CREATE TABLE referencing_table(id int, ref_id int, ref_id2 int, PRIMARY KEY(id, CREATE TABLE referencing_referencing_table(id int, ref_id int, FOREIGN KEY (id, ref_id) REFERENCES referencing_table(id, ref_id) ON DELETE CASCADE); SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_referencing_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id, ref_id2) REFERENCES referenced_table(test_column, test_column2) ON DELETE CASCADE; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.referencing%' ORDER BY 1,2,3; name | relid | refd_relid -------------------------------------------------------+------------------------------------------------------------+------------------------------------------------ +--------------------------------------------------------------------- fkey_ref_7000299 | fkey_reference_table.referencing_table_7000299 | fkey_reference_table.referenced_table_7000298 fkey_ref_7000300 | fkey_reference_table.referencing_table_7000300 | fkey_reference_table.referenced_table_7000298 fkey_ref_7000301 | fkey_reference_table.referencing_table_7000301 | fkey_reference_table.referenced_table_7000298 @@ -1212,7 +1212,7 @@ INSERT INTO referencing_referencing_table SELECT x, x+1 FROM generate_series(1,9 DELETE FROM referenced_table WHERE test_column > 800; SELECT max(ref_id) FROM referencing_referencing_table; max ------ +--------------------------------------------------------------------- 800 (1 row) @@ -1228,21 +1228,21 @@ BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES test_table_1(id)); SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- 
+--------------------------------------------------------------------- (1 row) CREATE TABLE test_table_3(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES test_table_2(id)); SELECT create_distributed_table('test_table_3', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1256,14 +1256,14 @@ BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1279,14 +1279,14 @@ BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('test_table_1', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY); SELECT create_reference_table('test_table_2'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -1306,7 +1306,7 @@ BEGIN; SELECT create_reference_table('test_table_1'); NOTICE: Copying data from local table... create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -1322,13 +1322,13 @@ BEGIN; CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1342,26 +1342,26 @@ CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; count -------- +--------------------------------------------------------------------- 8 (1 row) ALTER TABLE test_table_2 DROP CONSTRAINT test_table_2_value_1_fkey; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -1372,13 +1372,13 @@ BEGIN; CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_2', 
'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1391,7 +1391,7 @@ ERROR: current transaction is aborted, commands ignored until end of transactio COMMIT; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -1402,13 +1402,13 @@ CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1416,7 +1416,7 @@ ALTER TABLE test_table_1 DROP CONSTRAINT test_table_1_pkey CASCADE; NOTICE: drop cascades to constraint test_table_2_value_1_fkey on table test_table_2 SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -1427,13 +1427,13 @@ BEGIN; CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1442,7 +1442,7 @@ NOTICE: drop cascades to constraint test_table_2_value_1_fkey on table test_tab COMMIT; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -1452,20 +1452,20 @@ CREATE TABLE test_table_1(id int PRIMARY KEY, id2 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) ALTER TABLE test_table_2 DROP COLUMN value_1; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -1476,13 +1476,13 @@ CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) BEGIN; SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- 
(1 row) @@ -1490,7 +1490,7 @@ BEGIN; COMMIT; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -1500,13 +1500,13 @@ CREATE TABLE test_table_1(id int PRIMARY KEY, id2 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1514,7 +1514,7 @@ ALTER TABLE test_table_1 DROP COLUMN id CASCADE; NOTICE: drop cascades to constraint test_table_2_value_1_fkey on table test_table_2 SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -1525,13 +1525,13 @@ CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) BEGIN; SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1540,7 +1540,7 @@ NOTICE: drop cascades to constraint test_table_2_value_1_fkey on table test_tab COMMIT; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -1550,13 +1550,13 @@ CREATE TABLE test_table_1(id int PRIMARY KEY, id2 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1573,7 +1573,7 @@ ERROR: integer out of range CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; count -------- +--------------------------------------------------------------------- 8 (1 row) @@ -1586,13 +1586,13 @@ CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) BEGIN; SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1602,7 +1602,7 @@ NOTICE: drop cascades to constraint test_table_2_value_1_fkey on table test_tab COMMIT; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 
'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -1612,13 +1612,13 @@ CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1628,7 +1628,7 @@ TRUNCATE test_table_1 CASCADE; NOTICE: truncate cascades to table "test_table_2" SELECT * FROM test_table_2; id | value_1 -----+--------- +--------------------------------------------------------------------- (0 rows) DROP TABLE test_table_1, test_table_2; @@ -1637,13 +1637,13 @@ CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1655,7 +1655,7 @@ NOTICE: truncate cascades to table "test_table_2" COMMIT; SELECT * FROM test_table_2; id | value_1 -----+--------- +--------------------------------------------------------------------- (0 rows) DROP TABLE test_table_1, test_table_2; @@ -1665,13 +1665,13 @@ CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) BEGIN; SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1682,7 +1682,7 @@ NOTICE: truncate cascades to table "test_table_2" COMMIT; SELECT * FROM test_table_2; id | value_1 -----+--------- +--------------------------------------------------------------------- (0 rows) DROP TABLE test_table_1, test_table_2; @@ -1691,13 +1691,13 @@ CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1706,12 +1706,12 @@ INSERT INTO test_table_2 VALUES (1,1),(2,2),(3,3); TRUNCATE test_table_2 CASCADE; SELECT * FROM test_table_2; id | value_1 -----+--------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM test_table_1; id ----- +--------------------------------------------------------------------- 1 2 3 @@ -1723,13 +1723,13 @@ CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN 
KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1740,12 +1740,12 @@ BEGIN; COMMIT; SELECT * FROM test_table_2; id | value_1 -----+--------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM test_table_1; id ----- +--------------------------------------------------------------------- 1 2 3 @@ -1759,19 +1759,19 @@ CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); CREATE TABLE test_table_3(id int PRIMARY KEY, value_1 int); SELECT create_reference_table('test_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_3', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1797,13 +1797,13 @@ ERROR: distributing partitioned tables in only supported for hash-distributed t -- partitioned tables are supported as hash distributed table SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1821,7 +1821,7 @@ CONTEXT: while executing command on localhost:xxxxx INSERT INTO referencing_table VALUES (0, 1); SELECT * FROM referencing_table; id | value_1 -----+--------- +--------------------------------------------------------------------- 0 | 1 (1 row) @@ -1841,7 +1841,7 @@ BEGIN; ALTER TABLE referencing_table ADD COLUMN x INT; SELECT * FROM referencing_table; id | value_1 | x -----+---------+--- +--------------------------------------------------------------------- (0 rows) ROLLBACK; @@ -1850,7 +1850,7 @@ BEGIN; ALTER TABLE referencing_table ADD COLUMN x INT; SELECT * FROM referencing_table; id | value_1 | x -----+---------+--- +--------------------------------------------------------------------- (0 rows) ROLLBACK; diff --git a/src/test/regress/expected/full_join.out b/src/test/regress/expected/full_join.out index 9cfbdcd82..1ba772019 100644 --- a/src/test/regress/expected/full_join.out +++ b/src/test/regress/expected/full_join.out @@ -9,19 +9,19 @@ CREATE TABLE test_table_2(id bigint, val1 int); CREATE TABLE test_table_3(id int, val1 bigint); SELECT create_distributed_table('test_table_1', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_3', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- 
(1 row) @@ -31,7 +31,7 @@ INSERT INTO test_table_3 VALUES(1,1),(3,3),(4,5); -- Simple full outer join SELECT id FROM test_table_1 FULL JOIN test_table_3 using(id) ORDER BY 1; id ----- +--------------------------------------------------------------------- 1 2 3 @@ -41,7 +41,7 @@ SELECT id FROM test_table_1 FULL JOIN test_table_3 using(id) ORDER BY 1; -- Get all columns as the result of the full join SELECT * FROM test_table_1 FULL JOIN test_table_3 using(id) ORDER BY 1; id | val1 | val1 -----+------+------ +--------------------------------------------------------------------- 1 | 1 | 1 2 | 2 | 3 | 3 | 3 @@ -56,7 +56,7 @@ SELECT * FROM USING(id) ORDER BY 1; id ----- +--------------------------------------------------------------------- 1 2 3 @@ -72,7 +72,7 @@ SELECT * FROM USING(id, val1) ORDER BY 1; id | val1 -----+------ +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -83,7 +83,7 @@ SELECT * FROM -- Full join using multiple columns SELECT * FROM test_table_1 FULL JOIN test_table_3 USING(id, val1) ORDER BY 1; id | val1 -----+------ +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -98,7 +98,7 @@ GROUP BY id ORDER BY 2 ASC LIMIT 3; count | avg_value | not_null --------+-----------+---------- +--------------------------------------------------------------------- 1 | 2 | t 1 | 6 | t 1 | 12 | t @@ -109,7 +109,7 @@ FROM test_table_1 FULL JOIN test_table_3 USING(id, val1) GROUP BY test_table_1.id ORDER BY 1; max ------ +--------------------------------------------------------------------- 1 2 3 @@ -122,7 +122,7 @@ FROM test_table_1 LEFT JOIN test_table_3 USING(id, val1) GROUP BY test_table_1.id ORDER BY 1; max ------ +--------------------------------------------------------------------- 1 2 3 @@ -139,7 +139,7 @@ INSERT INTO test_table_3 VALUES(7, NULL); -- Get all columns as the result of the full join SELECT * FROM test_table_1 FULL JOIN test_table_3 using(id) ORDER BY 1; id | val1 | val1 -----+------+------ +--------------------------------------------------------------------- 1 | 1 | 1 2 | 2 | 3 | 3 | 3 @@ -150,7 +150,7 @@ SELECT * FROM test_table_1 FULL JOIN test_table_3 using(id) ORDER BY 1; -- Get the same result (with multiple id) SELECT * FROM test_table_1 FULL JOIN test_table_3 ON (test_table_1.id = test_table_3.id) ORDER BY 1; id | val1 | id | val1 -----+------+----+------ +--------------------------------------------------------------------- 1 | 1 | 1 | 1 2 | 2 | | 3 | 3 | 3 | 3 @@ -161,7 +161,7 @@ SELECT * FROM test_table_1 FULL JOIN test_table_3 ON (test_table_1.id = test_tab -- Full join using multiple columns SELECT * FROM test_table_1 FULL JOIN test_table_3 USING(id, val1) ORDER BY 1; id | val1 -----+------ +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -179,13 +179,13 @@ CREATE TABLE test_table_1(id int, val1 text); CREATE TABLE test_table_2(id int, val1 varchar(30)); SELECT create_distributed_table('test_table_1', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -194,7 +194,7 @@ INSERT INTO test_table_2 VALUES(2,'val_2'),(3,'val_3'),(4,'val_4'), (5, NULL); -- Simple full outer join SELECT id FROM test_table_1 FULL JOIN test_table_2 using(id) ORDER BY 1; id ----- 
+--------------------------------------------------------------------- 1 2 3 @@ -205,7 +205,7 @@ SELECT id FROM test_table_1 FULL JOIN test_table_2 using(id) ORDER BY 1; -- Get all columns as the result of the full join SELECT * FROM test_table_1 FULL JOIN test_table_2 using(id) ORDER BY 1; id | val1 | val1 -----+-------+------- +--------------------------------------------------------------------- 1 | val_1 | 2 | val_2 | val_2 3 | val_3 | val_3 @@ -221,7 +221,7 @@ SELECT * FROM USING(id, val1) ORDER BY 1,2; id | val1 -----+------- +--------------------------------------------------------------------- 1 | val_1 2 | val_2 3 | val_3 @@ -235,7 +235,7 @@ SELECT * FROM -- Full join using multiple columns SELECT * FROM test_table_1 FULL JOIN test_table_2 USING(id, val1) ORDER BY 1,2; id | val1 -----+------- +--------------------------------------------------------------------- 1 | val_1 2 | val_2 3 | val_3 diff --git a/src/test/regress/expected/intermediate_result_pruning.out b/src/test/regress/expected/intermediate_result_pruning.out index 129b1c449..efbc2f080 100644 --- a/src/test/regress/expected/intermediate_result_pruning.out +++ b/src/test/regress/expected/intermediate_result_pruning.out @@ -7,28 +7,28 @@ SET citus.shard_replication_factor = 1; CREATE TABLE table_1 (key int, value text); SELECT create_distributed_table('table_1', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE table_2 (key int, value text); SELECT create_distributed_table('table_2', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE table_3 (key int, value text); SELECT create_distributed_table('table_3', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE ref_table (key int, value text); SELECT create_reference_table('ref_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -52,7 +52,7 @@ DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT count(*) AS cou DEBUG: Subplan 5_1 will be sent to localhost:xxxxx DEBUG: Subplan 5_1 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -69,7 +69,7 @@ DEBUG: generating subplan 7_1 for CTE some_values_1: SELECT key, random() AS ra DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1) DEBUG: Subplan 7_1 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -87,7 +87,7 @@ DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT count(*) AS cou DEBUG: Subplan 9_1 will be sent to localhost:xxxxx DEBUG: Subplan 9_1 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -107,7 +107,7 @@ DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT count(*) AS co DEBUG: Subplan 11_1 will be sent to localhost:xxxxx DEBUG: 
Subplan 11_2 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -128,7 +128,7 @@ DEBUG: Subplan 14_1 will be sent to localhost:xxxxx DEBUG: Subplan 14_1 will be sent to localhost:xxxxx DEBUG: Subplan 14_2 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -150,7 +150,7 @@ DEBUG: Subplan 17_1 will be sent to localhost:xxxxx DEBUG: Subplan 17_1 will be sent to localhost:xxxxx DEBUG: Subplan 17_2 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -172,7 +172,7 @@ DEBUG: Subplan 20_1 will be sent to localhost:xxxxx DEBUG: Subplan 20_1 will be sent to localhost:xxxxx DEBUG: Subplan 20_2 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -194,7 +194,7 @@ DEBUG: Plan 23 query after replacing subqueries and CTEs: SELECT count(*) AS co DEBUG: Subplan 23_1 will be sent to localhost:xxxxx DEBUG: Subplan 23_2 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -215,7 +215,7 @@ DEBUG: Subplan 26_1 will be sent to localhost:xxxxx DEBUG: Subplan 26_2 will be sent to localhost:xxxxx DEBUG: Subplan 26_2 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -238,7 +238,7 @@ DEBUG: Subplan 29_1 will be sent to localhost:xxxxx DEBUG: Subplan 29_2 will be sent to localhost:xxxxx DEBUG: Subplan 29_2 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -256,7 +256,7 @@ DEBUG: Plan 32 query after replacing subqueries and CTEs: SELECT count(*) AS co DEBUG: Subplan 32_1 will be sent to localhost:xxxxx DEBUG: Subplan 32_1 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -269,7 +269,7 @@ SELECT FROM (some_values_1 JOIN ref_table USING (key)) JOIN table_2 USING (key) WHERE table_2.key = 1; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -291,7 +291,7 @@ DEBUG: Plan 35 query after replacing subqueries and CTEs: SELECT count(*) AS co DEBUG: Subplan 35_1 will be sent to localhost:xxxxx DEBUG: Subplan 35_2 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -321,7 +321,7 @@ DEBUG: Subplan 38_1 will be sent to localhost:xxxxx DEBUG: Subplan 39_1 will be sent to localhost:xxxxx DEBUG: Subplan 39_2 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -350,7 +350,7 @@ DEBUG: Subplan 42_1 will be sent to localhost:xxxxx DEBUG: Subplan 43_1 will be sent to localhost:xxxxx DEBUG: Subplan 43_2 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -374,7 +374,7 @@ DEBUG: Subplan 46_2 will be sent to localhost:xxxxx DEBUG: Subplan 46_3 will be sent to localhost:xxxxx DEBUG: Subplan 46_3 will be sent to localhost:xxxxx key | key | value ------+-----+------- +--------------------------------------------------------------------- (0 rows) -- join on intermediate results, so should only @@ -390,7 +390,7 @@ DEBUG: Plan 50 query after 
replacing subqueries and CTEs: SELECT count(*) AS co DEBUG: Subplan 50_1 will be sent to localhost:xxxxx DEBUG: Subplan 50_2 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -407,7 +407,7 @@ DEBUG: Plan 53 query after replacing subqueries and CTEs: SELECT count(*) AS co DEBUG: Subplan 53_1 will be sent to localhost:xxxxx DEBUG: Subplan 53_2 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -429,7 +429,7 @@ DEBUG: Plan 56 query after replacing subqueries and CTEs: SELECT count(*) AS co DEBUG: Subplan 56_1 will be sent to localhost:xxxxx DEBUG: Subplan 56_2 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -489,7 +489,7 @@ DEBUG: Subplan 59_5 will be sent to localhost:xxxxx DEBUG: Subplan 59_5 will be sent to localhost:xxxxx DEBUG: Subplan 59_6 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -545,7 +545,7 @@ DEBUG: Subplan 66_4 will be sent to localhost:xxxxx DEBUG: Subplan 66_5 will be sent to localhost:xxxxx DEBUG: Subplan 66_6 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -560,7 +560,7 @@ DEBUG: Plan 73 query after replacing subqueries and CTEs: SELECT intermediate_r DEBUG: Subplan 73_1 will be sent to localhost:xxxxx DEBUG: Subplan 73_2 will be sent to localhost:xxxxx key ------ +--------------------------------------------------------------------- (0 rows) -- the intermediate results should just hit a single worker @@ -590,7 +590,7 @@ DEBUG: Subplan 77_1 will be sent to localhost:xxxxx DEBUG: Subplan 77_2 will be sent to localhost:xxxxx DEBUG: Subplan 76_2 will be sent to localhost:xxxxx key ------ +--------------------------------------------------------------------- (0 rows) -- one final test with SET operations, where @@ -620,7 +620,7 @@ DEBUG: Subplan 82_1 will be sent to localhost:xxxxx DEBUG: Subplan 82_2 will be sent to localhost:xxxxx DEBUG: Subplan 81_2 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -639,7 +639,7 @@ DEBUG: Plan 86 query after replacing subqueries and CTEs: SELECT count(*) AS co DEBUG: Subplan 86_1 will be sent to localhost:xxxxx DEBUG: Subplan 86_1 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 14 (1 row) @@ -656,7 +656,7 @@ DEBUG: generating subplan 88_1 for subquery SELECT key, random() AS random FROM DEBUG: Plan 88 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 1)) foo, (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('88_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) bar WHERE (foo.key OPERATOR(pg_catalog.<>) bar.key) DEBUG: Subplan 88_1 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 4 (1 row) @@ -683,7 +683,7 @@ DEBUG: Subplan 90_2 will be sent to localhost:xxxxx DEBUG: Subplan 92_1 will be sent to localhost:xxxxx DEBUG: Subplan 92_1 will be sent to localhost:xxxxx key | value ------+------- 
+--------------------------------------------------------------------- 3 | 3 4 | 4 5 | 5 @@ -713,7 +713,7 @@ DEBUG: Subplan 94_2 will be sent to localhost:xxxxx DEBUG: Subplan 96_1 will be sent to localhost:xxxxx DEBUG: Subplan 96_1 will be sent to localhost:xxxxx key | value ------+------- +--------------------------------------------------------------------- 3 | 3 4 | 4 5 | 5 @@ -738,7 +738,7 @@ DEBUG: Plan 98 query after replacing subqueries and CTEs: SELECT key, value FRO DEBUG: Subplan 98_1 will be sent to localhost:xxxxx DEBUG: Subplan 99_1 will be sent to localhost:xxxxx key | value ------+------- +--------------------------------------------------------------------- 6 | 6 (1 row) @@ -838,37 +838,37 @@ CREATE TABLE range_partitioned(range_column text, data int); SET client_min_messages TO DEBUG1; SELECT create_distributed_table('range_partitioned', 'range_column', 'range'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_empty_shard('range_partitioned'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 1480013 (1 row) SELECT master_create_empty_shard('range_partitioned'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 1480014 (1 row) SELECT master_create_empty_shard('range_partitioned'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 1480015 (1 row) SELECT master_create_empty_shard('range_partitioned'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 1480016 (1 row) SELECT master_create_empty_shard('range_partitioned'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 1480017 (1 row) @@ -889,7 +889,7 @@ DEBUG: generating subplan 120_1 for subquery SELECT data FROM intermediate_resu DEBUG: Plan 120 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.=) 'A'::text) AND (data OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.data FROM read_intermediate_result('120_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer)))) DEBUG: Subplan 120_1 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -906,7 +906,7 @@ DEBUG: Plan 122 query after replacing subqueries and CTEs: SELECT count(*) AS c DEBUG: Subplan 122_1 will be sent to localhost:xxxxx DEBUG: Subplan 122_1 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -926,7 +926,7 @@ DEBUG: Plan 124 query after replacing subqueries and CTEs: SELECT count(*) AS c DEBUG: Subplan 124_1 will be sent to localhost:xxxxx DEBUG: Subplan 124_1 will be sent to localhost:xxxxx count -------- +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/intermediate_results.out b/src/test/regress/expected/intermediate_results.out index 9d981808d..68d813c94 100644 --- a/src/test/regress/expected/intermediate_results.out +++ b/src/test/regress/expected/intermediate_results.out @@ -10,13 +10,13 @@ CREATE OR REPLACE FUNCTION 
pg_catalog.store_intermediate_result_on_node(nodename BEGIN; SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s'); create_intermediate_result ----------------------------- +--------------------------------------------------------------------- 5 (1 row) SELECT * FROM read_intermediate_result('squares', 'binary') AS res (x int, x2 int); x | x2 ----+---- +--------------------------------------------------------------------- 1 | 1 2 | 4 3 | 9 @@ -28,7 +28,7 @@ COMMIT; -- in separate transactions, the result is no longer available SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s'); create_intermediate_result ----------------------------- +--------------------------------------------------------------------- 5 (1 row) @@ -38,7 +38,7 @@ BEGIN; CREATE TABLE interesting_squares (user_id text, interested_in text); SELECT create_distributed_table('interesting_squares', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -46,7 +46,7 @@ INSERT INTO interesting_squares VALUES ('jon', '2'), ('jon', '5'), ('jack', '3') -- put an intermediate result on all workers SELECT broadcast_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s'); broadcast_intermediate_result -------------------------------- +--------------------------------------------------------------------- 5 (1 row) @@ -56,7 +56,7 @@ FROM interesting_squares JOIN (SELECT * FROM read_intermediate_result('squares', WHERE user_id = 'jon' ORDER BY x; x | x2 ----+---- +--------------------------------------------------------------------- 2 | 4 5 | 25 (2 rows) @@ -66,7 +66,7 @@ BEGIN; -- put an intermediate result on all workers SELECT broadcast_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s'); broadcast_intermediate_result -------------------------------- +--------------------------------------------------------------------- 5 (1 row) @@ -76,7 +76,7 @@ FROM interesting_squares JOIN (SELECT * FROM read_intermediate_result('squares', 'binary') AS res (x int, x2 int)) squares ON (x::text = interested_in) ORDER BY x; x | x2 ----+---- +--------------------------------------------------------------------- 2 | 4 3 | 9 5 | 25 @@ -111,7 +111,7 @@ SET client_min_messages TO DEFAULT; BEGIN; SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s'); create_intermediate_result ----------------------------- +--------------------------------------------------------------------- 5 (1 row) @@ -122,7 +122,7 @@ END; BEGIN; SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s'); create_intermediate_result ----------------------------- +--------------------------------------------------------------------- 5 (1 row) @@ -140,7 +140,7 @@ INSERT INTO stored_squares VALUES ('jon', '(5,25)'::intermediate_results.square_ BEGIN; SELECT create_intermediate_result('stored_squares', 'SELECT square FROM stored_squares'); create_intermediate_result ----------------------------- +--------------------------------------------------------------------- 4 (1 row) @@ -150,13 +150,13 @@ COMMIT; BEGIN; SELECT create_intermediate_result('stored_squares', 'SELECT square FROM stored_squares'); create_intermediate_result ----------------------------- +--------------------------------------------------------------------- 4 (1 row) SELECT * FROM read_intermediate_result('stored_squares', 'text') AS res (s 
intermediate_results.square_type); s --------- +--------------------------------------------------------------------- (2,4) (3,9) (4,16) @@ -168,7 +168,7 @@ BEGIN; -- put an intermediate result in text format on all workers SELECT broadcast_intermediate_result('stored_squares', 'SELECT square, metadata FROM stored_squares'); broadcast_intermediate_result -------------------------------- +--------------------------------------------------------------------- 4 (1 row) @@ -179,7 +179,7 @@ SELECT * FROM interesting_squares JOIN ( ) squares ON ((s).x = interested_in) WHERE user_id = 'jon' ORDER BY 1,2; user_id | interested_in | s | m ----------+---------------+--------+-------------- +--------------------------------------------------------------------- jon | 2 | (2,4) | {"value": 2} jon | 5 | (5,25) | {"value": 5} (2 rows) @@ -191,7 +191,7 @@ SELECT * FROM interesting_squares JOIN ( ) squares ON ((s).x = interested_in) ORDER BY 1,2; user_id | interested_in | s | m ----------+---------------+--------+-------------- +--------------------------------------------------------------------- jack | 3 | (3,9) | {"value": 3} jon | 2 | (2,4) | {"value": 2} jon | 5 | (5,25) | {"value": 5} @@ -202,39 +202,39 @@ BEGIN; -- accurate row count estimates for primitive types SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,632) s'); create_intermediate_result ----------------------------- +--------------------------------------------------------------------- 632 (1 row) EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_result('squares', 'binary') AS res (x int, x2 int); QUERY PLAN ------------------------------------------------------------------------------------ +--------------------------------------------------------------------- Function Scan on read_intermediate_result res (cost=0.00..4.55 rows=632 width=8) (1 row) -- less accurate results for variable types SELECT create_intermediate_result('hellos', $$SELECT s, 'hello-'||s FROM generate_series(1,63) s$$); create_intermediate_result ----------------------------- +--------------------------------------------------------------------- 63 (1 row) EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_result('hellos', 'binary') AS res (x int, y text); QUERY PLAN ------------------------------------------------------------------------------------ +--------------------------------------------------------------------- Function Scan on read_intermediate_result res (cost=0.00..0.32 rows=30 width=36) (1 row) -- not very accurate results for text encoding SELECT create_intermediate_result('stored_squares', 'SELECT square FROM stored_squares'); create_intermediate_result ----------------------------- +--------------------------------------------------------------------- 4 (1 row) EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_result('stored_squares', 'text') AS res (s intermediate_results.square_type); QUERY PLAN ----------------------------------------------------------------------------------- +--------------------------------------------------------------------- Function Scan on read_intermediate_result res (cost=0.00..0.01 rows=1 width=32) (1 row) @@ -246,7 +246,7 @@ TO PROGRAM WITH (FORMAT text); SELECT * FROM squares ORDER BY x; x | x2 ----+---- +--------------------------------------------------------------------- 1 | 1 2 | 4 3 | 9 @@ -272,19 +272,19 @@ SELECT create_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_seri create_intermediate_result('squares_2', 'SELECT s, s*s FROM generate_series(4,6) s'), 
create_intermediate_result('squares_3', 'SELECT s, s*s FROM generate_series(7,10) s'); create_intermediate_result | create_intermediate_result | create_intermediate_result -----------------------------+----------------------------+---------------------------- +--------------------------------------------------------------------- 3 | 3 | 4 (1 row) SELECT count(*) FROM read_intermediate_results(ARRAY[]::text[], 'binary') AS res (x int, x2 int); count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT * FROM read_intermediate_results(ARRAY['squares_1']::text[], 'binary') AS res (x int, x2 int); x | x2 ----+---- +--------------------------------------------------------------------- 1 | 1 2 | 4 3 | 9 @@ -292,7 +292,7 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1']::text[], 'binary') AS SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2', 'squares_3']::text[], 'binary') AS res (x int, x2 int); x | x2 -----+----- +--------------------------------------------------------------------- 1 | 1 2 | 4 3 | 9 @@ -309,7 +309,7 @@ COMMIT; -- in separate transactions, the result is no longer available SELECT create_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_series(1,5) s'); create_intermediate_result ----------------------------- +--------------------------------------------------------------------- 5 (1 row) @@ -319,7 +319,7 @@ ERROR: result "squares_1" does not exist BEGIN; SELECT create_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_series(1,3) s'); create_intermediate_result ----------------------------- +--------------------------------------------------------------------- 3 (1 row) @@ -336,13 +336,13 @@ ROLLBACK TO SAVEPOINT s1; -- after rollbacks we should be able to run vail read_intermediate_results still. 
SELECT count(*) FROM read_intermediate_results(ARRAY['squares_1']::text[], 'binary') AS res (x int, x2 int); count -------- +--------------------------------------------------------------------- 3 (1 row) SELECT count(*) FROM read_intermediate_results(ARRAY[]::text[], 'binary') AS res (x int, x2 int); count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -357,7 +357,7 @@ SELECT broadcast_intermediate_result('stored_squares_1', broadcast_intermediate_result('stored_squares_2', 'SELECT s, s*s, ROW(2::text, 3) FROM generate_series(4,6) s'); broadcast_intermediate_result | broadcast_intermediate_result --------------------------------+------------------------------- +--------------------------------------------------------------------- 3 | 3 (1 row) @@ -368,7 +368,7 @@ SELECT * FROM interesting_squares JOIN ( ) squares ON (squares.x::text = interested_in) WHERE user_id = 'jon' ORDER BY 1,2; user_id | interested_in | x | x2 | z ----------+---------------+---+----+------- +--------------------------------------------------------------------- jon | 2 | 2 | 4 | (1,2) jon | 5 | 5 | 25 | (2,3) (2 rows) @@ -380,13 +380,13 @@ BEGIN; SELECT create_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_series(1,632) s'), create_intermediate_result('squares_2', 'SELECT s, s*s FROM generate_series(633,1024) s'); create_intermediate_result | create_intermediate_result -----------------------------+---------------------------- +--------------------------------------------------------------------- 632 | 392 (1 row) EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2'], 'binary') AS res (x int, x2 int); QUERY PLAN -------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Function Scan on read_intermediate_results res (cost=0.00..7.37 rows=1024 width=8) (1 row) @@ -394,26 +394,26 @@ EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_results(ARRAY['squares_1', 's SELECT create_intermediate_result('hellos_1', $$SELECT s, 'hello-'||s FROM generate_series(1,63) s$$), create_intermediate_result('hellos_2', $$SELECT s, 'hello-'||s FROM generate_series(64,129) s$$); create_intermediate_result | create_intermediate_result -----------------------------+---------------------------- +--------------------------------------------------------------------- 63 | 66 (1 row) EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_results(ARRAY['hellos_1', 'hellos_2'], 'binary') AS res (x int, y text); QUERY PLAN ------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Function Scan on read_intermediate_results res (cost=0.00..0.66 rows=62 width=36) (1 row) -- not very accurate results for text encoding SELECT create_intermediate_result('stored_squares', 'SELECT square FROM stored_squares'); create_intermediate_result ----------------------------- +--------------------------------------------------------------------- 4 (1 row) EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_results(ARRAY['stored_squares'], 'text') AS res (s intermediate_results.square_type); QUERY PLAN ------------------------------------------------------------------------------------ +--------------------------------------------------------------------- Function Scan on read_intermediate_results res (cost=0.00..0.01 rows=1 width=32) (1 row) @@ -425,19 +425,19 @@ END; BEGIN; 
SELECT broadcast_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_series(1, 5) s'); broadcast_intermediate_result -------------------------------- +--------------------------------------------------------------------- 5 (1 row) SELECT * FROM fetch_intermediate_results(ARRAY['squares_1']::text[], 'localhost', :worker_2_port); fetch_intermediate_results ----------------------------- +--------------------------------------------------------------------- 111 (1 row) SELECT * FROM read_intermediate_result('squares_1', 'binary') AS res (x int, x2 int); x | x2 ----+---- +--------------------------------------------------------------------- 1 | 1 2 | 4 3 | 9 @@ -447,13 +447,13 @@ SELECT * FROM read_intermediate_result('squares_1', 'binary') AS res (x int, x2 SELECT * FROM fetch_intermediate_results(ARRAY['squares_1']::text[], 'localhost', :worker_1_port); fetch_intermediate_results ----------------------------- +--------------------------------------------------------------------- 111 (1 row) SELECT * FROM read_intermediate_result('squares_1', 'binary') AS res (x int, x2 int); x | x2 ----+---- +--------------------------------------------------------------------- 1 | 1 2 | 4 3 | 9 @@ -467,14 +467,14 @@ BEGIN; SELECT store_intermediate_result_on_node('localhost', :worker_1_port, 'squares_1', 'SELECT s, s*s FROM generate_series(1, 2) s'); store_intermediate_result_on_node ------------------------------------ +--------------------------------------------------------------------- (1 row) SELECT store_intermediate_result_on_node('localhost', :worker_1_port, 'squares_2', 'SELECT s, s*s FROM generate_series(3, 4) s'); store_intermediate_result_on_node ------------------------------------ +--------------------------------------------------------------------- (1 row) @@ -495,13 +495,13 @@ ROLLBACK TO SAVEPOINT s1; -- fetch from worker 1 should succeed SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'localhost', :worker_1_port); fetch_intermediate_results ----------------------------- +--------------------------------------------------------------------- 114 (1 row) SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'binary') AS res (x int, x2 int); x | x2 ----+---- +--------------------------------------------------------------------- 1 | 1 2 | 4 3 | 9 @@ -511,13 +511,13 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], -- fetching again should succeed SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'localhost', :worker_1_port); fetch_intermediate_results ----------------------------- +--------------------------------------------------------------------- 114 (1 row) SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'binary') AS res (x int, x2 int); x | x2 ----+---- +--------------------------------------------------------------------- 1 | 1 2 | 4 3 | 9 @@ -528,7 +528,7 @@ ROLLBACK TO SAVEPOINT s1; -- empty result id list should succeed SELECT * FROM fetch_intermediate_results(ARRAY[]::text[], 'localhost', :worker_1_port); fetch_intermediate_results ----------------------------- +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/limit_intermediate_size.out b/src/test/regress/expected/limit_intermediate_size.out index e1471536e..beaebf5cb 100644 --- a/src/test/regress/expected/limit_intermediate_size.out +++ b/src/test/regress/expected/limit_intermediate_size.out @@ 
-59,7 +59,7 @@ UNION (select count(*) as c from cte5) ) as foo; sum ------ +--------------------------------------------------------------------- 91 (1 row) @@ -179,7 +179,7 @@ cte4 AS ( SELECT * FROM cte UNION ALL SELECT * FROM cte4 ORDER BY 1,2,3,4,5 LIMIT 5; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Wed Nov 22 18:49:42.327403 2017 | 3 | 2 | 1 | 1 | Wed Nov 22 19:03:01.772353 2017 | 4 | 1 | 2 | 1 | Wed Nov 22 19:07:03.846437 2017 | 1 | 2 | 5 | @@ -210,7 +210,7 @@ ORDER BY 1,2 LIMIT 10; user_id | value_2 ----------+--------- +--------------------------------------------------------------------- 1 | 0 1 | 0 1 | 0 @@ -233,7 +233,7 @@ cte2 AS ( ) SELECT cte.user_id, cte.value_2 FROM cte,cte2 ORDER BY 1,2 LIMIT 10; user_id | value_2 ----------+--------- +--------------------------------------------------------------------- 1 | 0 1 | 0 1 | 0 @@ -253,7 +253,7 @@ WITH cte AS ) SELECT * FROM cte ORDER BY 1,2,3,4,5 LIMIT 10; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | @@ -283,7 +283,7 @@ WITH cte AS ( ) SELECT * FROM cte ORDER BY 1,2,3,4,5 LIMIT 10; user_id | time | event_type | value_2 | value_3 ----------+---------------------------------+------------+---------+--------- +--------------------------------------------------------------------- 1 | Wed Nov 22 22:51:43.132261 2017 | 0 | 2 | 0 1 | Wed Nov 22 22:51:43.132261 2017 | 0 | 5 | 1 1 | Wed Nov 22 22:51:43.132261 2017 | 1 | 1 | 1 diff --git a/src/test/regress/expected/local_shard_execution.out b/src/test/regress/expected/local_shard_execution.out index 34fdf9080..73638b8d0 100644 --- a/src/test/regress/expected/local_shard_execution.out +++ b/src/test/regress/expected/local_shard_execution.out @@ -7,21 +7,21 @@ SET citus.next_shard_id TO 1470000; CREATE TABLE reference_table (key int PRIMARY KEY); SELECT create_reference_table('reference_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE distributed_table (key int PRIMARY KEY , value text, age bigint CHECK (age > 10), FOREIGN KEY (key) REFERENCES reference_table(key) ON DELETE CASCADE); SELECT create_distributed_table('distributed_table','key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE second_distributed_table (key int PRIMARY KEY , value text, FOREIGN KEY (key) REFERENCES distributed_table(key) ON DELETE CASCADE); SELECT create_distributed_table('second_distributed_table','key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -40,7 +40,7 @@ CREATE TABLE collections_list ( ) PARTITION BY LIST (collection_id ); SELECT create_distributed_table('collections_list', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -80,38 +80,38 @@ $$ LANGUAGE plpgsql; -- we'll use these values in the tests SELECT shard_of_distribution_column_is_local(1); 
shard_of_distribution_column_is_local ---------------------------------------- +--------------------------------------------------------------------- t (1 row) SELECT shard_of_distribution_column_is_local(6); shard_of_distribution_column_is_local ---------------------------------------- +--------------------------------------------------------------------- t (1 row) SELECT shard_of_distribution_column_is_local(500); shard_of_distribution_column_is_local ---------------------------------------- +--------------------------------------------------------------------- t (1 row) SELECT shard_of_distribution_column_is_local(701); shard_of_distribution_column_is_local ---------------------------------------- +--------------------------------------------------------------------- t (1 row) -- distribution key values of 11 and 12 are REMOTE to shards SELECT shard_of_distribution_column_is_local(11); shard_of_distribution_column_is_local ---------------------------------------- +--------------------------------------------------------------------- f (1 row) SELECT shard_of_distribution_column_is_local(12); shard_of_distribution_column_is_local ---------------------------------------- +--------------------------------------------------------------------- f (1 row) @@ -123,7 +123,7 @@ SET citus.log_local_commands TO ON; SELECT count(*) FROM distributed_table WHERE key = 1; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -132,20 +132,20 @@ LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_e -- favors parallel execution even if everyting is local to node SELECT count(*) FROM distributed_table WHERE key IN (1,6); count -------- +--------------------------------------------------------------------- 1 (1 row) -- queries that hit any remote shards should NOT use local execution SELECT count(*) FROM distributed_table WHERE key IN (1,11); count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM distributed_table; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -180,7 +180,7 @@ ON CONFLICT(key) DO UPDATE SET value = '22' RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) SELECT distributed_table.key, distributed_table.value, distributed_table.age FROM local_shard_execution.distributed_table_1470001 distributed_table, local_shard_execution.second_distributed_table_1470005 second_distributed_table WHERE (((distributed_table.key OPERATOR(pg_catalog.=) 1) AND (distributed_table.key OPERATOR(pg_catalog.=) second_distributed_table.key)) AND ((worker_hash(distributed_table.key) OPERATOR(pg_catalog.>=) '-2147483648'::integer) AND (worker_hash(distributed_table.key) OPERATOR(pg_catalog.<=) '-1073741825'::integer))) ON CONFLICT(key) DO UPDATE SET value = '22'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age key | value | age ------+-------+----- +--------------------------------------------------------------------- 1 | 22 | 20 (1 row) @@ -195,7 +195,7 @@ WHERE ON CONFLICT(key) DO UPDATE SET value = '22' RETURNING *; key | value | age ------+-------+----- +--------------------------------------------------------------------- (0 rows) -- 
INSERT..SELECT via coordinator consists of two steps, select + COPY @@ -209,7 +209,7 @@ INSERT INTO distributed_table SELECT * FROM distributed_table ON CONFLICT DO NOT -- though going through distributed execution EXPLAIN (COSTS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20; QUERY PLAN ------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All @@ -222,7 +222,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20; EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20; QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) (actual rows=1 loops=1) Task Count: 1 Tasks Shown: All @@ -235,7 +235,7 @@ EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM distribute EXPLAIN (COSTS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20; QUERY PLAN ------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All @@ -249,7 +249,7 @@ EXPLAIN (COSTS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20; EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20; QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) (actual rows=0 loops=1) Task Count: 1 Tasks Shown: All @@ -265,13 +265,13 @@ EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) DELETE FROM distributed_ta SELECT * FROM distributed_table WHERE key = 1 AND age = 20 ORDER BY 1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((key OPERATOR(pg_catalog.=) 1) AND (age OPERATOR(pg_catalog.=) 20)) ORDER BY key, value, age key | value | age ------+-------+----- +--------------------------------------------------------------------- (0 rows) SELECT * FROM second_distributed_table WHERE key = 1 ORDER BY 1,2; LOG: executing the command locally: SELECT key, value FROM local_shard_execution.second_distributed_table_1470005 second_distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) ORDER BY key, value key | value ------+------- +--------------------------------------------------------------------- (0 rows) -- Put rows back for other tests @@ -295,14 +295,14 @@ BEGIN; INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '29' RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '29'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age key | value | age ------+-------+----- +--------------------------------------------------------------------- 1 | 29 | 20 (1 row) SELECT * FROM distributed_table WHERE key = 1 ORDER BY 
1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) ORDER BY key, value, age key | value | age ------+-------+----- +--------------------------------------------------------------------- 1 | 29 | 20 (1 row) @@ -311,7 +311,7 @@ ROLLBACK; SELECT * FROM distributed_table WHERE key = 1 ORDER BY 1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) ORDER BY key, value, age key | value | age ------+-------+----- +--------------------------------------------------------------------- 1 | 22 | 20 (1 row) @@ -320,7 +320,7 @@ BEGIN; INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '29' RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '29'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age key | value | age ------+-------+----- +--------------------------------------------------------------------- 1 | 29 | 20 (1 row) @@ -332,7 +332,7 @@ LOG: executing the command locally: DELETE FROM local_shard_execution.distribut LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.second_distributed_table_1470005 second_distributed_table WHERE true LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.second_distributed_table_1470007 second_distributed_table WHERE true count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -341,19 +341,19 @@ ROLLBACK; SELECT * FROM distributed_table WHERE key = 1 ORDER BY 1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) ORDER BY key, value, age key | value | age ------+-------+----- +--------------------------------------------------------------------- 1 | 22 | 20 (1 row) SELECT count(*) FROM second_distributed_table; count -------- +--------------------------------------------------------------------- 2 (1 row) SELECT * FROM second_distributed_table; key | value ------+------- +--------------------------------------------------------------------- 1 | 1 6 | '6' (2 rows) @@ -365,7 +365,7 @@ BEGIN; INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '23' RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '23'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age key | value | age ------+-------+----- +--------------------------------------------------------------------- 1 | 23 | 20 (1 row) @@ -374,7 +374,7 @@ LOG: executing the command locally: INSERT INTO local_shard_execution.distribut SELECT * FROM distributed_table WHERE key = 1 ORDER BY 1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) ORDER BY key, value, age key | value | age ------+-------+----- +--------------------------------------------------------------------- 1 | 
23 | 20 (1 row) @@ -384,7 +384,7 @@ LOG: executing the command locally: SELECT key, value, age FROM local_shard_exe LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (value OPERATOR(pg_catalog.=) '23'::text) LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (value OPERATOR(pg_catalog.=) '23'::text) key | value | age ------+-------+----- +--------------------------------------------------------------------- 1 | 23 | 20 (1 row) @@ -398,7 +398,7 @@ LOG: executing the command locally: DELETE FROM local_shard_execution.distribut LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (value OPERATOR(pg_catalog.=) '23'::text) LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (value OPERATOR(pg_catalog.=) '23'::text) key | value | age ------+-------+----- +--------------------------------------------------------------------- (0 rows) COMMIT; @@ -406,7 +406,7 @@ COMMIT; SELECT * FROM distributed_table WHERE key = 1 ORDER BY 1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) ORDER BY key, value, age key | value | age ------+-------+----- +--------------------------------------------------------------------- (0 rows) -- if we start with a distributed execution, we should keep @@ -417,7 +417,7 @@ BEGIN; -- locally, it is not going to be executed locally SELECT * FROM distributed_table WHERE key = 1 ORDER BY 1,2,3; key | value | age ------+-------+----- +--------------------------------------------------------------------- (0 rows) -- but we can still execute parallel queries, even if @@ -427,7 +427,7 @@ NOTICE: truncate cascades to table "second_distributed_table" -- TRUNCATE cascaded into second_distributed_table SELECT count(*) FROM second_distributed_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -442,7 +442,7 @@ BEGIN; -- done distributed execution SELECT * FROM distributed_table WHERE key = 500 ORDER BY 1,2,3; key | value | age ------+-------+----- +--------------------------------------------------------------------- 500 | 500 | 25 (1 row) @@ -452,7 +452,7 @@ NOTICE: truncate cascades to table "second_distributed_table" -- ensure that TRUNCATE made it SELECT * FROM distributed_table WHERE key = 500 ORDER BY 1,2,3; key | value | age ------+-------+----- +--------------------------------------------------------------------- (0 rows) ROLLBACK; @@ -469,14 +469,14 @@ LOG: executing the command locally: DELETE FROM local_shard_execution.reference SELECT count(*) FROM distributed_table WHERE key = 701; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 701) count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM second_distributed_table WHERE key = 701; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.second_distributed_table_1470005 second_distributed_table WHERE (key OPERATOR(pg_catalog.=) 701) count -------- +--------------------------------------------------------------------- 
0 (1 row) @@ -485,7 +485,7 @@ LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_e LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.>) 700) LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.>) 700) count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -499,21 +499,21 @@ BEGIN; SELECT count(*) FROM distributed_table WHERE key = 1; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM distributed_table WHERE key = 6; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.=) 6) count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM distributed_table WHERE key = 500; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.=) 500) count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -523,7 +523,7 @@ BEGIN; SELECT count(*) FROM distributed_table WHERE key = 1; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -538,7 +538,7 @@ BEGIN; SELECT count(*) FROM distributed_table WHERE key = 1; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -553,7 +553,7 @@ BEGIN; SELECT count(*) FROM distributed_table WHERE key = 1; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -567,7 +567,7 @@ BEGIN; SELECT count(*) FROM distributed_table WHERE key = 1; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -579,7 +579,7 @@ ROLLBACK; INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '29' RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '29'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age key | value | age ------+-------+----- +--------------------------------------------------------------------- 1 | 11 | 21 (1 row) @@ -594,7 +594,7 @@ ROLLBACK; BEGIN; INSERT INTO distributed_table VALUES 
(11, '111',29) ON CONFLICT(key) DO UPDATE SET value = '29' RETURNING *; key | value | age ------+-------+----- +--------------------------------------------------------------------- 11 | 29 | 121 (1 row) @@ -607,7 +607,7 @@ ROLLBACK; BEGIN; INSERT INTO distributed_table VALUES (11, '111',29) ON CONFLICT(key) DO UPDATE SET value = '29' RETURNING *; key | value | age ------+-------+----- +--------------------------------------------------------------------- 11 | 29 | 121 (1 row) @@ -688,7 +688,7 @@ SELECT * FROM local_insert, distributed_local_mixed; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '29'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age LOG: executing the command locally: SELECT key FROM local_shard_execution.reference_table_1470000 reference_table WHERE (key OPERATOR(pg_catalog.=) ANY (SELECT local_insert.key FROM (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.age FROM read_intermediate_result('81_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, age bigint)) local_insert)) key | value | age | key ------+-------+-----+----- +--------------------------------------------------------------------- 1 | 11 | 21 | 1 (1 row) @@ -698,7 +698,7 @@ WITH distributed_local_mixed AS (SELECT * FROM distributed_table), local_insert AS (INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '29' RETURNING *) SELECT * FROM local_insert, distributed_local_mixed ORDER BY 1,2,3,4,5; key | value | age | key | value | age ------+-------+-----+-----+-------+----- +--------------------------------------------------------------------- 1 | 29 | 21 | 1 | 11 | 21 (1 row) @@ -712,7 +712,7 @@ WHERE distributed_table.key = all_data.key AND distributed_table.key = 1; LOG: executing the command locally: WITH all_data AS (SELECT distributed_table_1.key, distributed_table_1.value, distributed_table_1.age FROM local_shard_execution.distributed_table_1470001 distributed_table_1 WHERE (distributed_table_1.key OPERATOR(pg_catalog.=) 1)) SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table, all_data WHERE ((distributed_table.key OPERATOR(pg_catalog.=) all_data.key) AND (distributed_table.key OPERATOR(pg_catalog.=) 1)) count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -731,7 +731,7 @@ WHERE ORDER BY 1 DESC; key ------ +--------------------------------------------------------------------- 1 (1 row) @@ -749,7 +749,7 @@ WHERE distributed_table.key = all_data.key AND distributed_table.key = 1 AND EXISTS (SELECT * FROM all_data); count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -764,7 +764,7 @@ FROM WHERE distributed_table.key = all_data.age AND distributed_table.key = 1; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -774,7 +774,7 @@ TRUNCATE reference_table, distributed_table, second_distributed_table; INSERT INTO reference_table VALUES (1),(2),(3),(4),(5),(6) RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.reference_table_1470000 AS citus_table_alias (key) VALUES (1), (2), (3), (4), (5), (6) RETURNING citus_table_alias.key key ------ +--------------------------------------------------------------------- 1 2 3 
@@ -787,7 +787,7 @@ LOG: executing the command locally: INSERT INTO local_shard_execution.reference INSERT INTO distributed_table VALUES (1, '11',21), (5,'55',22) ON CONFLICT(key) DO UPDATE SET value = (EXCLUDED.value::int + 1)::text RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1,'11'::text,'21'::bigint), (5,'55'::text,'22'::bigint) ON CONFLICT(key) DO UPDATE SET value = (((excluded.value)::integer OPERATOR(pg_catalog.+) 1))::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age key | value | age ------+-------+----- +--------------------------------------------------------------------- 1 | 11 | 21 5 | 55 | 22 (2 rows) @@ -797,7 +797,7 @@ LOG: executing the command locally: INSERT INTO local_shard_execution.distribut -- because the command is a multi-shard query INSERT INTO distributed_table VALUES (1, '11',21), (2,'22',22), (3,'33',33), (4,'44',44),(5,'55',55) ON CONFLICT(key) DO UPDATE SET value = (EXCLUDED.value::int + 1)::text RETURNING *; key | value | age ------+-------+----- +--------------------------------------------------------------------- 1 | 12 | 21 2 | 22 | 22 3 | 33 | 33 @@ -813,42 +813,42 @@ BEGIN; EXECUTE local_prepare_no_param; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) count -------- +--------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_no_param; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) count -------- +--------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_no_param; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) count -------- +--------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_no_param; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) count -------- +--------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_no_param; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) count -------- +--------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_no_param; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -856,42 +856,42 @@ LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_e EXECUTE local_prepare_param(1); LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) count -------- +--------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_param(5); LOG: executing the command locally: SELECT count(*) AS count FROM 
local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 5) count -------- +--------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_param(6); LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.=) 6) count -------- +--------------------------------------------------------------------- 0 (1 row) EXECUTE local_prepare_param(1); LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) count -------- +--------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_param(5); LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 5) count -------- +--------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_param(6); LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.=) 6) count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -900,7 +900,7 @@ LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_e LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.<>) 1) LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.<>) 1) count -------- +--------------------------------------------------------------------- 4 (1 row) @@ -912,7 +912,7 @@ BEGIN; INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '100' RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '100'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age key | value | age ------+-------+----- +--------------------------------------------------------------------- 1 | 100 | 21 (1 row) @@ -926,7 +926,7 @@ ROLLBACK; -- we've rollbacked everything SELECT count(*) FROM distributed_table WHERE value = '200'; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -934,14 +934,14 @@ SELECT count(*) FROM distributed_table WHERE value = '200'; INSERT INTO reference_table VALUES (500) RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.reference_table_1470000 (key) VALUES (500) RETURNING key key ------ +--------------------------------------------------------------------- 500 (1 row) DELETE FROM reference_table WHERE key = 500 RETURNING *; LOG: executing the command locally: DELETE FROM local_shard_execution.reference_table_1470000 reference_table WHERE (key OPERATOR(pg_catalog.=) 500) RETURNING key key ------ +--------------------------------------------------------------------- 500 (1 row) @@ -951,7 +951,7 @@ BEGIN; DELETE FROM distributed_table; INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '100' RETURNING *; key | value | age 
------+-------+----- +--------------------------------------------------------------------- 1 | 11 | 21 (1 row) @@ -962,7 +962,7 @@ BEGIN; INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '100' RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '100'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age key | value | age ------+-------+----- +--------------------------------------------------------------------- 1 | 100 | 21 (1 row) @@ -989,7 +989,7 @@ BEGIN; DELETE FROM reference_table WHERE key = 500 RETURNING *; LOG: executing the command locally: DELETE FROM local_shard_execution.reference_table_1470000 reference_table WHERE (key OPERATOR(pg_catalog.=) 500) RETURNING key key ------ +--------------------------------------------------------------------- 500 (1 row) @@ -1013,7 +1013,7 @@ BEGIN; SET LOCAL client_min_messages TO INFO; SELECT count(*) FROM distributed_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -1027,7 +1027,7 @@ SELECT * FROM distributed_table WHERE key = 500; SELECT * FROM v_local_query_execution; LOG: executing the command locally: SELECT key, value, age FROM (SELECT distributed_table.key, distributed_table.value, distributed_table.age FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (distributed_table.key OPERATOR(pg_catalog.=) 500)) v_local_query_execution key | value | age ------+-------+----- +--------------------------------------------------------------------- 500 | 500 | 25 (1 row) @@ -1038,7 +1038,7 @@ SELECT * FROM distributed_table; SELECT * FROM v_local_query_execution_2 WHERE key = 500; LOG: executing the command locally: SELECT key, value, age FROM (SELECT distributed_table.key, distributed_table.value, distributed_table.age FROM local_shard_execution.distributed_table_1470003 distributed_table) v_local_query_execution_2 WHERE (key OPERATOR(pg_catalog.=) 500) key | value | age ------+-------+----- +--------------------------------------------------------------------- 500 | 500 | 25 (1 row) @@ -1048,7 +1048,7 @@ BEGIN; SAVEPOINT my_savepoint; SELECT count(*) FROM distributed_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -1066,7 +1066,7 @@ LOG: executing the command locally: DELETE FROM local_shard_execution.distribut LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE true LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE true count -------- +--------------------------------------------------------------------- 100 (1 row) @@ -1078,7 +1078,7 @@ COMMIT; INSERT INTO collections_list (collection_id) VALUES (0) RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.collections_list_1470011 (key, ser, collection_id) VALUES ('3940649673949185'::bigint, '3940649673949185'::bigint, 0) RETURNING key, ser, ts, collection_id, value key | ser | ts | collection_id | value -------------------+------------------+----+---------------+------- +--------------------------------------------------------------------- 3940649673949185 | 3940649673949185 | | 0 | (1 row) @@ -1088,7 +1088,7 @@ LOG: 
executing the command locally: INSERT INTO local_shard_execution.collectio SELECT count(*) FROM collections_list_0 WHERE key = 1; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.collections_list_0_1470013 collections_list_0 WHERE (key OPERATOR(pg_catalog.=) 1) count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -1096,7 +1096,7 @@ LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_e LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.collections_list_1470009 collections_list WHERE true LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.collections_list_1470011 collections_list WHERE true count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -1104,7 +1104,7 @@ LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_e LOG: executing the command locally: SELECT key, ser, ts, collection_id, value FROM local_shard_execution.collections_list_1470009 collections_list WHERE true LOG: executing the command locally: SELECT key, ser, ts, collection_id, value FROM local_shard_execution.collections_list_1470011 collections_list WHERE true key | ser | ts | collection_id | value -------------------+------------------+----+---------------+------- +--------------------------------------------------------------------- 1 | 3940649673949186 | | 0 | 3940649673949185 | 3940649673949185 | | 0 | (2 rows) @@ -1116,7 +1116,7 @@ COMMIT; WITH distributed_local_mixed AS (INSERT INTO reference_table VALUES (1000) RETURNING *) SELECT * FROM distributed_local_mixed; LOG: executing the command locally: INSERT INTO local_shard_execution.reference_table_1470000 (key) VALUES (1000) RETURNING key key ------- +--------------------------------------------------------------------- 1000 (1 row) @@ -1136,7 +1136,7 @@ LOG: executing the command locally: INSERT INTO local_shard_execution.distribut LOG: executing the command locally: DELETE FROM local_shard_execution.distributed_table_1470001 distributed_table RETURNING key LOG: executing the command locally: DELETE FROM local_shard_execution.distributed_table_1470003 distributed_table RETURNING key key ------ +--------------------------------------------------------------------- 1 2 (2 rows) @@ -1156,7 +1156,7 @@ LOG: executing the command locally: INSERT INTO local_shard_execution.reference DELETE FROM reference_table RETURNING key; LOG: executing the command locally: DELETE FROM local_shard_execution.reference_table_1470000 reference_table RETURNING key key ------ +--------------------------------------------------------------------- 1 2 (2 rows) @@ -1175,7 +1175,7 @@ CREATE TABLE event_responses ( ); SELECT create_distributed_table('event_responses', 'event_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1189,7 +1189,7 @@ END; $fn$; SELECT create_distributed_function('register_for_event(int,int,invite_resp)', 'p_event_id', 'event_responses'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/materialized_view.out b/src/test/regress/expected/materialized_view.out index 4be1c304e..f0d36450c 100644 --- a/src/test/regress/expected/materialized_view.out +++ b/src/test/regress/expected/materialized_view.out @@ -1,6 +1,6 
@@ ---- +--------------------------------------------------------------------- --- materialized_view ---- +--------------------------------------------------------------------- -- This file contains test cases for materialized view support. -- materialized views work -- insert into... select works with views @@ -10,14 +10,14 @@ CREATE VIEW air_shipped_lineitems AS SELECT * FROM lineitem_hash_part WHERE l_sh CREATE TABLE temp_lineitem(LIKE lineitem_hash_part); SELECT create_distributed_table('temp_lineitem', 'l_orderkey', 'hash', 'lineitem_hash_part'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) INSERT INTO temp_lineitem SELECT * FROM air_shipped_lineitems; SELECT count(*) FROM temp_lineitem; count -------- +--------------------------------------------------------------------- 1706 (1 row) @@ -25,7 +25,7 @@ SELECT count(*) FROM temp_lineitem; INSERT INTO temp_lineitem SELECT * FROM air_shipped_lineitems WHERE l_shipmode = 'MAIL'; SELECT count(*) FROM temp_lineitem; count -------- +--------------------------------------------------------------------- 1706 (1 row) @@ -34,7 +34,7 @@ CREATE MATERIALIZED VIEW mode_counts AS SELECT l_shipmode, count(*) FROM temp_lineitem GROUP BY l_shipmode; SELECT * FROM mode_counts WHERE l_shipmode = 'AIR' ORDER BY 2 DESC, 1 LIMIT 10; l_shipmode | count -------------+------- +--------------------------------------------------------------------- AIR | 1706 (1 row) @@ -45,7 +45,7 @@ ERROR: relation mode_counts is not distributed INSERT INTO temp_lineitem SELECT * FROM air_shipped_lineitems; SELECT * FROM mode_counts WHERE l_shipmode = 'AIR' ORDER BY 2 DESC, 1 LIMIT 10; l_shipmode | count -------------+------- +--------------------------------------------------------------------- AIR | 1706 (1 row) @@ -53,7 +53,7 @@ SELECT * FROM mode_counts WHERE l_shipmode = 'AIR' ORDER BY 2 DESC, 1 LIMIT 10; REFRESH MATERIALIZED VIEW mode_counts; SELECT * FROM mode_counts WHERE l_shipmode = 'AIR' ORDER BY 2 DESC, 1 LIMIT 10; l_shipmode | count -------------+------- +--------------------------------------------------------------------- AIR | 3412 (1 row) @@ -67,7 +67,7 @@ WHERE lineitem_hash_part.l_orderkey=orders_hash_part.o_orderkey AND lineitem_has REFRESH MATERIALIZED VIEW materialized_view; SELECT count(*) FROM materialized_view; count -------- +--------------------------------------------------------------------- 6 (1 row) @@ -80,7 +80,7 @@ WHERE lineitem_hash_part.l_orderkey=orders_hash_part.o_orderkey; REFRESH MATERIALIZED VIEW materialized_view; SELECT count(*) FROM materialized_view; count -------- +--------------------------------------------------------------------- 12000 (1 row) @@ -94,7 +94,7 @@ WHERE lineitem_hash_part.l_orderkey=orders_hash_part.o_orderkey; REFRESH MATERIALIZED VIEW materialized_view; SELECT count(*) FROM materialized_view; count -------- +--------------------------------------------------------------------- 12000 (1 row) @@ -111,7 +111,7 @@ FROM orders_hash_part JOIN ( REFRESH MATERIALIZED VIEW materialized_view; SELECT count(*) FROM materialized_view; count -------- +--------------------------------------------------------------------- 2985 (1 row) @@ -124,7 +124,7 @@ WHERE lineitem_hash_part.l_orderkey=orders_reference.o_orderkey; REFRESH MATERIALIZED VIEW materialized_view; SELECT count(*) FROM materialized_view; count -------- +--------------------------------------------------------------------- 12000 (1 row) @@ -139,21 +139,21 @@ WHERE 
lineitem_local_to_hash_part.l_orderkey=orders_local_to_hash_part.o_orderke SELECT create_distributed_table('lineitem_local_to_hash_part', 'l_orderkey'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('orders_local_to_hash_part', 'o_orderkey'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) REFRESH MATERIALIZED VIEW materialized_view; SELECT count(*) FROM materialized_view; count -------- +--------------------------------------------------------------------- 12000 (1 row) @@ -168,7 +168,7 @@ WHERE lineitem_hash_part.l_orderkey=orders_hash_part.o_orderkey; REFRESH MATERIALIZED VIEW materialized_view WITH DATA; SELECT count(*) FROM materialized_view; count -------- +--------------------------------------------------------------------- 12000 (1 row) @@ -193,7 +193,7 @@ CREATE UNIQUE INDEX materialized_view_index ON materialized_view (o_orderdate); REFRESH MATERIALIZED VIEW CONCURRENTLY materialized_view; SELECT count(*) FROM materialized_view; count -------- +--------------------------------------------------------------------- 1699 (1 row) @@ -206,13 +206,13 @@ CREATE TABLE large (id int, tenant_id int); CREATE TABLE small (id int, tenant_id int); SELECT create_distributed_table('large','tenant_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('small','tenant_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -227,7 +227,7 @@ UPDATE large SET id=20 FROM small_view WHERE small_view.id=large.id; ERROR: materialized views in modify queries are not supported SELECT * FROM large ORDER BY 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 1 | 2 2 | 3 5 | 4 @@ -239,7 +239,7 @@ UPDATE large SET id=28 FROM small_view WHERE small_view.id=large.id and small_vi ERROR: materialized views in modify queries are not supported SELECT * FROM large ORDER BY 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 1 | 2 2 | 3 5 | 4 @@ -250,7 +250,7 @@ SELECT * FROM large ORDER BY 1, 2; DELETE FROM large WHERE tenant_id in (SELECT tenant_id FROM small_view); SELECT * FROM large ORDER BY 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 6 | 5 (1 row) @@ -268,13 +268,13 @@ CREATE TABLE large_partitioned_p2 PARTITION OF large_partitioned FOR VALUES FROM CREATE TABLE large_partitioned_p3 PARTITION OF large_partitioned FOR VALUES FROM (20) TO (100); SELECT create_distributed_table('large_partitioned','tenant_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('small','tenant_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -291,7 +291,7 @@ UPDATE large_partitioned SET id=20 FROM small_view WHERE small_view.id=large_par ERROR: materialized views in modify queries are not supported SELECT * FROM large_partitioned ORDER BY 1, 2; id | tenant_id -----+----------- 
+--------------------------------------------------------------------- 1 | 2 2 | 3 5 | 4 @@ -305,7 +305,7 @@ SELECT * FROM large_partitioned ORDER BY 1, 2; DELETE FROM large_partitioned WHERE id in (SELECT id FROM small_view); SELECT * FROM large_partitioned ORDER BY 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 2 | 3 5 | 4 26 | 32 @@ -322,13 +322,13 @@ DELETE FROM large_partitioned WHERE id in (SELECT * FROM all_small_view_ids); -- make sure that materialized view in a CTE/subquery can be joined with a distributed table WITH cte AS (SELECT *, random() FROM small_view) SELECT count(*) FROM cte JOIN small USING(id); count -------- +--------------------------------------------------------------------- 4 (1 row) SELECT count(*) FROM (SELECT *, random() FROM small_view) as subquery JOIN small USING(id); count -------- +--------------------------------------------------------------------- 4 (1 row) diff --git a/src/test/regress/expected/multi_703_upgrade.out b/src/test/regress/expected/multi_703_upgrade.out index 282458ad5..660f0bce2 100644 --- a/src/test/regress/expected/multi_703_upgrade.out +++ b/src/test/regress/expected/multi_703_upgrade.out @@ -16,7 +16,7 @@ INSERT INTO pg_dist_node (nodename, nodeport, groupid) ALTER EXTENSION citus UPDATE TO '7.0-3'; SELECT * FROM pg_dist_placement; placementid | shardid | shardstate | shardlength | groupid --------------+---------+------------+-------------+--------- +--------------------------------------------------------------------- 1 | 1 | 1 | 0 | 1 (1 row) diff --git a/src/test/regress/expected/multi_agg_approximate_distinct.out b/src/test/regress/expected/multi_agg_approximate_distinct.out index e17346e74..987d119f8 100644 --- a/src/test/regress/expected/multi_agg_approximate_distinct.out +++ b/src/test/regress/expected/multi_agg_approximate_distinct.out @@ -12,7 +12,7 @@ WHERE name = 'hll' -- Try to execute count(distinct) when approximate distincts aren't enabled SELECT count(distinct l_orderkey) FROM lineitem; count -------- +--------------------------------------------------------------------- 2985 (1 row) @@ -20,58 +20,58 @@ SELECT count(distinct l_orderkey) FROM lineitem; SET citus.count_distinct_error_rate = 0.1; SELECT count(distinct l_orderkey) FROM lineitem; count -------- +--------------------------------------------------------------------- 2612 (1 row) SET citus.count_distinct_error_rate = 0.01; SELECT count(distinct l_orderkey) FROM lineitem; count -------- +--------------------------------------------------------------------- 2967 (1 row) -- Check approximate count(distinct) for different data types SELECT count(distinct l_partkey) FROM lineitem; count -------- +--------------------------------------------------------------------- 11654 (1 row) SELECT count(distinct l_extendedprice) FROM lineitem; count -------- +--------------------------------------------------------------------- 11691 (1 row) SELECT count(distinct l_shipdate) FROM lineitem; count -------- +--------------------------------------------------------------------- 2483 (1 row) SELECT count(distinct l_comment) FROM lineitem; count -------- +--------------------------------------------------------------------- 11788 (1 row) -- Check that we can execute approximate count(distinct) on complex expressions SELECT count(distinct (l_orderkey * 2 + 1)) FROM lineitem; count -------- +--------------------------------------------------------------------- 2980 (1 row) SELECT count(distinct extract(month from 
l_shipdate)) AS my_month FROM lineitem; my_month ----------- +--------------------------------------------------------------------- 12 (1 row) SELECT count(distinct l_partkey) / count(distinct l_orderkey) FROM lineitem; ?column? ----------- +--------------------------------------------------------------------- 3 (1 row) @@ -80,14 +80,14 @@ SELECT count(distinct l_partkey) / count(distinct l_orderkey) FROM lineitem; SELECT count(distinct l_orderkey) FROM lineitem WHERE octet_length(l_comment) + octet_length('randomtext'::text) > 40; count -------- +--------------------------------------------------------------------- 2355 (1 row) SELECT count(DISTINCT l_orderkey) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5; count -------- +--------------------------------------------------------------------- 835 (1 row) @@ -97,7 +97,7 @@ SELECT count(DISTINCT l_orderkey) as distinct_order_count, l_quantity FROM linei ORDER BY distinct_order_count ASC, l_quantity ASC LIMIT 10; distinct_order_count | l_quantity -----------------------+------------ +--------------------------------------------------------------------- 210 | 29.00 216 | 13.00 217 | 16.00 @@ -123,7 +123,7 @@ CREATE TABLE test_count_distinct_schema.nation_hash( ); SELECT create_distributed_table('test_count_distinct_schema.nation_hash', 'n_nationkey', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -132,7 +132,7 @@ SET search_path TO public; SET citus.count_distinct_error_rate TO 0.01; SELECT COUNT (DISTINCT n_regionkey) FROM test_count_distinct_schema.nation_hash; count -------- +--------------------------------------------------------------------- 3 (1 row) @@ -140,7 +140,7 @@ SELECT COUNT (DISTINCT n_regionkey) FROM test_count_distinct_schema.nation_hash; SET search_path TO test_count_distinct_schema; SELECT COUNT (DISTINCT n_regionkey) FROM nation_hash; count -------- +--------------------------------------------------------------------- 3 (1 row) @@ -161,7 +161,7 @@ SELECT l_returnflag, count(DISTINCT l_shipdate) as count_distinct, count(*) as t ORDER BY total LIMIT 10; l_returnflag | count_distinct | total ---------------+----------------+------- +--------------------------------------------------------------------- R | 1103 | 2901 A | 1108 | 2944 N | 1265 | 6155 @@ -177,7 +177,7 @@ SELECT ORDER BY 2 DESC, 1 DESC LIMIT 10; l_orderkey | count | count | count -------------+-------+-------+------- +--------------------------------------------------------------------- 12005 | 4 | 4 | 4 5409 | 4 | 4 | 4 4964 | 4 | 4 | 4 @@ -194,7 +194,7 @@ SELECT SET citus.count_distinct_error_rate = 0.0; SELECT count(distinct l_orderkey) FROM lineitem; count -------- +--------------------------------------------------------------------- 2985 (1 row) diff --git a/src/test/regress/expected/multi_agg_approximate_distinct_0.out b/src/test/regress/expected/multi_agg_approximate_distinct_0.out index b714968d8..c6a39db5f 100644 --- a/src/test/regress/expected/multi_agg_approximate_distinct_0.out +++ b/src/test/regress/expected/multi_agg_approximate_distinct_0.out @@ -10,14 +10,14 @@ WHERE name = 'hll' \gset :create_cmd; hll_present -------------- +--------------------------------------------------------------------- f (1 row) -- Try to execute count(distinct) when approximate distincts aren't enabled SELECT count(distinct l_orderkey) FROM lineitem; count -------- +--------------------------------------------------------------------- 2985 (1 row) 
@@ -83,7 +83,7 @@ CREATE TABLE test_count_distinct_schema.nation_hash( ); SELECT create_distributed_table('test_count_distinct_schema.nation_hash', 'n_nationkey', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -131,7 +131,7 @@ HINT: You need to have the hll extension loaded. SET citus.count_distinct_error_rate = 0.0; SELECT count(distinct l_orderkey) FROM lineitem; count -------- +--------------------------------------------------------------------- 2985 (1 row) diff --git a/src/test/regress/expected/multi_alter_table_add_constraints.out b/src/test/regress/expected/multi_alter_table_add_constraints.out index 0b307903e..c1eb436c1 100644 --- a/src/test/regress/expected/multi_alter_table_add_constraints.out +++ b/src/test/regress/expected/multi_alter_table_add_constraints.out @@ -14,7 +14,7 @@ CREATE TABLE products ( ); SELECT create_distributed_table('products', 'product_no'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -48,7 +48,7 @@ CREATE TABLE products_ref ( ); SELECT create_reference_table('products_ref'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -72,7 +72,7 @@ CREATE TABLE products_append ( ); SELECT create_distributed_table('products_append', 'product_no', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -98,7 +98,7 @@ DROP TABLE products_append; CREATE TABLE unique_test_table(id int, name varchar(20)); SELECT create_distributed_table('unique_test_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -136,7 +136,7 @@ DROP TABLE unique_test_table; CREATE TABLE unique_test_table_ref(id int, name varchar(20)); SELECT create_reference_table('unique_test_table_ref'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -159,7 +159,7 @@ DROP TABLE unique_test_table_ref; CREATE TABLE unique_test_table_append(id int, name varchar(20)); SELECT create_distributed_table('unique_test_table_append', 'id', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -190,7 +190,7 @@ CREATE TABLE products ( ); SELECT create_distributed_table('products', 'product_no'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -218,7 +218,7 @@ CREATE TABLE products_ref ( ); SELECT create_reference_table('products_ref'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -246,7 +246,7 @@ CREATE TABLE products_append ( ); SELECT create_distributed_table('products_append', 'product_no', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -266,7 +266,7 @@ CREATE TABLE products ( ); SELECT create_distributed_table('products', 'product_no'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -295,7 +295,7 @@ CREATE TABLE products_ref ( ); SELECT 
create_reference_table('products_ref'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -319,7 +319,7 @@ CREATE TABLE products_append ( ); SELECT create_distributed_table('products_append', 'product_no','append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -349,7 +349,7 @@ CREATE TABLE products ( ); SELECT create_distributed_table('products', 'product_no'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -370,7 +370,7 @@ CREATE TABLE products_ref ( ); SELECT create_reference_table('products_ref'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -390,7 +390,7 @@ CREATE TABLE products_append ( ); SELECT create_distributed_table('products_append', 'product_no', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -406,7 +406,7 @@ CREATE TABLE products ( ); SELECT create_distributed_table('products', 'product_no'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -439,7 +439,7 @@ CREATE TABLE products ( ); SELECT create_distributed_table('products', 'product_no'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -459,13 +459,13 @@ ROLLBACK; -- There should be no constraint on master and worker(s) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='products'::regclass; Constraint | Definition -------------+------------ +--------------------------------------------------------------------- (0 rows) \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.products_1450202'::regclass; Constraint | Definition -------------+------------ +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -479,13 +479,13 @@ ROLLBACK; -- There should be no constraint on master and worker(s) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='products'::regclass; Constraint | Definition -------------+------------ +--------------------------------------------------------------------- (0 rows) \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.products_1450202'::regclass; Constraint | Definition -------------+------------ +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -498,7 +498,7 @@ CREATE UNIQUE INDEX CONCURRENTLY alter_pk_idx ON sc1.alter_add_prim_key(x); ALTER TABLE sc1.alter_add_prim_key ADD CONSTRAINT alter_pk_idx PRIMARY KEY USING INDEX alter_pk_idx; SELECT create_distributed_table('sc1.alter_add_prim_key', 'x'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -517,7 +517,7 @@ SELECT (run_command_on_workers($$ ORDER BY 1,2,3,4; nodename | nodeport | success | result ------------+----------+---------+---------------------- +--------------------------------------------------------------------- localhost | 57637 | t | alter_pk_idx_1450234 localhost | 57638 | t | alter_pk_idx_1450234 (2 rows) @@ -527,7 +527,7 @@ 
CREATE TABLE sc2.alter_add_prim_key(x int, y int); SET search_path TO 'sc2'; SELECT create_distributed_table('alter_add_prim_key', 'x'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -548,7 +548,7 @@ SELECT (run_command_on_workers($$ ORDER BY 1,2,3,4; nodename | nodeport | success | result ------------+----------+---------+---------------------- +--------------------------------------------------------------------- localhost | 57637 | t | alter_pk_idx_1450236 localhost | 57638 | t | alter_pk_idx_1450236 (2 rows) @@ -561,7 +561,7 @@ SET search_path TO 'sc3'; SELECT create_distributed_table('alter_add_prim_key', 'x'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -583,7 +583,7 @@ SELECT (run_command_on_workers($$ ORDER BY 1,2,3,4; nodename | nodeport | success | result ------------+----------+---------+---------------------- +--------------------------------------------------------------------- localhost | 57637 | t | a_constraint_1450238 localhost | 57638 | t | a_constraint_1450238 (2 rows) @@ -604,7 +604,7 @@ SELECT (run_command_on_workers($$ ORDER BY 1,2,3,4; nodename | nodeport | success | result ------------+----------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | t | localhost | 57638 | t | (2 rows) diff --git a/src/test/regress/expected/multi_array_agg.out b/src/test/regress/expected/multi_array_agg.out index ad1e00aa2..5da5f5f41 100644 --- a/src/test/regress/expected/multi_array_agg.out +++ b/src/test/regress/expected/multi_array_agg.out @@ -10,7 +10,7 @@ $$; -- Check multi_cat_agg() aggregate which is used to implement array_agg() SELECT array_cat_agg(i) FROM (VALUES (ARRAY[1,2]), (NULL), (ARRAY[3,4])) AS t(i); array_cat_agg ---------------- +--------------------------------------------------------------------- {1,2,3,4} (1 row) @@ -25,7 +25,7 @@ ERROR: array_agg with order by is unsupported SELECT array_sort(array_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; array_sort --------------------------------------------------- +--------------------------------------------------------------------- {2132,15635,24027,63700,67310,155190} {106170} {4297,19036,29380,62143,128449,183095} @@ -41,7 +41,7 @@ SELECT array_sort(array_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(array_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; array_sort ------------------------------------------------------------------ +--------------------------------------------------------------------- {13309.60,21168.23,22824.48,28955.64,45983.16,49620.16} {44694.46} {2618.76,28733.64,32986.52,39890.88,46796.47,54058.05} @@ -57,7 +57,7 @@ SELECT array_sort(array_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(array_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; array_sort --------------------------------------------------------------------------------- +--------------------------------------------------------------------- {01-29-1996,01-30-1996,03-13-1996,03-30-1996,04-12-1996,04-21-1996} {01-28-1997} {10-29-1993,11-09-1993,12-04-1993,12-14-1993,01-16-1994,02-02-1994} @@ -73,7 +73,7 @@ SELECT array_sort(array_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey SELECT 
array_sort(array_agg(l_shipmode)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; array_sort ----------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- {"AIR ","FOB ","MAIL ","MAIL ","REG AIR ","TRUCK "} {"RAIL "} {"AIR ","FOB ","RAIL ","RAIL ","SHIP ","TRUCK "} @@ -89,7 +89,7 @@ SELECT array_sort(array_agg(l_shipmode)) FROM lineitem GROUP BY l_orderkey -- Check that we can execute array_agg() within other functions SELECT array_length(array_agg(l_orderkey), 1) FROM lineitem; array_length --------------- +--------------------------------------------------------------------- 12000 (1 row) @@ -101,7 +101,7 @@ SELECT l_quantity, count(*), avg(l_extendedprice), array_sort(array_agg(l_orderk WHERE l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | count | avg | array_sort -------------+-------+-----------------------+-------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- 1.00 | 17 | 1477.1258823529411765 | {5543,5633,5634,5698,5766,5856,5857,5986,8997,9026,9158,9184,9220,9222,9348,9383,9476} 2.00 | 19 | 3078.4242105263157895 | {5506,5540,5573,5669,5703,5730,5798,5831,5893,5920,5923,9030,9058,9123,9124,9188,9344,9441,9476} 3.00 | 14 | 4714.0392857142857143 | {5509,5543,5605,5606,5827,9124,9157,9184,9223,9254,9349,9414,9475,9477} @@ -112,7 +112,7 @@ SELECT l_quantity, array_sort(array_agg(extract (month FROM o_orderdate))) AS my FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | my_month -------------+------------------------------------------------ +--------------------------------------------------------------------- 1.00 | {2,3,4,4,4,5,5,5,6,7,7,7,7,9,9,11,11} 2.00 | {1,3,5,5,5,5,6,6,6,7,7,8,10,10,11,11,11,12,12} 3.00 | {3,4,5,6,7,7,8,8,8,9,9,10,11,11} @@ -123,7 +123,7 @@ SELECT l_quantity, array_sort(array_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE AND octet_length(l_comment) + octet_length('randomtext'::text) > 40 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | array_sort -------------+--------------------------------------------- +--------------------------------------------------------------------- 1.00 | {11269,11397,11713,11715,11973,18317,18445} 2.00 | {11847,18061,18247,18953} 3.00 | {18249,18315,18699,18951,18955} @@ -134,14 +134,14 @@ SELECT l_quantity, array_sort(array_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE SELECT array_agg(case when l_quantity > 20 then l_quantity else NULL end) FROM lineitem WHERE l_orderkey < 10; array_agg --------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- {NULL,36.00,NULL,28.00,24.00,32.00,38.00,45.00,49.00,27.00,NULL,28.00,26.00,30.00,NULL,26.00,50.00,37.00,NULL,NULL,46.00,28.00,38.00,35.00,NULL} (1 row) -- Check that we return NULL in case there are no input rows to array_agg() SELECT array_agg(l_orderkey) FROM lineitem WHERE l_quantity < 0; array_agg ------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/multi_average_expression.out 
b/src/test/regress/expected/multi_average_expression.out index 74511d553..2900c4499 100644 --- a/src/test/regress/expected/multi_average_expression.out +++ b/src/test/regress/expected/multi_average_expression.out @@ -26,7 +26,7 @@ ORDER BY l_returnflag, l_linestatus; sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order | l_returnflag | l_linestatus ------------+----------------+----------------+------------------+---------------------+--------------------+------------------------+-------------+--------------+-------------- +--------------------------------------------------------------------- 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 | A | F 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 | N | F 149778.00 | 224706948.16 | 213634857.6854 | 222134071.929801 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 5883 | N | O @@ -46,7 +46,7 @@ SELECT FROM lineitem; avg ---------------------- +--------------------------------------------------------------------- 35.3570440077497924 (1 row) @@ -59,7 +59,7 @@ SELECT FROM lineitem; avg ------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/multi_basic_queries.out b/src/test/regress/expected/multi_basic_queries.out index 13393acff..529179a2c 100644 --- a/src/test/regress/expected/multi_basic_queries.out +++ b/src/test/regress/expected/multi_basic_queries.out @@ -5,19 +5,19 @@ -- our partitioned table. SELECT count(*) FROM lineitem; count -------- +--------------------------------------------------------------------- 12000 (1 row) SELECT sum(l_extendedprice) FROM lineitem; sum --------------- +--------------------------------------------------------------------- 457702024.50 (1 row) SELECT avg(l_extendedprice) FROM lineitem; avg --------------------- +--------------------------------------------------------------------- 38141.835375000000 (1 row) @@ -26,7 +26,7 @@ BEGIN; SET TRANSACTION READ ONLY; SELECT count(*) FROM lineitem; count -------- +--------------------------------------------------------------------- 12000 (1 row) @@ -34,7 +34,7 @@ COMMIT; -- Verify temp tables which are used for final result aggregation don't persist. 
SELECT count(*) FROM pg_class WHERE relname LIKE 'pg_merge_job_%' AND relkind = 'r'; count -------- +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/multi_behavioral_analytics_basics.out b/src/test/regress/expected/multi_behavioral_analytics_basics.out index 45ee0c27a..c28f69123 100644 --- a/src/test/regress/expected/multi_behavioral_analytics_basics.out +++ b/src/test/regress/expected/multi_behavioral_analytics_basics.out @@ -1,8 +1,8 @@ ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Vanilla funnel query ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- INSERT INTO agg_results (user_id, value_1_agg) SELECT user_id, array_length(events_table, 1) FROM ( @@ -21,15 +21,15 @@ FROM ( -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 2 | 2 | 1.5000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Funnel grouped by whether or not a user has done an event ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- INSERT INTO agg_results (user_id, value_1_agg, value_2_agg ) SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event) FROM ( @@ -70,15 +70,15 @@ FROM ( -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 4 | 2 | 1.5000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Funnel, grouped by the number of times a user has done an event ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- INSERT INTO agg_results (user_id, value_1_agg, value_2_agg) SELECT user_id, @@ -147,17 +147,17 @@ ORDER BY -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 7 | 3 | 1.7142857142857143 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- 
+--------------------------------------------------------------------- -- Most recently seen users_table events_table ------------------------------------- +--------------------------------------------------------------------- -- Note that we don't use ORDER BY/LIMIT yet ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results (user_id, agg_time, value_2_agg) SELECT @@ -188,15 +188,15 @@ ORDER BY user_lastseen DESC; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 3 | 3 | 2.0000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Count the number of distinct users_table who are in segment X and Y and Z ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results (user_id) SELECT DISTINCT user_id @@ -207,15 +207,15 @@ WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 1 AND value_1 -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 5 | 5 | 3.8000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Count the number of distinct users_table who are in at least two of X and Y and Z segments ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id) SELECT user_id @@ -228,15 +228,15 @@ HAVING count(distinct value_1) >= 2; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Find customers who have done X, and satisfy other customer specific criteria ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_2_agg) SELECT user_id, 
value_2 FROM users_table WHERE @@ -246,15 +246,15 @@ SELECT user_id, value_2 FROM users_table WHERE -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 20 | 6 | 3.7500000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who haven’t done X, and satisfy other customer specific criteria ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -264,15 +264,15 @@ SELECT user_id, value_2 FROM users_table WHERE -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 4 | 2 | 4.2500000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X and Y, and satisfy other customer specific criteria ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -283,15 +283,15 @@ SELECT user_id, value_2 FROM users_table WHERE -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 29 | 5 | 3.1034482758620690 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X and haven’t done Y, and satisfy other customer specific criteria ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -301,15 +301,15 @@ SELECT user_id, value_2 FROM users_table WHERE -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 11 | 1 | 5.0000000000000000 (1 row) ------------------------------------- 
------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X more than 2 times, and satisfy other customer specific criteria ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_2_agg) SELECT user_id, @@ -329,15 +329,15 @@ INSERT INTO agg_results(user_id, value_2_agg) -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 4 | 2 | 3.5000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Find me all users_table who logged in more than once ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_1_agg) SELECT user_id, value_1 from @@ -348,15 +348,15 @@ SELECT user_id, value_1 from -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg --------+-------+------------------------ +--------------------------------------------------------------------- 1 | 1 | 1.00000000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Find me all users_table who has done some event and has filters ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id) Select user_id @@ -371,15 +371,15 @@ And user_id in -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 11 | 4 | 3.1818181818181818 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Which events_table did people who has done some specific events_table ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_1_agg) SELECT user_id, event_type FROM events_table @@ -388,15 +388,15 @@ GROUP BY user_id, event_type; -- get some 
statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 34 | 6 | 3.4411764705882353 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Find me all the users_table who has done some event more than three times ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id) select user_id from @@ -410,15 +410,15 @@ where event_type = 4 group by user_id having count(*) > 3 -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 4 | 4 | 2.5000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Find my assets that have the highest probability and fetch their metadata ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_1_agg, value_3_agg) SELECT @@ -438,7 +438,7 @@ FROM -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 3488 | 6 | 3.5372706422018349 (1 row) @@ -462,7 +462,7 @@ FROM -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) @@ -486,7 +486,7 @@ FROM ORDER BY 1, 2; SELECT count(*), count(DISTINCT user_id), avg(user_id), avg(value_1_agg) FROM agg_results; count | count | avg | avg --------+-------+--------------------+------------------------ +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 | 0.16666666666666666667 (1 row) @@ -509,7 +509,7 @@ FROM ORDER BY 1, 2; SELECT count(*), count(DISTINCT user_id), avg(user_id), avg(value_1_agg) FROM agg_results; count | count | avg | avg --------+-------+--------------------+------------------------ +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 | 0.16666666666666666667 (1 row) diff --git a/src/test/regress/expected/multi_behavioral_analytics_single_shard_queries.out b/src/test/regress/expected/multi_behavioral_analytics_single_shard_queries.out index 684bbe02b..65c99c730 100644 --- 
a/src/test/regress/expected/multi_behavioral_analytics_single_shard_queries.out +++ b/src/test/regress/expected/multi_behavioral_analytics_single_shard_queries.out @@ -1,8 +1,8 @@ ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Vanilla funnel query -- single shard ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, value_1_agg) SELECT user_id, array_length(events_table, 1) @@ -23,15 +23,15 @@ WHERE user_id = 2; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 1 | 1 | 2.0000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Vanilla funnel query -- two shards ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, value_1_agg) SELECT user_id, array_length(events_table, 1) @@ -52,15 +52,15 @@ WHERE (user_id = 1 OR user_id = 2); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 2 | 2 | 1.5000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Funnel grouped by whether or not a user has done an event -- single shard query ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, value_1_agg, value_2_agg ) SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event) @@ -100,11 +100,11 @@ FROM ( WHERE t1.user_id = 2 GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event; ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Funnel grouped by whether or not a user has done an event -- two shards query ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, value_1_agg, 
value_2_agg ) SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event) @@ -145,17 +145,17 @@ FROM ( -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 1 | 1 | 2.0000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Most recently seen users_table events_table -- single shard query ------------------------------------- +--------------------------------------------------------------------- -- Note that we don't use ORDER BY/LIMIT yet ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, agg_time, value_2_agg) SELECT @@ -187,17 +187,17 @@ ORDER BY user_lastseen DESC; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 1 | 1 | 5.0000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Most recently seen users_table events_table -- two shards query ------------------------------------- +--------------------------------------------------------------------- -- Note that we don't use ORDER BY/LIMIT yet ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, agg_time, value_2_agg) SELECT @@ -230,15 +230,15 @@ ORDER BY user_lastseen DESC; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 2 | 2 | 3.0000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Count the number of distinct users_table who are in segment X and Y and Z -- single shard ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id) SELECT DISTINCT user_id @@ -250,15 +250,15 @@ WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 1 AND value_1 -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), 
count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg --------+-------+------------------------ +--------------------------------------------------------------------- 1 | 1 | 1.00000000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Count the number of distinct users_table who are in segment X and Y and Z -- two shards ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id) SELECT DISTINCT user_id @@ -270,15 +270,15 @@ WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 1 AND value_1 -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg --------+-------+------------------------ +--------------------------------------------------------------------- 1 | 1 | 1.00000000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Find customers who have done X, and satisfy other customer specific criteria -- single shard ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -289,15 +289,15 @@ SELECT user_id, value_2 FROM users_table WHERE -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 7 | 1 | 2.0000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Find customers who have done X, and satisfy other customer specific criteria -- two shards ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -308,15 +308,15 @@ SELECT user_id, value_2 FROM users_table WHERE -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 10 | 2 | 1.7000000000000000 (1 row) ------------------------------------- ------------------------------------- 
+--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X and haven’t done Y, and satisfy other customer specific criteria -- single shard ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -327,15 +327,15 @@ SELECT user_id, value_2 FROM users_table WHERE -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg --------+-------+------------------------ +--------------------------------------------------------------------- 6 | 1 | 1.00000000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X and haven’t done Y, and satisfy other customer specific criteria -- two shards ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -346,15 +346,15 @@ SELECT user_id, value_2 FROM users_table WHERE -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 20 | 2 | 1.7000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X more than 2 times, and satisfy other customer specific criteria -- single shard ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, @@ -376,15 +376,15 @@ INSERT INTO agg_results_second(user_id, value_2_agg) -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 2 | 1 | 3.0000000000000000 (1 row) ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X more than 2 times, and satisfy other customer specific criteria -- two shards ------------------------------------- ------------------------------------- 
+--------------------------------------------------------------------- +--------------------------------------------------------------------- TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, @@ -405,7 +405,7 @@ INSERT INTO agg_results_second(user_id, value_2_agg) -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 4 | 2 | 3.5000000000000000 (1 row) diff --git a/src/test/regress/expected/multi_binary_master_copy_format.out b/src/test/regress/expected/multi_binary_master_copy_format.out index 8ff28f867..7269daeed 100644 --- a/src/test/regress/expected/multi_binary_master_copy_format.out +++ b/src/test/regress/expected/multi_binary_master_copy_format.out @@ -7,13 +7,13 @@ SET citus.binary_master_copy_format TO 'on'; SET citus.task_executor_type TO 'task-tracker'; SELECT count(*) FROM lineitem; count -------- +--------------------------------------------------------------------- 12000 (1 row) SELECT l_shipmode FROM lineitem WHERE l_partkey = 67310 OR l_partkey = 155190; l_shipmode ------------- +--------------------------------------------------------------------- TRUCK MAIL (2 rows) @@ -21,13 +21,13 @@ SELECT l_shipmode FROM lineitem WHERE l_partkey = 67310 OR l_partkey = 155190; RESET citus.task_executor_type; SELECT count(*) FROM lineitem; count -------- +--------------------------------------------------------------------- 12000 (1 row) SELECT l_shipmode FROM lineitem WHERE l_partkey = 67310 OR l_partkey = 155190; l_shipmode ------------- +--------------------------------------------------------------------- TRUCK MAIL (2 rows) diff --git a/src/test/regress/expected/multi_cache_invalidation.out b/src/test/regress/expected/multi_cache_invalidation.out index de1d80a54..1a1e2532d 100644 --- a/src/test/regress/expected/multi_cache_invalidation.out +++ b/src/test/regress/expected/multi_cache_invalidation.out @@ -7,13 +7,13 @@ CREATE TABLE mci_1.test (test_id integer NOT NULL, data int); CREATE TABLE mci_2.test (test_id integer NOT NULL, data int); SELECT create_distributed_table('mci_1.test', 'test_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('mci_2.test', 'test_id', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -21,7 +21,7 @@ INSERT INTO mci_1.test VALUES (1,2), (3,4); -- move shards into other append-distributed table SELECT run_command_on_placements('mci_1.test', 'ALTER TABLE %s SET SCHEMA mci_2'); run_command_on_placements -------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,1601000,t,"ALTER TABLE") (localhost,57638,1601000,t,"ALTER TABLE") (localhost,57637,1601001,t,"ALTER TABLE") @@ -37,7 +37,7 @@ SET logicalrelid = 'mci_2.test'::regclass, shardminvalue = NULL, shardmaxvalue = WHERE logicalrelid = 'mci_1.test'::regclass; SELECT * FROM mci_2.test ORDER BY test_id; test_id | data ----------+------ +--------------------------------------------------------------------- 1 | 2 3 | 4 (2 rows) diff --git a/src/test/regress/expected/multi_citus_tools.out b/src/test/regress/expected/multi_citus_tools.out index 
4c6825840..05f983748 100644 --- a/src/test/regress/expected/multi_citus_tools.out +++ b/src/test/regress/expected/multi_citus_tools.out @@ -15,7 +15,7 @@ SELECT * FROM master_run_on_worker(ARRAY['localhost']::text[], ARRAY['666']::int ARRAY['select count(*) from pg_dist_shard']::text[], false); node_name | node_port | success | result ------------+-----------+---------+------------------------------------ +--------------------------------------------------------------------- localhost | 666 | f | failed to connect to localhost:xxxxx (1 row) @@ -23,7 +23,7 @@ SELECT * FROM master_run_on_worker(ARRAY['localhost']::text[], ARRAY['666']::int ARRAY['select count(*) from pg_dist_shard']::text[], true); node_name | node_port | success | result ------------+-----------+---------+------------------------------------ +--------------------------------------------------------------------- localhost | 666 | f | failed to connect to localhost:xxxxx (1 row) @@ -38,7 +38,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['select count(*) from pg_dist_shard']::text[], false); node_name | node_port | success | result ------------+-----------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | t | 0 (1 row) @@ -48,7 +48,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['select * from pg_dist_shard']::text[], false); node_name | node_port | success | result ------------+-----------+---------+------------------------------------------ +--------------------------------------------------------------------- localhost | 57637 | f | expected a single column in query target (1 row) @@ -57,7 +57,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['select a from generate_series(1,2) a']::text[], false); node_name | node_port | success | result ------------+-----------+---------+--------------------------------------- +--------------------------------------------------------------------- localhost | 57637 | f | expected a single row in query result (1 row) @@ -68,7 +68,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], 'select a from generate_series(2,2) a']::text[], false); node_name | node_port | success | result ------------+-----------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | t | 1 localhost | 57637 | t | 2 (2 rows) @@ -80,7 +80,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], 'select a from generate_series(1,2) a']::text[], false); node_name | node_port | success | result ------------+-----------+---------+--------------------------------------- +--------------------------------------------------------------------- localhost | 57637 | t | 1 localhost | 57637 | f | expected a single row in query result (2 rows) @@ -92,7 +92,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], 'select a from generate_series(1,2) a']::text[], false); node_name | node_port | success | result ------------+-----------+---------+--------------------------------------- +--------------------------------------------------------------------- localhost | 57637 | f | expected a single row in query result localhost | 57637 | f | expected a single row in query result (2 rows) @@ -104,7 +104,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], 'create table second_table(a int, b 
int)']::text[], false); node_name | node_port | success | result ------------+-----------+---------+-------------- +--------------------------------------------------------------------- localhost | 57637 | t | CREATE TABLE localhost | 57637 | t | CREATE TABLE (2 rows) @@ -114,7 +114,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['insert into first_table select a,a from generate_series(1,20) a']::text[], false); node_name | node_port | success | result ------------+-----------+---------+------------- +--------------------------------------------------------------------- localhost | 57637 | t | INSERT 0 20 (1 row) @@ -122,7 +122,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['select count(*) from first_table']::text[], false); node_name | node_port | success | result ------------+-----------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | t | 20 (1 row) @@ -131,7 +131,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['insert into second_table select * from first_table']::text[], false); node_name | node_port | success | result ------------+-----------+---------+------------- +--------------------------------------------------------------------- localhost | 57637 | t | INSERT 0 20 (1 row) @@ -139,7 +139,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['insert into second_table select * from first_table']::text[], false); node_name | node_port | success | result ------------+-----------+---------+------------- +--------------------------------------------------------------------- localhost | 57637 | t | INSERT 0 20 (1 row) @@ -148,7 +148,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['select count(*) from second_table']::text[], false); node_name | node_port | success | result ------------+-----------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | t | 40 (1 row) @@ -163,7 +163,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['create index first_table_index on first_table(a)']::text[], false); node_name | node_port | success | result ------------+-----------+---------+-------------- +--------------------------------------------------------------------- localhost | 57637 | t | CREATE INDEX (1 row) @@ -172,7 +172,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['drop table first_table']::text[], false); node_name | node_port | success | result ------------+-----------+---------+------------ +--------------------------------------------------------------------- localhost | 57637 | t | DROP TABLE (1 row) @@ -180,7 +180,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['drop table second_table']::text[], false); node_name | node_port | success | result ------------+-----------+---------+------------ +--------------------------------------------------------------------- localhost | 57637 | t | DROP TABLE (1 row) @@ -189,7 +189,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['select count(*) from second_table']::text[], false); node_name | node_port | success | result ------------+-----------+---------+------------------------------------------------ 
+--------------------------------------------------------------------- localhost | 57637 | f | ERROR: relation "second_table" does not exist (1 row) @@ -201,7 +201,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['select count(*) from pg_dist_shard']::text[], true); node_name | node_port | success | result ------------+-----------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | t | 0 (1 row) @@ -211,7 +211,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['select * from pg_dist_shard']::text[], true); node_name | node_port | success | result ------------+-----------+---------+------------------------------------------ +--------------------------------------------------------------------- localhost | 57637 | f | expected a single column in query target (1 row) @@ -220,7 +220,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['select a from generate_series(1,2) a']::text[], true); node_name | node_port | success | result ------------+-----------+---------+--------------------------------------- +--------------------------------------------------------------------- localhost | 57637 | f | expected a single row in query result (1 row) @@ -231,7 +231,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], 'select a from generate_series(2,2) a']::text[], true); node_name | node_port | success | result ------------+-----------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | t | 1 localhost | 57637 | t | 2 (2 rows) @@ -243,7 +243,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], 'select a from generate_series(1,2) a']::text[], true); node_name | node_port | success | result ------------+-----------+---------+--------------------------------------- +--------------------------------------------------------------------- localhost | 57637 | t | 1 localhost | 57637 | f | expected a single row in query result (2 rows) @@ -255,7 +255,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], 'select a from generate_series(1,2) a']::text[], true); node_name | node_port | success | result ------------+-----------+---------+--------------------------------------- +--------------------------------------------------------------------- localhost | 57637 | f | expected a single row in query result localhost | 57637 | f | expected a single row in query result (2 rows) @@ -267,7 +267,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], 'create table second_table(a int, b int)']::text[], true); node_name | node_port | success | result ------------+-----------+---------+-------------- +--------------------------------------------------------------------- localhost | 57637 | t | CREATE TABLE localhost | 57637 | t | CREATE TABLE (2 rows) @@ -283,7 +283,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['insert into first_table select a,a from generate_series(1,20) a']::text[], true); node_name | node_port | success | result ------------+-----------+---------+------------- +--------------------------------------------------------------------- localhost | 57637 | t | INSERT 0 20 (1 row) @@ -291,7 +291,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['select count(*) from first_table']::text[], 
true); node_name | node_port | success | result ------------+-----------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | t | 20 (1 row) @@ -300,7 +300,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['insert into second_table select * from first_table']::text[], true); node_name | node_port | success | result ------------+-----------+---------+------------- +--------------------------------------------------------------------- localhost | 57637 | t | INSERT 0 20 (1 row) @@ -308,7 +308,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['insert into second_table select * from first_table']::text[], true); node_name | node_port | success | result ------------+-----------+---------+------------- +--------------------------------------------------------------------- localhost | 57637 | t | INSERT 0 20 (1 row) @@ -317,7 +317,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['select count(*) from second_table']::text[], true); node_name | node_port | success | result ------------+-----------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | t | 40 (1 row) @@ -326,7 +326,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['create index first_table_index on first_table(a)']::text[], true); node_name | node_port | success | result ------------+-----------+---------+-------------- +--------------------------------------------------------------------- localhost | 57637 | t | CREATE INDEX (1 row) @@ -335,7 +335,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['drop table first_table']::text[], true); node_name | node_port | success | result ------------+-----------+---------+------------ +--------------------------------------------------------------------- localhost | 57637 | t | DROP TABLE (1 row) @@ -343,7 +343,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['drop table second_table']::text[], true); node_name | node_port | success | result ------------+-----------+---------+------------ +--------------------------------------------------------------------- localhost | 57637 | t | DROP TABLE (1 row) @@ -352,21 +352,21 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: ARRAY['select count(*) from second_table']::text[], true); node_name | node_port | success | result ------------+-----------+---------+------------------------------------------------ +--------------------------------------------------------------------- localhost | 57637 | f | ERROR: relation "second_table" does not exist (1 row) -- run_command_on_XXX tests SELECT * FROM run_command_on_workers('select 1') ORDER BY 2 ASC; nodename | nodeport | success | result ------------+----------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | t | 1 localhost | 57638 | t | 1 (2 rows) SELECT * FROM run_command_on_workers('select count(*) from pg_dist_partition') ORDER BY 2 ASC; nodename | nodeport | success | result ------------+----------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | t | 0 localhost | 57638 | t | 0 (2 rows) @@ -376,13 +376,13 @@ SET citus.shard_count TO 5; CREATE TABLE check_placements (key int); SELECT 
create_distributed_table('check_placements', 'key', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM run_command_on_placements('check_placements', 'select 1'); nodename | nodeport | shardid | success | result ------------+----------+---------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | 1240000 | t | 1 localhost | 57638 | 1240000 | t | 1 localhost | 57637 | 1240001 | t | 1 @@ -399,7 +399,7 @@ UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid % 2 = 0 AND nodeport = :worker_1_port; SELECT * FROM run_command_on_placements('check_placements', 'select 1'); nodename | nodeport | shardid | success | result ------------+----------+---------+---------+-------- +--------------------------------------------------------------------- localhost | 57638 | 1240000 | t | 1 localhost | 57637 | 1240001 | t | 1 localhost | 57638 | 1240001 | t | 1 @@ -414,7 +414,7 @@ DROP TABLE check_placements CASCADE; CREATE TABLE check_colocated (key int); SELECT create_distributed_table('check_colocated', 'key', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -422,7 +422,7 @@ SET citus.shard_count TO 4; CREATE TABLE second_table (key int); SELECT create_distributed_table('second_table', 'key', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -436,7 +436,7 @@ SET citus.shard_count TO 5; CREATE TABLE second_table (key int); SELECT create_distributed_table('second_table', 'key', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -450,14 +450,14 @@ SET citus.shard_count TO 5; CREATE TABLE second_table (key int); SELECT create_distributed_table('second_table', 'key', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table', 'select 1'); nodename | nodeport | shardid1 | shardid2 | success | result ------------+----------+----------+----------+---------+-------- +--------------------------------------------------------------------- localhost | 57637 | 1240005 | 1240019 | t | 1 localhost | 57638 | 1240005 | 1240019 | t | 1 localhost | 57637 | 1240006 | 1240020 | t | 1 @@ -477,13 +477,13 @@ SET citus.shard_count TO 5; CREATE TABLE check_shards (key int); SELECT create_distributed_table('check_shards', 'key', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM run_command_on_shards('check_shards', 'select 1'); shardid | success | result ----------+---------+-------- +--------------------------------------------------------------------- 1240024 | t | 1 1240025 | t | 1 1240026 | t | 1 @@ -495,7 +495,7 @@ UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid % 2 = 0; SELECT * FROM run_command_on_shards('check_shards', 'select 1'); NOTICE: some shards do not have active placements shardid | success | result ----------+---------+-------- +--------------------------------------------------------------------- 1240025 | t | 1 1240027 | t | 1 (2 rows) diff --git 
a/src/test/regress/expected/multi_cluster_management.out b/src/test/regress/expected/multi_cluster_management.out index 552ac55f4..fe559c68c 100644 --- a/src/test/regress/expected/multi_cluster_management.out +++ b/src/test/regress/expected/multi_cluster_management.out @@ -10,20 +10,20 @@ DETAIL: There are no active worker nodes. -- add the nodes to the cluster SELECT 1 FROM master_add_node('localhost', :worker_1_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) -- get the active nodes SELECT master_get_active_worker_nodes(); master_get_active_worker_nodes --------------------------------- +--------------------------------------------------------------------- (localhost,57638) (localhost,57637) (2 rows) @@ -31,14 +31,14 @@ SELECT master_get_active_worker_nodes(); -- try to add a node that is already in the cluster SELECT * FROM master_add_node('localhost', :worker_1_port); master_add_node ------------------ +--------------------------------------------------------------------- 1 (1 row) -- get the active nodes SELECT master_get_active_worker_nodes(); master_get_active_worker_nodes --------------------------------- +--------------------------------------------------------------------- (localhost,57638) (localhost,57637) (2 rows) @@ -46,33 +46,33 @@ SELECT master_get_active_worker_nodes(); -- try to remove a node (with no placements) SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) -- verify that the node has been deleted SELECT master_get_active_worker_nodes(); master_get_active_worker_nodes --------------------------------- +--------------------------------------------------------------------- (localhost,57637) (1 row) -- try to disable a node with no placements see that node is removed SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) SELECT master_disable_node('localhost', :worker_2_port); master_disable_node ---------------------- +--------------------------------------------------------------------- (1 row) SELECT master_get_active_worker_nodes(); master_get_active_worker_nodes --------------------------------- +--------------------------------------------------------------------- (localhost,57637) (1 row) @@ -81,21 +81,21 @@ SET citus.shard_count TO 16; SET citus.shard_replication_factor TO 1; SELECT * FROM master_activate_node('localhost', :worker_2_port); master_activate_node ----------------------- +--------------------------------------------------------------------- 3 (1 row) CREATE TABLE cluster_management_test (col_1 text, col_2 int); SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) -- see that there are some active placements in the candidate node SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port; shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- +--------------------------------------------------------------------- 1220001 | 1 | localhost | 57638 1220003 | 1 | localhost | 57638 1220005 | 1 | localhost | 57638 @@ -111,7 +111,7 @@ SELECT master_remove_node('localhost', :worker_2_port); ERROR: you cannot remove the primary node of a node group which has shard placements SELECT master_get_active_worker_nodes(); master_get_active_worker_nodes --------------------------------- +--------------------------------------------------------------------- (localhost,57638) (localhost,57637) (2 rows) @@ -123,13 +123,13 @@ INSERT INTO test_reference_table VALUES (1, '1'); SELECT master_disable_node('localhost', :worker_2_port); NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 57638) to activate this node back. master_disable_node ---------------------- +--------------------------------------------------------------------- (1 row) SELECT master_get_active_worker_nodes(); master_get_active_worker_nodes --------------------------------- +--------------------------------------------------------------------- (localhost,57637) (1 row) @@ -170,49 +170,49 @@ SET ROLE node_metadata_user; BEGIN; SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port + 1); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_activate_node('localhost', :worker_2_port + 1); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_disable_node('localhost', :worker_2_port + 1); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_remove_node('localhost', :worker_2_port + 1); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port + 1); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_secondary_node('localhost', :worker_2_port + 2, 'localhost', :worker_2_port); ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) SELECT master_update_node(nodeid, 'localhost', :worker_2_port + 3) FROM pg_dist_node WHERE nodeport = :worker_2_port; master_update_node --------------------- +--------------------------------------------------------------------- (1 row) SELECT nodename, nodeport, noderole FROM pg_dist_node ORDER BY nodeport; nodename | nodeport | noderole ------------+----------+----------- +--------------------------------------------------------------------- localhost | 57637 | primary localhost | 57639 | primary localhost | 57640 | secondary @@ -223,14 +223,14 @@ ABORT; \c - postgres - :master_port SELECT master_get_active_worker_nodes(); master_get_active_worker_nodes --------------------------------- +--------------------------------------------------------------------- (localhost,57637) (1 row) -- restore the node for next tests SELECT * FROM master_activate_node('localhost', :worker_2_port); master_activate_node ----------------------- +--------------------------------------------------------------------- 3 (1 row) @@ -242,7 +242,7 @@ SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport=:worker_2_port UPDATE pg_dist_placement SET shardstate=3 WHERE groupid=:worker_2_group; SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port; shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- +--------------------------------------------------------------------- 1220001 | 3 | localhost | 57638 1220003 | 3 | localhost | 57638 1220005 | 3 | localhost | 57638 @@ -258,7 +258,7 @@ SELECT master_remove_node('localhost', :worker_2_port); ERROR: you cannot remove the primary node of a node group which has shard placements SELECT master_get_active_worker_nodes(); master_get_active_worker_nodes --------------------------------- +--------------------------------------------------------------------- (localhost,57638) (localhost,57637) (2 rows) @@ -266,7 +266,7 @@ SELECT master_get_active_worker_nodes(); -- clean-up SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -286,7 +286,7 @@ UPDATE pg_dist_placement SET groupid = :new_group WHERE groupid = :worker_2_grou -- test that you are allowed to remove secondary nodes even if there are placements SELECT 1 FROM master_add_node('localhost', 9990, groupid => :new_group, noderole => 'secondary'); ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) @@ -294,7 +294,7 @@ SELECT master_remove_node('localhost', :worker_2_port); ERROR: you cannot remove the primary node of a node group which has shard placements SELECT master_remove_node('localhost', 9990); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) @@ -303,60 +303,60 @@ DROP TABLE cluster_management_test; -- check that adding/removing nodes are propagated to nodes with metadata SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; nodename | nodeport ------------+---------- +--------------------------------------------------------------------- localhost | 57638 (1 row) \c - - - :master_port SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; nodename | nodeport -----------+---------- +--------------------------------------------------------------------- (0 rows) \c - - - :master_port -- check that added nodes are not propagated to nodes without metadata SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; nodename | nodeport -----------+---------- +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -365,13 +365,13 @@ SELECT master_remove_node('localhost', :worker_1_port), master_remove_node('localhost', :worker_2_port); master_remove_node | master_remove_node ---------------------+-------------------- +--------------------------------------------------------------------- | (1 row) SELECT count(1) FROM pg_dist_node; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -380,13 +380,13 @@ SELECT master_add_node('localhost', :worker_1_port), master_add_node('localhost', :worker_2_port); master_add_node | master_add_node ------------------+----------------- +--------------------------------------------------------------------- 11 | 12 (1 row) SELECT * FROM pg_dist_node ORDER BY nodeid; nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards ---------+---------+-----------+----------+----------+-------------+----------+----------+-------------+----------------+------------------ +--------------------------------------------------------------------- 11 | 9 | localhost | 57637 | default | f | t | primary | default | f | t 12 | 10 | localhost | 57638 | default | f | t | primary | default | f | t (2 rows) @@ -395,84 +395,84 @@ SELECT * FROM pg_dist_node ORDER BY nodeid; BEGIN; SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) COMMIT; SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; nodename | nodeport -----------+---------- +--------------------------------------------------------------------- (0 rows) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) COMMIT; SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; nodename | nodeport ------------+---------- +--------------------------------------------------------------------- localhost | 57638 (1 row) \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; nodename | nodeport ------------+---------- +--------------------------------------------------------------------- localhost | 57638 (1 row) \c - - - :master_port SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node; master_remove_node --------------------- +--------------------------------------------------------------------- (2 rows) SELECT 1 FROM master_add_node('localhost', :worker_1_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -480,21 +480,21 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port); SET citus.shard_count TO 4; SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) CREATE TABLE temp(col1 text, col2 int); SELECT create_distributed_table('temp', 'col1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -503,7 +503,7 @@ INSERT INTO temp VALUES ('row2', 2); COMMIT; SELECT col1, col2 FROM temp ORDER BY col1; col1 | col2 -------+------ +--------------------------------------------------------------------- row1 | 1 row2 | 2 (2 rows) @@ -517,7 +517,7 @@ WHERE AND pg_dist_shard.logicalrelid = 'temp'::regclass AND pg_dist_shard_placement.nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 4 (1 row) @@ -530,13 +530,13 @@ DELETE FROM pg_dist_node; \c - - - :master_port SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) @@ -551,45 +551,45 @@ ERROR: group 14 already has a primary node SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset SELECT 1 FROM master_add_node('localhost', 9998, groupid => :worker_1_group, noderole => 'secondary'); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', 9997, groupid => :worker_1_group, noderole => 'unavailable'); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) -- add_inactive_node also works with secondaries SELECT 1 FROM master_add_inactive_node('localhost', 9996, groupid => :worker_2_group, noderole => 'secondary'); ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) -- check that you can add a seconary to a non-default cluster, and activate it, and remove it SELECT master_add_inactive_node('localhost', 9999, groupid => :worker_2_group, nodecluster => 'olap', noderole => 'secondary'); master_add_inactive_node --------------------------- +--------------------------------------------------------------------- 22 (1 row) SELECT master_activate_node('localhost', 9999); master_activate_node ----------------------- +--------------------------------------------------------------------- 22 (1 row) SELECT master_disable_node('localhost', 9999); master_disable_node ---------------------- +--------------------------------------------------------------------- (1 row) SELECT master_remove_node('localhost', 9999); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) @@ -615,7 +615,7 @@ DETAIL: Failing row contains (16, 14, localhost, 57637, default, f, t, primary, SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary', nodecluster=> 'olap'); master_add_node ------------------ +--------------------------------------------------------------------- 25 (1 row) @@ -628,13 +628,13 @@ SELECT master_add_node('localhost', 8887, groupid => :worker_1_group, noderole = 'overflow' ); master_add_node ------------------ +--------------------------------------------------------------------- 26 (1 row) SELECT * FROM pg_dist_node WHERE nodeport=8887; nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards ---------+---------+-----------+----------+----------+-------------+----------+-----------+-----------------------------------------------------------------+----------------+------------------ +--------------------------------------------------------------------- 26 | 14 | localhost | 8887 | default | f | t | secondary | thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars. 
| f | t (1 row) @@ -643,13 +643,13 @@ SELECT * FROM pg_dist_node WHERE nodeport=8887; -- master_add_secondary_node lets you skip looking up the groupid SELECT master_add_secondary_node('localhost', 9995, 'localhost', :worker_1_port); master_add_secondary_node ---------------------------- +--------------------------------------------------------------------- 27 (1 row) SELECT master_add_secondary_node('localhost', 9994, primaryname => 'localhost', primaryport => :worker_2_port); master_add_secondary_node ---------------------------- +--------------------------------------------------------------------- 28 (1 row) @@ -657,7 +657,7 @@ SELECT master_add_secondary_node('localhost', 9993, 'localhost', 2000); ERROR: node at "localhost:xxxxx" does not exist SELECT master_add_secondary_node('localhost', 9992, 'localhost', :worker_1_port, nodecluster => 'second-cluster'); master_add_secondary_node ---------------------------- +--------------------------------------------------------------------- 29 (1 row) @@ -671,26 +671,26 @@ ERROR: there is already another node with the specified hostname and port -- master_update_node moves a node SELECT master_update_node(:worker_1_node, 'somehost', 9000); master_update_node --------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node; nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards ---------+---------+----------+----------+----------+-------------+----------+----------+-------------+----------------+------------------ +--------------------------------------------------------------------- 16 | 14 | somehost | 9000 | default | f | t | primary | default | f | t (1 row) -- cleanup SELECT master_update_node(:worker_1_node, 'localhost', :worker_1_port); master_update_node --------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node; nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards ---------+---------+-----------+----------+----------+-------------+----------+----------+-------------+----------------+------------------ +--------------------------------------------------------------------- 16 | 14 | localhost | 57637 | default | f | t | primary | default | f | t (1 row) @@ -698,14 +698,14 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE test_dist (x int, y int); SELECT create_distributed_table('test_dist', 'x'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) -- testing behaviour when setting shouldhaveshards to false on partially empty node SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false); master_set_node_property --------------------------- +--------------------------------------------------------------------- (1 row) @@ -715,25 +715,25 @@ CREATE TABLE test_dist_colocated_with_non_colocated (x int, y int); CREATE TABLE test_ref (a int, b int); SELECT create_distributed_table('test_dist_colocated', 'x'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_dist_non_colocated', 'x', colocate_with => 'none'); create_distributed_table 
--------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_dist_colocated_with_non_colocated', 'x', colocate_with => 'test_dist_non_colocated'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('test_ref'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -742,7 +742,7 @@ SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_colocated'::regclass GROUP BY nodeport ORDER BY nodeport; nodeport | count -----------+------- +--------------------------------------------------------------------- 57637 | 2 57638 | 2 (2 rows) @@ -752,7 +752,7 @@ SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport; nodeport | count -----------+------- +--------------------------------------------------------------------- 57637 | 4 (1 row) @@ -762,7 +762,7 @@ SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_colocated_with_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport; nodeport | count -----------+------- +--------------------------------------------------------------------- 57637 | 4 (1 row) @@ -771,7 +771,7 @@ SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport; nodeport | count -----------+------- +--------------------------------------------------------------------- 57637 | 1 57638 | 1 (2 rows) @@ -781,7 +781,7 @@ DROP TABLE test_dist, test_ref, test_dist_colocated, test_dist_non_colocated, te -- testing behaviour when setting shouldhaveshards to false on fully empty node SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false); master_set_node_property --------------------------- +--------------------------------------------------------------------- (1 row) @@ -791,13 +791,13 @@ CREATE TABLE test_dist_non_colocated (x int, y int); CREATE TABLE test_ref (a int, b int); SELECT create_distributed_table('test_dist', 'x'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('test_ref'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -806,7 +806,7 @@ SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist'::regclass GROUP BY nodeport ORDER BY nodeport; nodeport | count -----------+------- +--------------------------------------------------------------------- 57637 | 4 (1 row) @@ -815,14 +815,14 @@ SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport; nodeport | count -----------+------- +--------------------------------------------------------------------- 57637 | 1 57638 | 1 (2 rows) SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true); master_set_node_property --------------------------- 
+--------------------------------------------------------------------- (1 row) @@ -832,7 +832,7 @@ SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist'::regclass GROUP BY nodeport ORDER BY nodeport; nodeport | count -----------+------- +--------------------------------------------------------------------- 57637 | 4 (1 row) @@ -841,20 +841,20 @@ SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport; nodeport | count -----------+------- +--------------------------------------------------------------------- 57637 | 1 57638 | 1 (2 rows) SELECT create_distributed_table('test_dist_colocated', 'x'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('test_dist_non_colocated', 'x', colocate_with => 'none'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -864,7 +864,7 @@ SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_colocated'::regclass GROUP BY nodeport ORDER BY nodeport; nodeport | count -----------+------- +--------------------------------------------------------------------- 57637 | 4 (1 row) @@ -874,7 +874,7 @@ SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport; nodeport | count -----------+------- +--------------------------------------------------------------------- 57637 | 2 57638 | 2 (2 rows) diff --git a/src/test/regress/expected/multi_colocated_shard_transfer.out b/src/test/regress/expected/multi_colocated_shard_transfer.out index fab3190bf..817b895b3 100644 --- a/src/test/regress/expected/multi_colocated_shard_transfer.out +++ b/src/test/regress/expected/multi_colocated_shard_transfer.out @@ -18,7 +18,7 @@ WHERE colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate ----------+---------------+----------+--------------+------------ +--------------------------------------------------------------------- 1300000 | table1_group1 | 57637 | 1000 | 1 1300000 | table1_group1 | 57638 | 1000 | 3 1300001 | table1_group1 | 57637 | 1000 | 1 @@ -40,7 +40,7 @@ ORDER BY s.shardid, sp.nodeport; -- repair colocated shards SELECT master_copy_shard_placement(1300000, 'localhost', :worker_1_port, 'localhost', :worker_2_port); master_copy_shard_placement ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -54,7 +54,7 @@ WHERE colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate ----------+---------------+----------+--------------+------------ +--------------------------------------------------------------------- 1300000 | table1_group1 | 57637 | 1000 | 1 1300000 | table1_group1 | 57638 | 1000 | 1 1300001 | table1_group1 | 57637 | 1000 | 1 @@ -84,7 +84,7 @@ WHERE p.logicalrelid = 'table5_groupX'::regclass ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | 
colocationid | shardstate ----------+---------------+----------+--------------+------------ +--------------------------------------------------------------------- 1300016 | table5_groupx | 57637 | 0 | 1 1300016 | table5_groupx | 57638 | 0 | 3 1300017 | table5_groupx | 57637 | 0 | 1 @@ -98,7 +98,7 @@ ORDER BY s.shardid, sp.nodeport; -- repair NOT colocated shard SELECT master_copy_shard_placement(1300016, 'localhost', :worker_1_port, 'localhost', :worker_2_port); master_copy_shard_placement ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -112,7 +112,7 @@ WHERE p.logicalrelid = 'table5_groupX'::regclass ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate ----------+---------------+----------+--------------+------------ +--------------------------------------------------------------------- 1300016 | table5_groupx | 57637 | 0 | 1 1300016 | table5_groupx | 57638 | 0 | 1 1300017 | table5_groupx | 57637 | 0 | 1 @@ -134,7 +134,7 @@ WHERE p.logicalrelid = 'table6_append'::regclass ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate ----------+---------------+----------+--------------+------------ +--------------------------------------------------------------------- 1300020 | table6_append | 57637 | 0 | 1 1300020 | table6_append | 57638 | 0 | 3 1300021 | table6_append | 57637 | 0 | 1 @@ -144,7 +144,7 @@ ORDER BY s.shardid, sp.nodeport; -- repair shard in append distributed table SELECT master_copy_shard_placement(1300020, 'localhost', :worker_1_port, 'localhost', :worker_2_port); master_copy_shard_placement ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -158,7 +158,7 @@ WHERE p.logicalrelid = 'table6_append'::regclass ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate ----------+---------------+----------+--------------+------------ +--------------------------------------------------------------------- 1300020 | table6_append | 57637 | 0 | 1 1300020 | table6_append | 57638 | 0 | 1 1300021 | table6_append | 57637 | 0 | 1 @@ -178,7 +178,7 @@ WHERE colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate ----------+---------------+----------+--------------+------------ +--------------------------------------------------------------------- 1300000 | table1_group1 | 57637 | 1000 | 3 1300000 | table1_group1 | 57638 | 1000 | 3 1300001 | table1_group1 | 57637 | 1000 | 1 @@ -210,7 +210,7 @@ WHERE colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate ----------+---------------+----------+--------------+------------ +--------------------------------------------------------------------- 1300000 | table1_group1 | 57637 | 1000 | 3 1300000 | table1_group1 | 57638 | 1000 | 3 1300001 | table1_group1 | 57637 | 1000 | 1 diff --git a/src/test/regress/expected/multi_colocation_utils.out b/src/test/regress/expected/multi_colocation_utils.out index 9800fb706..4a7d700f6 100644 --- a/src/test/regress/expected/multi_colocation_utils.out +++ b/src/test/regress/expected/multi_colocation_utils.out @@ -59,280 +59,280 @@ CREATE FUNCTION find_shard_interval_index(bigint) CREATE TABLE 
table1_group1 ( id int ); SELECT master_create_distributed_table('table1_group1', 'id', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('table1_group1', 4, 2); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) CREATE TABLE table2_group1 ( id int ); SELECT master_create_distributed_table('table2_group1', 'id', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('table2_group1', 4, 2); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) CREATE TABLE table3_group2 ( id int ); SELECT master_create_distributed_table('table3_group2', 'id', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('table3_group2', 4, 2); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) CREATE TABLE table4_group2 ( id int ); SELECT master_create_distributed_table('table4_group2', 'id', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('table4_group2', 4, 2); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) CREATE TABLE table5_groupX ( id int ); SELECT master_create_distributed_table('table5_groupX', 'id', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('table5_groupX', 4, 2); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) CREATE TABLE table6_append ( id int ); SELECT master_create_distributed_table('table6_append', 'id', 'append'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_empty_shard('table6_append'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 1300020 (1 row) SELECT master_create_empty_shard('table6_append'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 1300021 (1 row) -- make table1_group1 and table2_group1 co-located manually SELECT colocation_test_colocate_tables('table1_group1', 'table2_group1'); colocation_test_colocate_tables ---------------------------------- +--------------------------------------------------------------------- t (1 row) -- check co-location id SELECT get_table_colocation_id('table1_group1'); get_table_colocation_id -------------------------- +--------------------------------------------------------------------- 1000 (1 row) SELECT get_table_colocation_id('table5_groupX'); get_table_colocation_id -------------------------- 
+--------------------------------------------------------------------- 0 (1 row) SELECT get_table_colocation_id('table6_append'); get_table_colocation_id -------------------------- +--------------------------------------------------------------------- 0 (1 row) -- check self table co-location SELECT tables_colocated('table1_group1', 'table1_group1'); tables_colocated ------------------- +--------------------------------------------------------------------- t (1 row) SELECT tables_colocated('table5_groupX', 'table5_groupX'); tables_colocated ------------------- +--------------------------------------------------------------------- t (1 row) SELECT tables_colocated('table6_append', 'table6_append'); tables_colocated ------------------- +--------------------------------------------------------------------- t (1 row) -- check table co-location with same co-location group SELECT tables_colocated('table1_group1', 'table2_group1'); tables_colocated ------------------- +--------------------------------------------------------------------- t (1 row) -- check table co-location with different co-location group SELECT tables_colocated('table1_group1', 'table3_group2'); tables_colocated ------------------- +--------------------------------------------------------------------- f (1 row) -- check table co-location with invalid co-location group SELECT tables_colocated('table1_group1', 'table5_groupX'); tables_colocated ------------------- +--------------------------------------------------------------------- f (1 row) SELECT tables_colocated('table1_group1', 'table6_append'); tables_colocated ------------------- +--------------------------------------------------------------------- f (1 row) -- check self shard co-location SELECT shards_colocated(1300000, 1300000); shards_colocated ------------------- +--------------------------------------------------------------------- t (1 row) SELECT shards_colocated(1300016, 1300016); shards_colocated ------------------- +--------------------------------------------------------------------- t (1 row) SELECT shards_colocated(1300020, 1300020); shards_colocated ------------------- +--------------------------------------------------------------------- t (1 row) -- check shard co-location with same co-location group SELECT shards_colocated(1300000, 1300004); shards_colocated ------------------- +--------------------------------------------------------------------- t (1 row) -- check shard co-location with same table different co-location group SELECT shards_colocated(1300000, 1300001); shards_colocated ------------------- +--------------------------------------------------------------------- f (1 row) -- check shard co-location with different co-location group SELECT shards_colocated(1300000, 1300005); shards_colocated ------------------- +--------------------------------------------------------------------- f (1 row) -- check shard co-location with invalid co-location group SELECT shards_colocated(1300000, 1300016); shards_colocated ------------------- +--------------------------------------------------------------------- f (1 row) SELECT shards_colocated(1300000, 1300020); shards_colocated ------------------- +--------------------------------------------------------------------- f (1 row) -- check co-located table list SELECT UNNEST(get_colocated_table_array('table1_group1'))::regclass ORDER BY 1; unnest ---------------- +--------------------------------------------------------------------- table1_group1 table2_group1 (2 rows) SELECT 
UNNEST(get_colocated_table_array('table5_groupX'))::regclass ORDER BY 1; unnest ---------------- +--------------------------------------------------------------------- table5_groupx (1 row) SELECT UNNEST(get_colocated_table_array('table6_append'))::regclass ORDER BY 1; unnest ---------------- +--------------------------------------------------------------------- table6_append (1 row) -- check co-located shard list SELECT UNNEST(get_colocated_shard_array(1300000))::regclass ORDER BY 1; unnest ---------- +--------------------------------------------------------------------- 1300000 1300004 (2 rows) SELECT UNNEST(get_colocated_shard_array(1300016))::regclass ORDER BY 1; unnest ---------- +--------------------------------------------------------------------- 1300016 (1 row) SELECT UNNEST(get_colocated_shard_array(1300020))::regclass ORDER BY 1; unnest ---------- +--------------------------------------------------------------------- 1300020 (1 row) -- check FindShardIntervalIndex function SELECT find_shard_interval_index(1300000); find_shard_interval_index ---------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT find_shard_interval_index(1300001); find_shard_interval_index ---------------------------- +--------------------------------------------------------------------- 1 (1 row) SELECT find_shard_interval_index(1300002); find_shard_interval_index ---------------------------- +--------------------------------------------------------------------- 2 (1 row) SELECT find_shard_interval_index(1300003); find_shard_interval_index ---------------------------- +--------------------------------------------------------------------- 3 (1 row) SELECT find_shard_interval_index(1300016); find_shard_interval_index ---------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -341,14 +341,14 @@ SET citus.shard_count = 2; CREATE TABLE table1_groupA ( id int ); SELECT create_distributed_table('table1_groupA', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE table2_groupA ( id int ); SELECT create_distributed_table('table2_groupA', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -357,14 +357,14 @@ SET citus.shard_replication_factor = 1; CREATE TABLE table1_groupB ( id int ); SELECT create_distributed_table('table1_groupB', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE table2_groupB ( id int ); SELECT create_distributed_table('table2_groupB', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -376,14 +376,14 @@ SET citus.shard_replication_factor to DEFAULT; CREATE TABLE table1_groupC ( id text ); SELECT create_distributed_table('table1_groupC', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE table2_groupC ( id text ); SELECT create_distributed_table('table2_groupC', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -392,14 +392,14 @@ SET citus.shard_count = 8; CREATE TABLE table1_groupD ( id int ); SELECT 
create_distributed_table('table1_groupD', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE table2_groupD ( id int ); SELECT create_distributed_table('table2_groupD', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -407,14 +407,14 @@ SELECT create_distributed_table('table2_groupD', 'id'); CREATE TABLE table_append ( id int ); SELECT create_distributed_table('table_append', 'id', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE table_range ( id int ); SELECT create_distributed_table('table_range', 'id', 'range'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -423,7 +423,7 @@ CREATE FOREIGN TABLE table3_groupD ( id int ) SERVER fake_fdw_server; SELECT create_distributed_table('table3_groupD', 'id'); NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -432,7 +432,7 @@ SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 3 | 4 | 2 | 23 | 0 4 | 2 | 2 | 23 | 0 5 | 2 | 1 | 23 | 0 @@ -444,7 +444,7 @@ SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY logicalrelid; logicalrelid | colocationid ----------------+-------------- +--------------------------------------------------------------------- table1_groupa | 4 table2_groupa | 4 table1_groupb | 5 @@ -460,7 +460,7 @@ SELECT logicalrelid, colocationid FROM pg_dist_partition DROP TABLE table1_groupA; SELECT * FROM pg_dist_colocation WHERE colocationid = 4; colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 4 | 2 | 2 | 23 | 0 (1 row) @@ -468,7 +468,7 @@ SELECT * FROM pg_dist_colocation WHERE colocationid = 4; DROP TABLE table2_groupA; SELECT * FROM pg_dist_colocation WHERE colocationid = 4; colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 4 | 2 | 2 | 23 | 0 (1 row) @@ -477,14 +477,14 @@ SET citus.shard_count = 2; CREATE TABLE table1_groupE ( id int ); SELECT create_distributed_table('table1_groupE', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE table2_groupE ( id int ); SELECT create_distributed_table('table2_groupE', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -492,7 +492,7 @@ SELECT 
create_distributed_table('table2_groupE', 'id'); CREATE TABLE table3_groupE ( dummy_column text, id int ); SELECT create_distributed_table('table3_groupE', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -501,7 +501,7 @@ CREATE SCHEMA schema_colocation; CREATE TABLE schema_colocation.table4_groupE ( id int ); SELECT create_distributed_table('schema_colocation.table4_groupE', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -509,28 +509,28 @@ SELECT create_distributed_table('schema_colocation.table4_groupE', 'id'); CREATE TABLE table1_group_none_1 ( id int ); SELECT create_distributed_table('table1_group_none_1', 'id', colocate_with => 'none'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE table2_group_none_1 ( id int ); SELECT create_distributed_table('table2_group_none_1', 'id', colocate_with => 'table1_group_none_1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE table1_group_none_2 ( id int ); SELECT create_distributed_table('table1_group_none_2', 'id', colocate_with => 'none'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE table4_groupE ( id int ); SELECT create_distributed_table('table4_groupE', 'id', colocate_with => 'default'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -539,7 +539,7 @@ SET citus.shard_count = 3; CREATE TABLE table1_group_none_3 ( id int ); SELECT create_distributed_table('table1_group_none_3', 'id', colocate_with => 'NONE'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -547,7 +547,7 @@ SELECT create_distributed_table('table1_group_none_3', 'id', colocate_with => 'N CREATE TABLE table1_group_default ( id int ); SELECT create_distributed_table('table1_group_default', 'id', colocate_with => 'DEFAULT'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -556,7 +556,7 @@ SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 3 | 4 | 2 | 23 | 0 4 | 2 | 2 | 23 | 0 5 | 2 | 1 | 23 | 0 @@ -569,7 +569,7 @@ SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid, logicalrelid; logicalrelid | colocationid ----------------------------------+-------------- +--------------------------------------------------------------------- table1_groupe | 4 table2_groupe | 4 table3_groupe | 4 @@ -606,7 +606,7 @@ SELECT create_distributed_table('table_failing', 'id', colocate_with => ''); ERROR: invalid name syntax SELECT create_distributed_table('table_failing', 'id', colocate_with => NULL); create_distributed_table --------------------------- 
+--------------------------------------------------------------------- (1 row) @@ -619,14 +619,14 @@ DETAIL: Distribution column types don't match for table1_groupe and table_bigin \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.table3_groupE_1300062'::regclass; Column | Type | Modifiers ---------------+---------+----------- +--------------------------------------------------------------------- dummy_column | text | id | integer | (2 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='schema_colocation.table4_groupE_1300064'::regclass; Column | Type | Modifiers ---------+---------+----------- +--------------------------------------------------------------------- id | integer | (1 row) @@ -635,14 +635,14 @@ SET citus.next_shard_id TO 1300080; CREATE TABLE table1_groupF ( id int ); SELECT create_reference_table('table1_groupF'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE table2_groupF ( id int ); SELECT create_reference_table('table2_groupF'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -651,7 +651,7 @@ SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 3 | 4 | 2 | 23 | 0 4 | 2 | 2 | 23 | 0 5 | 2 | 1 | 23 | 0 @@ -677,7 +677,7 @@ ORDER BY table1, table2; table1 | table2 | colocated ----------------------------------+---------------------------------+----------- +--------------------------------------------------------------------- table1_group1 | table2_group1 | t table1_groupb | table2_groupb | t table1_groupc | table2_groupc | t @@ -718,7 +718,7 @@ ORDER BY shardid, nodeport; logicalrelid | shardid | shardstorage | nodeport | shardminvalue | shardmaxvalue ----------------------------------+---------+--------------+----------+---------------+--------------- +--------------------------------------------------------------------- table1_groupb | 1300026 | t | 57637 | -2147483648 | -1 table1_groupb | 1300027 | t | 57638 | 0 | 2147483647 table2_groupb | 1300028 | t | 57637 | -2147483648 | -1 @@ -840,14 +840,14 @@ SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- (0 rows) SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid, logicalrelid; logicalrelid | colocationid ---------------+-------------- +--------------------------------------------------------------------- (0 rows) -- first check failing cases @@ -871,51 +871,51 @@ SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- 
+--------------------------------------------------------------------- (0 rows) SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid, logicalrelid; logicalrelid | colocationid ---------------+-------------- +--------------------------------------------------------------------- (0 rows) -- check successfully cololated tables SELECT mark_tables_colocated('table1_groupB', ARRAY['table2_groupB']); mark_tables_colocated ------------------------ +--------------------------------------------------------------------- (1 row) SELECT mark_tables_colocated('table1_groupC', ARRAY['table2_groupC']); mark_tables_colocated ------------------------ +--------------------------------------------------------------------- (1 row) SELECT mark_tables_colocated('table1_groupD', ARRAY['table2_groupD']); mark_tables_colocated ------------------------ +--------------------------------------------------------------------- (1 row) SELECT mark_tables_colocated('table1_groupE', ARRAY['table2_groupE', 'table3_groupE']); mark_tables_colocated ------------------------ +--------------------------------------------------------------------- (1 row) SELECT mark_tables_colocated('table1_groupF', ARRAY['table2_groupF']); mark_tables_colocated ------------------------ +--------------------------------------------------------------------- (1 row) -- check to colocate with itself SELECT mark_tables_colocated('table1_groupB', ARRAY['table1_groupB']); mark_tables_colocated ------------------------ +--------------------------------------------------------------------- (1 row) @@ -923,14 +923,14 @@ SET citus.shard_count = 2; CREATE TABLE table1_group_none ( id int ); SELECT create_distributed_table('table1_group_none', 'id', colocate_with => 'NONE'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE table2_group_none ( id int ); SELECT create_distributed_table('table2_group_none', 'id', colocate_with => 'NONE'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -939,7 +939,7 @@ SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 2 | 2 | 1 | 23 | 0 3 | 2 | 2 | 25 | 100 4 | 8 | 2 | 23 | 0 @@ -950,7 +950,7 @@ SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid, logicalrelid; logicalrelid | colocationid --------------------+-------------- +--------------------------------------------------------------------- table1_groupb | 2 table2_groupb | 2 table1_groupc | 3 @@ -967,14 +967,14 @@ SELECT logicalrelid, colocationid FROM pg_dist_partition -- move the all tables in colocation group 5 to colocation group 7 SELECT mark_tables_colocated('table1_group_none', ARRAY['table1_groupE', 'table2_groupE', 'table3_groupE']); mark_tables_colocated ------------------------ +--------------------------------------------------------------------- (1 row) -- move a table with a colocation id which is already not in pg_dist_colocation SELECT mark_tables_colocated('table1_group_none', ARRAY['table2_group_none']); 
mark_tables_colocated ------------------------ +--------------------------------------------------------------------- (1 row) @@ -983,7 +983,7 @@ SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 2 | 2 | 1 | 23 | 0 3 | 2 | 2 | 25 | 100 4 | 8 | 2 | 23 | 0 @@ -993,7 +993,7 @@ SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid, logicalrelid; logicalrelid | colocationid --------------------+-------------- +--------------------------------------------------------------------- table1_groupb | 2 table2_groupb | 2 table1_groupc | 3 @@ -1011,7 +1011,7 @@ SELECT logicalrelid, colocationid FROM pg_dist_partition CREATE TABLE table1_groupG ( id int ); SELECT create_distributed_table('table1_groupG', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1025,7 +1025,7 @@ CREATE TABLE table2_groupG ( id int ); ERROR: relation "table2_groupg" already exists SELECT create_distributed_table('table2_groupG', 'id', colocate_with => 'NONE'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/multi_complex_expressions.out b/src/test/regress/expected/multi_complex_expressions.out index 4321ea05f..b6d4e1042 100644 --- a/src/test/regress/expected/multi_complex_expressions.out +++ b/src/test/regress/expected/multi_complex_expressions.out @@ -4,44 +4,44 @@ -- Check that we can correctly handle complex expressions and aggregates. SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem; ?column? ------------------------- +--------------------------------------------------------------------- 12000.0000000000000000 (1 row) SELECT sum(l_quantity) / (10 * avg(l_quantity)) FROM lineitem; ?column? ------------------------ +--------------------------------------------------------------------- 1200.0000000000000000 (1 row) SELECT (sum(l_quantity) / (10 * avg(l_quantity))) + 11 FROM lineitem; ?column? 
------------------------ +--------------------------------------------------------------------- 1211.0000000000000000 (1 row) SELECT avg(l_quantity) as average FROM lineitem; average ---------------------- +--------------------------------------------------------------------- 25.4462500000000000 (1 row) SELECT 100 * avg(l_quantity) as average_times_hundred FROM lineitem; average_times_hundred ------------------------ +--------------------------------------------------------------------- 2544.6250000000000000 (1 row) SELECT 100 * avg(l_quantity) / 10 as average_times_ten FROM lineitem; average_times_ten ----------------------- +--------------------------------------------------------------------- 254.4625000000000000 (1 row) SELECT l_quantity, 10 * count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; l_quantity | count_quantity -------------+---------------- +--------------------------------------------------------------------- 44.00 | 2150 38.00 | 2160 45.00 | 2180 @@ -98,42 +98,42 @@ SELECT l_quantity, 10 * count(*) count_quantity FROM lineitem SELECT count(*) FROM lineitem WHERE octet_length(l_comment || l_comment) > 40; count -------- +--------------------------------------------------------------------- 8148 (1 row) SELECT count(*) FROM lineitem WHERE octet_length(concat(l_comment, l_comment)) > 40; count -------- +--------------------------------------------------------------------- 8148 (1 row) SELECT count(*) FROM lineitem WHERE octet_length(l_comment) + octet_length('randomtext'::text) > 40; count -------- +--------------------------------------------------------------------- 4611 (1 row) SELECT count(*) FROM lineitem WHERE octet_length(l_comment) + 10 > 40; count -------- +--------------------------------------------------------------------- 4611 (1 row) SELECT count(*) FROM lineitem WHERE (l_receiptdate::timestamp - l_shipdate::timestamp) > interval '5 days'; count -------- +--------------------------------------------------------------------- 10008 (1 row) -- can push down queries where no columns present on the WHERE clause SELECT count(*) FROM lineitem WHERE random() = -0.1; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -141,7 +141,7 @@ SELECT count(*) FROM lineitem WHERE random() = -0.1; SELECT count(*) FROM lineitem WHERE (l_partkey > 10000) is true; count -------- +--------------------------------------------------------------------- 11423 (1 row) @@ -149,7 +149,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM lineitem WHERE l_partkey = ANY(ARRAY[19353, 19354, 19355]); count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -157,7 +157,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM lineitem WHERE l_partkey = ALL(ARRAY[19353]); count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -165,7 +165,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM lineitem WHERE ARRAY[19353, 19354, 19355] @> ARRAY[l_partkey]; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -173,7 +173,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM lineitem WHERE (l_quantity/100)::int::bool::text::bool; count -------- +--------------------------------------------------------------------- 260 (1 row) @@ -181,7 +181,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM lineitem WHERE (CASE WHEN l_orderkey > 4000 THEN l_partkey / 100 > 1 ELSE false END); 
count -------- +--------------------------------------------------------------------- 7948 (1 row) @@ -189,7 +189,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM lineitem WHERE COALESCE((l_partkey/50000)::bool, false); count -------- +--------------------------------------------------------------------- 9122 (1 row) @@ -197,7 +197,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM lineitem WHERE NULLIF((l_partkey/50000)::bool, false); count -------- +--------------------------------------------------------------------- 9122 (1 row) @@ -205,7 +205,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM orders WHERE o_comment IS NOT null; count -------- +--------------------------------------------------------------------- 2985 (1 row) @@ -213,7 +213,7 @@ SELECT count(*) FROM orders SELECT count(*) FROM lineitem WHERE isfinite(l_shipdate); count -------- +--------------------------------------------------------------------- 12000 (1 row) @@ -221,7 +221,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM lineitem WHERE 0 != 0; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -229,7 +229,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM lineitem WHERE l_partkey IS DISTINCT FROM 50040; count -------- +--------------------------------------------------------------------- 11999 (1 row) @@ -237,7 +237,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM lineitem WHERE row(l_partkey, 2, 3) > row(2000, 2, 3); count -------- +--------------------------------------------------------------------- 11882 (1 row) @@ -252,7 +252,7 @@ SELECT count(*) FROM lineitem l_partkey IS DISTINCT FROM 50040 AND row(l_partkey, 2, 3) > row(2000, 2, 3); count -------- +--------------------------------------------------------------------- 137 (1 row) @@ -264,7 +264,7 @@ SELECT l_linenumber FROM lineitem l_linenumber LIMIT 1; l_linenumber --------------- +--------------------------------------------------------------------- 1 (1 row) @@ -277,7 +277,7 @@ SELECT count(*) * l_discount as total_discount, count(*), sum(l_tax), l_discount ORDER BY total_discount DESC, sum(l_tax) DESC; total_discount | count | sum | l_discount -----------------+-------+-------+------------ +--------------------------------------------------------------------- 104.80 | 1048 | 41.08 | 0.10 98.55 | 1095 | 44.15 | 0.09 90.64 | 1133 | 45.94 | 0.08 @@ -300,7 +300,7 @@ SELECT l_linenumber FROM lineitem l_linenumber LIMIT 1; l_linenumber --------------- +--------------------------------------------------------------------- 2 (1 row) @@ -315,7 +315,7 @@ SELECT max(l_linenumber), min(l_discount), l_receiptdate FROM lineitem l_receiptdate LIMIT 1; max | min | l_receiptdate ------+------+--------------- +--------------------------------------------------------------------- 3 | 0.07 | 01-09-1992 (1 row) @@ -323,21 +323,21 @@ SELECT max(l_linenumber), min(l_discount), l_receiptdate FROM lineitem SELECT count(*) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5; count -------- +--------------------------------------------------------------------- 951 (1 row) SELECT count(*) FROM lineitem JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5; count -------- +--------------------------------------------------------------------- 951 (1 row) SELECT count(*) FROM lineitem JOIN orders ON l_orderkey = o_orderkey WHERE l_quantity < 5; count -------- +--------------------------------------------------------------------- 951 (1 row) @@ -348,7 +348,7 @@ ERROR: complex 
joins are only supported when all distributed tables are joined -- the subquery is recursively planned since it contains OFFSET, which is not pushdownable SELECT * FROM (SELECT o_custkey FROM orders GROUP BY o_custkey ORDER BY o_custkey OFFSET 20) sq ORDER BY 1 LIMIT 5; o_custkey ------------ +--------------------------------------------------------------------- 35 37 38 @@ -359,7 +359,7 @@ SELECT * FROM (SELECT o_custkey FROM orders GROUP BY o_custkey ORDER BY o_custke -- the subquery is recursively planned since it contains OFFSET, which is not pushdownable SELECT * FROM (SELECT o_orderkey FROM orders ORDER BY o_orderkey OFFSET 20) sq ORDER BY 1 LIMIT 5; o_orderkey ------------- +--------------------------------------------------------------------- 69 70 71 @@ -370,7 +370,7 @@ SELECT * FROM (SELECT o_orderkey FROM orders ORDER BY o_orderkey OFFSET 20) sq O -- Simple LIMIT/OFFSET with ORDER BY SELECT o_orderkey FROM orders ORDER BY o_orderkey LIMIT 10 OFFSET 20; o_orderkey ------------- +--------------------------------------------------------------------- 69 70 71 @@ -397,7 +397,7 @@ ORDER BY customer_keys.o_custkey DESC LIMIT 10 OFFSET 20; o_custkey | total_order_count ------------+------------------- +--------------------------------------------------------------------- 1466 | 1 1465 | 2 1463 | 4 @@ -430,7 +430,7 @@ SELECT o_custkey, COUNT(*) AS ccnt FROM orders GROUP BY o_custkey ORDER BY ccnt -- OFFSET without LIMIT SELECT o_custkey FROM orders ORDER BY o_custkey OFFSET 2980; o_custkey ------------ +--------------------------------------------------------------------- 1498 1498 1499 @@ -451,7 +451,7 @@ ORDER BY 1, 2, 3 LIMIT 10 OFFSET 20; DEBUG: push down of limit count: 30 l_partkey | o_custkey | l_quantity ------------+-----------+------------ +--------------------------------------------------------------------- 655 | 58 | 50.00 669 | 319 | 34.00 699 | 1255 | 50.00 @@ -479,7 +479,7 @@ SELECT ORDER BY 2 DESC, 1 DESC LIMIT 10; l_orderkey | sum | sum | count | count | max | max -------------+-----------+-----------+-------+-------+-----------+---------- +--------------------------------------------------------------------- 12804 | 440012.71 | 45788.16 | 7 | 1 | 94398.00 | 45788.16 9863 | 412560.63 | 175647.63 | 7 | 3 | 85723.77 | 50769.14 2567 | 412076.77 | 59722.26 | 7 | 1 | 94894.00 | 9784.02 @@ -506,7 +506,7 @@ SELECT ORDER BY 2 DESC, 1 DESC LIMIT 10; l_orderkey | sum | sum | count | count | max | max -------------+-----------+-----------+-------+-------+----------+---------- +--------------------------------------------------------------------- 9863 | 412560.63 | 175647.63 | 7 | 3 | 85723.77 | 50769.14 12039 | 407048.94 | 76406.30 | 7 | 2 | 94471.02 | 19679.30 5606 | 403595.91 | 36531.51 | 7 | 2 | 94890.18 | 30582.75 diff --git a/src/test/regress/expected/multi_complex_expressions_0.out b/src/test/regress/expected/multi_complex_expressions_0.out index eaf036a9f..ae407e54a 100644 --- a/src/test/regress/expected/multi_complex_expressions_0.out +++ b/src/test/regress/expected/multi_complex_expressions_0.out @@ -4,44 +4,44 @@ -- Check that we can correctly handle complex expressions and aggregates. SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem; ?column? ------------------------- +--------------------------------------------------------------------- 12000.0000000000000000 (1 row) SELECT sum(l_quantity) / (10 * avg(l_quantity)) FROM lineitem; ?column? 
------------------------ +--------------------------------------------------------------------- 1200.0000000000000000 (1 row) SELECT (sum(l_quantity) / (10 * avg(l_quantity))) + 11 FROM lineitem; ?column? ------------------------ +--------------------------------------------------------------------- 1211.0000000000000000 (1 row) SELECT avg(l_quantity) as average FROM lineitem; average ---------------------- +--------------------------------------------------------------------- 25.4462500000000000 (1 row) SELECT 100 * avg(l_quantity) as average_times_hundred FROM lineitem; average_times_hundred ------------------------ +--------------------------------------------------------------------- 2544.6250000000000000 (1 row) SELECT 100 * avg(l_quantity) / 10 as average_times_ten FROM lineitem; average_times_ten ----------------------- +--------------------------------------------------------------------- 254.4625000000000000 (1 row) SELECT l_quantity, 10 * count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; l_quantity | count_quantity -------------+---------------- +--------------------------------------------------------------------- 44.00 | 2150 38.00 | 2160 45.00 | 2180 @@ -98,42 +98,42 @@ SELECT l_quantity, 10 * count(*) count_quantity FROM lineitem SELECT count(*) FROM lineitem WHERE octet_length(l_comment || l_comment) > 40; count -------- +--------------------------------------------------------------------- 8148 (1 row) SELECT count(*) FROM lineitem WHERE octet_length(concat(l_comment, l_comment)) > 40; count -------- +--------------------------------------------------------------------- 8148 (1 row) SELECT count(*) FROM lineitem WHERE octet_length(l_comment) + octet_length('randomtext'::text) > 40; count -------- +--------------------------------------------------------------------- 4611 (1 row) SELECT count(*) FROM lineitem WHERE octet_length(l_comment) + 10 > 40; count -------- +--------------------------------------------------------------------- 4611 (1 row) SELECT count(*) FROM lineitem WHERE (l_receiptdate::timestamp - l_shipdate::timestamp) > interval '5 days'; count -------- +--------------------------------------------------------------------- 10008 (1 row) -- can push down queries where no columns present on the WHERE clause SELECT count(*) FROM lineitem WHERE random() = -0.1; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -141,7 +141,7 @@ SELECT count(*) FROM lineitem WHERE random() = -0.1; SELECT count(*) FROM lineitem WHERE (l_partkey > 10000) is true; count -------- +--------------------------------------------------------------------- 11423 (1 row) @@ -149,7 +149,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM lineitem WHERE l_partkey = ANY(ARRAY[19353, 19354, 19355]); count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -157,7 +157,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM lineitem WHERE l_partkey = ALL(ARRAY[19353]); count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -165,7 +165,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM lineitem WHERE ARRAY[19353, 19354, 19355] @> ARRAY[l_partkey]; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -173,7 +173,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM lineitem WHERE (l_quantity/100)::int::bool::text::bool; count -------- 
+--------------------------------------------------------------------- 260 (1 row) @@ -181,7 +181,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM lineitem WHERE (CASE WHEN l_orderkey > 4000 THEN l_partkey / 100 > 1 ELSE false END); count -------- +--------------------------------------------------------------------- 7948 (1 row) @@ -189,7 +189,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM lineitem WHERE COALESCE((l_partkey/50000)::bool, false); count -------- +--------------------------------------------------------------------- 9122 (1 row) @@ -197,7 +197,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM lineitem WHERE NULLIF((l_partkey/50000)::bool, false); count -------- +--------------------------------------------------------------------- 9122 (1 row) @@ -205,7 +205,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM orders WHERE o_comment IS NOT null; count -------- +--------------------------------------------------------------------- 2985 (1 row) @@ -213,7 +213,7 @@ SELECT count(*) FROM orders SELECT count(*) FROM lineitem WHERE isfinite(l_shipdate); count -------- +--------------------------------------------------------------------- 12000 (1 row) @@ -221,7 +221,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM lineitem WHERE 0 != 0; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -229,7 +229,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM lineitem WHERE l_partkey IS DISTINCT FROM 50040; count -------- +--------------------------------------------------------------------- 11999 (1 row) @@ -237,7 +237,7 @@ SELECT count(*) FROM lineitem SELECT count(*) FROM lineitem WHERE row(l_partkey, 2, 3) > row(2000, 2, 3); count -------- +--------------------------------------------------------------------- 11882 (1 row) @@ -252,7 +252,7 @@ SELECT count(*) FROM lineitem l_partkey IS DISTINCT FROM 50040 AND row(l_partkey, 2, 3) > row(2000, 2, 3); count -------- +--------------------------------------------------------------------- 137 (1 row) @@ -264,7 +264,7 @@ SELECT l_linenumber FROM lineitem l_linenumber LIMIT 1; l_linenumber --------------- +--------------------------------------------------------------------- 1 (1 row) @@ -277,7 +277,7 @@ SELECT count(*) * l_discount as total_discount, count(*), sum(l_tax), l_discount ORDER BY total_discount DESC, sum(l_tax) DESC; total_discount | count | sum | l_discount -----------------+-------+-------+------------ +--------------------------------------------------------------------- 104.80 | 1048 | 41.08 | 0.10 98.55 | 1095 | 44.15 | 0.09 90.64 | 1133 | 45.94 | 0.08 @@ -300,7 +300,7 @@ SELECT l_linenumber FROM lineitem l_linenumber LIMIT 1; l_linenumber --------------- +--------------------------------------------------------------------- 2 (1 row) @@ -315,7 +315,7 @@ SELECT max(l_linenumber), min(l_discount), l_receiptdate FROM lineitem l_receiptdate LIMIT 1; max | min | l_receiptdate ------+------+--------------- +--------------------------------------------------------------------- 3 | 0.07 | 01-09-1992 (1 row) @@ -323,21 +323,21 @@ SELECT max(l_linenumber), min(l_discount), l_receiptdate FROM lineitem SELECT count(*) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5; count -------- +--------------------------------------------------------------------- 951 (1 row) SELECT count(*) FROM lineitem JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5; count -------- +--------------------------------------------------------------------- 
951 (1 row) SELECT count(*) FROM lineitem JOIN orders ON l_orderkey = o_orderkey WHERE l_quantity < 5; count -------- +--------------------------------------------------------------------- 951 (1 row) @@ -356,7 +356,7 @@ DETAIL: Subqueries with offset are not supported yet -- Simple LIMIT/OFFSET with ORDER BY SELECT o_orderkey FROM orders ORDER BY o_orderkey LIMIT 10 OFFSET 20; o_orderkey ------------- +--------------------------------------------------------------------- 69 70 71 @@ -383,7 +383,7 @@ ORDER BY customer_keys.o_custkey DESC LIMIT 10 OFFSET 20; o_custkey | total_order_count ------------+------------------- +--------------------------------------------------------------------- 1466 | 1 1465 | 2 1463 | 4 @@ -416,7 +416,7 @@ SELECT o_custkey, COUNT(*) AS ccnt FROM orders GROUP BY o_custkey ORDER BY ccnt -- OFFSET without LIMIT SELECT o_custkey FROM orders ORDER BY o_custkey OFFSET 2980; o_custkey ------------ +--------------------------------------------------------------------- 1498 1498 1499 @@ -437,7 +437,7 @@ ORDER BY 1, 2, 3 LIMIT 10 OFFSET 20; DEBUG: push down of limit count: 30 l_partkey | o_custkey | l_quantity ------------+-----------+------------ +--------------------------------------------------------------------- 655 | 58 | 50.00 669 | 319 | 34.00 699 | 1255 | 50.00 @@ -465,7 +465,7 @@ SELECT ORDER BY 2 DESC, 1 DESC LIMIT 10; l_orderkey | sum | sum | count | count | max | max -------------+-----------+-----------+-------+-------+-----------+---------- +--------------------------------------------------------------------- 12804 | 440012.71 | 45788.16 | 7 | 1 | 94398.00 | 45788.16 9863 | 412560.63 | 175647.63 | 7 | 3 | 85723.77 | 50769.14 2567 | 412076.77 | 59722.26 | 7 | 1 | 94894.00 | 9784.02 @@ -492,7 +492,7 @@ SELECT ORDER BY 2 DESC, 1 DESC LIMIT 10; l_orderkey | sum | sum | count | count | max | max -------------+-----------+-----------+-------+-------+----------+---------- +--------------------------------------------------------------------- 9863 | 412560.63 | 175647.63 | 7 | 3 | 85723.77 | 50769.14 12039 | 407048.94 | 76406.30 | 7 | 2 | 94471.02 | 19679.30 5606 | 403595.91 | 36531.51 | 7 | 2 | 94890.18 | 30582.75 diff --git a/src/test/regress/expected/multi_count_type_conversion.out b/src/test/regress/expected/multi_count_type_conversion.out index a258ebef2..af703a5ca 100644 --- a/src/test/regress/expected/multi_count_type_conversion.out +++ b/src/test/regress/expected/multi_count_type_conversion.out @@ -11,7 +11,7 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity DESC; count_quantity | l_quantity -----------------+------------ +--------------------------------------------------------------------- 219 | 13.00 222 | 29.00 227 | 3.00 @@ -49,7 +49,7 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity DESC, l_quantity ASC; count_quantity | l_quantity -----------------+------------ +--------------------------------------------------------------------- 273 | 28.00 264 | 30.00 261 | 23.00 diff --git a/src/test/regress/expected/multi_create_shards.out b/src/test/regress/expected/multi_create_shards.out index ed177108f..ce7bac903 100644 --- a/src/test/regress/expected/multi_create_shards.out +++ b/src/test/regress/expected/multi_create_shards.out @@ -64,14 +64,14 @@ DETAIL: Partition column types must have a hash function defined to use hash pa -- distribute table and inspect side effects SELECT 
master_create_distributed_table('table_to_distribute', 'name', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT partmethod, partkey FROM pg_dist_partition WHERE logicalrelid = 'table_to_distribute'::regclass; partmethod | partkey -------------+-------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- h | {VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} (1 row) @@ -88,7 +88,7 @@ HINT: Add more worker nodes or try again with a lower replication factor. -- finally, create shards and inspect metadata SELECT master_create_worker_shards('table_to_distribute', 16, 1); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -96,7 +96,7 @@ SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE logicalrelid = 'table_to_distribute'::regclass ORDER BY (shardminvalue::integer) ASC; shardstorage | shardminvalue | shardmaxvalue ---------------+---------------+--------------- +--------------------------------------------------------------------- t | -2147483648 | -1879048193 t | -1879048192 | -1610612737 t | -1610612736 | -1342177281 @@ -122,13 +122,13 @@ SELECT count(*) AS shard_count, WHERE logicalrelid='table_to_distribute'::regclass GROUP BY shard_size; shard_count | shard_size --------------+------------ +--------------------------------------------------------------------- 16 | 268435455 (1 row) SELECT COUNT(*) FROM pg_class WHERE relname LIKE 'table_to_distribute%' AND relkind = 'r'; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -138,7 +138,7 @@ ERROR: table "table_to_distribute" has already had shards created for it -- test list sorting SELECT sort_names('sumedh', 'jason', 'ozgun'); sort_names ------------- +--------------------------------------------------------------------- jason + ozgun + sumedh + @@ -147,7 +147,7 @@ SELECT sort_names('sumedh', 'jason', 'ozgun'); SELECT COUNT(*) FROM pg_class WHERE relname LIKE 'throwaway%' AND relkind = 'r'; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -163,7 +163,7 @@ SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('foreign_table_to_distribute', 'id', 'hash'); NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -171,7 +171,7 @@ SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE logicalrelid = 'foreign_table_to_distribute'::regclass ORDER BY (shardminvalue::integer) ASC; shardstorage | shardminvalue | shardmaxvalue ---------------+---------------+--------------- +--------------------------------------------------------------------- f | -2147483648 | -1879048193 f | -1879048192 | -1610612737 f | -1610612736 | -1342177281 @@ -199,7 +199,7 @@ CREATE TABLE weird_shard_count SET citus.shard_count TO 7; SELECT create_distributed_table('weird_shard_count', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -209,7 +209,7 @@ SELECT 
shardmaxvalue::integer - shardminvalue::integer AS shard_size WHERE logicalrelid = 'weird_shard_count'::regclass ORDER BY shardminvalue::integer ASC; shard_size ------------- +--------------------------------------------------------------------- 613566755 613566755 613566755 diff --git a/src/test/regress/expected/multi_create_table.out b/src/test/regress/expected/multi_create_table.out index 431f190aa..1e89be657 100644 --- a/src/test/regress/expected/multi_create_table.out +++ b/src/test/regress/expected/multi_create_table.out @@ -30,7 +30,7 @@ WARNING: table "lineitem" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -51,7 +51,7 @@ WARNING: table "orders" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -68,7 +68,7 @@ CREATE TABLE orders_reference ( PRIMARY KEY(o_orderkey) ); SELECT create_reference_table('orders_reference'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -83,7 +83,7 @@ CREATE TABLE customer ( c_comment varchar(117) not null); SELECT create_reference_table('customer'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -98,7 +98,7 @@ CREATE TABLE customer_append ( c_comment varchar(117) not null); SELECT create_distributed_table('customer_append', 'c_custkey', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -109,7 +109,7 @@ CREATE TABLE nation ( n_comment varchar(152)); SELECT create_reference_table('nation'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -125,7 +125,7 @@ CREATE TABLE part ( p_comment varchar(23) not null); SELECT create_reference_table('part'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -141,7 +141,7 @@ CREATE TABLE part_append ( p_comment varchar(23) not null); SELECT create_distributed_table('part_append', 'p_partkey', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -157,7 +157,7 @@ CREATE TABLE supplier ); SELECT create_reference_table('supplier'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -175,7 +175,7 @@ CREATE TABLE supplier_single_shard ); SELECT create_distributed_table('supplier_single_shard', 's_suppkey', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -191,13 +191,13 @@ HINT: Try again after reducing "citus.shard_replication_factor" to one or setti SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('mx_table_test', 'col1'); create_distributed_table 
--------------------------- +--------------------------------------------------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='mx_table_test'::regclass; repmodel ----------- +--------------------------------------------------------------------- s (1 row) @@ -209,13 +209,13 @@ NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='s_table'::regclass; repmodel ----------- +--------------------------------------------------------------------- c (1 row) @@ -236,13 +236,13 @@ SELECT create_distributed_table('repmodel_test', 'a', 'append'); NOTICE: using statement-based replication DETAIL: Streaming replication is supported only for hash-distributed tables. create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; repmodel ----------- +--------------------------------------------------------------------- c (1 row) @@ -252,13 +252,13 @@ SELECT create_distributed_table('repmodel_test', 'a', 'range'); NOTICE: using statement-based replication DETAIL: Streaming replication is supported only for hash-distributed tables. create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; repmodel ----------- +--------------------------------------------------------------------- c (1 row) @@ -271,13 +271,13 @@ NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; repmodel ----------- +--------------------------------------------------------------------- c (1 row) @@ -288,13 +288,13 @@ NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; repmodel ----------- +--------------------------------------------------------------------- c (1 row) @@ -305,13 +305,13 @@ NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. 
master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; repmodel ----------- +--------------------------------------------------------------------- c (1 row) @@ -323,13 +323,13 @@ SELECT create_distributed_table('repmodel_test', 'a', 'append'); NOTICE: using statement-based replication DETAIL: Streaming replication is supported only for hash-distributed tables. create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; repmodel ----------- +--------------------------------------------------------------------- c (1 row) @@ -339,13 +339,13 @@ SELECT create_distributed_table('repmodel_test', 'a', 'range'); NOTICE: using statement-based replication DETAIL: Streaming replication is supported only for hash-distributed tables. create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; repmodel ----------- +--------------------------------------------------------------------- c (1 row) @@ -356,13 +356,13 @@ NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; repmodel ----------- +--------------------------------------------------------------------- c (1 row) @@ -373,13 +373,13 @@ NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; repmodel ----------- +--------------------------------------------------------------------- c (1 row) @@ -390,13 +390,13 @@ NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; repmodel ----------- +--------------------------------------------------------------------- c (1 row) @@ -424,13 +424,13 @@ HINT: Empty your table before distributing it. SELECT create_distributed_table('data_load_test', 'col1'); NOTICE: Copying data from local table... 
create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM data_load_test ORDER BY col1; col1 | col2 | col3 -------+-------+------ +--------------------------------------------------------------------- 132 | hello | 1 243 | world | 2 (2 rows) @@ -440,39 +440,39 @@ DROP TABLE data_load_test; CREATE TABLE no_shard_test (col1 int, col2 text); SELECT create_distributed_table('no_shard_test', 'col1', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM no_shard_test WHERE col1 > 1; col1 | col2 -------+------ +--------------------------------------------------------------------- (0 rows) DROP TABLE no_shard_test; CREATE TABLE no_shard_test (col1 int, col2 text); SELECT create_distributed_table('no_shard_test', 'col1', 'range'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM no_shard_test WHERE col1 > 1; col1 | col2 -------+------ +--------------------------------------------------------------------- (0 rows) DROP TABLE no_shard_test; CREATE TABLE no_shard_test (col1 int, col2 text); SELECT master_create_distributed_table('no_shard_test', 'col1', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM no_shard_test WHERE col1 > 1; col1 | col2 -------+------ +--------------------------------------------------------------------- (0 rows) DROP TABLE no_shard_test; @@ -483,7 +483,7 @@ INSERT INTO data_load_test VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test', 'col1'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -491,7 +491,7 @@ INSERT INTO data_load_test VALUES (243, 'world'); END; SELECT * FROM data_load_test ORDER BY col1; col1 | col2 | col3 -------+-------+------ +--------------------------------------------------------------------- 132 | hello | 1 243 | world | 2 (2 rows) @@ -504,7 +504,7 @@ INSERT INTO data_load_test1 VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test1', 'col1'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -513,7 +513,7 @@ INSERT INTO data_load_test2 VALUES (132, 'world'); SELECT create_distributed_table('data_load_test2', 'col1'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -521,7 +521,7 @@ SELECT a.col2 ||' '|| b.col2 FROM data_load_test1 a JOIN data_load_test2 b USING (col1) WHERE col1 = 132; ?column? -------------- +--------------------------------------------------------------------- hello world (1 row) @@ -531,7 +531,7 @@ END; \c - - - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'data_load_test%'; relname ---------- +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -542,7 +542,7 @@ INSERT INTO data_load_test VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test', 'col1'); NOTICE: Copying data from local table... 
create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -556,7 +556,7 @@ INSERT INTO data_load_test VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test', 'col1'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -569,7 +569,7 @@ INSERT INTO data_load_test VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test', 'col1'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -584,13 +584,13 @@ ALTER TABLE data_load_test DROP COLUMN col1; SELECT create_distributed_table('data_load_test', 'col3'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM data_load_test ORDER BY col2; col2 | col3 | CoL4") --------+-------+-------- +--------------------------------------------------------------------- hello | world | world | hello | (2 rows) @@ -598,7 +598,7 @@ SELECT * FROM data_load_test ORDER BY col2; -- make sure the tuple went to the right shard SELECT * FROM data_load_test WHERE col3 = 'world'; col2 | col3 | CoL4") --------+-------+-------- +--------------------------------------------------------------------- hello | world | (1 row) @@ -608,14 +608,14 @@ SET citus.shard_count to 4; CREATE TABLE lineitem_hash_part (like lineitem); SELECT create_distributed_table('lineitem_hash_part', 'l_orderkey'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE orders_hash_part (like orders); SELECT create_distributed_table('orders_hash_part', 'o_orderkey'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -626,13 +626,13 @@ CREATE UNLOGGED TABLE unlogged_table ); SELECT create_distributed_table('unlogged_table', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM master_get_table_ddl_events('unlogged_table'); master_get_table_ddl_events --------------------------------------------------------------------- +--------------------------------------------------------------------- CREATE UNLOGGED TABLE public.unlogged_table (key text, value text) ALTER TABLE public.unlogged_table OWNER TO postgres (2 rows) @@ -640,7 +640,7 @@ SELECT * FROM master_get_table_ddl_events('unlogged_table'); \c - - - :worker_1_port SELECT relpersistence FROM pg_class WHERE relname LIKE 'unlogged_table_%'; relpersistence ----------------- +--------------------------------------------------------------------- u u u @@ -653,7 +653,7 @@ BEGIN; CREATE TABLE rollback_table(id int, name varchar(20)); SELECT create_distributed_table('rollback_table','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -662,7 +662,7 @@ ROLLBACK; \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%'); Column | Type | Modifiers ---------+------+----------- 
+--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -676,7 +676,7 @@ INSERT INTO rollback_table VALUES(3, 'Name_3'); SELECT create_distributed_table('rollback_table','id'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -685,7 +685,7 @@ ROLLBACK; \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%'); Column | Type | Modifiers ---------+------+----------- +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -693,7 +693,7 @@ BEGIN; CREATE TABLE rollback_table(id int, name varchar(20)); SELECT create_distributed_table('rollback_table','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -703,7 +703,7 @@ COMMIT; -- Check the table is created SELECT count(*) FROM rollback_table; count -------- +--------------------------------------------------------------------- 3 (1 row) @@ -712,7 +712,7 @@ BEGIN; CREATE TABLE rollback_table(id int, name varchar(20)); SELECT create_distributed_table('rollback_table','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -722,7 +722,7 @@ ROLLBACK; \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%'); Column | Type | Modifiers ---------+------+----------- +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -730,14 +730,14 @@ BEGIN; CREATE TABLE tt1(id int); SELECT create_distributed_table('tt1','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE tt2(id int); SELECT create_distributed_table('tt2','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -748,13 +748,13 @@ COMMIT; \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt1_360069'::regclass; Column | Type | Modifiers ---------+---------+----------- +--------------------------------------------------------------------- id | integer | (1 row) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt2_360073'::regclass; Column | Type | Modifiers ---------+---------+----------- +--------------------------------------------------------------------- id | integer | (1 row) @@ -767,13 +767,13 @@ BEGIN; CREATE TABLE append_tt1(id int); SELECT create_distributed_table('append_tt1','id','append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_empty_shard('append_tt1'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 360077 (1 row) @@ -782,7 +782,7 @@ ROLLBACK; \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.append_tt1_360077'::regclass; Column | Type | Modifiers ---------+---------+----------- +--------------------------------------------------------------------- id | integer | (1 
row) @@ -791,7 +791,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.appen \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid from pg_class WHERE relname LIKE 'public.tt1%'); Column | Type | Modifiers ---------+------+----------- +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -803,14 +803,14 @@ INSERT INTO tt1 VALUES(1); SELECT create_distributed_table('tt1','id'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) INSERT INTO tt1 VALUES(2); SELECT * FROM tt1 WHERE id = 1; id ----- +--------------------------------------------------------------------- 1 (1 row) @@ -819,7 +819,7 @@ COMMIT; \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt1_360078'::regclass; Column | Type | Modifiers ---------+---------+----------- +--------------------------------------------------------------------- id | integer | (1 row) @@ -829,7 +829,7 @@ BEGIN; CREATE TABLE tt1(id int); SELECT create_distributed_table('tt1','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -839,7 +839,7 @@ COMMIT; \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid from pg_class WHERE relname LIKE 'tt1%'); Column | Type | Modifiers ---------+------+----------- +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -849,7 +849,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid f CREATE TABLE sample_table(id int); SELECT create_distributed_table('sample_table','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -859,7 +859,7 @@ CREATE TABLE stage_table (LIKE sample_table); SELECT create_distributed_table('stage_table', 'id'); NOTICE: Copying data from local table... 
create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -867,7 +867,7 @@ INSERT INTO sample_table SELECT * FROM stage_table; DROP TABLE stage_table; SELECT * FROM sample_table WHERE id = 3; id ----- +--------------------------------------------------------------------- 3 (1 row) @@ -875,7 +875,7 @@ COMMIT; -- Show that rows of sample_table are updated SELECT count(*) FROM sample_table; count -------- +--------------------------------------------------------------------- 4 (1 row) @@ -886,7 +886,7 @@ BEGIN; CREATE TABLE tt1(id int); SELECT create_distributed_table('tt1','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -894,7 +894,7 @@ SELECT create_distributed_table('tt1','id'); CREATE TABLE tt2(like tt1); SELECT create_distributed_table('tt2','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -902,20 +902,20 @@ SELECT create_distributed_table('tt2','id'); INSERT INTO tt1 SELECT * FROM tt2; SELECT * FROM tt1 WHERE id = 3; id ----- +--------------------------------------------------------------------- 3 (1 row) SELECT * FROM tt2 WHERE id = 6; id ----- +--------------------------------------------------------------------- 6 (1 row) END; SELECT count(*) FROM tt1; count -------- +--------------------------------------------------------------------- 6 (1 row) @@ -930,7 +930,7 @@ insert into sc.ref SELECT s FROM generate_series(0, 100) s; SELECT create_reference_table('sc.ref'); NOTICE: Copying data from local table... create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -939,7 +939,7 @@ insert into sc.hash SELECT s FROM generate_series(0, 100) s; SELECT create_distributed_table('sc.hash', 'a'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -952,7 +952,7 @@ insert into sc2.hash SELECT s FROM generate_series(0, 100) s; SELECT create_distributed_table('sc2.hash', 'a'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -961,7 +961,7 @@ insert into sc2.ref SELECT s FROM generate_series(0, 100) s; SELECT create_reference_table('sc2.ref'); NOTICE: Copying data from local table... 
create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -977,14 +977,14 @@ CREATE TABLE sc3.alter_replica_table ALTER TABLE sc3.alter_replica_table REPLICA IDENTITY USING INDEX alter_replica_table_pkey; SELECT create_distributed_table('sc3.alter_replica_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) COMMIT; SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc3' LIMIT 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,i) (localhost,57638,t,i) (2 rows) @@ -1002,14 +1002,14 @@ ALTER TABLE alter_replica_table REPLICA IDENTITY USING INDEX alter_replica_table SELECT create_distributed_table('alter_replica_table', 'id'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) COMMIT; SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc4' LIMIT 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,i) (localhost,57638,t,i) (2 rows) @@ -1027,14 +1027,14 @@ ALTER TABLE sc5.alter_replica_table REPLICA IDENTITY FULL; SELECT create_distributed_table('sc5.alter_replica_table', 'id'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) COMMIT; SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc5' LIMIT 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,f) (localhost,57638,t,f) (2 rows) @@ -1052,14 +1052,14 @@ ALTER TABLE sc6.alter_replica_table REPLICA IDENTITY USING INDEX unique_idx; SELECT create_distributed_table('sc6.alter_replica_table', 'id'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) COMMIT; SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc6' LIMIT 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,i) (localhost,57638,t,i) (2 rows) @@ -1076,14 +1076,14 @@ ALTER TABLE alter_replica_table REPLICA IDENTITY USING INDEX unique_idx; SELECT create_distributed_table('alter_replica_table', 'id'); NOTICE: Copying data from local table... 
create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) COMMIT; SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='public' LIMIT 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,i) (localhost,57638,t,i) (2 rows) diff --git a/src/test/regress/expected/multi_create_table_constraints.out b/src/test/regress/expected/multi_create_table_constraints.out index a7af3f7ba..514ec0cb3 100644 --- a/src/test/regress/expected/multi_create_table_constraints.out +++ b/src/test/regress/expected/multi_create_table_constraints.out @@ -13,7 +13,7 @@ WARNING: table "uniq_cns_append_tables" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -28,7 +28,7 @@ WARNING: table "excl_cns_append_tables" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -71,7 +71,7 @@ CREATE TABLE pk_on_part_col ); SELECT create_distributed_table('pk_on_part_col', 'partition_col', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -82,7 +82,7 @@ CREATE TABLE uq_part_col ); SELECT create_distributed_table('uq_part_col', 'partition_col', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -94,7 +94,7 @@ CREATE TABLE uq_two_columns ); SELECT create_distributed_table('uq_two_columns', 'partition_col', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -111,7 +111,7 @@ CREATE TABLE ex_on_part_col ); SELECT create_distributed_table('ex_on_part_col', 'partition_col', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -128,7 +128,7 @@ CREATE TABLE ex_on_two_columns ); SELECT create_distributed_table('ex_on_two_columns', 'partition_col', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -145,7 +145,7 @@ CREATE TABLE ex_on_two_columns_prt ); SELECT create_distributed_table('ex_on_two_columns_prt', 'partition_col', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -173,7 +173,7 @@ CREATE TABLE ex_overlaps ); SELECT create_distributed_table('ex_overlaps', 'partition_col', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -194,7 +194,7 @@ CREATE TABLE pk_on_part_col_named ); SELECT create_distributed_table('pk_on_part_col_named', 'partition_col', 
'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -205,7 +205,7 @@ CREATE TABLE uq_part_col_named ); SELECT create_distributed_table('uq_part_col_named', 'partition_col', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -217,7 +217,7 @@ CREATE TABLE uq_two_columns_named ); SELECT create_distributed_table('uq_two_columns_named', 'partition_col', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -234,7 +234,7 @@ CREATE TABLE ex_on_part_col_named ); SELECT create_distributed_table('ex_on_part_col_named', 'partition_col', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -251,7 +251,7 @@ CREATE TABLE ex_on_two_columns_named ); SELECT create_distributed_table('ex_on_two_columns_named', 'partition_col', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -270,7 +270,7 @@ CREATE TABLE ex_multiple_excludes ); SELECT create_distributed_table('ex_multiple_excludes', 'partition_col', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -300,7 +300,7 @@ CREATE TABLE ex_overlaps_named ); SELECT create_distributed_table('ex_overlaps_named', 'partition_col', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -317,7 +317,7 @@ CREATE TABLE uq_range_tables ); SELECT create_distributed_table('uq_range_tables', 'partition_col', 'range'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -330,7 +330,7 @@ CREATE TABLE check_example ); SELECT create_distributed_table('check_example', 'partition_col', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -338,13 +338,13 @@ SELECT create_distributed_table('check_example', 'partition_col', 'hash'); SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'check_example_partition_col_key_365056'::regclass; Column | Type | Definition ----------------+---------+--------------- +--------------------------------------------------------------------- partition_col | integer | partition_col (1 row) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.check_example_365056'::regclass; Constraint | Definition --------------------------------------+------------------------------------- +--------------------------------------------------------------------- check_example_other_col_check | CHECK (other_col >= 100) check_example_other_other_col_check | CHECK (abs(other_other_col) >= 100) (2 rows) @@ -376,21 +376,21 @@ SET citus.shard_replication_factor = 1; CREATE TABLE raw_table_1 (user_id int, UNIQUE(user_id)); SELECT create_distributed_table('raw_table_1', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE raw_table_2 (user_id int REFERENCES raw_table_1(user_id), UNIQUE(user_id)); SELECT create_distributed_table('raw_table_2', 
'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) -- see that the constraint exists SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='raw_table_2'::regclass; Constraint | Definition ---------------------------+------------------------------------------------------- +--------------------------------------------------------------------- raw_table_2_user_id_fkey | FOREIGN KEY (user_id) REFERENCES raw_table_1(user_id) (1 row) @@ -405,7 +405,7 @@ NOTICE: drop cascades to constraint raw_table_2_user_id_fkey on table raw_table -- see that the constraint also dropped SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='raw_table_2'::regclass; Constraint | Definition -------------+------------ +--------------------------------------------------------------------- (0 rows) -- drop the table as well diff --git a/src/test/regress/expected/multi_cross_shard.out b/src/test/regress/expected/multi_cross_shard.out index ed2a0e4e7..009fe0851 100644 --- a/src/test/regress/expected/multi_cross_shard.out +++ b/src/test/regress/expected/multi_cross_shard.out @@ -11,7 +11,7 @@ CREATE TABLE multi_task_table ); SELECT create_distributed_table('multi_task_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -21,13 +21,13 @@ INSERT INTO multi_task_table VALUES(3, 'elem_3'); -- Shouldn't log anything when the log level is 'off' SHOW citus.multi_task_query_log_level; citus.multi_task_query_log_level ----------------------------------- +--------------------------------------------------------------------- off (1 row) SELECT * FROM multi_task_table ORDER BY 1; id | name -----+-------- +--------------------------------------------------------------------- 1 | elem_1 2 | elem_2 3 | elem_3 @@ -39,7 +39,7 @@ SELECT * FROM multi_task_table ORDER BY 1; NOTICE: multi-task query about to be executed HINT: Queries are split to multiple tasks if they have to be split into several queries on the workers. id | name -----+-------- +--------------------------------------------------------------------- 1 | elem_1 2 | elem_2 3 | elem_3 @@ -49,7 +49,7 @@ SELECT AVG(id) AS avg_id FROM multi_task_table; NOTICE: multi-task query about to be executed HINT: Queries are split to multiple tasks if they have to be split into several queries on the workers. 
avg_id --------------------- +--------------------------------------------------------------------- 2.0000000000000000 (1 row) @@ -71,13 +71,13 @@ CREATE TABLE summary_table ); SELECT create_distributed_table('raw_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('summary_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -103,7 +103,7 @@ INSERT INTO summary_table SELECT id, SUM(order_count) FROM raw_table WHERE id = SET citus.multi_task_query_log_level to DEFAULT; SELECT * FROM summary_table ORDER BY 1,2; id | order_sum -----+----------- +--------------------------------------------------------------------- 1 | 35 1 | 35 2 | 40 @@ -127,7 +127,7 @@ ROLLBACK; SET citus.multi_task_query_log_level to DEFAULT; SELECT * FROM summary_table ORDER BY 1,2; id | order_sum -----+----------- +--------------------------------------------------------------------- 1 | 35 1 | 35 2 | 40 @@ -139,7 +139,7 @@ SET citus.multi_task_query_log_level TO notice; -- Shouldn't log since it is a router select query SELECT * FROM raw_table WHERE ID = 1; id | order_count -----+------------- +--------------------------------------------------------------------- 1 | 15 1 | 20 (2 rows) @@ -158,13 +158,13 @@ CREATE TABLE tt2 ); SELECT create_distributed_table('tt1', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('tt2', 'name'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -178,7 +178,7 @@ SELECT tt1.id, tt2.count from tt1,tt2 where tt1.id = tt2.id; NOTICE: multi-task query about to be executed HINT: Queries are split to multiple tasks if they have to be split into several queries on the workers. 
id | count -----+------- +--------------------------------------------------------------------- 1 | 5 2 | 15 (2 rows) diff --git a/src/test/regress/expected/multi_data_types.out b/src/test/regress/expected/multi_data_types.out index 4f814656f..b63653538 100644 --- a/src/test/regress/expected/multi_data_types.out +++ b/src/test/regress/expected/multi_data_types.out @@ -17,7 +17,7 @@ SELECT run_command_on_coordinator_and_workers($cf$ RETURNS NULL ON NULL INPUT; $cf$); run_command_on_coordinator_and_workers ----------------------------------------- +--------------------------------------------------------------------- (1 row) @@ -29,7 +29,7 @@ SELECT run_command_on_coordinator_and_workers($cf$ RETURNS NULL ON NULL INPUT; $cf$); run_command_on_coordinator_and_workers ----------------------------------------- +--------------------------------------------------------------------- (1 row) @@ -43,7 +43,7 @@ SELECT run_command_on_coordinator_and_workers($co$ ); $co$); run_command_on_coordinator_and_workers ----------------------------------------- +--------------------------------------------------------------------- (1 row) @@ -75,7 +75,7 @@ CREATE TABLE composite_type_partitioned_table SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('composite_type_partitioned_table', 'col', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -87,14 +87,14 @@ INSERT INTO composite_type_partitioned_table VALUES (4, '(7, 8)'::test_composit INSERT INTO composite_type_partitioned_table VALUES (5, '(9, 10)'::test_composite_type); SELECT * FROM composite_type_partitioned_table WHERE col = '(7, 8)'::test_composite_type; id | col -----+------- +--------------------------------------------------------------------- 4 | (7,8) (1 row) UPDATE composite_type_partitioned_table SET id = 6 WHERE col = '(7, 8)'::test_composite_type; SELECT * FROM composite_type_partitioned_table WHERE col = '(7, 8)'::test_composite_type; id | col -----+------- +--------------------------------------------------------------------- 6 | (7,8) (1 row) @@ -106,7 +106,7 @@ CREATE TABLE bugs ( ); SELECT create_distributed_table('bugs', 'status', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -118,7 +118,7 @@ INSERT INTO bugs VALUES (4, 'closed'); INSERT INTO bugs VALUES (5, 'open'); SELECT * FROM bugs WHERE status = 'closed'::bug_status; id | status -----+-------- +--------------------------------------------------------------------- 3 | closed 4 | closed (2 rows) @@ -127,7 +127,7 @@ UPDATE bugs SET status = 'closed'::bug_status WHERE id = 2; ERROR: modifying the partition value of rows is not allowed SELECT * FROM bugs WHERE status = 'open'::bug_status; id | status -----+-------- +--------------------------------------------------------------------- 2 | open 5 | open (2 rows) @@ -140,7 +140,7 @@ CREATE TABLE varchar_hash_partitioned_table ); SELECT create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -152,14 +152,14 @@ INSERT INTO varchar_hash_partitioned_table VALUES (4, 'Sumedh'); INSERT INTO varchar_hash_partitioned_table VALUES (5, 'Marco'); SELECT * FROM varchar_hash_partitioned_table WHERE id = 1; id | name -----+------- 
+--------------------------------------------------------------------- 1 | Jason (1 row) UPDATE varchar_hash_partitioned_table SET id = 6 WHERE name = 'Jason'; SELECT * FROM varchar_hash_partitioned_table WHERE id = 6; id | name -----+------- +--------------------------------------------------------------------- 6 | Jason (1 row) diff --git a/src/test/regress/expected/multi_deparse_function.out b/src/test/regress/expected/multi_deparse_function.out index bc878bcaf..ecbad5946 100644 --- a/src/test/regress/expected/multi_deparse_function.out +++ b/src/test/regress/expected/multi_deparse_function.out @@ -63,7 +63,7 @@ CREATE FUNCTION add(integer, integer) RETURNS integer RETURNS NULL ON NULL INPUT; SELECT create_distributed_function('add(int,int)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -73,7 +73,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) CALLED ON NULL INPUT; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -86,7 +86,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) STRICT; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -97,7 +97,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) STRICT; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -108,7 +108,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) IMMUTABLE; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -119,7 +119,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) STABLE; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -130,7 +130,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) VOLATILE; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -141,7 +141,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) LEAKPROOF; 
CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -152,7 +152,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) NOT LEAKPROOF; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -165,7 +165,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SECURITY INVOKER; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -176,7 +176,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SECURITY INVOKER; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -187,7 +187,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SECURITY DEFINER; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -198,7 +198,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SECURITY DEFINER; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -209,7 +209,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) PARALLEL UNSAFE; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -220,7 +220,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) PARALLEL RESTRICTED; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -231,7 +231,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) PARALLEL SAFE; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers 
--------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -243,7 +243,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) COST 1234.000000; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -254,7 +254,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) COST 1234.500000; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -265,7 +265,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET log_min_messages = 'error'; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -276,7 +276,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET log_min_messages TO DEFAULT; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -287,7 +287,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET log_min_messages FROM CURRENT; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -298,7 +298,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET TIME ZONE INTERVAL '@ 8 hours ago'; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -309,7 +309,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET timezone = '-7'; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -320,7 +320,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET "citus.setting;'" = 'hello '' world'; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers 
--------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -331,7 +331,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET "citus.setting;'" = -3.2; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -342,7 +342,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET "citus.setting;'" = -32; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -355,7 +355,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET "citus.setting;'" = 'hello '' world', 'second '' item'; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers ---------------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: SET citus.setting;' takes only one argument") (localhost,57638,f,"ERROR: SET citus.setting;' takes only one argument") (2 rows) @@ -366,7 +366,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) RESET log_min_messages; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -377,7 +377,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) RESET ALL; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -389,7 +389,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) RENAME TO summation; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -404,7 +404,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.summation(integer, integer) RENAME TO add; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -416,7 +416,7 @@ NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
SELECT run_command_on_workers('CREATE ROLE function_role'); run_command_on_workers ------------------------------------ +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -427,7 +427,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) OWNER TO function_role; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -438,7 +438,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) OWNER TO missing_role; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: role ""missing_role"" does not exist") (localhost,57638,f,"ERROR: role ""missing_role"" does not exist") (2 rows) @@ -450,7 +450,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET SCHEMA public; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -463,7 +463,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION public.add(integer, integer) SET SCHEMA function_tests; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -475,7 +475,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) DEPENDS ON EXTENSION citus; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -487,7 +487,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION pg_catalog.get_shard_id_for_distribution_column(table_name regclass, distribution_value "any") PARALLEL SAFE; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -497,7 +497,7 @@ SELECT deparse_test($cmd$ DROP FUNCTION add(int,int); $cmd$); deparse_test ------------------------------------------------------ +--------------------------------------------------------------------- DROP FUNCTION function_tests.add(integer, integer); (1 row) @@ -508,7 +508,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) VOLATILE LEAKPROOF SECURITY DEFINER PARALLEL UNSAFE; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE 
deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -521,7 +521,7 @@ $cmd$); INFO: Propagating deparsed query: DROP FUNCTION missing_function(pg_catalog.int4,text); CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers ---------------------------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: function missing_function(integer, text) does not exist") (localhost,57638,f,"ERROR: function missing_function(integer, text) does not exist") (2 rows) @@ -534,7 +534,7 @@ $cmd$); INFO: Propagating deparsed query: DROP FUNCTION IF EXISTS missing_function(pg_catalog.int4,text); CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers -------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"DROP FUNCTION") (localhost,57638,t,"DROP FUNCTION") (2 rows) @@ -545,7 +545,7 @@ $cmd$); INFO: Propagating deparsed query: DROP FUNCTION IF EXISTS missing_schema.missing_function(pg_catalog.int4,pg_catalog.float8); CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers -------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"DROP FUNCTION") (localhost,57638,t,"DROP FUNCTION") (2 rows) @@ -556,7 +556,7 @@ $cmd$); INFO: Propagating deparsed query: DROP FUNCTION IF EXISTS missing_func_without_args; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers -------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"DROP FUNCTION") (localhost,57638,t,"DROP FUNCTION") (2 rows) @@ -569,7 +569,7 @@ SELECT run_command_on_workers($$ CREATE SCHEMA IF NOT EXISTS "CiTUS.TEEN2"; $$); run_command_on_workers -------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"CREATE SCHEMA") (localhost,57638,t,"CREATE SCHEMA") (2 rows) @@ -583,13 +583,13 @@ CREATE FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"(text) RETURNS TEXT LANGUAGE SQL; SELECT create_distributed_function('"CiTuS.TeeN"."TeeNFunCT10N.1!?!"()'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT create_distributed_function('"CiTuS.TeeN"."TeeNFunCT10N.1!?!"(text)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -599,7 +599,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"() SET SCHEMA "CiTUS.TEEN2"; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -611,7 +611,7 @@ $cmd$); INFO: Propagating deparsed query: DROP FUNCTION "CiTUS.TEEN2"."TeeNFunCT10N.1!?!"(), "CiTuS.TeeN"."TeeNFunCT10N.1!?!"(text); CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 
6 at RAISE deparse_and_run_on_workers -------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"DROP FUNCTION") (localhost,57638,t,"DROP FUNCTION") (2 rows) @@ -622,7 +622,7 @@ CREATE FUNCTION func_default_param(param INT DEFAULT 0) RETURNS TEXT LANGUAGE SQL; SELECT create_distributed_function('func_default_param(INT)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -632,7 +632,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.func_default_param(param integer) RENAME TO func_with_default_param; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -643,7 +643,7 @@ CREATE FUNCTION func_out_param(IN param INT, OUT result TEXT) LANGUAGE SQL; SELECT create_distributed_function('func_out_param(INT)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -653,7 +653,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.func_out_param(param integer, OUT result text) RENAME TO func_in_and_out_param; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -667,7 +667,7 @@ END; $$ LANGUAGE plpgsql; SELECT create_distributed_function('square(NUMERIC)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -677,7 +677,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.square(INOUT a numeric) SET search_path TO DEFAULT; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -698,7 +698,7 @@ END; $$ LANGUAGE plpgsql; SELECT create_distributed_function('sum_avg(NUMERIC[])'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -708,7 +708,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.sum_avg(VARIADIC list numeric[], OUT total numeric, OUT average numeric) COST 10000.000000; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -720,7 +720,7 @@ CREATE FUNCTION func_custom_param(IN param intpair, OUT total INT) LANGUAGE SQL; SELECT create_distributed_function('func_custom_param(intpair)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -730,7 +730,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION 
function_tests.func_custom_param(param function_tests.intpair, OUT total integer) RENAME TO func_with_custom_param; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -742,7 +742,7 @@ CREATE FUNCTION func_returns_table(IN count INT) LANGUAGE SQL; SELECT create_distributed_function('func_returns_table(INT)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -752,7 +752,7 @@ $cmd$); INFO: Propagating deparsed query: ALTER FUNCTION function_tests.func_returns_table(count integer) ROWS 100.000000; CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER FUNCTION") (localhost,57638,t,"ALTER FUNCTION") (2 rows) @@ -768,7 +768,7 @@ SELECT run_command_on_workers($$ DROP SCHEMA function_tests CASCADE; $$); run_command_on_workers ------------------------------------ +--------------------------------------------------------------------- (localhost,57637,t,"DROP SCHEMA") (localhost,57638,t,"DROP SCHEMA") (2 rows) diff --git a/src/test/regress/expected/multi_deparse_procedure.out b/src/test/regress/expected/multi_deparse_procedure.out index 1e370a15e..c7138d557 100644 --- a/src/test/regress/expected/multi_deparse_procedure.out +++ b/src/test/regress/expected/multi_deparse_procedure.out @@ -51,7 +51,7 @@ END; $proc$; SELECT create_distributed_function('raise_info(text)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -59,7 +59,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info CALLED ON NULL INPUT $cmd$); deparse_and_run_on_workers -------------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -68,7 +68,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info RETURNS NULL ON NULL INPUT $cmd$); deparse_and_run_on_workers -------------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -77,7 +77,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info STRICT $cmd$); deparse_and_run_on_workers -------------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -86,7 +86,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info IMMUTABLE $cmd$); deparse_and_run_on_workers -------------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: 
invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -95,7 +95,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info STABLE $cmd$); deparse_and_run_on_workers -------------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -104,7 +104,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info VOLATILE $cmd$); deparse_and_run_on_workers -------------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -113,7 +113,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info LEAKPROOF $cmd$); deparse_and_run_on_workers -------------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -122,7 +122,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info NOT LEAKPROOF $cmd$); deparse_and_run_on_workers -------------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -131,7 +131,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info EXTERNAL SECURITY INVOKER $cmd$); deparse_and_run_on_workers ---------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -140,7 +140,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info SECURITY INVOKER $cmd$); deparse_and_run_on_workers ---------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -149,7 +149,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info EXTERNAL SECURITY DEFINER $cmd$); deparse_and_run_on_workers ---------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -158,7 +158,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info SECURITY DEFINER $cmd$); deparse_and_run_on_workers ---------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -167,7 +167,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info PARALLEL UNSAFE $cmd$); deparse_and_run_on_workers -------------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") 
(localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -176,7 +176,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info PARALLEL RESTRICTED $cmd$); deparse_and_run_on_workers -------------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -185,7 +185,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info PARALLEL SAFE $cmd$); deparse_and_run_on_workers -------------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -195,7 +195,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info COST 1234 $cmd$); deparse_and_run_on_workers -------------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -204,7 +204,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info COST 1234.5 $cmd$); deparse_and_run_on_workers -------------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -213,7 +213,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info ROWS 10 $cmd$); deparse_and_run_on_workers -------------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -222,7 +222,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info ROWS 10.8 $cmd$); deparse_and_run_on_workers -------------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") (2 rows) @@ -231,7 +231,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info SECURITY INVOKER SET client_min_messages TO warning; $cmd$); deparse_and_run_on_workers ---------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -240,7 +240,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info SET log_min_messages = ERROR $cmd$); deparse_and_run_on_workers ---------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -249,7 +249,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info SET log_min_messages TO DEFAULT $cmd$); deparse_and_run_on_workers 
---------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -258,7 +258,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info SET log_min_messages FROM CURRENT $cmd$); deparse_and_run_on_workers ---------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -267,7 +267,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info RESET log_min_messages $cmd$); deparse_and_run_on_workers ---------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -276,7 +276,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info RESET ALL $cmd$); deparse_and_run_on_workers ---------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -286,7 +286,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info RENAME TO summation; $cmd$); deparse_and_run_on_workers ---------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -296,7 +296,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE summation RENAME TO raise_info; $cmd$); deparse_and_run_on_workers ---------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -307,7 +307,7 @@ NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
SELECT run_command_on_workers($$CREATE ROLE procedure_role;$$); run_command_on_workers ------------------------------------ +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -316,7 +316,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info OWNER TO procedure_role $cmd$); deparse_and_run_on_workers ---------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -325,7 +325,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info OWNER TO missing_role $cmd$); deparse_and_run_on_workers --------------------------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,f,"ERROR: role ""missing_role"" does not exist") (localhost,57638,f,"ERROR: role ""missing_role"" does not exist") (2 rows) @@ -335,7 +335,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info SET SCHEMA public; $cmd$); deparse_and_run_on_workers ---------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -345,7 +345,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE public.raise_info SET SCHEMA procedure_tests; $cmd$); deparse_and_run_on_workers ---------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -355,7 +355,7 @@ SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info DEPENDS ON EXTENSION citus $cmd$); deparse_and_run_on_workers ---------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") (2 rows) @@ -364,7 +364,7 @@ SELECT deparse_and_run_on_workers($cmd$ DROP PROCEDURE raise_info(text); $cmd$); deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"DROP PROCEDURE") (localhost,57638,t,"DROP PROCEDURE") (2 rows) @@ -374,7 +374,7 @@ SELECT deparse_and_run_on_workers($cmd$ DROP PROCEDURE IF EXISTS missing_PROCEDURE(int, text); $cmd$); deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"DROP PROCEDURE") (localhost,57638,t,"DROP PROCEDURE") (2 rows) @@ -383,7 +383,7 @@ SELECT deparse_and_run_on_workers($cmd$ DROP PROCEDURE IF EXISTS missing_schema.missing_PROCEDURE(int,float); $cmd$); deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"DROP PROCEDURE") (localhost,57638,t,"DROP PROCEDURE") (2 rows) @@ -392,7 +392,7 @@ SELECT deparse_and_run_on_workers($cmd$ DROP PROCEDURE IF EXISTS missing_schema.missing_PROCEDURE(int,float) CASCADE; $cmd$); deparse_and_run_on_workers --------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"DROP PROCEDURE") (localhost,57638,t,"DROP PROCEDURE") (2 rows) @@ -403,7 +403,7 @@ DROP SCHEMA procedure_tests CASCADE; DROP ROLE procedure_role; 
SELECT run_command_on_workers($$DROP ROLE procedure_role;$$); run_command_on_workers ---------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"DROP ROLE") (localhost,57638,t,"DROP ROLE") (2 rows) diff --git a/src/test/regress/expected/multi_deparse_shard_query.out b/src/test/regress/expected/multi_deparse_shard_query.out index 7f2369914..6816d9e04 100644 --- a/src/test/regress/expected/multi_deparse_shard_query.out +++ b/src/test/regress/expected/multi_deparse_shard_query.out @@ -21,7 +21,7 @@ CREATE TABLE raw_events_1 ); SELECT create_distributed_table('raw_events_1', 'tenant_id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -39,7 +39,7 @@ CREATE TABLE raw_events_2 ); SELECT create_distributed_table('raw_events_2', 'tenant_id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -54,7 +54,7 @@ CREATE TABLE aggregated_events rollup_hour date); SELECT create_distributed_table('aggregated_events', 'tenant_id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -65,7 +65,7 @@ SELECT * FROM raw_events_1; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at FROM public.raw_events_1 deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) @@ -78,7 +78,7 @@ FROM '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, event_at) SELECT tenant_id, value_4, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1 deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) @@ -92,7 +92,7 @@ FROM '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1 deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) @@ -106,7 +106,7 @@ FROM '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_2 deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) @@ -122,7 +122,7 @@ GROUP BY '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, average_value_3, sum_value_4, average_value_6, rollup_hour) SELECT tenant_id, sum(value_1) AS sum, avg(value_3) AS avg, sum(value_4) AS sum, avg(value_6) AS avg, date_trunc('hour'::text, (event_at)::timestamp with time zone) AS date_trunc FROM public.raw_events_1 GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone)) deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) @@ -139,7 +139,7 @@ WHERE '); INFO: query: INSERT 
INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT raw_events_1.tenant_id, raw_events_2.value_3, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1, public.raw_events_2 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id) deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) @@ -155,7 +155,7 @@ WHERE '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT avg(raw_events_1.value_3) AS avg, max(raw_events_2.value_3) AS max, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1, public.raw_events_2 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id) GROUP BY raw_events_1.event_at deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) @@ -175,7 +175,7 @@ ORDER BY '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_4) SELECT r3.tenant_id, max(r1.value_4) AS max FROM public.raw_events_1 r1, public.raw_events_2 r2, public.raw_events_1 r3 WHERE ((r1.tenant_id OPERATOR(pg_catalog.=) r2.tenant_id) AND (r2.tenant_id OPERATOR(pg_catalog.=) r3.tenant_id)) GROUP BY r1.value_1, r3.tenant_id, r2.event_at ORDER BY r2.event_at DESC deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) @@ -192,7 +192,7 @@ GROUP BY '); INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) INSERT INTO public.aggregated_events (tenant_id, sum_value_5, rollup_hour) SELECT tenant_id, sum((value_5)::integer) AS sum, event_at FROM public.raw_events_1 GROUP BY event_at, tenant_id deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) @@ -208,7 +208,7 @@ GROUP BY '); INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, sum((value_5)::integer) AS sum FROM public.raw_events_1 GROUP BY event_at, tenant_id deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) @@ -227,7 +227,7 @@ SELECT * FROM hierarchy WHERE LEVEL <= 2; '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) WITH RECURSIVE hierarchy AS (SELECT raw_events_1.value_1, 1 AS level, raw_events_1.tenant_id FROM public.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) 1) UNION SELECT re.value_2, (h.level OPERATOR(pg_catalog.+) 1), re.tenant_id FROM (hierarchy h JOIN public.raw_events_1 re ON (((h.tenant_id OPERATOR(pg_catalog.=) re.tenant_id) AND (h.value_1 OPERATOR(pg_catalog.=) re.value_6))))) SELECT tenant_id, value_1, level FROM hierarchy WHERE (level OPERATOR(pg_catalog.<=) 2) deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) @@ -240,7 +240,7 @@ FROM '); INFO: query: INSERT INTO public.aggregated_events (sum_value_1) SELECT DISTINCT value_1 FROM public.raw_events_1 deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) @@ -253,7 +253,7 @@ SELECT value_3, value_2, tenant_id '); INFO: 
query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, value_2, value_3 FROM public.raw_events_1 WHERE (((value_5 OPERATOR(pg_catalog.~~) '%s'::text) OR (value_5 OPERATOR(pg_catalog.~~) '%a'::text)) AND (tenant_id OPERATOR(pg_catalog.=) 1) AND ((value_6 OPERATOR(pg_catalog.<) 3000) OR (value_3 OPERATOR(pg_catalog.>) (8000)::double precision))) deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) @@ -265,7 +265,7 @@ SELECT rank() OVER (PARTITION BY tenant_id ORDER BY value_6), tenant_id '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, rank() OVER (PARTITION BY tenant_id ORDER BY value_6) AS rank FROM public.raw_events_1 WHERE (event_at OPERATOR(pg_catalog.=) now()) deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) @@ -278,7 +278,7 @@ SELECT random(), int4eq(1, max(value_1))::int, value_6 '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_4, sum_value_5) SELECT (int4eq(1, max(value_1)))::integer AS int4eq, value_6, random() AS random FROM public.raw_events_1 WHERE (event_at OPERATOR(pg_catalog.=) now()) GROUP BY event_at, value_7, value_6 deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) @@ -299,7 +299,7 @@ SELECT '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1) SELECT max(tenant_id) AS max, count(DISTINCT CASE WHEN (value_1 OPERATOR(pg_catalog.>) 100) THEN tenant_id ELSE (value_6)::bigint END) AS c FROM public.raw_events_1 deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) @@ -316,7 +316,7 @@ FROM '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_6, value_7, event_at) SELECT tenant_id, value_1, 10 AS value_6, value_7, (now())::date AS event_at FROM (SELECT raw_events_2.tenant_id, raw_events_2.value_2 AS value_7, raw_events_2.value_1 FROM public.raw_events_2) foo deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) @@ -337,7 +337,7 @@ GROUP BY '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, sum(value_1) AS sum, sum((value_5)::bigint) AS sum FROM (SELECT raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1 FROM public.raw_events_2, public.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)) foo GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone)) deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) @@ -354,7 +354,7 @@ FROM '); INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) @@ -371,7 +371,7 @@ 
FROM '); INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT value_2, value_4, value_1, value_3, tenant_id, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) @@ -387,7 +387,7 @@ ORDER BY '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_7, 10 AS value_6, value_7, (now())::date AS event_at FROM public.raw_events_1 ORDER BY value_2, value_1 deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) @@ -402,7 +402,7 @@ FROM '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_4, 10 AS value_6, value_7, (now())::date AS event_at FROM public.raw_events_1 deparse_shard_query_test --------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/multi_distributed_transaction_id.out b/src/test/regress/expected/multi_distributed_transaction_id.out index d2c3148b8..1e58c6a65 100644 --- a/src/test/regress/expected/multi_distributed_transaction_id.out +++ b/src/test/regress/expected/multi_distributed_transaction_id.out @@ -12,7 +12,7 @@ SET TIME ZONE 'PST8PDT'; -- should return uninitialized values if not in a transaction SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_current_transaction_id(); initiator_node_identifier | transaction_number | transaction_stamp ----------------------------+--------------------+------------------- +--------------------------------------------------------------------- 0 | 0 | (1 row) @@ -20,21 +20,21 @@ BEGIN; -- we should still see the uninitialized values SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); initiator_node_identifier | transaction_number | transaction_stamp | ?column? ----------------------------+--------------------+-------------------+---------- +--------------------------------------------------------------------- 0 | 0 | | t (1 row) -- now assign a value SELECT assign_distributed_transaction_id(50, 50, '2016-01-01 00:00:00+0'); assign_distributed_transaction_id ------------------------------------ +--------------------------------------------------------------------- (1 row) -- see the assigned value SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); initiator_node_identifier | transaction_number | transaction_stamp | ?column? ----------------------------+--------------------+------------------------------+---------- +--------------------------------------------------------------------- 50 | 50 | Thu Dec 31 16:00:00 2015 PST | t (1 row) @@ -45,7 +45,7 @@ ROLLBACK; -- since the transaction finished, we should see the uninitialized values SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); initiator_node_identifier | transaction_number | transaction_stamp | ?column? 
----------------------------+--------------------+-------------------+---------- +--------------------------------------------------------------------- 0 | 0 | | t (1 row) @@ -54,14 +54,14 @@ BEGIN; -- we should still see the uninitialized values SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); initiator_node_identifier | transaction_number | transaction_stamp | ?column? ----------------------------+--------------------+-------------------+---------- +--------------------------------------------------------------------- 0 | 0 | | t (1 row) -- now assign a value SELECT assign_distributed_transaction_id(52, 52, '2015-01-01 00:00:00+0'); assign_distributed_transaction_id ------------------------------------ +--------------------------------------------------------------------- (1 row) @@ -71,7 +71,7 @@ COMMIT; -- since the transaction errored, we should see the uninitialized values again SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); initiator_node_identifier | transaction_number | transaction_stamp | ?column? ----------------------------+--------------------+-------------------+---------- +--------------------------------------------------------------------- 0 | 0 | | t (1 row) @@ -79,20 +79,20 @@ COMMIT; BEGIN; SELECT assign_distributed_transaction_id(52, 52, '2015-01-01 00:00:00+0'); assign_distributed_transaction_id ------------------------------------ +--------------------------------------------------------------------- (1 row) SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); initiator_node_identifier | transaction_number | transaction_stamp | ?column? ----------------------------+--------------------+------------------------------+---------- +--------------------------------------------------------------------- 52 | 52 | Wed Dec 31 16:00:00 2014 PST | t (1 row) \c - - - :master_port SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); initiator_node_identifier | transaction_number | transaction_stamp | ?column? ----------------------------+--------------------+-------------------+---------- +--------------------------------------------------------------------- 0 | 0 | | t (1 row) @@ -100,13 +100,13 @@ BEGIN; BEGIN; SELECT assign_distributed_transaction_id(120, 120, '2015-01-01 00:00:00+0'); assign_distributed_transaction_id ------------------------------------ +--------------------------------------------------------------------- (1 row) SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); initiator_node_identifier | transaction_number | transaction_stamp | ?column? ----------------------------+--------------------+------------------------------+---------- +--------------------------------------------------------------------- 120 | 120 | Wed Dec 31 16:00:00 2014 PST | t (1 row) @@ -114,7 +114,7 @@ BEGIN; -- after the prepare we should see that transaction id is cleared SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); initiator_node_identifier | transaction_number | transaction_stamp | ?column? 
----------------------------+--------------------+-------------------+---------- +--------------------------------------------------------------------- 0 | 0 | | t (1 row) @@ -131,7 +131,7 @@ $$ LANGUAGE sql; BEGIN; SELECT assign_distributed_transaction_id(50, 1234567, '2016-01-01 00:00:00+0'); assign_distributed_transaction_id ------------------------------------ +--------------------------------------------------------------------- (1 row) @@ -145,7 +145,7 @@ SET LOCAL parallel_tuple_cost TO 0; EXPLAIN (COSTS OFF) SELECT a FROM parallel_id_test WHERE a = parallel_worker_transaction_id_test(); QUERY PLAN -------------------------------------------------------------- +--------------------------------------------------------------------- Gather Workers Planned: 1 -> Parallel Seq Scan on parallel_id_test @@ -154,7 +154,7 @@ SELECT a FROM parallel_id_test WHERE a = parallel_worker_transaction_id_test(); SELECT a FROM parallel_id_test WHERE a = parallel_worker_transaction_id_test(); a ---------- +--------------------------------------------------------------------- 1234567 1234567 (2 rows) diff --git a/src/test/regress/expected/multi_distribution_metadata.out b/src/test/regress/expected/multi_distribution_metadata.out index b1c49a78c..f375fa071 100644 --- a/src/test/regress/expected/multi_distribution_metadata.out +++ b/src/test/regress/expected/multi_distribution_metadata.out @@ -52,7 +52,7 @@ CREATE TABLE events_hash ( ); SELECT create_distributed_table('events_hash', 'name', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -62,14 +62,14 @@ UPDATE pg_dist_placement SET shardstate = 0 WHERE shardid BETWEEN 540000 AND 540 -- should see above shard identifiers SELECT load_shard_id_array('events_hash'); load_shard_id_array -------------------------------- +--------------------------------------------------------------------- {540000,540001,540002,540003} (1 row) -- should see array with first shard range SELECT load_shard_interval_array(540000, 0); load_shard_interval_array ---------------------------- +--------------------------------------------------------------------- {-2147483648,-1073741825} (1 row) @@ -81,14 +81,14 @@ CREATE TABLE events_range ( ); SELECT master_create_distributed_table('events_range', 'name', 'range'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) -- create empty shard SELECT master_create_empty_shard('events_range'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 540004 (1 row) @@ -98,7 +98,7 @@ UPDATE pg_dist_shard SET WHERE shardid = 540004; SELECT load_shard_interval_array(540004, ''::text); load_shard_interval_array ---------------------------- +--------------------------------------------------------------------- {Aardvark,Zebra} (1 row) @@ -108,35 +108,35 @@ ERROR: could not find valid entry for shard xxxxx -- should see two placements SELECT load_shard_placement_array(540001, false); load_shard_placement_array ------------------------------------ +--------------------------------------------------------------------- {localhost:xxxxx,localhost:xxxxx} (1 row) -- only one of which is finalized SELECT load_shard_placement_array(540001, true); load_shard_placement_array ----------------------------- +--------------------------------------------------------------------- {localhost:xxxxx} (1 row) -- 
should see error for non-existent shard SELECT load_shard_placement_array(540001, false); load_shard_placement_array ------------------------------------ +--------------------------------------------------------------------- {localhost:xxxxx,localhost:xxxxx} (1 row) -- should see column id of 'name' SELECT partition_column_id('events_hash'); partition_column_id ---------------------- +--------------------------------------------------------------------- 2 (1 row) -- should see hash partition type and fail for non-distributed tables SELECT partition_type('events_hash'); partition_type ----------------- +--------------------------------------------------------------------- h (1 row) @@ -145,26 +145,26 @@ ERROR: relation pg_type is not distributed -- should see true for events_hash, false for others SELECT is_distributed_table('events_hash'); is_distributed_table ----------------------- +--------------------------------------------------------------------- t (1 row) SELECT is_distributed_table('pg_type'); is_distributed_table ----------------------- +--------------------------------------------------------------------- f (1 row) SELECT is_distributed_table('pg_dist_shard'); is_distributed_table ----------------------- +--------------------------------------------------------------------- f (1 row) -- test underlying column name-id translation SELECT column_name_to_column_id('events_hash', 'name'); column_name_to_column_id --------------------------- +--------------------------------------------------------------------- 2 (1 row) @@ -182,7 +182,7 @@ DELETE FROM pg_dist_shard -- verify that an eager load shows them missing SELECT load_shard_id_array('events_hash'); load_shard_id_array ---------------------- +--------------------------------------------------------------------- {} (1 row) @@ -199,7 +199,7 @@ VALUES SELECT partmethod, column_to_column_name(logicalrelid, partkey) FROM pg_dist_partition WHERE logicalrelid = 'customers'::regclass; partmethod | column_to_column_name -------------+----------------------- +--------------------------------------------------------------------- h | id (1 row) @@ -210,7 +210,7 @@ SELECT column_to_column_name('customers',''); ERROR: not a valid column SELECT column_to_column_name('pg_dist_node'::regclass, NULL); column_to_column_name ------------------------ +--------------------------------------------------------------------- (1 row) @@ -227,7 +227,7 @@ SELECT create_monolithic_shard_row('customers') AS new_shard_id SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = :new_shard_id; shardstorage | shardminvalue | shardmaxvalue ---------------+---------------+--------------- +--------------------------------------------------------------------- t | -2147483648 | 2147483647 (1 row) @@ -237,13 +237,13 @@ BEGIN; -- pick up a shard lock and look for it in pg_locks SELECT acquire_shared_shard_lock(5); acquire_shared_shard_lock ---------------------------- +--------------------------------------------------------------------- (1 row) SELECT objid, mode FROM pg_locks WHERE locktype = 'advisory' AND objid = 5; objid | mode --------+----------- +--------------------------------------------------------------------- 5 | ShareLock (1 row) @@ -252,7 +252,7 @@ COMMIT; -- lock should be gone now SELECT COUNT(*) FROM pg_locks WHERE locktype = 'advisory' AND objid = 5; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -261,26 +261,26 @@ SET citus.shard_count TO 4; CREATE TABLE 
get_shardid_test_table1(column1 int, column2 int); SELECT create_distributed_table('get_shardid_test_table1', 'column1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) \COPY get_shardid_test_table1 FROM STDIN with delimiter '|'; SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 1); get_shard_id_for_distribution_column --------------------------------------- +--------------------------------------------------------------------- 540006 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 2); get_shard_id_for_distribution_column --------------------------------------- +--------------------------------------------------------------------- 540009 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 3); get_shard_id_for_distribution_column --------------------------------------- +--------------------------------------------------------------------- 540007 (1 row) @@ -288,19 +288,19 @@ SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 3); \c - - - :worker_1_port SELECT * FROM get_shardid_test_table1_540006; column1 | column2 ----------+--------- +--------------------------------------------------------------------- 1 | 1 (1 row) SELECT * FROM get_shardid_test_table1_540009; column1 | column2 ----------+--------- +--------------------------------------------------------------------- 2 | 2 (1 row) SELECT * FROM get_shardid_test_table1_540007; column1 | column2 ----------+--------- +--------------------------------------------------------------------- 3 | 3 (1 row) @@ -308,7 +308,7 @@ SELECT * FROM get_shardid_test_table1_540007; -- test non-existing value SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 4); get_shard_id_for_distribution_column --------------------------------------- +--------------------------------------------------------------------- 540007 (1 row) @@ -317,20 +317,20 @@ SET citus.shard_count TO 4; CREATE TABLE get_shardid_test_table2(column1 text[], column2 int); SELECT create_distributed_table('get_shardid_test_table2', 'column1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) \COPY get_shardid_test_table2 FROM STDIN with delimiter '|'; SELECT get_shard_id_for_distribution_column('get_shardid_test_table2', '{a, b, c}'); get_shard_id_for_distribution_column --------------------------------------- +--------------------------------------------------------------------- 540013 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table2', '{d, e, f}'); get_shard_id_for_distribution_column --------------------------------------- +--------------------------------------------------------------------- 540011 (1 row) @@ -338,13 +338,13 @@ SELECT get_shard_id_for_distribution_column('get_shardid_test_table2', '{d, e, f \c - - - :worker_1_port SELECT * FROM get_shardid_test_table2_540013; column1 | column2 ----------+--------- +--------------------------------------------------------------------- {a,b,c} | 1 (1 row) SELECT * FROM get_shardid_test_table2_540011; column1 | column2 ----------+--------- +--------------------------------------------------------------------- {d,e,f} | 2 (1 row) @@ -365,7 +365,7 @@ ERROR: relation is not distributed -- test append distributed table SELECT create_distributed_table('get_shardid_test_table3', 'column1', 'append'); create_distributed_table 
--------------------------- +--------------------------------------------------------------------- (1 row) @@ -375,39 +375,39 @@ ERROR: finding shard id of given distribution value is only supported for hash CREATE TABLE get_shardid_test_table4(column1 int, column2 int); SELECT create_reference_table('get_shardid_test_table4'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) -- test NULL distribution column value for reference table SELECT get_shard_id_for_distribution_column('get_shardid_test_table4'); get_shard_id_for_distribution_column --------------------------------------- +--------------------------------------------------------------------- 540014 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', NULL); get_shard_id_for_distribution_column --------------------------------------- +--------------------------------------------------------------------- 540014 (1 row) -- test different data types for reference table SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', 1); get_shard_id_for_distribution_column --------------------------------------- +--------------------------------------------------------------------- 540014 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', 'a'); get_shard_id_for_distribution_column --------------------------------------- +--------------------------------------------------------------------- 540014 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', '{a, b, c}'); get_shard_id_for_distribution_column --------------------------------------- +--------------------------------------------------------------------- 540014 (1 row) @@ -415,32 +415,32 @@ SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', '{a, b, c CREATE TABLE get_shardid_test_table5(column1 int, column2 int); SELECT create_distributed_table('get_shardid_test_table5', 'column1', 'range'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) -- create worker shards SELECT master_create_empty_shard('get_shardid_test_table5'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 540015 (1 row) SELECT master_create_empty_shard('get_shardid_test_table5'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 540016 (1 row) SELECT master_create_empty_shard('get_shardid_test_table5'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 540017 (1 row) SELECT master_create_empty_shard('get_shardid_test_table5'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 540018 (1 row) @@ -451,38 +451,38 @@ UPDATE pg_dist_shard SET shardminvalue = 2001, shardmaxvalue = 3000 WHERE shardi UPDATE pg_dist_shard SET shardminvalue = 3001, shardmaxvalue = 4000 WHERE shardid = 540018; SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 5); get_shard_id_for_distribution_column --------------------------------------- +--------------------------------------------------------------------- 540015 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 1111); get_shard_id_for_distribution_column 
--------------------------------------- +--------------------------------------------------------------------- 540016 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 2689); get_shard_id_for_distribution_column --------------------------------------- +--------------------------------------------------------------------- 540017 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 3248); get_shard_id_for_distribution_column --------------------------------------- +--------------------------------------------------------------------- 540018 (1 row) -- test non-existing value for range distributed tables SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 4001); get_shard_id_for_distribution_column --------------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', -999); get_shard_id_for_distribution_column --------------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -490,14 +490,14 @@ SET citus.shard_count TO 2; CREATE TABLE events_table_count (user_id int, time timestamp, event_type int, value_2 int, value_3 float, value_4 bigint); SELECT create_distributed_table('events_table_count', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE users_table_count (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint); SELECT create_distributed_table('users_table_count', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -573,7 +573,7 @@ GROUP BY ORDER BY types;$$); relation_count_in_query -------------------------- +--------------------------------------------------------------------- 6 (1 row) diff --git a/src/test/regress/expected/multi_drop_extension.out b/src/test/regress/expected/multi_drop_extension.out index 384536c2e..6dc707a22 100644 --- a/src/test/regress/expected/multi_drop_extension.out +++ b/src/test/regress/expected/multi_drop_extension.out @@ -6,7 +6,7 @@ SET citus.next_shard_id TO 550000; CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT create_distributed_table('testtableddl', 'distributecol', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -21,13 +21,13 @@ CREATE EXTENSION citus; -- re-add the nodes to the cluster SELECT 1 FROM master_add_node('localhost', :worker_1_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -35,19 +35,19 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port); CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT create_distributed_table('testtableddl', 'distributecol', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT 1 FROM master_create_empty_shard('testtableddl'); ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) SELECT * FROM testtableddl; somecol | distributecol ----------+--------------- +--------------------------------------------------------------------- (0 rows) DROP TABLE testtableddl; diff --git a/src/test/regress/expected/multi_dropped_column_aliases.out b/src/test/regress/expected/multi_dropped_column_aliases.out index 3744d71db..930c8e3a6 100644 --- a/src/test/regress/expected/multi_dropped_column_aliases.out +++ b/src/test/regress/expected/multi_dropped_column_aliases.out @@ -3,13 +3,13 @@ SET citus.next_shard_id TO 620000; SELECT count(*) FROM customer; count -------- +--------------------------------------------------------------------- 1000 (1 row) SELECT * FROM customer LIMIT 2; c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment ------------+--------------------+--------------------------------+-------------+-----------------+-----------+--------------+----------------------------------------------------------------- +--------------------------------------------------------------------- 1 | Customer#000000001 | IVhzIApeRb ot,c,E | 15 | 25-989-741-2988 | 711.56 | BUILDING | to the even, regular platelets. regular, ironic epitaphs nag e 2 | Customer#000000002 | XSTf4,NCwDVaWNe6tEgvwfmRchLXak | 13 | 23-768-687-3665 | 121.65 | AUTOMOBILE | l accounts. blithely ironic theodolites integrate boldly: caref (2 rows) @@ -18,13 +18,13 @@ ALTER TABLE customer ADD COLUMN new_column1 INTEGER; ALTER TABLE customer ADD COLUMN new_column2 INTEGER; SELECT count(*) FROM customer; count -------- +--------------------------------------------------------------------- 1000 (1 row) SELECT * FROM customer LIMIT 2; c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment | new_column1 | new_column2 ------------+--------------------+--------------------------------+-------------+-----------------+-----------+--------------+-----------------------------------------------------------------+-------------+------------- +--------------------------------------------------------------------- 1 | Customer#000000001 | IVhzIApeRb ot,c,E | 15 | 25-989-741-2988 | 711.56 | BUILDING | to the even, regular platelets. regular, ironic epitaphs nag e | | 2 | Customer#000000002 | XSTf4,NCwDVaWNe6tEgvwfmRchLXak | 13 | 23-768-687-3665 | 121.65 | AUTOMOBILE | l accounts. blithely ironic theodolites integrate boldly: caref | | (2 rows) @@ -33,13 +33,13 @@ ALTER TABLE customer DROP COLUMN new_column1; ALTER TABLE customer DROP COLUMN new_column2; SELECT count(*) FROM customer; count -------- +--------------------------------------------------------------------- 1000 (1 row) SELECT * FROM customer LIMIT 2; c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment ------------+--------------------+--------------------------------+-------------+-----------------+-----------+--------------+----------------------------------------------------------------- +--------------------------------------------------------------------- 1 | Customer#000000001 | IVhzIApeRb ot,c,E | 15 | 25-989-741-2988 | 711.56 | BUILDING | to the even, regular platelets. regular, ironic epitaphs nag e 2 | Customer#000000002 | XSTf4,NCwDVaWNe6tEgvwfmRchLXak | 13 | 23-768-687-3665 | 121.65 | AUTOMOBILE | l accounts. 
blithely ironic theodolites integrate boldly: caref (2 rows) @@ -47,7 +47,7 @@ SELECT * FROM customer LIMIT 2; -- Verify joins work with dropped columns. SELECT count(*) FROM customer, orders WHERE c_custkey = o_custkey; count -------- +--------------------------------------------------------------------- 1956 (1 row) diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index 7a5cc7137..bd50bd04e 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -32,7 +32,7 @@ SELECT datname, usename = (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') FROM test.maintenance_worker(); datname | ?column? | ?column? -------------+----------+---------- +--------------------------------------------------------------------- regression | t | t (1 row) @@ -46,7 +46,7 @@ WHERE pgd.refclassid = 'pg_extension'::regclass AND pge.extname = 'citus' AND pgio.schema NOT IN ('pg_catalog', 'citus', 'citus_internal', 'test'); count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -115,7 +115,7 @@ ALTER EXTENSION citus UPDATE TO '9.2-1'; -- show running version SHOW citus.version; citus.version ---------------- +--------------------------------------------------------------------- 9.2devel (1 row) @@ -129,7 +129,7 @@ WHERE pgd.refclassid = 'pg_extension'::regclass AND pge.extname = 'citus' AND pgio.schema NOT IN ('pg_catalog', 'citus', 'citus_internal', 'test'); count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -153,7 +153,7 @@ INSERT INTO version_mismatch_table(column1) VALUES(5); -- Test SELECT SELECT * FROM version_mismatch_table ORDER BY column1; column1 ---------- +--------------------------------------------------------------------- 0 1 2 @@ -169,7 +169,7 @@ SELECT d.datname as "Name", FROM pg_catalog.pg_database d ORDER BY 1; Name | Owner | Access privileges -------------+----------+----------------------- +--------------------------------------------------------------------- postgres | postgres | regression | postgres | template0 | postgres | =c/postgres + @@ -217,7 +217,7 @@ ALTER EXTENSION citus UPDATE; \d List of relations Schema | Name | Type | Owner ---------+------+------+------- +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -231,7 +231,7 @@ SELECT datname, usename = (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') FROM test.maintenance_worker(); datname | ?column? | ?column? -------------+----------+---------- +--------------------------------------------------------------------- regression | t | t (1 row) @@ -272,7 +272,7 @@ SELECT datname, usename = (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') FROM test.maintenance_worker(); datname | ?column? | ?column? 
----------+----------+---------- +--------------------------------------------------------------------- another | t | t (1 row) @@ -309,7 +309,7 @@ SELECT FROM test_deamon.maintenance_deamon_died('another'); maintenance_deamon_died -------------------------- +--------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/multi_follower_configure_followers.out b/src/test/regress/expected/multi_follower_configure_followers.out index 27d157ef2..e5ca57c0e 100644 --- a/src/test/regress/expected/multi_follower_configure_followers.out +++ b/src/test/regress/expected/multi_follower_configure_followers.out @@ -4,7 +4,7 @@ ALTER SYSTEM SET citus.use_secondary_nodes TO 'always'; ALTER SYSTEM SET citus.cluster_name TO 'second-cluster'; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) @@ -14,7 +14,7 @@ ALTER SYSTEM SET citus.use_secondary_nodes TO 'always'; ALTER SYSTEM SET citus.cluster_name TO 'second-cluster'; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) @@ -23,7 +23,7 @@ ALTER SYSTEM SET citus.use_secondary_nodes TO 'always'; ALTER SYSTEM SET citus.cluster_name TO 'second-cluster'; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/multi_follower_dml.out b/src/test/regress/expected/multi_follower_dml.out index 6fdb91e71..bab93291a 100644 --- a/src/test/regress/expected/multi_follower_dml.out +++ b/src/test/regress/expected/multi_follower_dml.out @@ -2,7 +2,7 @@ CREATE TABLE the_table (a int, b int, z bigserial); SELECT create_distributed_table('the_table', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -17,21 +17,21 @@ SET citus.writable_standby_coordinator TO on; INSERT INTO the_table (a, b, z) VALUES (1, 2, 2); SELECT * FROM the_table; a | b | z ----+---+--- +--------------------------------------------------------------------- 1 | 2 | 2 (1 row) UPDATE the_table SET z = 3 WHERE a = 1; SELECT * FROM the_table; a | b | z ----+---+--- +--------------------------------------------------------------------- 1 | 2 | 3 (1 row) DELETE FROM the_table WHERE a = 1; SELECT * FROM the_table; a | b | z ----+---+--- +--------------------------------------------------------------------- (0 rows) -- drawing from a sequence is not possible @@ -48,7 +48,7 @@ SET citus.multi_shard_commit_protocol TO '1pc'; INSERT INTO the_table (a, b, z) VALUES (2, 3, 4), (5, 6, 7); SELECT * FROM the_table ORDER BY a; a | b | z ----+---+--- +--------------------------------------------------------------------- 2 | 3 | 4 5 | 6 | 7 (2 rows) @@ -57,7 +57,7 @@ SELECT * FROM the_table ORDER BY a; WITH del AS (DELETE FROM the_table RETURNING *) SELECT * FROM del ORDER BY a; a | b | z ----+---+--- +--------------------------------------------------------------------- 2 | 3 | 4 5 | 6 | 7 (2 rows) @@ -66,7 +66,7 @@ SELECT * FROM del ORDER BY a; COPY the_table (a, b, z) FROM STDIN WITH CSV; SELECT * FROM the_table ORDER BY a; a | b | z -----+----+---- +--------------------------------------------------------------------- 10 | 10 | 10 11 | 11 | 11 (2 rows) @@ -84,7 +84,7 @@ INSERT INTO the_table (a, b, z) VALUES (1, 2, 2); ROLLBACK; SELECT * FROM the_table ORDER BY a; a | b | z ----+---+--- 
+--------------------------------------------------------------------- (0 rows) -- we should still disallow writes to local tables @@ -104,7 +104,7 @@ ERROR: writing to worker nodes is not currently allowed DETAIL: citus.use_secondary_nodes is set to 'always' SELECT * FROM the_table ORDER BY a; a | b | z ----+---+--- +--------------------------------------------------------------------- (0 rows) \c - - - :master_port diff --git a/src/test/regress/expected/multi_follower_select_statements.out b/src/test/regress/expected/multi_follower_select_statements.out index b9eeb6777..b4f87de0a 100644 --- a/src/test/regress/expected/multi_follower_select_statements.out +++ b/src/test/regress/expected/multi_follower_select_statements.out @@ -2,20 +2,20 @@ -- do some setup SELECT 1 FROM master_add_node('localhost', :worker_1_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) CREATE TABLE the_table (a int, b int); SELECT create_distributed_table('the_table', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -28,7 +28,7 @@ CREATE TABLE stock ( ); SELECT create_distributed_table('stock','s_w_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -38,7 +38,7 @@ INSERT INTO stock SELECT c, c, c FROM generate_series(1, 5) as c; \c - - - :follower_master_port SELECT * FROM the_table; a | b ----+--- +--------------------------------------------------------------------- 1 | 1 1 | 2 (2 rows) @@ -49,7 +49,7 @@ group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- 3 | 3 4 | 4 5 | 5 @@ -67,7 +67,7 @@ SELECT 1 FROM master_add_node('localhost', :follower_worker_1_port, groupid => (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_1_port), noderole => 'secondary'); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -75,7 +75,7 @@ SELECT 1 FROM master_add_node('localhost', :follower_worker_2_port, groupid => (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port), noderole => 'secondary'); ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) @@ -83,7 +83,7 @@ SELECT 1 FROM master_add_node('localhost', :follower_worker_2_port, -- now that we've added secondaries this should work SELECT * FROM the_table; a | b ----+--- +--------------------------------------------------------------------- 1 | 1 1 | 2 (2 rows) @@ -94,7 +94,7 @@ group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; s_i_id | ordercount ---------+------------ +--------------------------------------------------------------------- 3 | 3 4 | 4 5 | 5 @@ -107,7 +107,7 @@ FROM ORDER BY node_name, node_port; node_name | node_port ------------+----------- +--------------------------------------------------------------------- localhost | 9071 localhost | 9072 (2 rows) @@ -131,7 +131,7 @@ UPDATE pg_dist_node SET nodecluster = 'second-cluster' WHERE noderole = 'seconda \c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'" SELECT * FROM the_table; a | b ----+--- +--------------------------------------------------------------------- 1 | 1 1 | 2 (2 rows) diff --git a/src/test/regress/expected/multi_follower_task_tracker.out b/src/test/regress/expected/multi_follower_task_tracker.out index 8b09eb664..56b479e51 100644 --- a/src/test/regress/expected/multi_follower_task_tracker.out +++ b/src/test/regress/expected/multi_follower_task_tracker.out @@ -3,7 +3,7 @@ CREATE TABLE tab(a int, b int); SELECT create_distributed_table('tab', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -13,7 +13,7 @@ INSERT INTO tab (a, b) VALUES (1, 2); RESET citus.task_executor_type; SELECT * FROM tab; a | b ----+--- +--------------------------------------------------------------------- 1 | 1 1 | 2 (2 rows) diff --git a/src/test/regress/expected/multi_foreign_key.out b/src/test/regress/expected/multi_foreign_key.out index d388bc902..158f48faa 100644 --- a/src/test/regress/expected/multi_foreign_key.out +++ b/src/test/regress/expected/multi_foreign_key.out @@ -8,7 +8,7 @@ SET citus.shard_count TO 32; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -48,7 +48,7 @@ DROP TABLE self_referencing_table; CREATE TABLE self_referencing_table(id int, ref_id int, PRIMARY KEY (id, ref_id)); SELECT create_distributed_table('self_referencing_table', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -92,7 +92,7 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -105,7 +105,7 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); SELECT create_distributed_table('referenced_table', 'id', 'range'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) 
@@ -120,13 +120,13 @@ CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, te CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id)); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -155,7 +155,7 @@ TRUNCATE referenced_table CASCADE; NOTICE: truncate cascades to table "referencing_table" SELECT * FROM referencing_table; id | ref_id -----+-------- +--------------------------------------------------------------------- (0 rows) -- drop table for next tests @@ -167,13 +167,13 @@ CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, te CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE CASCADE); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -183,12 +183,12 @@ INSERT INTO referencing_table VALUES(1, 1); DELETE FROM referenced_table WHERE id = 1; SELECT * FROM referencing_table; id | ref_id -----+-------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM referenced_table; id | test_column -----+------------- +--------------------------------------------------------------------- (0 rows) -- multi shard cascading delete @@ -197,7 +197,7 @@ INSERT INTO referencing_table VALUES(2, 2); DELETE FROM referenced_table; SELECT * FROM referencing_table; id | ref_id -----+-------- +--------------------------------------------------------------------- (0 rows) -- multi shard cascading delete with alter table @@ -214,13 +214,13 @@ CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, te CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE NO ACTION DEFERRABLE INITIALLY DEFERRED); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -236,12 +236,12 @@ DELETE FROM referencing_table WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; id | ref_id -----+-------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM referenced_table; id | test_column -----+------------- +--------------------------------------------------------------------- (0 rows) DROP TABLE referencing_table; @@ -251,13 +251,13 @@ CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, te CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE RESTRICT); SELECT 
create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -273,13 +273,13 @@ ERROR: current transaction is aborted, commands ignored until end of transactio COMMIT; SELECT * FROM referencing_table; id | ref_id -----+-------- +--------------------------------------------------------------------- 1 | 1 (1 row) SELECT * FROM referenced_table; id | test_column -----+------------- +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -290,13 +290,13 @@ CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, te CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) ON UPDATE NO ACTION DEFERRABLE INITIALLY DEFERRED); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -312,13 +312,13 @@ UPDATE referencing_table SET id = 10 WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; id | ref_id -----+-------- +--------------------------------------------------------------------- 10 | 1 (1 row) SELECT * FROM referenced_table; id | test_column -----+------------- +--------------------------------------------------------------------- 1 | 10 (1 row) @@ -329,13 +329,13 @@ CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, te CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) ON UPDATE RESTRICT); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -351,13 +351,13 @@ ERROR: current transaction is aborted, commands ignored until end of transactio COMMIT; SELECT * FROM referencing_table; id | ref_id -----+-------- +--------------------------------------------------------------------- 1 | 1 (1 row) SELECT * FROM referenced_table; id | test_column -----+------------- +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -368,20 +368,20 @@ CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, te CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) MATCH SIMPLE); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); create_distributed_table --------------------------- 
+--------------------------------------------------------------------- (1 row) INSERT INTO referencing_table VALUES(null, 2); SELECT * FROM referencing_table; id | ref_id -----+-------- +--------------------------------------------------------------------- | 2 (1 row) @@ -393,13 +393,13 @@ CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, te CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) MATCH FULL); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -409,7 +409,7 @@ DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. CONTEXT: while executing command on localhost:xxxxx SELECT * FROM referencing_table; id | ref_id -----+-------- +--------------------------------------------------------------------- (0 rows) DROP TABLE referencing_table; @@ -420,14 +420,14 @@ SET citus.shard_count TO 4; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -473,14 +473,14 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash', colocate_with => 'none'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -494,13 +494,13 @@ CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, te CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -555,12 +555,12 @@ INSERT INTO referencing_table VALUES(1, 1); DELETE FROM referenced_table WHERE id = 1; SELECT * FROM referencing_table; id | ref_id -----+-------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM referenced_table; id | test_column -----+------------- +--------------------------------------------------------------------- (0 rows) ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; @@ -578,12 +578,12 @@ DELETE FROM referencing_table WHERE ref_id = 1; COMMIT; SELECT * 
FROM referencing_table; id | ref_id -----+-------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM referenced_table; id | test_column -----+------------- +--------------------------------------------------------------------- (0 rows) ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; @@ -601,13 +601,13 @@ ERROR: current transaction is aborted, commands ignored until end of transactio COMMIT; SELECT * FROM referencing_table; id | ref_id -----+-------- +--------------------------------------------------------------------- 1 | 1 (1 row) SELECT * FROM referenced_table; id | test_column -----+------------- +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -624,13 +624,13 @@ UPDATE referencing_table SET id = 10 WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; id | ref_id -----+-------- +--------------------------------------------------------------------- 10 | 1 (1 row) SELECT * FROM referenced_table; id | test_column -----+------------- +--------------------------------------------------------------------- 1 | 10 (1 row) @@ -647,13 +647,13 @@ ERROR: current transaction is aborted, commands ignored until end of transactio COMMIT; SELECT * FROM referencing_table; id | ref_id -----+-------- +--------------------------------------------------------------------- 10 | 1 (1 row) SELECT * FROM referenced_table; id | test_column -----+------------- +--------------------------------------------------------------------- 1 | 10 (1 row) @@ -663,7 +663,7 @@ ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id, INSERT INTO referencing_table VALUES(null, 2); SELECT * FROM referencing_table ORDER BY 1,2; id | ref_id -----+-------- +--------------------------------------------------------------------- 10 | 1 | 2 (2 rows) @@ -678,7 +678,7 @@ DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. 
CONTEXT: while executing command on localhost:xxxxx SELECT * FROM referencing_table; id | ref_id -----+-------- +--------------------------------------------------------------------- 10 | 1 (1 row) @@ -691,13 +691,13 @@ CREATE TABLE cyclic_reference_table1(id int, table2_id int, PRIMARY KEY(id, tabl CREATE TABLE cyclic_reference_table2(id int, table1_id int, PRIMARY KEY(id, table1_id)); SELECT create_distributed_table('cyclic_reference_table1', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('cyclic_reference_table2', 'table1_id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -716,13 +716,13 @@ COMMIT; -- verify that rows are actually inserted SELECT * FROM cyclic_reference_table1; id | table2_id -----+----------- +--------------------------------------------------------------------- 1 | 1 (1 row) SELECT * FROM cyclic_reference_table2; id | table1_id -----+----------- +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -756,13 +756,13 @@ INSERT INTO transaction_referencing_table VALUES(1, 1); -- verify that rows are actually inserted SELECT * FROM transaction_referenced_table; id ----- +--------------------------------------------------------------------- 1 (1 row) SELECT * FROM transaction_referencing_table; id | ref_id -----+-------- +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -779,7 +779,7 @@ CREATE TABLE self_referencing_table1( ); SELECT create_distributed_table('self_referencing_table1', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -793,7 +793,7 @@ CONTEXT: while executing command on localhost:xxxxx -- verify that rows are actually inserted SELECT * FROM self_referencing_table1; id | other_column | other_column_ref -----+--------------+------------------ +--------------------------------------------------------------------- 1 | 1 | 1 (1 row) @@ -803,7 +803,7 @@ DROP TABLE self_referencing_table1; CREATE TABLE self_referencing_table2(id int, other_column int, other_column_ref int, PRIMARY KEY(id, other_column)); SELECT create_distributed_table('self_referencing_table2', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -818,7 +818,7 @@ CONTEXT: while executing command on localhost:xxxxx -- verify that rows are actually inserted SELECT * FROM self_referencing_table2; id | other_column | other_column_ref -----+--------------+------------------ +--------------------------------------------------------------------- 1 | 1 | 1 (1 row) @@ -829,7 +829,7 @@ DROP TABLE self_referencing_table2; CREATE TABLE referenced_by_reference_table(id int PRIMARY KEY, other_column int); SELECT create_distributed_table('referenced_by_reference_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -842,14 +842,14 @@ DROP TABLE reference_table; CREATE TABLE reference_table(id int PRIMARY KEY, referencing_column int); SELECT create_reference_table('reference_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE 
reference_table_second(id int, referencing_column int REFERENCES reference_table(id)); SELECT create_reference_table('reference_table_second'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -871,7 +871,7 @@ CREATE TABLE self_referencing_reference_table( ); SELECT create_reference_table('self_referencing_reference_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -880,7 +880,7 @@ DROP TABLE reference_table; CREATE TABLE reference_table(id int PRIMARY KEY, referencing_column int); SELECT create_reference_table('reference_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -891,7 +891,7 @@ DETAIL: A reference table can only have reference keys to other reference table CREATE TABLE references_to_reference_table(id int, referencing_column int); SELECT create_distributed_table('references_to_reference_table', 'referencing_column'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -901,7 +901,7 @@ DROP TABLE reference_table_second; CREATE TABLE reference_table_second(id int, referencing_column int); SELECT create_reference_table('reference_table_second'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -914,7 +914,7 @@ drop cascades to constraint fk on table reference_table_second CREATE TABLE reference_table(id int PRIMARY KEY, referencing_column int); SELECT create_reference_table('reference_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -931,7 +931,7 @@ CREATE TABLE self_referencing_reference_table( ); SELECT create_reference_table('self_referencing_reference_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/multi_foreign_key_relation_graph.out b/src/test/regress/expected/multi_foreign_key_relation_graph.out index fa7b48e3c..1dd36db04 100644 --- a/src/test/regress/expected/multi_foreign_key_relation_graph.out +++ b/src/test/regress/expected/multi_foreign_key_relation_graph.out @@ -14,81 +14,81 @@ CREATE FUNCTION get_referenced_relation_id_list(Oid) CREATE TABLE dtt1(id int PRIMARY KEY); SELECT create_distributed_table('dtt1','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE dtt2(id int PRIMARY KEY REFERENCES dtt1(id)); SELECT create_distributed_table('dtt2','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE dtt3(id int PRIMARY KEY REFERENCES dtt2(id)); SELECT create_distributed_table('dtt3','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt1'::regclass) ORDER BY 1; get_referenced_relation_id_list ---------------------------------- +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM 
get_referenced_relation_id_list('dtt2'::regclass) ORDER BY 1; get_referenced_relation_id_list ---------------------------------- +--------------------------------------------------------------------- dtt1 (1 row) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt3'::regclass) ORDER BY 1; get_referenced_relation_id_list ---------------------------------- +--------------------------------------------------------------------- dtt1 dtt2 (2 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt1'::regclass) ORDER BY 1; get_referencing_relation_id_list ----------------------------------- +--------------------------------------------------------------------- dtt2 dtt3 (2 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt2'::regclass) ORDER BY 1; get_referencing_relation_id_list ----------------------------------- +--------------------------------------------------------------------- dtt3 (1 row) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt3'::regclass) ORDER BY 1; get_referencing_relation_id_list ----------------------------------- +--------------------------------------------------------------------- (0 rows) CREATE TABLE dtt4(id int PRIMARY KEY); SELECT create_distributed_table('dtt4', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt4'::regclass) ORDER BY 1; get_referenced_relation_id_list ---------------------------------- +--------------------------------------------------------------------- (0 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt4'::regclass) ORDER BY 1; get_referencing_relation_id_list ----------------------------------- +--------------------------------------------------------------------- (0 rows) ALTER TABLE dtt4 ADD CONSTRAINT dtt4_fkey FOREIGN KEY (id) REFERENCES dtt3(id); SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt4'::regclass) ORDER BY 1; get_referenced_relation_id_list ---------------------------------- +--------------------------------------------------------------------- dtt1 dtt2 dtt3 @@ -96,30 +96,30 @@ SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt4'::regclass) ORDER BY 1; get_referencing_relation_id_list ----------------------------------- +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt1'::regclass) ORDER BY 1; get_referenced_relation_id_list ---------------------------------- +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt2'::regclass) ORDER BY 1; get_referenced_relation_id_list ---------------------------------- +--------------------------------------------------------------------- dtt1 (1 row) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt3'::regclass) ORDER BY 1; get_referenced_relation_id_list ---------------------------------- +--------------------------------------------------------------------- dtt1 
dtt2 (2 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt1'::regclass) ORDER BY 1; get_referencing_relation_id_list ----------------------------------- +--------------------------------------------------------------------- dtt2 dtt3 dtt4 @@ -127,38 +127,38 @@ SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_ SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt2'::regclass) ORDER BY 1; get_referencing_relation_id_list ----------------------------------- +--------------------------------------------------------------------- dtt3 dtt4 (2 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt3'::regclass) ORDER BY 1; get_referencing_relation_id_list ----------------------------------- +--------------------------------------------------------------------- dtt4 (1 row) ALTER TABLE dtt4 DROP CONSTRAINT dtt4_fkey; SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt3'::regclass) ORDER BY 1; get_referenced_relation_id_list ---------------------------------- +--------------------------------------------------------------------- dtt1 dtt2 (2 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt3'::regclass) ORDER BY 1; get_referencing_relation_id_list ----------------------------------- +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt4'::regclass) ORDER BY 1; get_referenced_relation_id_list ---------------------------------- +--------------------------------------------------------------------- (0 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt4'::regclass) ORDER BY 1; get_referencing_relation_id_list ----------------------------------- +--------------------------------------------------------------------- (0 rows) -- some tests within transction blocks to make sure that @@ -170,31 +170,31 @@ CREATE TABLE test_4 (id int UNIQUE); CREATE TABLE test_5 (id int UNIQUE); SELECT create_distributed_Table('test_1', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_Table('test_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_Table('test_3', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_Table('test_4', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_Table('test_5', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -219,7 +219,7 @@ BEGIN; ALTER TABLE test_2 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_1(id); SELECT * FROM referential_integrity_summary; n | table_name | referencing_relations | referenced_relations ----+------------+-----------------------+---------------------- +--------------------------------------------------------------------- 1 | test_1 | {test_2} | 2 | test_2 | | {test_1} 3 | test_3 | | @@ -230,7 +230,7 @@ BEGIN; 
ALTER TABLE test_3 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_2(id); SELECT * FROM referential_integrity_summary; n | table_name | referencing_relations | referenced_relations ----+------------+-----------------------+---------------------- +--------------------------------------------------------------------- 1 | test_1 | {test_2,test_3} | 2 | test_2 | {test_3} | {test_1} 3 | test_3 | | {test_2,test_1} @@ -241,7 +241,7 @@ BEGIN; ALTER TABLE test_4 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_3(id); SELECT * FROM referential_integrity_summary; n | table_name | referencing_relations | referenced_relations ----+------------+------------------------+------------------------ +--------------------------------------------------------------------- 1 | test_1 | {test_2,test_3,test_4} | 2 | test_2 | {test_3,test_4} | {test_1} 3 | test_3 | {test_4} | {test_2,test_1} @@ -252,7 +252,7 @@ BEGIN; ALTER TABLE test_5 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_4(id); SELECT * FROM referential_integrity_summary; n | table_name | referencing_relations | referenced_relations ----+------------+-------------------------------+------------------------------- +--------------------------------------------------------------------- 1 | test_1 | {test_2,test_3,test_4,test_5} | 2 | test_2 | {test_3,test_4,test_5} | {test_1} 3 | test_3 | {test_4,test_5} | {test_2,test_1} @@ -266,7 +266,7 @@ BEGIN; ALTER TABLE test_2 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_1(id); SELECT * FROM referential_integrity_summary; n | table_name | referencing_relations | referenced_relations ----+------------+-----------------------+---------------------- +--------------------------------------------------------------------- 1 | test_1 | {test_2} | 2 | test_2 | | {test_1} 3 | test_3 | | @@ -277,7 +277,7 @@ BEGIN; ALTER TABLE test_4 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_3(id); SELECT * FROM referential_integrity_summary; n | table_name | referencing_relations | referenced_relations ----+------------+-----------------------+---------------------- +--------------------------------------------------------------------- 1 | test_1 | {test_2} | 2 | test_2 | | {test_1} 3 | test_3 | {test_4} | @@ -288,7 +288,7 @@ BEGIN; ALTER TABLE test_5 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_4(id); SELECT * FROM referential_integrity_summary; n | table_name | referencing_relations | referenced_relations ----+------------+-----------------------+---------------------- +--------------------------------------------------------------------- 1 | test_1 | {test_2} | 2 | test_2 | | {test_1} 3 | test_3 | {test_4,test_5} | @@ -299,7 +299,7 @@ BEGIN; ALTER TABLE test_3 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_2(id); SELECT * FROM referential_integrity_summary; n | table_name | referencing_relations | referenced_relations ----+------------+-------------------------------+------------------------------- +--------------------------------------------------------------------- 1 | test_1 | {test_2,test_3,test_4,test_5} | 2 | test_2 | {test_3,test_4,test_5} | {test_1} 3 | test_3 | {test_4,test_5} | {test_2,test_1} @@ -316,7 +316,7 @@ BEGIN; ALTER TABLE test_5 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_4(id); SELECT * FROM referential_integrity_summary; n | table_name | referencing_relations | referenced_relations ----+------------+-------------------------------+------------------------------- +--------------------------------------------------------------------- 1 | test_1 | 
{test_2,test_3,test_4,test_5} | 2 | test_2 | {test_3,test_4,test_5} | {test_1} 3 | test_3 | {test_4,test_5} | {test_2,test_1} @@ -327,7 +327,7 @@ BEGIN; ALTER TABLE test_3 DROP CONSTRAINT fkey_1; SELECT * FROM referential_integrity_summary; n | table_name | referencing_relations | referenced_relations ----+------------+-----------------------+---------------------- +--------------------------------------------------------------------- 1 | test_1 | {test_2} | 2 | test_2 | | {test_1} 3 | test_3 | {test_4,test_5} | @@ -342,20 +342,20 @@ BEGIN; CREATE TABLE test_1 (id int UNIQUE); SELECT create_distributed_Table('test_1', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE test_2 (id int UNIQUE, FOREIGN KEY(id) REFERENCES test_1(id)); SELECT create_distributed_Table('test_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM referential_integrity_summary; n | table_name | referencing_relations | referenced_relations ----+------------+-----------------------+---------------------- +--------------------------------------------------------------------- 1 | test_1 | {test_2} | 2 | test_2 | | {test_1} (2 rows) @@ -363,13 +363,13 @@ BEGIN; CREATE TABLE test_3 (id int UNIQUE, FOREIGN KEY(id) REFERENCES test_2(id)); SELECT create_distributed_Table('test_3', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM referential_integrity_summary; n | table_name | referencing_relations | referenced_relations ----+------------+-----------------------+---------------------- +--------------------------------------------------------------------- 1 | test_1 | {test_2,test_3} | 2 | test_2 | {test_3} | {test_1} 3 | test_3 | | {test_2,test_1} @@ -378,13 +378,13 @@ BEGIN; CREATE TABLE test_4 (id int UNIQUE, FOREIGN KEY(id) REFERENCES test_3(id)); SELECT create_distributed_Table('test_4', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM referential_integrity_summary; n | table_name | referencing_relations | referenced_relations ----+------------+------------------------+------------------------ +--------------------------------------------------------------------- 1 | test_1 | {test_2,test_3,test_4} | 2 | test_2 | {test_3,test_4} | {test_1} 3 | test_3 | {test_4} | {test_2,test_1} @@ -394,13 +394,13 @@ BEGIN; CREATE TABLE test_5 (id int UNIQUE, FOREIGN KEY(id) REFERENCES test_4(id)); SELECT create_distributed_Table('test_5', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM referential_integrity_summary; n | table_name | referencing_relations | referenced_relations ----+------------+-------------------------------+------------------------------- +--------------------------------------------------------------------- 1 | test_1 | {test_2,test_3,test_4,test_5} | 2 | test_2 | {test_3,test_4,test_5} | {test_1} 3 | test_3 | {test_4,test_5} | {test_2,test_1} @@ -418,7 +418,7 @@ BEGIN; ALTER TABLE test_5 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_4(id); SELECT * FROM referential_integrity_summary; n | table_name | referencing_relations | referenced_relations 
----+------------+-------------------------------+------------------------------- +--------------------------------------------------------------------- 1 | test_1 | {test_2,test_3,test_4,test_5} | 2 | test_2 | {test_3,test_4,test_5} | {test_1} 3 | test_3 | {test_4,test_5} | {test_2,test_1} @@ -432,7 +432,7 @@ DETAIL: drop cascades to constraint test_4_id_fkey on table test_4 drop cascades to constraint fkey_1 on table test_4 SELECT * FROM referential_integrity_summary; n | table_name | referencing_relations | referenced_relations ----+------------+-----------------------+---------------------- +--------------------------------------------------------------------- 1 | test_1 | {test_2} | 2 | test_2 | | {test_1} (2 rows) @@ -446,56 +446,56 @@ BEGIN; CREATE TABLE fkey_intermediate_schema_1.test_6(id int PRIMARY KEY); SELECT create_distributed_table('fkey_intermediate_schema_1.test_6', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE fkey_intermediate_schema_2.test_7(id int PRIMARY KEY REFERENCES fkey_intermediate_schema_1.test_6(id)); SELECT create_distributed_table('fkey_intermediate_schema_2.test_7','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE fkey_intermediate_schema_1.test_8(id int PRIMARY KEY REFERENCES fkey_intermediate_schema_2.test_7(id)); SELECT create_distributed_table('fkey_intermediate_schema_1.test_8', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_6'::regclass) ORDER BY 1; get_referencing_relation_id_list ----------------------------------- +--------------------------------------------------------------------- test_7 test_8 (2 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_7'::regclass) ORDER BY 1; get_referencing_relation_id_list ----------------------------------- +--------------------------------------------------------------------- test_8 (1 row) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_8'::regclass) ORDER BY 1; get_referencing_relation_id_list ----------------------------------- +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_6'::regclass) ORDER BY 1; get_referenced_relation_id_list ---------------------------------- +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_7'::regclass) ORDER BY 1; get_referenced_relation_id_list ---------------------------------- +--------------------------------------------------------------------- test_6 (1 row) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_8'::regclass) ORDER BY 1; get_referenced_relation_id_list ---------------------------------- +--------------------------------------------------------------------- test_6 test_7 (2 rows) @@ -506,22 +506,22 @@ DETAIL: drop cascades to table test_7 drop cascades to constraint test_8_id_fkey on table test_8 SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_6'::regclass) 
ORDER BY 1; get_referencing_relation_id_list ----------------------------------- +--------------------------------------------------------------------- (0 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_8'::regclass) ORDER BY 1; get_referencing_relation_id_list ----------------------------------- +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_6'::regclass) ORDER BY 1; get_referenced_relation_id_list ---------------------------------- +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_8'::regclass) ORDER BY 1; get_referenced_relation_id_list ---------------------------------- +--------------------------------------------------------------------- (0 rows) ROLLBACK; @@ -532,56 +532,56 @@ BEGIN; CREATE TABLE fkey_intermediate_schema_1.test_6(id int PRIMARY KEY); SELECT create_distributed_table('fkey_intermediate_schema_1.test_6', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE fkey_intermediate_schema_2.test_7(id int PRIMARY KEY REFERENCES fkey_intermediate_schema_1.test_6(id)); SELECT create_distributed_table('fkey_intermediate_schema_2.test_7','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE fkey_intermediate_schema_1.test_8(id int PRIMARY KEY REFERENCES fkey_intermediate_schema_2.test_7(id)); SELECT create_distributed_table('fkey_intermediate_schema_1.test_8', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_6'::regclass) ORDER BY 1; get_referencing_relation_id_list ----------------------------------- +--------------------------------------------------------------------- test_7 test_8 (2 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_7'::regclass) ORDER BY 1; get_referencing_relation_id_list ----------------------------------- +--------------------------------------------------------------------- test_8 (1 row) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_8'::regclass) ORDER BY 1; get_referencing_relation_id_list ----------------------------------- +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_6'::regclass) ORDER BY 1; get_referenced_relation_id_list ---------------------------------- +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_7'::regclass) ORDER BY 1; get_referenced_relation_id_list ---------------------------------- +--------------------------------------------------------------------- test_6 (1 row) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_8'::regclass) ORDER BY 1; get_referenced_relation_id_list ---------------------------------- +--------------------------------------------------------------------- test_6 
test_7 (2 rows) @@ -593,12 +593,12 @@ drop cascades to constraint test_7_id_fkey on table test_7 drop cascades to table test_8 SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_7'::regclass) ORDER BY 1; get_referencing_relation_id_list ----------------------------------- +--------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_7'::regclass) ORDER BY 1; get_referenced_relation_id_list ---------------------------------- +--------------------------------------------------------------------- (0 rows) ROLLBACK; diff --git a/src/test/regress/expected/multi_function_evaluation.out b/src/test/regress/expected/multi_function_evaluation.out index 52bedb6fc..784d76bf8 100644 --- a/src/test/regress/expected/multi_function_evaluation.out +++ b/src/test/regress/expected/multi_function_evaluation.out @@ -10,21 +10,21 @@ SET citus.enable_fast_path_router_planner TO false; CREATE TABLE example (key INT, value INT); SELECT master_create_distributed_table('example', 'key', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) CREATE SEQUENCE example_value_seq; SELECT master_create_worker_shards('example', 1, 2); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) INSERT INTO example VALUES (1, nextval('example_value_seq')); SELECT * FROM example; key | value ------+------- +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -34,7 +34,7 @@ EXECUTE stmt; EXECUTE stmt; SELECT * FROM example; key | value ------+------- +--------------------------------------------------------------------- 1 | 1 2 | 2 | @@ -107,14 +107,14 @@ INSERT INTO example VALUES (3, now()); UPDATE example SET value = timestamp '10-10-2000 00:00' WHERE key = 3 AND value > now() - interval '1 hour'; SELECT * FROM example WHERE key = 3; key | value ------+------------------------------ +--------------------------------------------------------------------- 3 | Tue Oct 10 00:00:00 2000 PDT (1 row) DELETE FROM example WHERE key = 3 AND value < now() - interval '1 hour'; SELECT * FROM example WHERE key = 3; key | value ------+------- +--------------------------------------------------------------------- (0 rows) -- test that function evaluation descends into expressions @@ -134,7 +134,7 @@ NOTICE: stable_fn called CONTEXT: PL/pgSQL function stable_fn() line 3 at RAISE SELECT * FROM example WHERE key = 44; key | value ------+------------------------------ +--------------------------------------------------------------------- 44 | Tue Oct 10 00:00:00 2000 PDT (1 row) diff --git a/src/test/regress/expected/multi_function_in_join.out b/src/test/regress/expected/multi_function_in_join.out index 886fe70c5..b6a4d1ee1 100644 --- a/src/test/regress/expected/multi_function_in_join.out +++ b/src/test/regress/expected/multi_function_in_join.out @@ -15,7 +15,7 @@ SET citus.next_shard_id TO 2500000; CREATE TABLE table1 (id int, data int); SELECT create_distributed_table('table1','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -30,7 +30,7 @@ SELECT * FROM table1 JOIN nextval('numbers') n ON (id = n) ORDER BY id ASC; DEBUG: generating subplan 2_1 for subquery SELECT n FROM 
nextval('functions_in_joins.numbers'::regclass) n(n) DEBUG: Plan 2 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, n.n FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.n FROM read_intermediate_result('2_1'::text, 'binary'::citus_copy_format) intermediate_result(n bigint)) n ON ((table1.id OPERATOR(pg_catalog.=) n.n))) ORDER BY table1.id id | data | n -----+------+--- +--------------------------------------------------------------------- 1 | 1 | 1 (1 row) @@ -42,7 +42,7 @@ SELECT * FROM table1 JOIN add(3,5) sum ON (id = sum) ORDER BY id ASC; DEBUG: generating subplan 3_1 for subquery SELECT sum FROM functions_in_joins.add(3, 5) sum(sum) DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, sum.sum FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.sum FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(sum integer)) sum ON ((table1.id OPERATOR(pg_catalog.=) sum.sum))) ORDER BY table1.id id | data | sum -----+------+----- +--------------------------------------------------------------------- 8 | 64 | 8 (1 row) @@ -57,7 +57,7 @@ SELECT * FROM table1 JOIN increment(2) val ON (id = val) ORDER BY id ASC; DEBUG: generating subplan 4_1 for subquery SELECT val FROM functions_in_joins.increment(2) val(val) DEBUG: Plan 4 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, val.val FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.val FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(val integer)) val ON ((table1.id OPERATOR(pg_catalog.=) val.val))) ORDER BY table1.id id | data | val -----+------+----- +--------------------------------------------------------------------- 3 | 9 | 3 (1 row) @@ -76,7 +76,7 @@ ORDER BY id ASC; DEBUG: generating subplan 5_1 for subquery SELECT result FROM functions_in_joins.next_k_integers(3, 2) next_integers(result) DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, next_integers.result FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.result FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(result integer)) next_integers ON ((table1.id OPERATOR(pg_catalog.=) next_integers.result))) ORDER BY table1.id id | data | result -----+------+-------- +--------------------------------------------------------------------- 3 | 9 | 3 4 | 16 | 4 (2 rows) @@ -90,7 +90,7 @@ SELECT * FROM table1 JOIN get_set_of_records() AS t2(x int, y int) ON (id = x) O DEBUG: generating subplan 6_1 for subquery SELECT x, y FROM functions_in_joins.get_set_of_records() t2(x integer, y integer) DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, t2.x, t2.y FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) t2 ON ((table1.id OPERATOR(pg_catalog.=) t2.x))) ORDER BY table1.id id | data | x | y -----+------+---+--- +--------------------------------------------------------------------- 1 | 1 | 1 | 2 2 | 4 | 2 | 3 3 | 9 | 3 | 4 @@ -105,7 +105,7 @@ SELECT f.* FROM table1 t JOIN dup(32) f ON (f1 = id); DEBUG: generating subplan 7_1 for subquery SELECT f1, f2 FROM functions_in_joins.dup(32) f(f1, f2) DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT f.f1, f.f2 FROM (functions_in_joins.table1 t JOIN (SELECT 
intermediate_result.f1, intermediate_result.f2 FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(f1 integer, f2 text)) f ON ((f.f1 OPERATOR(pg_catalog.=) t.id))) f1 | f2 -----+------------ +--------------------------------------------------------------------- 32 | 32 is text (1 row) @@ -116,14 +116,14 @@ SELECT * FROM table1 JOIN the_minimum_id() min_id ON (id = min_id); DEBUG: generating subplan 8_1 for subquery SELECT min_id FROM functions_in_joins.the_minimum_id() min_id(min_id) DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, min_id.min_id FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.min_id FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(min_id integer)) min_id ON ((table1.id OPERATOR(pg_catalog.=) min_id.min_id))) id | data | min_id -----+------+-------- +--------------------------------------------------------------------- 1 | 1 | 1 (1 row) -- a built-in immutable function SELECT * FROM table1 JOIN abs(100) as hundred ON (id = hundred) ORDER BY id ASC; id | data | hundred ------+-------+--------- +--------------------------------------------------------------------- 100 | 10000 | 100 (1 row) @@ -140,7 +140,7 @@ DEBUG: generating subplan 12_1 for subquery SELECT n FROM nextval('functions_in DEBUG: Plan 12 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, n.n FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.n FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(n bigint)) n ON ((table1.id OPERATOR(pg_catalog.=) n.n))) DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, next_row_to_process.id, next_row_to_process.data, next_row_to_process.n FROM functions_in_joins.table1, (SELECT intermediate_result.id, intermediate_result.data, intermediate_result.n FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, data integer, n bigint)) next_row_to_process WHERE (table1.data OPERATOR(pg_catalog.<=) next_row_to_process.data) ORDER BY table1.id, table1.data id | data | id | data | n -----+------+----+------+--- +--------------------------------------------------------------------- 1 | 1 | 2 | 4 | 2 2 | 4 | 2 | 4 | 2 (2 rows) @@ -151,7 +151,7 @@ SELECT * FROM ROWS FROM (next_k_integers(5), next_k_integers(10)) AS f(a, b), DEBUG: generating subplan 13_1 for subquery SELECT a, b FROM ROWS FROM(functions_in_joins.next_k_integers(5), functions_in_joins.next_k_integers(10)) f(a, b) DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT f.a, f.b, table1.id, table1.data FROM (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('13_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) f(a, b), functions_in_joins.table1 WHERE (table1.id OPERATOR(pg_catalog.=) f.a) ORDER BY table1.id a | b | id | data ----+----+----+------ +--------------------------------------------------------------------- 5 | 10 | 5 | 25 6 | 11 | 6 | 36 7 | 12 | 7 | 49 @@ -177,7 +177,7 @@ SELECT * FROM table1 JOIN max_and_min() m ON (m.maximum = data OR m.minimum = da DEBUG: generating subplan 14_1 for subquery SELECT minimum, maximum FROM functions_in_joins.max_and_min() m(minimum, maximum) DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, m.minimum, m.maximum FROM (functions_in_joins.table1 JOIN (SELECT 
intermediate_result.minimum, intermediate_result.maximum FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(minimum integer, maximum integer)) m ON (((m.maximum OPERATOR(pg_catalog.=) table1.data) OR (m.minimum OPERATOR(pg_catalog.=) table1.data)))) ORDER BY table1.id, table1.data, m.minimum, m.maximum id | data | minimum | maximum ------+-------+---------+--------- +--------------------------------------------------------------------- 1 | 1 | 1 | 10000 100 | 10000 | 1 | 10000 (2 rows) diff --git a/src/test/regress/expected/multi_generate_ddl_commands.out b/src/test/regress/expected/multi_generate_ddl_commands.out index aa949d167..44270f63e 100644 --- a/src/test/regress/expected/multi_generate_ddl_commands.out +++ b/src/test/regress/expected/multi_generate_ddl_commands.out @@ -10,7 +10,7 @@ CREATE TABLE simple_table ( ); SELECT master_get_table_ddl_events('simple_table'); master_get_table_ddl_events -------------------------------------------------------------------------------- +--------------------------------------------------------------------- CREATE TABLE public.simple_table (first_name text, last_name text, id bigint) ALTER TABLE public.simple_table OWNER TO postgres (2 rows) @@ -22,7 +22,7 @@ CREATE TABLE not_null_table ( ); SELECT master_get_table_ddl_events('not_null_table'); master_get_table_ddl_events --------------------------------------------------------------------- +--------------------------------------------------------------------- CREATE TABLE public.not_null_table (city text, id bigint NOT NULL) ALTER TABLE public.not_null_table OWNER TO postgres (2 rows) @@ -35,7 +35,7 @@ CREATE TABLE column_constraint_table ( ); SELECT master_get_table_ddl_events('column_constraint_table'); master_get_table_ddl_events ------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- CREATE TABLE public.column_constraint_table (first_name text, last_name text, age integer, CONSTRAINT non_negative_age CHECK (age >= 0)) ALTER TABLE public.column_constraint_table OWNER TO postgres (2 rows) @@ -49,7 +49,7 @@ CREATE TABLE table_constraint_table ( ); SELECT master_get_table_ddl_events('table_constraint_table'); master_get_table_ddl_events ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- CREATE TABLE public.table_constraint_table (bid_item_id bigint, min_bid numeric NOT NULL, max_bid numeric NOT NULL, CONSTRAINT bids_ordered CHECK (min_bid > max_bid)) ALTER TABLE public.table_constraint_table OWNER TO postgres (2 rows) @@ -61,7 +61,7 @@ CREATE TABLE default_value_table ( ); SELECT master_get_table_ddl_events('default_value_table'); master_get_table_ddl_events ---------------------------------------------------------------------------------- +--------------------------------------------------------------------- CREATE TABLE public.default_value_table (name text, price numeric DEFAULT 0.00) ALTER TABLE public.default_value_table OWNER TO postgres (2 rows) @@ -74,7 +74,7 @@ CREATE TABLE pkey_table ( ); SELECT master_get_table_ddl_events('pkey_table'); master_get_table_ddl_events --------------------------------------------------------------------------------------- 
+--------------------------------------------------------------------- CREATE TABLE public.pkey_table (first_name text, last_name text, id bigint NOT NULL) ALTER TABLE public.pkey_table OWNER TO postgres ALTER TABLE public.pkey_table ADD CONSTRAINT pkey_table_pkey PRIMARY KEY (id) @@ -87,7 +87,7 @@ CREATE TABLE unique_table ( ); SELECT master_get_table_ddl_events('unique_table'); master_get_table_ddl_events --------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- CREATE TABLE public.unique_table (user_id bigint NOT NULL, username text NOT NULL) ALTER TABLE public.unique_table OWNER TO postgres ALTER TABLE public.unique_table ADD CONSTRAINT unique_table_username_key UNIQUE (username) @@ -102,7 +102,7 @@ CREATE INDEX clustered_time_idx ON clustered_table (received_at); CLUSTER clustered_table USING clustered_time_idx; SELECT master_get_table_ddl_events('clustered_table'); master_get_table_ddl_events ------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- CREATE TABLE public.clustered_table (data json NOT NULL, received_at timestamp without time zone NOT NULL) ALTER TABLE public.clustered_table OWNER TO postgres CREATE INDEX clustered_time_idx ON public.clustered_table USING btree (received_at) TABLESPACE pg_default @@ -124,7 +124,7 @@ ALTER TABLE fiddly_table ALTER ip_addr SET STATISTICS 500; SELECT master_get_table_ddl_events('fiddly_table'); master_get_table_ddl_events --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- CREATE TABLE public.fiddly_table (hostname character(255) NOT NULL, os character(255) NOT NULL, ip_addr inet NOT NULL, traceroute text NOT NULL) ALTER TABLE ONLY public.fiddly_table ALTER COLUMN hostname SET STORAGE PLAIN, ALTER COLUMN os SET STORAGE MAIN, ALTER COLUMN ip_addr SET STORAGE EXTENDED, ALTER COLUMN ip_addr SET STATISTICS 500, ALTER COLUMN traceroute SET STORAGE EXTERNAL ALTER TABLE public.fiddly_table OWNER TO postgres @@ -138,7 +138,7 @@ CREATE FOREIGN TABLE foreign_table ( SELECT create_distributed_table('foreign_table', 'id'); NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -151,7 +151,7 @@ from information_schema.columns where table_schema='public' and table_name like 'renamed_foreign_table_%' and column_name <> 'id' order by table_name; table_name | column_name | data_type -------------------------------+-------------+----------- +--------------------------------------------------------------------- renamed_foreign_table_610000 | rename_name | character renamed_foreign_table_610001 | rename_name | character renamed_foreign_table_610002 | rename_name | character @@ -162,7 +162,7 @@ order by table_name; SELECT master_get_table_ddl_events('renamed_foreign_table'); NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined master_get_table_ddl_events 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- CREATE SERVER fake_fdw_server FOREIGN DATA WRAPPER fake_fdw CREATE FOREIGN TABLE public.renamed_foreign_table (id bigint NOT NULL, rename_name character(8) DEFAULT ''::text NOT NULL) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true') ALTER TABLE public.renamed_foreign_table OWNER TO postgres @@ -181,7 +181,7 @@ from information_schema.columns where table_schema='public' and table_name like 'renamed_foreign_table_%' and column_name <> 'id' order by table_name; table_name | column_name | data_type -------------+-------------+----------- +--------------------------------------------------------------------- (0 rows) \c - - - :master_port diff --git a/src/test/regress/expected/multi_hash_pruning.out b/src/test/regress/expected/multi_hash_pruning.out index 759fe66db..01331abfb 100644 --- a/src/test/regress/expected/multi_hash_pruning.out +++ b/src/test/regress/expected/multi_hash_pruning.out @@ -25,7 +25,7 @@ CREATE TABLE orders_hash_partitioned ( o_comment varchar(79) ); SELECT create_distributed_table('orders_hash_partitioned', 'o_orderkey'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -35,7 +35,7 @@ SET client_min_messages TO DEBUG2; SELECT count(*) FROM orders_hash_partitioned; DEBUG: Router planner cannot handle multi-shard select queries count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -44,7 +44,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -53,7 +53,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -62,7 +62,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 3 count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -71,7 +71,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -81,7 +81,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -90,7 +90,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -99,35 +99,35 @@ SET citus.enable_router_execution TO 'false'; SELECT count(*) FROM orders_hash_partitioned; DEBUG: Router planner not enabled. count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1; DEBUG: Router planner not enabled. count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 2; DEBUG: Router planner not enabled. 
count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 3; DEBUG: Router planner not enabled. count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 4; DEBUG: Router planner not enabled. count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -135,14 +135,14 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 AND o_clerk = 'aaa'; DEBUG: Router planner not enabled. count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = abs(-1); DEBUG: Router planner not enabled. count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -150,21 +150,21 @@ SET citus.enable_router_execution TO DEFAULT; SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey is NULL; DEBUG: Router planner cannot handle multi-shard select queries count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey is not NULL; DEBUG: Router planner cannot handle multi-shard select queries count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey > 2; DEBUG: Router planner cannot handle multi-shard select queries count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -172,7 +172,7 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_orderkey = 2; DEBUG: Router planner cannot handle multi-shard select queries count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -180,7 +180,7 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_clerk = 'aaa'; DEBUG: Router planner cannot handle multi-shard select queries count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -188,7 +188,7 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR (o_orderkey = 3 AND o_clerk = 'aaa'); DEBUG: Router planner cannot handle multi-shard select queries count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -196,7 +196,7 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_orderkey is NULL; DEBUG: Router planner cannot handle multi-shard select queries count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -206,7 +206,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -215,14 +215,14 @@ SET client_min_messages TO DEFAULT; SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ANY ('{1,2,3}'); count -------- +--------------------------------------------------------------------- 13 (1 row) SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey IN (1,2,3); count -------- +--------------------------------------------------------------------- 13 (1 row) @@ -230,41 +230,41 @@ SELECT count(*) FROM lineitem_hash_part SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey IN (NULL); count -------- 
+--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ANY (NULL); count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey IN (NULL) OR TRUE; count -------- +--------------------------------------------------------------------- 12000 (1 row) SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ANY (NULL) OR TRUE; count -------- +--------------------------------------------------------------------- 12000 (1 row) -- Check whether we support IN/ANY in subquery SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey IN (SELECT l_orderkey FROM lineitem_hash_part); count -------- +--------------------------------------------------------------------- 12000 (1 row) SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ANY (SELECT l_orderkey FROM lineitem_hash_part); count -------- +--------------------------------------------------------------------- 12000 (1 row) @@ -272,42 +272,42 @@ SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ANY (SELECT l_orderke SELECT count(*) FROM lineitem WHERE l_orderkey = ANY ('{1,2,3}'); count -------- +--------------------------------------------------------------------- 13 (1 row) SELECT count(*) FROM lineitem WHERE l_orderkey IN (1,2,3); count -------- +--------------------------------------------------------------------- 13 (1 row) SELECT count(*) FROM lineitem WHERE l_orderkey = ANY(NULL) OR TRUE; count -------- +--------------------------------------------------------------------- 12000 (1 row) SELECT count(*) FROM lineitem_range WHERE l_orderkey = ANY ('{1,2,3}'); count -------- +--------------------------------------------------------------------- 13 (1 row) SELECT count(*) FROM lineitem_range WHERE l_orderkey IN (1,2,3); count -------- +--------------------------------------------------------------------- 13 (1 row) SELECT count(*) FROM lineitem_range WHERE l_orderkey = ANY(NULL) OR TRUE; count -------- +--------------------------------------------------------------------- 12000 (1 row) @@ -318,7 +318,7 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey < ALL ('{1,2,3}'); DEBUG: Router planner cannot handle multi-shard select queries count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -328,7 +328,7 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_totalprice IN (2, 5); DEBUG: Router planner cannot handle multi-shard select queries count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -336,7 +336,7 @@ DEBUG: Router planner cannot handle multi-shard select queries SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = random(); DEBUG: Router planner cannot handle multi-shard select queries count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -344,7 +344,7 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = random() OR o_orderkey = 1; DEBUG: Router planner cannot handle multi-shard select queries count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -354,7 +354,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -376,7 +376,7 @@ DEBUG: join prunable for intervals 
[1073741824,2147483647] and [-2147483648,-10 DEBUG: join prunable for intervals [1073741824,2147483647] and [-1073741824,-1] DEBUG: join prunable for intervals [1073741824,2147483647] and [0,1073741823] count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -389,7 +389,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 count -------- +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/multi_having_pushdown.out b/src/test/regress/expected/multi_having_pushdown.out index 2a45969ee..25944b69a 100644 --- a/src/test/regress/expected/multi_having_pushdown.out +++ b/src/test/regress/expected/multi_having_pushdown.out @@ -5,14 +5,14 @@ SET citus.next_shard_id TO 590000; CREATE TABLE lineitem_hash (LIKE lineitem); SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE orders_hash (LIKE orders); SELECT create_distributed_table('orders_hash', 'o_orderkey', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -23,7 +23,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_orderkey HAVING sum(l_quantity) > 24 ORDER BY 2 DESC, 1 ASC LIMIT 3; QUERY PLAN --------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.revenue DESC, remote_scan.l_orderkey @@ -48,7 +48,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_orderkey HAVING sum(l_quantity) > 24 ORDER BY 2 DESC, 1 ASC LIMIT 3; QUERY PLAN ------------------------------------------------------------------------------ +--------------------------------------------------------------------- Limit -> Sort Sort Key: (sum(remote_scan.revenue)) DESC, remote_scan.l_orderkey @@ -72,7 +72,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_shipmode HAVING sum(l_quantity) > 24 ORDER BY 2 DESC, 1 ASC LIMIT 3; QUERY PLAN ------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: (sum(remote_scan.revenue)) DESC, remote_scan.l_shipmode @@ -96,7 +96,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_shipmode, l_orderkey HAVING sum(l_quantity) > 24 ORDER BY 3 DESC, 1, 2 LIMIT 3; QUERY PLAN --------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.revenue DESC, remote_scan.l_shipmode, remote_scan.l_orderkey @@ -122,7 +122,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_orderkey, o_orderkey, l_shipmode HAVING sum(l_quantity) > 24 ORDER BY 1 DESC LIMIT 3; QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.revenue DESC @@ -151,7 +151,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_shipmode, o_clerk HAVING sum(l_quantity) > 24 ORDER BY 1 DESC LIMIT 3; QUERY PLAN ------------------------------------------------------------------------------------------------------- 
+--------------------------------------------------------------------- Limit -> Sort Sort Key: (sum(remote_scan.revenue)) DESC @@ -180,7 +180,7 @@ GROUP BY user_id HAVING max(value_2) > 4 AND min(value_2) < 1 ORDER BY 1; max ------ +--------------------------------------------------------------------- 4 5 5 @@ -192,7 +192,7 @@ GROUP BY user_id HAVING max(value_2) > 4 AND min(value_2) < 1 OR count(*) > 10 ORDER BY 1; max ------ +--------------------------------------------------------------------- 4 5 5 @@ -205,7 +205,7 @@ GROUP BY user_id HAVING max(value_2) > 4 AND min(value_2) < 1 AND count(*) > 20 ORDER BY 1; max ------ +--------------------------------------------------------------------- 5 5 (2 rows) @@ -215,7 +215,7 @@ FROM users_table GROUP BY user_id HAVING max(value_2) > 0 AND count(*) FILTER (WHERE value_3=2) > 3 AND min(value_2) IN (0,1,2,3); max ------ +--------------------------------------------------------------------- 5 (1 row) diff --git a/src/test/regress/expected/multi_index_statements.out b/src/test/regress/expected/multi_index_statements.out index 1c4e5b7d2..8a38f9e1f 100644 --- a/src/test/regress/expected/multi_index_statements.out +++ b/src/test/regress/expected/multi_index_statements.out @@ -10,19 +10,19 @@ SET citus.next_shard_id TO 102080; CREATE TABLE index_test_range(a int, b int, c int); SELECT create_distributed_table('index_test_range', 'a', 'range'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_empty_shard('index_test_range'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 102080 (1 row) SELECT master_create_empty_shard('index_test_range'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 102081 (1 row) @@ -31,26 +31,26 @@ SET citus.shard_replication_factor TO 2; CREATE TABLE index_test_hash(a int, b int, c int); SELECT create_distributed_table('index_test_hash', 'a', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE index_test_append(a int, b int, c int); SELECT create_distributed_table('index_test_append', 'a', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_empty_shard('index_test_append'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 102090 (1 row) SELECT master_create_empty_shard('index_test_append'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 102091 (1 row) @@ -100,7 +100,7 @@ DROP TABLE local_table; -- Verify that all indexes got created on the master node and one of the workers SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname; schemaname | tablename | indexname | tablespace | indexdef -------------+------------------+------------------------------------+------------+---------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- public | index_test_hash | index_test_hash_index_a | | CREATE UNIQUE INDEX index_test_hash_index_a ON 
public.index_test_hash USING btree (a) public | index_test_hash | index_test_hash_index_a_b | | CREATE UNIQUE INDEX index_test_hash_index_a_b ON public.index_test_hash USING btree (a, b) public | index_test_hash | index_test_hash_index_a_b_c | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c ON public.index_test_hash USING btree (a) INCLUDE (b, c) @@ -122,25 +122,25 @@ SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_t \c - - - :worker_1_port SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1); count -------- +--------------------------------------------------------------------- 9 (1 row) SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash%'; count -------- +--------------------------------------------------------------------- 32 (1 row) SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_range%'; count -------- +--------------------------------------------------------------------- 6 (1 row) SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append%'; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -178,7 +178,7 @@ ERROR: creating index without a name on a distributed table is currently unsupp -- Verify that none of failed indexes got created on the master node SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname; schemaname | tablename | indexname | tablespace | indexdef -------------+------------------+------------------------------------+------------+---------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- public | index_test_hash | index_test_hash_index_a | | CREATE UNIQUE INDEX index_test_hash_index_a ON public.index_test_hash USING btree (a) public | index_test_hash | index_test_hash_index_a_b | | CREATE UNIQUE INDEX index_test_hash_index_a_b ON public.index_test_hash USING btree (a, b) public | index_test_hash | index_test_hash_index_a_b_c | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c ON public.index_test_hash USING btree (a) INCLUDE (b, c) @@ -238,24 +238,24 @@ DROP INDEX CONCURRENTLY lineitem_concurrently_index; -- As there's a primary key, so exclude those from this check. 
SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%'; indrelid | indexrelid -----------+------------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname; schemaname | tablename | indexname | tablespace | indexdef -------------+-----------------+-----------------------------+------------+---------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- public | index_test_hash | index_test_hash_index_a_b_c | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c ON public.index_test_hash USING btree (a) INCLUDE (b, c) (1 row) \c - - - :worker_1_port SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%'; indrelid | indexrelid -----------+------------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname; schemaname | tablename | indexname | tablespace | indexdef -------------+------------------------+------------------------------------+------------+------------------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- public | index_test_hash_102082 | index_test_hash_index_a_b_c_102082 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102082 ON public.index_test_hash_102082 USING btree (a) INCLUDE (b, c) public | index_test_hash_102083 | index_test_hash_index_a_b_c_102083 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102083 ON public.index_test_hash_102083 USING btree (a) INCLUDE (b, c) public | index_test_hash_102084 | index_test_hash_index_a_b_c_102084 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102084 ON public.index_test_hash_102084 USING btree (a) INCLUDE (b, c) @@ -277,7 +277,7 @@ HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then r -- the failure results in an INVALID index SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; Index Valid? --------------- +--------------------------------------------------------------------- f (1 row) @@ -286,7 +286,7 @@ DROP INDEX CONCURRENTLY IF EXISTS ith_b_idx; CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b); SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; Index Valid? --------------- +--------------------------------------------------------------------- t (1 row) @@ -301,7 +301,7 @@ HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then r -- the failure results in an INVALID index SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; Index Valid? 
--------------- +--------------------------------------------------------------------- f (1 row) diff --git a/src/test/regress/expected/multi_insert_select.out b/src/test/regress/expected/multi_insert_select.out index 3f7fc776c..fffa21f08 100644 --- a/src/test/regress/expected/multi_insert_select.out +++ b/src/test/regress/expected/multi_insert_select.out @@ -12,21 +12,21 @@ SET citus.shard_replication_factor = 2; CREATE TABLE raw_events_first (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint, UNIQUE(user_id, value_1)); SELECT create_distributed_table('raw_events_first', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE raw_events_second (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint, UNIQUE(user_id, value_1)); SELECT create_distributed_table('raw_events_second', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE agg_events (user_id int, value_1_agg int, value_2_agg int, value_3_agg float, value_4_agg bigint, agg_time timestamp, UNIQUE(user_id, value_1_agg)); SELECT create_distributed_table('agg_events', 'user_id');; create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -34,14 +34,14 @@ SELECT create_distributed_table('agg_events', 'user_id');; CREATE TABLE reference_table (user_id int); SELECT create_reference_table('reference_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE insert_select_varchar_test (key varchar, value int); SELECT create_distributed_table('insert_select_varchar_test', 'key', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -79,7 +79,7 @@ WHERE ORDER BY user_id DESC; user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -219,7 +219,7 @@ DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id, value_1, value_3) SELECT user_id, value_1, value_3 FROM public.raw_events_first_13300003 raw_events_first WHERE ((value_3 OPERATOR(pg_catalog.=) (9000)::double precision) AND ((worker_hash(user_id) OPERATOR(pg_catalog.>=) 1073741824) AND (worker_hash(user_id) OPERATOR(pg_catalog.<=) 2147483647))) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4 DEBUG: Plan is router executable user_id | time | value_1 | value_2 | value_3 | value_4 ----------+------+---------+---------+---------+--------- +--------------------------------------------------------------------- 9 | | 90 | | 9000 | (1 row) @@ -492,7 +492,7 @@ DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS ae (use DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS ae (user_id, value_1_agg, agg_time) SELECT user_id, value_1, "time" FROM public.raw_events_first_13300003 raw_events_first WHERE ((worker_hash(user_id) OPERATOR(pg_catalog.>=) 1073741824) AND (worker_hash(user_id) OPERATOR(pg_catalog.<=) 2147483647)) ON CONFLICT(user_id, value_1_agg) DO 
UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time OPERATOR(pg_catalog.<) excluded.agg_time) RETURNING ae.user_id, ae.value_1_agg DEBUG: Plan is router executable user_id | value_1_agg ----------+------------- +--------------------------------------------------------------------- 7 | (1 row) @@ -555,7 +555,7 @@ SELECT t1.user_id AS col1, ORDER BY t1.user_id, t2.user_id; col1 | col2 -------+------ +--------------------------------------------------------------------- 1 | 1 2 | 3 | 3 @@ -591,7 +591,7 @@ FROM ORDER BY user_id, value_1_agg; user_id | value_1_agg ----------+------------- +--------------------------------------------------------------------- 1 | 1 2 | 3 | 3 @@ -636,7 +636,7 @@ DEBUG: Router planner cannot handle multi-shard select queries SELECT user_id, value_1_agg FROM agg_events ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries user_id | value_1_agg ----------+------------- +--------------------------------------------------------------------- 1 | 10 2 | 20 3 | 30 @@ -667,7 +667,7 @@ DEBUG: Plan is router executable SELECT user_id, value_1_agg FROM agg_events ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries user_id | value_1_agg ----------+------------- +--------------------------------------------------------------------- 1 | 10 2 | 20 3 | 30 @@ -1748,7 +1748,7 @@ COPY raw_events_second (user_id, value_1) FROM STDIN DELIMITER ','; INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 101; SELECT user_id FROM raw_events_first WHERE user_id = 101; user_id ---------- +--------------------------------------------------------------------- 101 (1 row) @@ -1773,7 +1773,7 @@ INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) (16, now(), 60, 600, 6000.1, 60000); SELECT count(*) FROM raw_events_second; count -------- +--------------------------------------------------------------------- 36 (1 row) @@ -1783,7 +1783,7 @@ INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) INSERT INTO raw_events_second SELECT * FROM test_view WHERE user_id = 17 GROUP BY 1,2,3,4,5,6; SELECT count(*) FROM raw_events_second; count -------- +--------------------------------------------------------------------- 38 (1 row) @@ -1802,7 +1802,7 @@ inserts AS ( ) SELECT count(*) FROM inserts; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -1891,7 +1891,7 @@ FROM (SELECT f1.key GROUP BY 1) AS foo; SELECT * FROM insert_select_varchar_test ORDER BY 1 DESC, 2 DESC; key | value ---------+------- +--------------------------------------------------------------------- test_2 | 100 test_2 | 30 test_1 | 10 @@ -1912,7 +1912,7 @@ CREATE TABLE table_with_defaults SET citus.shard_count = 2; SELECT create_distributed_table('table_with_defaults', 'store_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -2045,7 +2045,7 @@ CREATE TABLE table_with_serial ( ); SELECT create_distributed_table('table_with_serial', 'store_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -2065,19 +2065,19 @@ CREATE TABLE char_table (part_col char[], val int); create table table_with_starts_with_defaults (a int DEFAULT 5, b int, c int); SELECT create_distributed_table('text_table', 'part_col'); create_distributed_table --------------------------- 
+--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('char_table','part_col'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('table_with_starts_with_defaults', 'c'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -2160,13 +2160,13 @@ CREATE TABLE summary_table ); SELECT create_distributed_table('raw_table', 'time'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('summary_table', 'time'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -2174,7 +2174,7 @@ INSERT INTO raw_table VALUES(1, '11-11-1980'); INSERT INTO summary_table SELECT time, COUNT(*) FROM raw_table GROUP BY time; SELECT * FROM summary_table; time | count -------------+------- +--------------------------------------------------------------------- 11-11-1980 | 1 (1 row) @@ -2185,7 +2185,7 @@ INSERT INTO raw_events_first (user_id, value_1) SELECT * FROM (VALUES (1,2), (3,4), (5,6)) AS v(int,int); SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id; user_id | value_1 ----------+--------- +--------------------------------------------------------------------- 1 | 2 3 | 4 5 | 6 @@ -2205,7 +2205,7 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; DEBUG: Router planner cannot handle multi-shard select queries user_id | value_1 ----------+--------- +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -2234,7 +2234,7 @@ DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300001 AS c DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300002 AS citus_table_alias (user_id, value_1) SELECT user_id, value_1 FROM read_intermediate_result('insert_select_207_13300002'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4 DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300003 AS citus_table_alias (user_id, value_1) SELECT user_id, value_1 FROM read_intermediate_result('insert_select_207_13300003'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4 user_id | time | value_1 | value_2 | value_3 | value_4 ----------+------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | | 11 | | | 2 | | 12 | | | 3 | | 13 | | | @@ -2250,7 +2250,7 @@ INSERT INTO raw_events_first (user_id, value_1) SELECT s, s FROM generate_series(1, 5) s; SELECT user_id, value_1 FROM raw_events_first ORDER BY 1; user_id | value_1 ----------+--------- +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -2266,7 +2266,7 @@ INSERT INTO raw_events_first (user_id, value_1) SELECT s, s FROM generate_series(1, 5) s; SELECT user_id, value_1 
FROM raw_events_first WHERE user_id = 1; user_id | value_1 ----------+--------- +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -2279,7 +2279,7 @@ INSERT INTO raw_events_first (user_id, value_1) SELECT u, v FROM raw_events_first_local; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; user_id | value_1 ----------+--------- +--------------------------------------------------------------------- 1 | 2 2 | 4 3 | 6 @@ -2293,7 +2293,7 @@ INSERT INTO raw_events_first (value_1, user_id) SELECT u, v FROM raw_events_first_local; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; user_id | value_1 ----------+--------- +--------------------------------------------------------------------- 2 | 1 4 | 2 6 | 3 @@ -2309,7 +2309,7 @@ UNION ALL ( SELECT v, u FROM raw_events_first_local ); SELECT user_id, value_3 FROM raw_events_first ORDER BY user_id, value_3; user_id | value_3 ----------+--------- +--------------------------------------------------------------------- 1 | 2 1 | 2 2 | 4 @@ -2331,7 +2331,7 @@ INSERT INTO raw_events_first (user_id, value_1) SELECT user_id, value_4 FROM raw_events_second LIMIT 5; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; user_id | value_1 ----------+--------- +--------------------------------------------------------------------- 1 | 3 2 | 6 3 | 9 @@ -2349,7 +2349,7 @@ WITH value AS (SELECT 1) SELECT * FROM removed_rows, value; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; user_id | value_1 ----------+--------- +--------------------------------------------------------------------- 1 | 1 2 | 1 3 | 1 @@ -2381,7 +2381,7 @@ WITH ultra_rows AS ( SELECT u, v FROM ultra_rows; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; user_id | value_1 ----------+--------- +--------------------------------------------------------------------- 1 | 2 2 | 4 3 | 6 @@ -2401,7 +2401,7 @@ WITH super_rows AS ( SELECT u, 5 FROM super_rows; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; user_id | value_1 ----------+--------- +--------------------------------------------------------------------- 0 | 5 (1 row) @@ -2414,7 +2414,7 @@ INSERT INTO raw_events_first (user_id, value_1) SELECT * FROM user_two; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; user_id | value_1 ----------+--------- +--------------------------------------------------------------------- 2 | 6 (1 row) @@ -2431,7 +2431,7 @@ SELECT * FROM numbers; CREATE TABLE "CaseSensitiveTable" ("UserID" int, "Value1" int); SELECT create_distributed_table('"CaseSensitiveTable"', 'UserID'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -2439,7 +2439,7 @@ INSERT INTO "CaseSensitiveTable" SELECT s, s FROM generate_series(1,10) s; SELECT * FROM "CaseSensitiveTable" ORDER BY "UserID"; UserID | Value1 ---------+-------- +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -2457,7 +2457,7 @@ DROP TABLE "CaseSensitiveTable"; CREATE TABLE dist_table_with_sequence (user_id serial, value_1 serial); SELECT create_distributed_table('dist_table_with_sequence', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -2466,7 +2466,7 @@ INSERT INTO dist_table_with_sequence (value_1) SELECT s FROM generate_series(1,5) s; SELECT * FROM 
dist_table_with_sequence ORDER BY user_id; user_id | value_1 ----------+--------- +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -2480,7 +2480,7 @@ SELECT value_1 FROM dist_table_with_sequence; ERROR: INSERT ... SELECT cannot generate sequence values when selecting from a distributed table SELECT * FROM dist_table_with_sequence ORDER BY user_id; user_id | value_1 ----------+--------- +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -2492,7 +2492,7 @@ SELECT * FROM dist_table_with_sequence ORDER BY user_id; CREATE TABLE ref_table (user_id int, value_1 int); SELECT create_reference_table('ref_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -2500,7 +2500,7 @@ INSERT INTO ref_table SELECT user_id, value_1 FROM raw_events_second; SELECT * FROM ref_table ORDER BY user_id, value_1; user_id | value_1 ----------+--------- +--------------------------------------------------------------------- 1 | 2 | 3 | @@ -2513,14 +2513,14 @@ DROP TABLE ref_table; CREATE TABLE ref1 (d timestamptz); SELECT create_reference_table('ref1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE ref2 (d date); SELECT create_reference_table('ref2'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -2528,7 +2528,7 @@ INSERT INTO ref2 VALUES ('2017-10-31'); INSERT INTO ref1 SELECT * FROM ref2; SELECT count(*) from ref1; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -2536,7 +2536,7 @@ SELECT count(*) from ref1; INSERT INTO ref1 SELECT now() FROM ref2; SELECT count(*) from ref1; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -2546,7 +2546,7 @@ DROP TABLE ref2; CREATE TABLE insert_append_table (user_id int, value_4 bigint); SELECT create_distributed_table('insert_append_table', 'user_id', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -2567,7 +2567,7 @@ EXECUTE insert_prep(5); EXECUTE insert_prep(6); SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; user_id | value_1 ----------+--------- +--------------------------------------------------------------------- 1 | 3 2 | 3 3 | 3 @@ -2582,7 +2582,7 @@ INSERT INTO test_view SELECT * FROM raw_events_second; SELECT user_id, value_4 FROM test_view ORDER BY user_id, value_4; user_id | value_4 ----------+--------- +--------------------------------------------------------------------- 1 | 3 2 | 6 3 | 9 @@ -2596,7 +2596,7 @@ DROP VIEW test_view; CREATE TABLE drop_col_table (col1 text, col2 text, col3 text); SELECT create_distributed_table('drop_col_table', 'col2'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -2605,7 +2605,7 @@ INSERT INTO drop_col_table (col3, col2) SELECT value_4, user_id FROM raw_events_second LIMIT 5; SELECT * FROM drop_col_table ORDER BY col2, col3; col2 | col3 -------+------ +--------------------------------------------------------------------- 1 | 3 2 | 6 3 | 9 @@ -2616,7 +2616,7 @@ SELECT * FROM drop_col_table ORDER BY col2, col3; -- make sure the tuple went to the right shard SELECT * FROM drop_col_table WHERE 
col2 = '1'; col2 | col3 -------+------ +--------------------------------------------------------------------- 1 | 3 (1 row) @@ -2625,14 +2625,14 @@ RESET client_min_messages; CREATE TABLE coerce_events(user_id int, time timestamp, value_1 numeric); SELECT create_distributed_table('coerce_events', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE coerce_agg (user_id int, value_1_agg int); SELECT create_distributed_table('coerce_agg', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -2657,7 +2657,7 @@ FROM ( LIMIT 5; SELECT * FROM coerce_agg ORDER BY 1 DESC, 2 DESC; user_id | value_1_agg ----------+------------- +--------------------------------------------------------------------- 10 | 10 10 | 10 2 | 2 @@ -2679,7 +2679,7 @@ LIMIT 5; ERROR: value too long for type character(1) SELECT * FROM coerce_agg ORDER BY 1 DESC, 2 DESC; user_id | value_1_agg ----------+------------- +--------------------------------------------------------------------- (0 rows) TRUNCATE coerce_agg; @@ -2707,7 +2707,7 @@ FROM ( LIMIT 5; SELECT * FROM coerce_agg ORDER BY 1 DESC, 2 DESC; user_id | value_1_agg ----------+------------- +--------------------------------------------------------------------- 2 | b 1 | a (2 rows) @@ -2730,7 +2730,7 @@ ERROR: new row for relation "coerce_agg_13300060" violates check constraint "sm \set VERBOSITY DEFAULT SELECT * FROM coerce_agg ORDER BY 1 DESC, 2 DESC; user_id | value_1_agg ----------+------------- +--------------------------------------------------------------------- (0 rows) -- integer[3] -> text[3] @@ -2748,7 +2748,7 @@ FROM ( LIMIT 5; SELECT * FROM coerce_agg ORDER BY 1 DESC, 2 DESC; user_id | value_1_agg ----------+------------- +--------------------------------------------------------------------- 2 | {2,2,2} 1 | {1,1,1} (2 rows) diff --git a/src/test/regress/expected/multi_insert_select_conflict.out b/src/test/regress/expected/multi_insert_select_conflict.out index 252d0bf79..f136998f3 100644 --- a/src/test/regress/expected/multi_insert_select_conflict.out +++ b/src/test/regress/expected/multi_insert_select_conflict.out @@ -5,7 +5,7 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE target_table(col_1 int primary key, col_2 int); SELECT create_distributed_table('target_table','col_1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -13,7 +13,7 @@ INSERT INTO target_table VALUES(1,2),(2,3),(3,4),(4,5),(5,6); CREATE TABLE source_table_1(col_1 int primary key, col_2 int, col_3 int); SELECT create_distributed_table('source_table_1','col_1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -21,7 +21,7 @@ INSERT INTO source_table_1 VALUES(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5); CREATE TABLE source_table_2(col_1 int, col_2 int, col_3 int); SELECT create_distributed_table('source_table_2','col_1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -72,7 +72,7 @@ DETAIL: The target table's partition column should correspond to a partition co DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM 
read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1 DEBUG: Collecting INSERT ... SELECT results on coordinator col_1 | col_2 --------+------- +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -120,7 +120,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 16_1 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 LIMIT 5 DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo col_1 | col_2 --------+------- +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -159,7 +159,7 @@ DEBUG: generating subplan 20_2 for subquery SELECT col_1, col_2, col_3 FROM on_ DEBUG: generating subplan 20_3 for subquery SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer) UNION SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('20_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer) DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('20_3'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo col_1 | col_2 --------+------- +--------------------------------------------------------------------- 1 | 0 2 | 0 3 | 0 @@ -192,7 +192,7 @@ DEBUG: generating subplan 28_1 for CTE cte: SELECT col_1, col_2 FROM on_conflic DEBUG: Plan 28 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte.col_1, cte.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('28_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte) citus_insert_select_subquery SELECT * FROM target_table ORDER BY 1; col_1 | col_2 --------+------- +--------------------------------------------------------------------- 1 | 2 2 | 3 3 | 4 @@ -220,7 +220,7 @@ DEBUG: generating subplan 32_3 for subquery SELECT cte.col_1, cte.col_2 FROM (S DEBUG: Plan 32 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('32_3'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) citus_insert_select_subquery SELECT * FROM target_table ORDER BY 1; col_1 | col_2 --------+------- +--------------------------------------------------------------------- 1 | 2 2 | 3 3 | 4 @@ -249,7 +249,7 @@ DEBUG: generating subplan 39_1 for CTE cte: SELECT col_1, col_2, col_3 FROM on_ DEBUG: generating subplan 39_2 for CTE cte_2: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('39_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) cte DEBUG: Plan 39 query after replacing 
subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte_2.col_1, cte_2.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('39_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte_2) citus_insert_select_subquery col_1 | col_2 --------+------- +--------------------------------------------------------------------- 1 | 2 2 | 3 3 | 4 @@ -291,7 +291,7 @@ DETAIL: Select query cannot be pushed down to the worker. CREATE TABLE test_ref_table (key int PRIMARY KEY); SELECT create_reference_table('test_ref_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -319,7 +319,7 @@ BEGIN; col_1 FROM source_table_1 ON CONFLICT (col_1) DO UPDATE SET col_2 = 1 RETURNING *; col_1 | col_2 --------+------- +--------------------------------------------------------------------- 1 | 1 2 | 1 3 | 1 @@ -340,7 +340,7 @@ NOTICE: truncate cascades to table "target_table" col_1 FROM target_table ON CONFLICT (col_1) DO UPDATE SET col_2 = 55 RETURNING *; col_1 | col_2 | col_3 --------+-------+------- +--------------------------------------------------------------------- (0 rows) ROLLBACK; @@ -353,7 +353,7 @@ BEGIN; col_1 FROM target_table ON CONFLICT (col_1) DO UPDATE SET col_2 = 55 RETURNING *; col_1 | col_2 | col_3 --------+-------+------- +--------------------------------------------------------------------- (0 rows) ROLLBACK; @@ -361,7 +361,7 @@ ROLLBACK; CREATE TABLE source_table_3(col_1 numeric, col_2 numeric, col_3 numeric); SELECT create_distributed_table('source_table_3','col_1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -369,7 +369,7 @@ INSERT INTO source_table_3 VALUES(1,11,1),(2,22,2),(3,33,3),(4,44,4),(5,55,5); CREATE TABLE source_table_4(id int, arr_val text[]); SELECT create_distributed_table('source_table_4','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -377,7 +377,7 @@ INSERT INTO source_table_4 VALUES(1, '{"abc","cde","efg"}'), (2, '{"xyz","tvu"}' CREATE TABLE target_table_2(id int primary key, arr_val char(10)[]); SELECT create_distributed_table('target_table_2','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -394,7 +394,7 @@ DETAIL: The data type of the target table's partition column should exactly mat DEBUG: Collecting INSERT ... 
SELECT results on coordinator SELECT * FROM target_table ORDER BY 1; col_1 | col_2 --------+------- +--------------------------------------------------------------------- 1 | 11 2 | 22 3 | 33 @@ -415,7 +415,7 @@ FROM ON CONFLICT DO NOTHING; SELECT * FROM target_table_2 ORDER BY 1; id | arr_val -----+------------------------------------------ +--------------------------------------------------------------------- 1 | {"abc ","def ","gyx "} 2 | {"xyz ","tvu "} (2 rows) @@ -427,7 +427,7 @@ DROP TABLE target_table, source_table_1, source_table_2; CREATE TABLE target_table(col_1 int primary key, col_2 int); SELECT create_distributed_table('target_table','col_1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -435,7 +435,7 @@ INSERT INTO target_table VALUES(1,2),(2,3),(3,4),(4,5),(5,6); CREATE TABLE source_table_1(col_1 int, col_2 int, col_3 int); SELECT create_distributed_table('source_table_1','col_1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -443,7 +443,7 @@ INSERT INTO source_table_1 VALUES(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5); CREATE TABLE source_table_2(col_1 int, col_2 int, col_3 int); SELECT create_distributed_table('source_table_2','col_1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -486,7 +486,7 @@ DEBUG: generating subplan 71_3 for subquery SELECT intermediate_result.col_1, i DEBUG: Plan 71 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('71_3'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo SELECT * FROM target_table ORDER BY 1; col_1 | col_2 --------+------- +--------------------------------------------------------------------- 1 | 0 2 | 0 3 | 0 @@ -512,7 +512,7 @@ DEBUG: generating subplan 77_2 for CTE cte_2: SELECT col_1, col_2 FROM (SELECT DEBUG: Plan 77 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte_2.col_1, cte_2.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('77_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte_2) citus_insert_select_subquery SELECT * FROM target_table ORDER BY 1; col_1 | col_2 --------+------- +--------------------------------------------------------------------- 1 | 2 2 | 3 3 | 4 diff --git a/src/test/regress/expected/multi_insert_select_non_pushable_queries.out b/src/test/regress/expected/multi_insert_select_non_pushable_queries.out index cff82f757..c48326dbf 100644 --- a/src/test/regress/expected/multi_insert_select_non_pushable_queries.out +++ b/src/test/regress/expected/multi_insert_select_non_pushable_queries.out @@ -1,19 +1,19 @@ ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Insert into local table ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- CREATE TABLE test_table_1(id int); INSERT INTO test_table_1 SELECT user_id 
FROM users_table; ERROR: cannot INSERT rows from a distributed query into a local table HINT: Consider using CREATE TEMPORARY TABLE tmp AS SELECT ... and inserting from the temporary table. DROP TABLE test_table_1; ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Vanilla funnel query ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since the JOIN is not an equi join INSERT INTO agg_results_third (user_id, value_1_agg) SELECT user_id, array_length(events_table, 1) @@ -32,11 +32,11 @@ FROM ( ) q; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Funnel grouped by whether or not a user has done an event ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since the JOIN is not an equi join left part of the UNION -- is not equi join INSERT INTO agg_results_third (user_id, value_1_agg, value_2_agg ) @@ -167,11 +167,11 @@ DEBUG: generating subplan 9_1 for subquery SELECT u.user_id, 'step=>1'::text AS ERROR: cannot pushdown the subquery DETAIL: Complex subqueries and CTEs cannot be in the outer part of the outer join RESET client_min_messages; ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Funnel, grouped by the number of times a user has done an event ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since the right of the UNION query is not joined on -- the partition key INSERT INTO agg_results_third (user_id, value_1_agg, value_2_agg) @@ -318,13 +318,13 @@ DEBUG: generating subplan 18_1 for subquery SELECT users_table.user_id, 'action ERROR: cannot pushdown the subquery DETAIL: Complex subqueries and CTEs cannot be in the outer part of the outer join RESET client_min_messages; ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Most recently seen users_table events_table ------------------------------------- +--------------------------------------------------------------------- -- Note that we don't use ORDER BY/LIMIT yet ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since lateral join is not an equi join INSERT INTO agg_results_third (user_id, agg_time, value_2_agg) 
SELECT @@ -412,11 +412,11 @@ FROM ( ORDER BY user_lastseen DESC; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Count the number of distinct users_table who are in segment X and Y and Z ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since partition key is NOT IN INSERT INTO agg_results_third (user_id) SELECT DISTINCT user_id @@ -444,11 +444,11 @@ WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 10 AND value_ AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 50 AND value_1 <= 60); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Find customers who have done X, and satisfy other customer specific criteria ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since join is not an euqi join INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -465,11 +465,11 @@ SELECT user_id, value_2 FROM users_table WHERE AND EXISTS (SELECT user_id FROM events_table WHERE event_type>101 AND event_type < 110 AND value_3 > 100 AND event_type = users_table.user_id); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who haven’t done X, and satisfy other customer specific criteria ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since the join is not an equi join INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -486,11 +486,11 @@ SELECT user_id, value_2 FROM users_table WHERE AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type=101 AND value_3 > 100 AND event_type=users_table.user_id); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. 
------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X and Y, and satisfy other customer specific criteria ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since the second join is not on the partition key INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -500,11 +500,11 @@ SELECT user_id, value_2 FROM users_table WHERE AND EXISTS (SELECT user_id FROM events_table WHERE event_type=101 AND value_3 > 100 AND user_id!=users_table.user_id); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X and haven’t done Y, and satisfy other customer specific criteria ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since the first join is not on the partition key INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE @@ -513,11 +513,11 @@ SELECT user_id, value_2 FROM users_table WHERE AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 300 AND event_type <= 350 AND value_3 > 100 AND user_id=users_table.user_id); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X more than 2 times, and satisfy other customer specific criteria ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since the second join is not an equi join INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, @@ -572,11 +572,11 @@ INSERT INTO agg_results_third(user_id, value_2_agg) HAVING Count(*) > 2); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. 
------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Find me all users_table who has done some event and has filters ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable due to NOT IN INSERT INTO agg_results_third(user_id) Select user_id @@ -617,11 +617,11 @@ And event_type in And value_2 > 25); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Which events_table did people who has done some specific events_table ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable due to NOT IN INSERT INTO agg_results_third(user_id, value_1_agg) SELECT user_id, event_type FROM events_table @@ -643,11 +643,11 @@ WHERE event_type IN (SELECT user_id from events_table WHERE event_type > 500 and GROUP BY user_id, event_type; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- Find my assets that have the highest probability and fetch their metadata ------------------------------------- ------------------------------------- +--------------------------------------------------------------------- +--------------------------------------------------------------------- -- not pushable since the join is not an equi join INSERT INTO agg_results_third(user_id, value_1_agg, value_3_agg) SELECT diff --git a/src/test/regress/expected/multi_insert_select_window.out b/src/test/regress/expected/multi_insert_select_window.out index 6df46b924..c069c4421 100644 --- a/src/test/regress/expected/multi_insert_select_window.out +++ b/src/test/regress/expected/multi_insert_select_window.out @@ -15,7 +15,7 @@ FROM -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 101 | 6 | 3.2079207920792079 (1 row) @@ -34,7 +34,7 @@ FROM -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 101 | 6 | 3.2079207920792079 (1 row) @@ -53,7 +53,7 @@ FROM -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- 
+--------------------------------------------------------------------- 101 | 6 | 3.2079207920792079 (1 row) @@ -75,7 +75,7 @@ FROM -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 12 | 6 | 3.5000000000000000 (1 row) @@ -96,7 +96,7 @@ GROUP BY -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 8 | 2 | 1.1250000000000000 (1 row) @@ -117,7 +117,7 @@ SELECT * FROM -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 30 | 6 | 3.4000000000000000 (1 row) @@ -139,7 +139,7 @@ SELECT * FROM -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 20 | 6 | 3.3500000000000000 (1 row) @@ -176,7 +176,7 @@ JOIN -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) @@ -202,7 +202,7 @@ GROUP BY -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 1 | 1 | 4.0000000000000000 (1 row) @@ -228,7 +228,7 @@ GROUP BY -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 2 | 2 | 3.5000000000000000 (1 row) @@ -253,7 +253,7 @@ GROUP BY -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 1 | 1 | 4.0000000000000000 (1 row) @@ -275,7 +275,7 @@ LIMIT -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) @@ -293,7 +293,7 @@ GROUP BY user_id; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg 
--------+-------+-------------------- +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) @@ -319,7 +319,7 @@ GROUP BY -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 4 | 4 | 2.5000000000000000 (1 row) @@ -339,7 +339,7 @@ SELECT * FROM ( -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) @@ -362,7 +362,7 @@ GROUP BY -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 32 | 6 | 3.5937500000000000 (1 row) @@ -393,7 +393,7 @@ WHERE -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) @@ -411,7 +411,7 @@ FROM -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) @@ -429,7 +429,7 @@ SELECT * FROM ( -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 26 | 6 | 3.7692307692307692 (1 row) @@ -451,7 +451,7 @@ LIMIT -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 10 | 5 | 3.8000000000000000 (1 row) @@ -472,7 +472,7 @@ FROM -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 26 | 6 | 3.7692307692307692 (1 row) @@ -488,7 +488,7 @@ LIMIT -- since there is a limit but not order, we cannot run avg(user_id) SELECT count(*) FROM agg_results_window; count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -518,7 +518,7 @@ LIMIT -- since there is a limit but not order, we cannot test avg or distinct count SELECT count(*) FROM agg_results_window; count -------- +--------------------------------------------------------------------- 5 (1 row) @@ -545,7 +545,7 @@ GROUP BY -- get some statistics from the 
aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) @@ -590,7 +590,7 @@ LIMIT -- since there is a limit but not order, we cannot test avg or distinct count SELECT count(*) FROM agg_results_window; count -------- +--------------------------------------------------------------------- 5 (1 row) @@ -632,7 +632,7 @@ FROM ( -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) diff --git a/src/test/regress/expected/multi_join_order_additional.out b/src/test/regress/expected/multi_join_order_additional.out index 8765c9735..6b4e7e6d3 100644 --- a/src/test/regress/expected/multi_join_order_additional.out +++ b/src/test/regress/expected/multi_join_order_additional.out @@ -31,7 +31,7 @@ CREATE TABLE lineitem_hash ( PRIMARY KEY(l_orderkey, l_linenumber) ); SELECT create_distributed_table('lineitem_hash', 'l_orderkey'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -49,7 +49,7 @@ CREATE TABLE orders_hash ( PRIMARY KEY(o_orderkey) ); SELECT create_distributed_table('orders_hash', 'o_orderkey'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -64,7 +64,7 @@ CREATE TABLE customer_hash ( c_comment varchar(117) not null); SELECT create_distributed_table('customer_hash', 'c_custkey'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -77,7 +77,7 @@ LOG: join order: [ "lineitem" ][ local partition join "lineitem" ] DEBUG: join prunable for intervals [1,5986] and [8997,14947] DEBUG: join prunable for intervals [8997,14947] and [1,5986] QUERY PLAN --------------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (2 rows) @@ -91,7 +91,7 @@ EXPLAIN SELECT count(*) FROM lineitem, orders OR (l_orderkey = o_orderkey AND l_quantity < 10); LOG: join order: [ "lineitem" ][ local partition join "orders" ] QUERY PLAN --------------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -104,7 +104,7 @@ EXPLAIN SELECT count(*) FROM orders, lineitem_hash WHERE o_orderkey = l_orderkey; LOG: join order: [ "orders" ][ single range partition join "lineitem_hash" ] QUERY PLAN --------------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -115,7 +115,7 @@ EXPLAIN 
SELECT count(*) FROM orders_hash, lineitem_hash WHERE o_orderkey = l_orderkey; LOG: join order: [ "orders_hash" ][ local partition join "lineitem_hash" ] QUERY PLAN --------------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -126,7 +126,7 @@ EXPLAIN SELECT count(*) FROM customer_hash, nation WHERE c_nationkey = n_nationkey; LOG: join order: [ "customer_hash" ][ reference join "nation" ] QUERY PLAN --------------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -138,7 +138,7 @@ EXPLAIN SELECT count(*) FROM orders, lineitem, customer_append WHERE o_custkey = l_partkey AND o_custkey = c_nationkey; LOG: join order: [ "orders" ][ dual partition join "lineitem" ][ dual partition join "customer_append" ] QUERY PLAN --------------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -150,7 +150,7 @@ EXPLAIN SELECT count(*) FROM orders, customer_hash WHERE c_custkey = o_custkey; LOG: join order: [ "orders" ][ dual partition join "customer_hash" ] QUERY PLAN --------------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -162,7 +162,7 @@ EXPLAIN SELECT count(*) FROM orders_hash, customer_append WHERE c_custkey = o_custkey; LOG: join order: [ "orders_hash" ][ single range partition join "customer_append" ] QUERY PLAN --------------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled diff --git a/src/test/regress/expected/multi_join_order_tpch_repartition.out b/src/test/regress/expected/multi_join_order_tpch_repartition.out index 51bca0641..dc60b221d 100644 --- a/src/test/regress/expected/multi_join_order_tpch_repartition.out +++ b/src/test/regress/expected/multi_join_order_tpch_repartition.out @@ -22,7 +22,7 @@ WHERE and l_quantity < 24; LOG: join order: [ "lineitem" ] QUERY PLAN --------------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -53,7 +53,7 @@ ORDER BY o_orderdate; LOG: join order: [ "orders" ][ local partition join "lineitem" ][ single range partition join "customer_append" ] QUERY PLAN 
------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: (sum(remote_scan.revenue)) DESC, remote_scan.o_orderdate -> HashAggregate (cost=0.00..0.00 rows=0 width=0) @@ -96,7 +96,7 @@ ORDER BY revenue DESC; LOG: join order: [ "orders" ][ local partition join "lineitem" ][ single range partition join "customer_append" ][ reference join "nation" ] QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: (sum(remote_scan.revenue)) DESC -> HashAggregate (cost=0.00..0.00 rows=0 width=0) @@ -137,7 +137,7 @@ WHERE ); LOG: join order: [ "lineitem" ][ single range partition join "part_append" ] QUERY PLAN --------------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -156,7 +156,7 @@ GROUP BY l_partkey; LOG: join order: [ "lineitem" ][ local partition join "orders" ][ single range partition join "part_append" ][ single range partition join "customer_append" ] QUERY PLAN --------------------------------------------------------------------------- +--------------------------------------------------------------------- HashAggregate (cost=0.00..0.00 rows=0 width=0) Group Key: remote_scan.l_partkey -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) diff --git a/src/test/regress/expected/multi_join_order_tpch_small.out b/src/test/regress/expected/multi_join_order_tpch_small.out index 032d46a7d..1f0b58a14 100644 --- a/src/test/regress/expected/multi_join_order_tpch_small.out +++ b/src/test/regress/expected/multi_join_order_tpch_small.out @@ -17,7 +17,7 @@ WHERE and l_quantity < 24; LOG: join order: [ "lineitem" ] QUERY PLAN ----------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -48,7 +48,7 @@ ORDER BY o_orderdate; LOG: join order: [ "orders" ][ reference join "customer" ][ local partition join "lineitem" ] QUERY PLAN ------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: (sum(remote_scan.revenue)) DESC, remote_scan.o_orderdate -> HashAggregate (cost=0.00..0.00 rows=0 width=0) @@ -91,7 +91,7 @@ ORDER BY revenue DESC; LOG: join order: [ "orders" ][ reference join "customer" ][ reference join "nation" ][ local partition join "lineitem" ] QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: (sum(remote_scan.revenue)) DESC -> HashAggregate (cost=0.00..0.00 rows=0 width=0) @@ -132,7 
+132,7 @@ WHERE ); LOG: join order: [ "lineitem" ][ reference join "part" ] QUERY PLAN ----------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled diff --git a/src/test/regress/expected/multi_join_pruning.out b/src/test/regress/expected/multi_join_pruning.out index 7a0e933d9..fbd706f29 100644 --- a/src/test/regress/expected/multi_join_pruning.out +++ b/src/test/regress/expected/multi_join_pruning.out @@ -12,7 +12,7 @@ DEBUG: Router planner does not support append-partitioned tables. DEBUG: join prunable for intervals [1,5986] and [8997,14947] DEBUG: join prunable for intervals [8997,14947] and [1,5986] sum | avg --------+-------------------- +--------------------------------------------------------------------- 36089 | 3.0074166666666667 (1 row) @@ -21,7 +21,7 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders DEBUG: Router planner does not support append-partitioned tables. DEBUG: join prunable for intervals [8997,14947] and [1,5986] sum | avg --------+-------------------- +--------------------------------------------------------------------- 17999 | 3.0189533713518953 (1 row) @@ -31,7 +31,7 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_orderkey > 20000; DEBUG: Router planner does not support append-partitioned tables. sum | avg ------+----- +--------------------------------------------------------------------- | (1 row) @@ -42,7 +42,7 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_orderkey > 6000 AND o_orderkey < 6000; DEBUG: Router planner does not support append-partitioned tables. sum | avg ------+----- +--------------------------------------------------------------------- | (1 row) @@ -51,7 +51,7 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND false; DEBUG: Router planner does not support append-partitioned tables. sum | avg ------+----- +--------------------------------------------------------------------- | (1 row) @@ -60,7 +60,7 @@ SELECT sum(l_linenumber), avg(l_linenumber) WHERE false; DEBUG: Router planner does not support append-partitioned tables. sum | avg ------+----- +--------------------------------------------------------------------- | (1 row) @@ -75,7 +75,7 @@ DEBUG: Router planner does not support append-partitioned tables. DEBUG: join prunable for intervals [{},{AZZXSP27F21T6,AZZXSP27F21T6}] and [{BA1000U2AMO4ZGX,BZZXSP27F21T6},{CA1000U2AMO4ZGX,CZZXSP27F21T6}] DEBUG: join prunable for intervals [{BA1000U2AMO4ZGX,BZZXSP27F21T6},{CA1000U2AMO4ZGX,CZZXSP27F21T6}] and [{},{AZZXSP27F21T6,AZZXSP27F21T6}] QUERY PLAN ----------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -88,7 +88,7 @@ DEBUG: Router planner does not support append-partitioned tables. 
DEBUG: join prunable for intervals [(a,3,b),(b,4,c)] and [(c,5,d),(d,6,e)] DEBUG: join prunable for intervals [(c,5,d),(d,6,e)] and [(a,3,b),(b,4,c)] QUERY PLAN ----------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -102,7 +102,7 @@ DEBUG: Router planner does not support append-partitioned tables. DEBUG: join prunable for intervals [AA1000U2AMO4ZGX,AZZXSP27F21T6] and [BA1000U2AMO4ZGX,BZZXSP27F21T6] DEBUG: join prunable for intervals [BA1000U2AMO4ZGX,BZZXSP27F21T6] and [AA1000U2AMO4ZGX,AZZXSP27F21T6] QUERY PLAN ----------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled diff --git a/src/test/regress/expected/multi_json_agg.out b/src/test/regress/expected/multi_json_agg.out index 9bdd79fd4..397ccc609 100644 --- a/src/test/regress/expected/multi_json_agg.out +++ b/src/test/regress/expected/multi_json_agg.out @@ -13,7 +13,7 @@ $$; SELECT json_cat_agg(i) FROM (VALUES ('[1,{"a":2}]'::json), ('[null]'::json), (NULL), ('["3",5,4]'::json)) AS t(i); json_cat_agg -------------------------------- +--------------------------------------------------------------------- [1, {"a":2}, null, "3", 5, 4] (1 row) @@ -28,7 +28,7 @@ ERROR: json_agg with order by is unsupported SELECT array_sort(json_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; array_sort --------------------------------------------------------- +--------------------------------------------------------------------- [2132, 15635, 24027, 63700, 67310, 155190] [106170] [4297, 19036, 29380, 62143, 128449, 183095] @@ -44,7 +44,7 @@ SELECT array_sort(json_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(json_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; array_sort ------------------------------------------------------------------------ +--------------------------------------------------------------------- [13309.60, 21168.23, 22824.48, 28955.64, 45983.16, 49620.16] [44694.46] [2618.76, 28733.64, 32986.52, 39890.88, 46796.47, 54058.05] @@ -60,7 +60,7 @@ SELECT array_sort(json_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(json_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; array_sort ----------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- ["1996-01-29", "1996-01-30", "1996-03-13", "1996-03-30", "1996-04-12", "1996-04-21"] ["1997-01-28"] ["1993-10-29", "1993-11-09", "1993-12-04", "1993-12-14", "1994-01-16", "1994-02-02"] @@ -76,7 +76,7 @@ SELECT array_sort(json_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(json_agg(l_shipmode)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; array_sort ----------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- ["AIR ", "FOB ", "MAIL ", "MAIL ", "REG AIR ", "TRUCK "] ["RAIL "] ["AIR ", "FOB ", "RAIL ", "RAIL 
", "SHIP ", "TRUCK "] @@ -92,7 +92,7 @@ SELECT array_sort(json_agg(l_shipmode)) FROM lineitem GROUP BY l_orderkey -- Check that we can execute json_agg() within other functions SELECT json_array_length(json_agg(l_orderkey)) FROM lineitem; json_array_length -------------------- +--------------------------------------------------------------------- 12000 (1 row) @@ -104,7 +104,7 @@ SELECT l_quantity, count(*), avg(l_extendedprice), array_sort(json_agg(l_orderke WHERE l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | count | avg | array_sort -------------+-------+-----------------------+-------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- 1.00 | 17 | 1477.1258823529411765 | [5543, 5633, 5634, 5698, 5766, 5856, 5857, 5986, 8997, 9026, 9158, 9184, 9220, 9222, 9348, 9383, 9476] 2.00 | 19 | 3078.4242105263157895 | [5506, 5540, 5573, 5669, 5703, 5730, 5798, 5831, 5893, 5920, 5923, 9030, 9058, 9123, 9124, 9188, 9344, 9441, 9476] 3.00 | 14 | 4714.0392857142857143 | [5509, 5543, 5605, 5606, 5827, 9124, 9157, 9184, 9223, 9254, 9349, 9414, 9475, 9477] @@ -115,7 +115,7 @@ SELECT l_quantity, array_sort(json_agg(extract (month FROM o_orderdate))) AS my_ FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | my_month -------------+------------------------------------------------------------------ +--------------------------------------------------------------------- 1.00 | [2, 3, 4, 4, 4, 5, 5, 5, 6, 7, 7, 7, 7, 9, 9, 11, 11] 2.00 | [1, 3, 5, 5, 5, 5, 6, 6, 6, 7, 7, 8, 10, 10, 11, 11, 11, 12, 12] 3.00 | [3, 4, 5, 6, 7, 7, 8, 8, 8, 9, 9, 10, 11, 11] @@ -126,7 +126,7 @@ SELECT l_quantity, array_sort(json_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE AND octet_length(l_comment) + octet_length('randomtext'::text) > 40 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | array_sort -------------+--------------------------------------------------- +--------------------------------------------------------------------- 1.00 | [11269, 11397, 11713, 11715, 11973, 18317, 18445] 2.00 | [11847, 18061, 18247, 18953] 3.00 | [18249, 18315, 18699, 18951, 18955] @@ -137,7 +137,7 @@ SELECT l_quantity, array_sort(json_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE SELECT json_agg(case when l_quantity > 20 then l_quantity else NULL end) FROM lineitem WHERE l_orderkey < 5; json_agg -------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- [null, 36.00, null, 28.00, 24.00, 32.00, 38.00, 45.00, 49.00, 27.00, null, 28.00, 26.00, 30.00] (1 row) @@ -145,7 +145,7 @@ SELECT json_agg(case when l_quantity > 20 then l_quantity else NULL end) SELECT json_agg(case when l_quantity > 20 then to_json(l_quantity) else '"f"'::json end) FROM lineitem WHERE l_orderkey < 5; json_agg ----------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- ["f", 36.00, "f", 28.00, 24.00, 32.00, 38.00, 45.00, 49.00, 27.00, "f", 28.00, 26.00, 30.00] (1 row) @@ -153,7 +153,7 @@ SELECT json_agg(case when l_quantity > 20 then to_json(l_quantity) else '"f"'::j SELECT json_agg(json_build_array(l_quantity, 
l_shipdate)) FROM lineitem WHERE l_orderkey < 3; json_agg ------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- [[17.00, "1996-03-13"], [36.00, "1996-04-12"], [8.00, "1996-01-29"], [28.00, "1996-04-21"], [24.00, "1996-03-30"], [32.00, "1996-01-30"], [38.00, "1997-01-28"]] (1 row) @@ -161,7 +161,7 @@ SELECT json_agg(json_build_array(l_quantity, l_shipdate)) SELECT json_agg(ARRAY[l_quantity, l_orderkey]) FROM lineitem WHERE l_orderkey < 3; json_agg --------------- +--------------------------------------------------------------------- [[17.00,1], + [36.00,1], + [8.00,1], + @@ -174,7 +174,7 @@ SELECT json_agg(ARRAY[l_quantity, l_orderkey]) -- Check that we return NULL in case there are no input rows to json_agg() SELECT json_agg(l_orderkey) FROM lineitem WHERE l_quantity < 0; json_agg ----------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/multi_json_object_agg.out b/src/test/regress/expected/multi_json_object_agg.out index 79b79dee5..b4f65eee8 100644 --- a/src/test/regress/expected/multi_json_object_agg.out +++ b/src/test/regress/expected/multi_json_object_agg.out @@ -18,7 +18,7 @@ $$; SELECT json_cat_agg(i) FROM (VALUES ('{"c":[], "b":2}'::json), (NULL), ('{"d":null, "a":{"b":3}, "b":2}'::json)) AS t(i); json_cat_agg ------------------------------------------------------------ +--------------------------------------------------------------------- { "c" : [], "b" : 2, "d" : null, "a" : {"b":3}, "b" : 2 } (1 row) @@ -34,7 +34,7 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, l_partk FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; keys_sort ------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- { "11" : 155190, "12" : 67310, "13" : 63700, "14" : 2132, "15" : 24027, "16" : 15635 } { "21" : 106170 } { "31" : 4297, "32" : 19036, "33" : 128449, "34" : 29380, "35" : 183095, "36" : 62143 } @@ -51,7 +51,7 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, l_exten FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; keys_sort --------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- { "11" : 21168.23, "12" : 45983.16, "13" : 13309.60, "14" : 28955.64, "15" : 22824.48, "16" : 49620.16 } { "21" : 44694.46 } { "31" : 54058.05, "32" : 46796.47, "33" : 39890.88, "34" : 2618.76, "35" : 32986.52, "36" : 28733.64 } @@ -68,7 +68,7 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, l_shipm FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; keys_sort -------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- { "11" : "TRUCK ", "12" : "MAIL ", "13" : "REG AIR ", "14" : "AIR ", "15" : "FOB ", "16" : "MAIL " } { "21" : "RAIL " } { "31" : "AIR ", "32" : "RAIL ", "33" : "SHIP ", "34" : "TRUCK ", "35" : "FOB ", "36" : "RAIL " } @@ -85,7 +85,7 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, l_shipd FROM 
lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; keys_sort -------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- { "11" : "1996-03-13", "12" : "1996-04-12", "13" : "1996-01-29", "14" : "1996-04-21", "15" : "1996-03-30", "16" : "1996-01-30" } { "21" : "1997-01-28" } { "31" : "1994-02-02", "32" : "1993-11-09", "33" : "1994-01-16", "34" : "1993-12-04", "35" : "1993-12-14", "36" : "1993-10-29" } @@ -101,7 +101,7 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, l_shipd -- Check that we can execute json_object_agg() within other functions SELECT count_keys(json_object_agg(l_shipdate, l_orderkey)) FROM lineitem; count_keys ------------- +--------------------------------------------------------------------- 12000 (1 row) @@ -115,7 +115,7 @@ SELECT l_quantity, count(*), avg(l_extendedprice), WHERE l_quantity < 5 AND l_orderkey > 5000 AND l_orderkey < 5300 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | count | avg | keys_sort -------------+-------+-----------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- 1.00 | 8 | 1748.3387500000000000 | { "50635" : "1997-09-03", "51551" : "1994-07-03", "51872" : "1997-08-08", "52221" : "1994-08-19", "52832" : "1994-06-20", "52855" : "1994-03-14", "52856" : "1994-02-08", "52861" : "1997-11-25" } 2.00 | 8 | 2990.9825000000000000 | { "50292" : "1992-11-25", "50633" : "1997-06-17", "50904" : "1997-04-07", "50952" : "1992-07-09", "51216" : "1992-08-10", "52191" : "1997-06-26", "52501" : "1995-08-09", "52551" : "1996-09-27" } 3.00 | 2 | 4744.8000000000000000 | { "50275" : "1997-09-30", "52863" : "1997-12-04" } @@ -127,7 +127,7 @@ SELECT l_quantity, keys_sort(json_object_agg(l_orderkey::text || l_linenumber::t FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5 AND l_orderkey > 5000 AND l_orderkey < 5300 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | keys_sort -------------+------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- 1.00 | { "50635" : 5, "51551" : 6, "51872" : 7, "52221" : 5, "52832" : 6, "52855" : 1, "52856" : 1, "52861" : 9 } 2.00 | { "50292" : 11, "50633" : 5, "50904" : 3, "50952" : 4, "51216" : 5, "52191" : 2, "52501" : 7, "52551" : 7 } 3.00 | { "50275" : 8, "52863" : 9 } @@ -139,7 +139,7 @@ SELECT l_quantity, keys_sort(json_object_agg(l_orderkey::text || l_linenumber::t AND octet_length(l_comment) + octet_length('randomtext'::text) > 40 AND l_orderkey > 5000 AND l_orderkey < 6000 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | keys_sort -------------+-------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- 1.00 | { "51551" : 10311, "52221" : 10445, "52855" : 10571, "56345" : 11269, "56986" : 11397, "58561" : 11713, "58573" : 11715, "59863" : 11973 } 2.00 | { "52191" : 10439, "53513" : 10703, "59233" : 11847 } 3.00 | { "54401" : 10881 } @@ -151,7 +151,7 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || 
l_linenumber::text, case when l_quantity > 20 then l_quantity else NULL end)) FROM lineitem WHERE l_orderkey < 5; keys_sort ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- { "11" : null, "12" : 36.00, "13" : null, "14" : 28.00, "15" : 24.00, "16" : 32.00, "21" : 38.00, "31" : 45.00, "32" : 49.00, "33" : 27.00, "34" : null, "35" : 28.00, "36" : 26.00, "41" : 30.00 } (1 row) @@ -160,7 +160,7 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, case when l_quantity > 20 then to_json(l_quantity) else '"f"'::json end)) FROM lineitem WHERE l_orderkey < 5; keys_sort --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- { "11" : "f", "12" : 36.00, "13" : "f", "14" : 28.00, "15" : 24.00, "16" : 32.00, "21" : 38.00, "31" : 45.00, "32" : 49.00, "33" : 27.00, "34" : "f", "35" : 28.00, "36" : 26.00, "41" : 30.00 } (1 row) @@ -168,7 +168,7 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, json_build_array(l_quantity, l_shipdate))) FROM lineitem WHERE l_orderkey < 3; keys_sort ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- { "11" : [17.00, "1996-03-13"], "12" : [36.00, "1996-04-12"], "13" : [8.00, "1996-01-29"], "14" : [28.00, "1996-04-21"], "15" : [24.00, "1996-03-30"], "16" : [32.00, "1996-01-30"], "21" : [38.00, "1997-01-28"] } (1 row) @@ -176,14 +176,14 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, json_bu SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, ARRAY[l_quantity, l_orderkey])) FROM lineitem WHERE l_orderkey < 3; keys_sort ---------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- { "11" : [17.00,1], "12" : [36.00,1], "13" : [8.00,1], "14" : [28.00,1], "15" : [24.00,1], "16" : [32.00,1], "21" : [38.00,2] } (1 row) -- Check that we return NULL in case there are no input rows to json_object_agg() SELECT json_object_agg(l_shipdate, l_orderkey) FROM lineitem WHERE l_quantity < 0; json_object_agg ------------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/multi_jsonb_agg.out b/src/test/regress/expected/multi_jsonb_agg.out index 0fee5e6e7..3c1c63322 100644 --- a/src/test/regress/expected/multi_jsonb_agg.out +++ b/src/test/regress/expected/multi_jsonb_agg.out @@ -13,7 +13,7 @@ $$; SELECT jsonb_cat_agg(i) FROM (VALUES ('[1,{"a":2}]'::jsonb), ('[null]'::jsonb), (NULL), ('["3",5,4]'::jsonb)) AS t(i); jsonb_cat_agg --------------------------------- +--------------------------------------------------------------------- [1, {"a": 2}, null, "3", 5, 4] (1 row) @@ -28,7 +28,7 @@ ERROR: jsonb_agg with order by is unsupported SELECT 
array_sort(jsonb_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; array_sort --------------------------------------------------------- +--------------------------------------------------------------------- [2132, 15635, 24027, 63700, 67310, 155190] [106170] [4297, 19036, 29380, 62143, 128449, 183095] @@ -44,7 +44,7 @@ SELECT array_sort(jsonb_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(jsonb_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; array_sort ------------------------------------------------------------------------ +--------------------------------------------------------------------- [13309.60, 21168.23, 22824.48, 28955.64, 45983.16, 49620.16] [44694.46] [2618.76, 28733.64, 32986.52, 39890.88, 46796.47, 54058.05] @@ -60,7 +60,7 @@ SELECT array_sort(jsonb_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(jsonb_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; array_sort ----------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- ["1996-01-29", "1996-01-30", "1996-03-13", "1996-03-30", "1996-04-12", "1996-04-21"] ["1997-01-28"] ["1993-10-29", "1993-11-09", "1993-12-04", "1993-12-14", "1994-01-16", "1994-02-02"] @@ -76,7 +76,7 @@ SELECT array_sort(jsonb_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(jsonb_agg(l_shipmode)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; array_sort ----------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- ["AIR ", "FOB ", "MAIL ", "MAIL ", "REG AIR ", "TRUCK "] ["RAIL "] ["AIR ", "FOB ", "RAIL ", "RAIL ", "SHIP ", "TRUCK "] @@ -92,7 +92,7 @@ SELECT array_sort(jsonb_agg(l_shipmode)) FROM lineitem GROUP BY l_orderkey -- Check that we can execute jsonb_agg() within other functions SELECT jsonb_array_length(jsonb_agg(l_orderkey)) FROM lineitem; jsonb_array_length --------------------- +--------------------------------------------------------------------- 12000 (1 row) @@ -104,7 +104,7 @@ SELECT l_quantity, count(*), avg(l_extendedprice), array_sort(jsonb_agg(l_orderk WHERE l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | count | avg | array_sort -------------+-------+-----------------------+-------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- 1.00 | 17 | 1477.1258823529411765 | [5543, 5633, 5634, 5698, 5766, 5856, 5857, 5986, 8997, 9026, 9158, 9184, 9220, 9222, 9348, 9383, 9476] 2.00 | 19 | 3078.4242105263157895 | [5506, 5540, 5573, 5669, 5703, 5730, 5798, 5831, 5893, 5920, 5923, 9030, 9058, 9123, 9124, 9188, 9344, 9441, 9476] 3.00 | 14 | 4714.0392857142857143 | [5509, 5543, 5605, 5606, 5827, 9124, 9157, 9184, 9223, 9254, 9349, 9414, 9475, 9477] @@ -115,7 +115,7 @@ SELECT l_quantity, array_sort(jsonb_agg(extract (month FROM o_orderdate))) AS my FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | my_month -------------+------------------------------------------------------------------ 
+--------------------------------------------------------------------- 1.00 | [2, 3, 4, 4, 4, 5, 5, 5, 6, 7, 7, 7, 7, 9, 9, 11, 11] 2.00 | [1, 3, 5, 5, 5, 5, 6, 6, 6, 7, 7, 8, 10, 10, 11, 11, 11, 12, 12] 3.00 | [3, 4, 5, 6, 7, 7, 8, 8, 8, 9, 9, 10, 11, 11] @@ -126,7 +126,7 @@ SELECT l_quantity, array_sort(jsonb_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE AND octet_length(l_comment) + octet_length('randomtext'::text) > 40 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | array_sort -------------+--------------------------------------------------- +--------------------------------------------------------------------- 1.00 | [11269, 11397, 11713, 11715, 11973, 18317, 18445] 2.00 | [11847, 18061, 18247, 18953] 3.00 | [18249, 18315, 18699, 18951, 18955] @@ -137,7 +137,7 @@ SELECT l_quantity, array_sort(jsonb_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE SELECT jsonb_agg(case when l_quantity > 20 then l_quantity else NULL end) FROM lineitem WHERE l_orderkey < 5; jsonb_agg -------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- [null, 36.00, null, 28.00, 24.00, 32.00, 38.00, 45.00, 49.00, 27.00, null, 28.00, 26.00, 30.00] (1 row) @@ -145,7 +145,7 @@ SELECT jsonb_agg(case when l_quantity > 20 then l_quantity else NULL end) SELECT jsonb_agg(case when l_quantity > 20 then to_jsonb(l_quantity) else '"f"'::jsonb end) FROM lineitem WHERE l_orderkey < 5; jsonb_agg ----------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- ["f", 36.00, "f", 28.00, 24.00, 32.00, 38.00, 45.00, 49.00, 27.00, "f", 28.00, 26.00, 30.00] (1 row) @@ -153,7 +153,7 @@ SELECT jsonb_agg(case when l_quantity > 20 then to_jsonb(l_quantity) else '"f"': SELECT jsonb_agg(jsonb_build_array(l_quantity, l_shipdate)) FROM lineitem WHERE l_orderkey < 3; jsonb_agg ------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- [[17.00, "1996-03-13"], [36.00, "1996-04-12"], [8.00, "1996-01-29"], [28.00, "1996-04-21"], [24.00, "1996-03-30"], [32.00, "1996-01-30"], [38.00, "1997-01-28"]] (1 row) @@ -161,14 +161,14 @@ SELECT jsonb_agg(jsonb_build_array(l_quantity, l_shipdate)) SELECT jsonb_agg(ARRAY[l_quantity, l_orderkey]) FROM lineitem WHERE l_orderkey < 3; jsonb_agg -------------------------------------------------------------------------------------- +--------------------------------------------------------------------- [[17.00, 1], [36.00, 1], [8.00, 1], [28.00, 1], [24.00, 1], [32.00, 1], [38.00, 2]] (1 row) -- Check that we return NULL in case there are no input rows to jsonb_agg() SELECT jsonb_agg(l_orderkey) FROM lineitem WHERE l_quantity < 0; jsonb_agg ------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/multi_jsonb_object_agg.out b/src/test/regress/expected/multi_jsonb_object_agg.out index 215920fe8..25882488a 100644 --- a/src/test/regress/expected/multi_jsonb_object_agg.out +++ b/src/test/regress/expected/multi_jsonb_object_agg.out @@ -11,7 +11,7 @@ $$; SELECT jsonb_cat_agg(i) FROM (VALUES ('{"c":[], "b":2}'::jsonb), (NULL), ('{"d":null, "a":{"b":3}, "b":2}'::jsonb)) AS t(i); jsonb_cat_agg 
---------------------------------------------- +--------------------------------------------------------------------- {"a": {"b": 3}, "b": 2, "c": [], "d": null} (1 row) @@ -27,7 +27,7 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_partkey) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; jsonb_object_agg --------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- {"11": 155190, "12": 67310, "13": 63700, "14": 2132, "15": 24027, "16": 15635} {"21": 106170} {"31": 4297, "32": 19036, "33": 128449, "34": 29380, "35": 183095, "36": 62143} @@ -44,7 +44,7 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_extendedprice) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; jsonb_object_agg ------------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- {"11": 21168.23, "12": 45983.16, "13": 13309.60, "14": 28955.64, "15": 22824.48, "16": 49620.16} {"21": 44694.46} {"31": 54058.05, "32": 46796.47, "33": 39890.88, "34": 2618.76, "35": 32986.52, "36": 28733.64} @@ -61,7 +61,7 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_shipmode) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; jsonb_object_agg ----------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- {"11": "TRUCK ", "12": "MAIL ", "13": "REG AIR ", "14": "AIR ", "15": "FOB ", "16": "MAIL "} {"21": "RAIL "} {"31": "AIR ", "32": "RAIL ", "33": "SHIP ", "34": "TRUCK ", "35": "FOB ", "36": "RAIL "} @@ -78,7 +78,7 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_shipdate) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; jsonb_object_agg ----------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- {"11": "1996-03-13", "12": "1996-04-12", "13": "1996-01-29", "14": "1996-04-21", "15": "1996-03-30", "16": "1996-01-30"} {"21": "1997-01-28"} {"31": "1994-02-02", "32": "1993-11-09", "33": "1994-01-16", "34": "1993-12-04", "35": "1993-12-14", "36": "1993-10-29"} @@ -94,7 +94,7 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_shipdate) -- Check that we can execute jsonb_object_agg() within other functions SELECT count_keys(jsonb_object_agg(l_shipdate, l_orderkey)) FROM lineitem; count_keys ------------- +--------------------------------------------------------------------- 2470 (1 row) @@ -108,7 +108,7 @@ SELECT l_quantity, count(*), avg(l_extendedprice), WHERE l_quantity < 5 AND l_orderkey > 5000 AND l_orderkey < 5300 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | count | avg | jsonb_object_agg -------------+-------+-----------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- 1.00 | 8 | 1748.3387500000000000 | {"50635": "1997-09-03", "51551": "1994-07-03", "51872": "1997-08-08", "52221": "1994-08-19", "52832": "1994-06-20", "52855": "1994-03-14", 
"52856": "1994-02-08", "52861": "1997-11-25"} 2.00 | 8 | 2990.9825000000000000 | {"50292": "1992-11-25", "50633": "1997-06-17", "50904": "1997-04-07", "50952": "1992-07-09", "51216": "1992-08-10", "52191": "1997-06-26", "52501": "1995-08-09", "52551": "1996-09-27"} 3.00 | 2 | 4744.8000000000000000 | {"50275": "1997-09-30", "52863": "1997-12-04"} @@ -120,7 +120,7 @@ SELECT l_quantity, jsonb_object_agg(l_orderkey::text || l_linenumber::text, FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5 AND l_orderkey > 5000 AND l_orderkey < 5300 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | jsonb_object_agg -------------+--------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- 1.00 | {"50635": 5, "51551": 6, "51872": 7, "52221": 5, "52832": 6, "52855": 1, "52856": 1, "52861": 9} 2.00 | {"50292": 11, "50633": 5, "50904": 3, "50952": 4, "51216": 5, "52191": 2, "52501": 7, "52551": 7} 3.00 | {"50275": 8, "52863": 9} @@ -132,7 +132,7 @@ SELECT l_quantity, jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_or AND octet_length(l_comment) + octet_length('randomtext'::text) > 40 AND l_orderkey > 5000 AND l_orderkey < 6000 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | jsonb_object_agg -------------+---------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- 1.00 | {"51551": 10311, "52221": 10445, "52855": 10571, "56345": 11269, "56986": 11397, "58561": 11713, "58573": 11715, "59863": 11973} 2.00 | {"52191": 10439, "53513": 10703, "59233": 11847} 3.00 | {"54401": 10881} @@ -144,7 +144,7 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, case when l_quantity > 20 then l_quantity else NULL end) FROM lineitem WHERE l_orderkey < 5; jsonb_object_agg -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- {"11": null, "12": 36.00, "13": null, "14": 28.00, "15": 24.00, "16": 32.00, "21": 38.00, "31": 45.00, "32": 49.00, "33": 27.00, "34": null, "35": 28.00, "36": 26.00, "41": 30.00} (1 row) @@ -153,7 +153,7 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, case when l_quantity > 20 then to_jsonb(l_quantity) else '"f"'::jsonb end) FROM lineitem WHERE l_orderkey < 5; jsonb_object_agg ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- {"11": "f", "12": 36.00, "13": "f", "14": 28.00, "15": 24.00, "16": 32.00, "21": 38.00, "31": 45.00, "32": 49.00, "33": 27.00, "34": "f", "35": 28.00, "36": 26.00, "41": 30.00} (1 row) @@ -161,7 +161,7 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, jsonb_build_array(l_quantity, l_shipdate)) FROM lineitem WHERE l_orderkey < 3; jsonb_object_agg ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 
+--------------------------------------------------------------------- {"11": [17.00, "1996-03-13"], "12": [36.00, "1996-04-12"], "13": [8.00, "1996-01-29"], "14": [28.00, "1996-04-21"], "15": [24.00, "1996-03-30"], "16": [32.00, "1996-01-30"], "21": [38.00, "1997-01-28"]} (1 row) @@ -169,14 +169,14 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, jsonb_build_arra SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, ARRAY[l_quantity, l_orderkey]) FROM lineitem WHERE l_orderkey < 3; jsonb_object_agg -------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- {"11": [17.00, 1], "12": [36.00, 1], "13": [8.00, 1], "14": [28.00, 1], "15": [24.00, 1], "16": [32.00, 1], "21": [38.00, 2]} (1 row) -- Check that we return NULL in case there are no input rows to jsonb_object_agg() SELECT jsonb_object_agg(l_shipdate, l_orderkey) FROM lineitem WHERE l_quantity < 0; jsonb_object_agg ------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/multi_limit_clause.out b/src/test/regress/expected/multi_limit_clause.out index 9905d925b..c58df4b2e 100644 --- a/src/test/regress/expected/multi_limit_clause.out +++ b/src/test/regress/expected/multi_limit_clause.out @@ -4,7 +4,7 @@ CREATE TABLE lineitem_hash (LIKE lineitem); SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -18,7 +18,7 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity ASC; count_quantity | l_quantity -----------------+------------ +--------------------------------------------------------------------- 219 | 13.00 222 | 29.00 227 | 3.00 @@ -56,7 +56,7 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity DESC, l_quantity DESC; count_quantity | l_quantity -----------------+------------ +--------------------------------------------------------------------- 273 | 28.00 264 | 30.00 261 | 23.00 @@ -94,7 +94,7 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity ASC LIMIT 5; count_quantity | l_quantity -----------------+------------ +--------------------------------------------------------------------- 219 | 13.00 222 | 29.00 227 | 3.00 @@ -106,7 +106,7 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity ASC LIMIT 10; count_quantity | l_quantity -----------------+------------ +--------------------------------------------------------------------- 219 | 13.00 222 | 29.00 227 | 3.00 @@ -123,7 +123,7 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity DESC, l_quantity DESC LIMIT 10; count_quantity | l_quantity -----------------+------------ +--------------------------------------------------------------------- 273 | 28.00 264 | 30.00 261 | 23.00 @@ -140,34 +140,34 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 -- in the first two tests, and then by a simple expression in the last test. 
SELECT min(l_orderkey) FROM lineitem; min ------ +--------------------------------------------------------------------- 1 (1 row) SELECT l_orderkey FROM lineitem ORDER BY l_orderkey ASC LIMIT 1; DEBUG: push down of limit count: 1 l_orderkey ------------- +--------------------------------------------------------------------- 1 (1 row) SELECT max(l_orderkey) FROM lineitem; max -------- +--------------------------------------------------------------------- 14947 (1 row) SELECT l_orderkey FROM lineitem ORDER BY l_orderkey DESC LIMIT 1; DEBUG: push down of limit count: 1 l_orderkey ------------- +--------------------------------------------------------------------- 14947 (1 row) SELECT * FROM lineitem ORDER BY l_orderkey DESC, l_linenumber DESC LIMIT 3; DEBUG: push down of limit count: 3 l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment -------------+-----------+-----------+--------------+------------+-----------------+------------+-------+--------------+--------------+------------+--------------+---------------+---------------------------+------------+--------------------------------- +--------------------------------------------------------------------- 14947 | 107098 | 7099 | 2 | 29.00 | 32047.61 | 0.04 | 0.06 | N | O | 11-08-1995 | 08-30-1995 | 12-03-1995 | TAKE BACK RETURN | FOB | inal sentiments t 14947 | 31184 | 3688 | 1 | 14.00 | 15612.52 | 0.09 | 0.02 | N | O | 11-05-1995 | 09-25-1995 | 11-27-1995 | TAKE BACK RETURN | RAIL | bout the even, iro 14946 | 79479 | 4494 | 2 | 37.00 | 53963.39 | 0.01 | 0.01 | N | O | 11-27-1996 | 02-01-1997 | 11-29-1996 | COLLECT COD | AIR | sleep furiously after the furio @@ -175,7 +175,7 @@ DEBUG: push down of limit count: 3 SELECT max(extract(epoch from l_shipdate)) FROM lineitem; max ------------ +--------------------------------------------------------------------- 912124800 (1 row) @@ -183,7 +183,7 @@ SELECT * FROM lineitem ORDER BY extract(epoch from l_shipdate) DESC, l_orderkey DESC LIMIT 3; DEBUG: push down of limit count: 3 l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment -------------+-----------+-----------+--------------+------------+-----------------+------------+-------+--------------+--------------+------------+--------------+---------------+---------------------------+------------+-------------------------------------- +--------------------------------------------------------------------- 4678 | 57388 | 9894 | 1 | 35.00 | 47088.30 | 0.04 | 0.08 | N | O | 11-27-1998 | 10-02-1998 | 12-17-1998 | TAKE BACK RETURN | AIR | he accounts. fluffily bold sheaves b 12384 | 84161 | 1686 | 5 | 6.00 | 6870.96 | 0.04 | 0.00 | N | O | 11-26-1998 | 10-04-1998 | 12-08-1998 | COLLECT COD | RAIL | ep blithely. 
blithely ironic r 1124 | 92298 | 4808 | 3 | 35.00 | 45160.15 | 0.10 | 0.05 | N | O | 11-25-1998 | 10-08-1998 | 12-25-1998 | TAKE BACK RETURN | AIR | ut the slyly bold pinto beans; fi @@ -197,7 +197,7 @@ SELECT l_quantity, l_discount, avg(l_partkey) FROM lineitem ORDER BY l_quantity LIMIT 1; DEBUG: push down of limit count: 1 l_quantity | l_discount | avg -------------+------------+-------------------- +--------------------------------------------------------------------- 1.00 | 0.00 | 99167.304347826087 (1 row) @@ -207,7 +207,7 @@ SELECT l_quantity, l_discount, avg(l_partkey) FROM lineitem ORDER BY l_quantity, l_discount LIMIT 1; DEBUG: push down of limit count: 1 l_quantity | l_discount | avg -------------+------------+-------------------- +--------------------------------------------------------------------- 1.00 | 0.00 | 99167.304347826087 (1 row) @@ -219,7 +219,7 @@ SELECT l_orderkey, count(DISTINCT l_partkey) ORDER BY 2 DESC, 1 DESC LIMIT 5; DEBUG: push down of limit count: 5 l_orderkey | count -------------+------- +--------------------------------------------------------------------- 14885 | 7 14884 | 7 14821 | 7 @@ -233,7 +233,7 @@ SELECT l_orderkey ORDER BY l_orderkey LIMIT 5; DEBUG: push down of limit count: 5 l_orderkey ------------- +--------------------------------------------------------------------- 1 2 3 @@ -247,7 +247,7 @@ SELECT max(l_orderkey) GROUP BY l_linestatus ORDER BY 1 DESC LIMIT 2; max -------- +--------------------------------------------------------------------- 14947 14916 (2 rows) @@ -258,7 +258,7 @@ SELECT l_orderkey, max(l_shipdate) GROUP BY l_orderkey ORDER BY 2 DESC, 1 LIMIT 5; l_orderkey | max -------------+------------ +--------------------------------------------------------------------- 4678 | 11-27-1998 12384 | 11-26-1998 1124 | 11-25-1998 @@ -274,7 +274,7 @@ SELECT ORDER BY 3 DESC, 1, 2 LIMIT 5; DEBUG: push down of limit count: 5 l_linestatus | l_orderkey | max ---------------+------------+------------ +--------------------------------------------------------------------- O | 4678 | 11-27-1998 O | 12384 | 11-26-1998 O | 1124 | 11-25-1998 @@ -289,7 +289,7 @@ SELECT GROUP BY l_linestatus, l_shipmode ORDER BY 3 DESC, 1, 2 LIMIT 5; l_linestatus | l_shipmode | max ---------------+------------+------------ +--------------------------------------------------------------------- O | AIR | 11-27-1998 O | RAIL | 11-26-1998 O | SHIP | 11-21-1998 @@ -306,7 +306,7 @@ SELECT LIMIT 5; DEBUG: push down of limit count: 5 l_orderkey | l_linenumber -------------+-------------- +--------------------------------------------------------------------- 1 | 1 1 | 2 1 | 3 @@ -322,7 +322,7 @@ SELECT ORDER BY l_linenumber, l_orderkey LIMIT 5; l_orderkey | l_linenumber -------------+-------------- +--------------------------------------------------------------------- 1 | 1 1 | 2 1 | 3 @@ -341,7 +341,7 @@ SELECT LIMIT 5; DEBUG: push down of limit count: 5 l_orderkey | l_linenumber -------------+-------------- +--------------------------------------------------------------------- 1 | 1 2 | 1 3 | 1 @@ -360,7 +360,7 @@ SELECT ORDER BY l_linenumber, (1+1), l_orderkey LIMIT 5; l_orderkey | l_linenumber -------------+-------------- +--------------------------------------------------------------------- 1 | 1 1 | 2 1 | 3 @@ -378,7 +378,7 @@ SELECT ORDER BY l_linenumber, l_orderkey LIMIT 5; l_orderkey | l_linenumber -------------+-------------- +--------------------------------------------------------------------- 1 | 1 1 | 2 1 | 3 @@ -397,7 +397,7 @@ SELECT LIMIT 5; DEBUG: push down of 
limit count: 5 ?column? ----------- +--------------------------------------------------------------------- 2 3 4 @@ -415,7 +415,7 @@ SELECT ORDER BY l_orderkey + 1 , 2 LIMIT 5; ?column? | count -----------+------- +--------------------------------------------------------------------- 2 | 6 3 | 1 4 | 6 @@ -433,7 +433,7 @@ SELECT LIMIT 5; DEBUG: push down of limit count: 5 l_orderkey | count -------------+------- +--------------------------------------------------------------------- 1 | 6 2 | 1 3 | 6 @@ -450,7 +450,7 @@ SELECT ORDER BY 2 DESC, 1 LIMIT 2; l_orderkey | count -------------+------- +--------------------------------------------------------------------- 7 | 7 1 | 6 (2 rows) @@ -464,7 +464,7 @@ SELECT LIMIT 5; DEBUG: push down of limit count: 5 l_orderkey | rank -------------+------ +--------------------------------------------------------------------- 1 | 1 2 | 1 3 | 1 @@ -483,7 +483,7 @@ SELECT ORDER BY l_orderkey , 3, 2 LIMIT 5; l_orderkey | count | rank -------------+-------+------ +--------------------------------------------------------------------- 1 | 6 | 1 2 | 1 | 1 3 | 6 | 1 @@ -499,7 +499,7 @@ SELECT ORDER BY l_orderkey , count(*) OVER (partition by l_orderkey), count(*), l_linenumber LIMIT 5; l_orderkey | l_linenumber | count | count -------------+--------------+-------+------- +--------------------------------------------------------------------- 1 | 1 | 1 | 6 2 | 1 | 1 | 1 3 | 1 | 1 | 6 @@ -515,7 +515,7 @@ SELECT ORDER BY 2 DESC, 1 LIMIT 5; l_orderkey | rank -------------+------ +--------------------------------------------------------------------- 1 | 1 (1 row) diff --git a/src/test/regress/expected/multi_limit_clause_approximate.out b/src/test/regress/expected/multi_limit_clause_approximate.out index 864a5df6d..69d84c6d1 100644 --- a/src/test/regress/expected/multi_limit_clause_approximate.out +++ b/src/test/regress/expected/multi_limit_clause_approximate.out @@ -10,7 +10,7 @@ SELECT l_partkey, sum(l_partkey * (1 + l_suppkey)) AS aggregate FROM lineitem GROUP BY l_partkey ORDER BY aggregate DESC LIMIT 10; l_partkey | aggregate ------------+------------ +--------------------------------------------------------------------- 194541 | 3727794642 160895 | 3671463005 183486 | 3128069328 @@ -30,7 +30,7 @@ SELECT l_partkey, sum(l_partkey * (1 + l_suppkey)) AS aggregate FROM lineitem ORDER BY aggregate DESC LIMIT 10; DEBUG: push down of limit count: 600 l_partkey | aggregate ------------+------------ +--------------------------------------------------------------------- 194541 | 3727794642 160895 | 3671463005 183486 | 3128069328 @@ -52,7 +52,7 @@ SELECT c_custkey, c_name, count(*) as lineitem_count GROUP BY c_custkey, c_name ORDER BY lineitem_count DESC, c_custkey LIMIT 10; c_custkey | c_name | lineitem_count ------------+--------------------+---------------- +--------------------------------------------------------------------- 43 | Customer#000000043 | 42 370 | Customer#000000370 | 40 79 | Customer#000000079 | 38 @@ -74,7 +74,7 @@ SELECT c_custkey, c_name, count(*) as lineitem_count ORDER BY lineitem_count DESC, c_custkey LIMIT 10; DEBUG: push down of limit count: 150 c_custkey | c_name | lineitem_count ------------+--------------------+---------------- +--------------------------------------------------------------------- 43 | Customer#000000043 | 42 370 | Customer#000000370 | 40 79 | Customer#000000079 | 38 @@ -94,7 +94,7 @@ SELECT l_partkey, avg(l_suppkey) AS average FROM lineitem GROUP BY l_partkey ORDER BY average DESC, l_partkey LIMIT 10; l_partkey | average 
------------+----------------------- +--------------------------------------------------------------------- 9998 | 9999.0000000000000000 102466 | 9997.0000000000000000 184959 | 9996.0000000000000000 @@ -113,7 +113,7 @@ SELECT l_partkey, round(sum(l_suppkey)) AS complex_expression FROM lineitem GROUP BY l_partkey ORDER BY complex_expression DESC LIMIT 10; l_partkey | complex_expression ------------+-------------------- +--------------------------------------------------------------------- 160895 | 22816 194541 | 19160 37018 | 19044 @@ -131,7 +131,7 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 10.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity ASC; count_quantity | l_quantity -----------------+------------ +--------------------------------------------------------------------- 227 | 3.00 232 | 7.00 237 | 2.00 diff --git a/src/test/regress/expected/multi_master_protocol.out b/src/test/regress/expected/multi_master_protocol.out index 0381679aa..2c203847c 100644 --- a/src/test/regress/expected/multi_master_protocol.out +++ b/src/test/regress/expected/multi_master_protocol.out @@ -6,13 +6,13 @@ SET citus.next_shard_id TO 740000; SELECT part_storage_type, part_key, part_replica_count, part_max_size, part_placement_policy FROM master_get_table_metadata('lineitem'); part_storage_type | part_key | part_replica_count | part_max_size | part_placement_policy --------------------+------------+--------------------+---------------+----------------------- +--------------------------------------------------------------------- t | l_orderkey | 2 | 1536000 | 2 (1 row) SELECT * FROM master_get_table_ddl_events('lineitem') order by 1; master_get_table_ddl_events ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- ALTER TABLE public.lineitem ADD CONSTRAINT lineitem_pkey PRIMARY KEY (l_orderkey, l_linenumber) ALTER TABLE public.lineitem OWNER TO postgres CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate) TABLESPACE pg_default @@ -21,13 +21,13 @@ SELECT * FROM master_get_table_ddl_events('lineitem') order by 1; SELECT * FROM master_get_new_shardid(); master_get_new_shardid ------------------------- +--------------------------------------------------------------------- 740000 (1 row) SELECT * FROM master_get_active_worker_nodes(); node_name | node_port ------------+----------- +--------------------------------------------------------------------- localhost | 57638 localhost | 57637 (2 rows) diff --git a/src/test/regress/expected/multi_metadata_access.out b/src/test/regress/expected/multi_metadata_access.out index fa6e09250..0567aa595 100644 --- a/src/test/regress/expected/multi_metadata_access.out +++ b/src/test/regress/expected/multi_metadata_access.out @@ -19,7 +19,7 @@ WHERE AND nsp.nspname = 'pg_catalog' AND NOT has_table_privilege(pg_class.oid, 'select'); oid ------------------- +--------------------------------------------------------------------- pg_dist_authinfo (1 
row) diff --git a/src/test/regress/expected/multi_metadata_attributes.out b/src/test/regress/expected/multi_metadata_attributes.out index abcef2178..505777e67 100644 --- a/src/test/regress/expected/multi_metadata_attributes.out +++ b/src/test/regress/expected/multi_metadata_attributes.out @@ -9,6 +9,6 @@ FROM pg_attribute WHERE atthasmissing AND attrelid NOT IN ('pg_dist_node'::regclass) ORDER BY attrelid, attname; attrelid | attname | atthasmissing | attmissingval -----------+---------+---------------+--------------- +--------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out index 59b760c5c..813b9521e 100644 --- a/src/test/regress/expected/multi_metadata_sync.out +++ b/src/test/regress/expected/multi_metadata_sync.out @@ -19,14 +19,14 @@ COMMENT ON FUNCTION master_metadata_snapshot() -- Show that none of the existing tables are qualified to be MX tables SELECT * FROM pg_dist_partition WHERE partmethod='h' AND repmodel='s'; logicalrelid | partmethod | partkey | colocationid | repmodel ---------------+------------+---------+--------------+---------- +--------------------------------------------------------------------- (0 rows) -- Show that, with no MX tables, metadata snapshot contains only the delete commands, -- pg_dist_node entries and reference tables SELECT unnest(master_metadata_snapshot()) order by 1; unnest ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default') SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition TRUNCATE pg_dist_node CASCADE @@ -36,13 +36,13 @@ SELECT unnest(master_metadata_snapshot()) order by 1; CREATE TABLE mx_test_table (col_1 int UNIQUE, col_2 text NOT NULL, col_3 BIGSERIAL); SELECT master_create_distributed_table('mx_test_table', 'col_1', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('mx_test_table', 8, 1); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -52,7 +52,7 @@ UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='mx_test_table'::re -- Show that the created MX table is included in the metadata snapshot SELECT unnest(master_metadata_snapshot()) order by 1; unnest 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE public.mx_test_table OWNER TO postgres @@ -72,7 +72,7 @@ SELECT unnest(master_metadata_snapshot()) order by 1; CREATE INDEX mx_index ON mx_test_table(col_2); SELECT unnest(master_metadata_snapshot()) order by 1; unnest --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE public.mx_test_table OWNER TO postgres @@ -96,7 +96,7 @@ WARNING: not propagating ALTER ... SET SCHEMA commands to worker nodes HINT: Connect to worker nodes directly to manually change schemas of affected objects. 
SELECT unnest(master_metadata_snapshot()) order by 1; unnest ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres @@ -117,14 +117,14 @@ SELECT unnest(master_metadata_snapshot()) order by 1; CREATE TABLE non_mx_test_table (col_1 int, col_2 text); SELECT master_create_distributed_table('non_mx_test_table', 'col_1', 'append'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='non_mx_test_table'::regclass; SELECT unnest(master_metadata_snapshot()) order by 1; unnest ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres @@ -145,7 +145,7 @@ SELECT unnest(master_metadata_snapshot()) order by 1; UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass; SELECT unnest(master_metadata_snapshot()) order by 1; unnest 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres @@ -166,7 +166,7 @@ SELECT unnest(master_metadata_snapshot()) order by 1; -- Ensure that hasmetadata=false for all nodes SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -174,51 +174,51 @@ SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true; SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_port \gset SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary'); master_add_node ------------------ +--------------------------------------------------------------------- 4 (1 row) SELECT start_metadata_sync_to_node('localhost', 8888); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888; hasmetadata -------------- +--------------------------------------------------------------------- t (1 row) SELECT stop_metadata_sync_to_node('localhost', 8888); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888; hasmetadata -------------- +--------------------------------------------------------------------- f (1 row) -- Add a node to another cluster to make sure it's also synced SELECT master_add_secondary_node('localhost', 8889, 'localhost', :worker_1_port, nodecluster => 'second-cluster'); master_add_secondary_node ---------------------------- +--------------------------------------------------------------------- 5 (1 row) -- Run start_metadata_sync_to_node and check that it marked hasmetadata for that worker SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_1_port; nodeid | hasmetadata ---------+------------- +--------------------------------------------------------------------- 1 | t (1 row) @@ -226,13 +226,13 @@ SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND node \c - - - :worker_1_port SELECT * FROM pg_dist_local_group; groupid ---------- 
+--------------------------------------------------------------------- 1 (1 row) SELECT * FROM pg_dist_node ORDER BY nodeid; nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards ---------+---------+-----------+----------+----------+-------------+----------+-----------+----------------+----------------+------------------ +--------------------------------------------------------------------- 1 | 1 | localhost | 57637 | default | t | t | primary | default | f | t 2 | 2 | localhost | 57638 | default | f | t | primary | default | f | t 4 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t @@ -241,13 +241,13 @@ SELECT * FROM pg_dist_node ORDER BY nodeid; SELECT * FROM pg_dist_partition ORDER BY logicalrelid; logicalrelid | partmethod | partkey | colocationid | repmodel ----------------------------------+------------+------------------------------------------------------------------------------------------------------------------------+--------------+---------- +--------------------------------------------------------------------- mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 0 | s (1 row) SELECT * FROM pg_dist_shard ORDER BY shardid; logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue ----------------------------------+---------+--------------+---------------+--------------- +--------------------------------------------------------------------- mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737 mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | -1073741825 mx_testing_schema.mx_test_table | 1310002 | t | -1073741824 | -536870913 @@ -260,7 +260,7 @@ SELECT * FROM pg_dist_shard ORDER BY shardid; SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; shardid | shardstate | shardlength | nodename | nodeport | placementid ----------+------------+-------------+-----------+----------+------------- +--------------------------------------------------------------------- 1310000 | 1 | 0 | localhost | 57637 | 100000 1310001 | 1 | 0 | localhost | 57638 | 100001 1310002 | 1 | 0 | localhost | 57637 | 100002 @@ -273,7 +273,7 @@ SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass; Column | Type | Modifiers ---------+---------+--------------------------------------------------------------------------------- +--------------------------------------------------------------------- col_1 | integer | col_2 | text | not null col_3 | bigint | not null default nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) @@ -282,27 +282,27 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_sch SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_testing_schema.mx_test_table_col_1_key'::regclass; Column | Type | Definition ---------+---------+------------ +--------------------------------------------------------------------- col_1 | integer | col_1 (1 row) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_testing_schema.mx_index'::regclass; Column | Type | Definition ---------+------+------------ +--------------------------------------------------------------------- col_2 | text | col_2 (1 row) -- Check that pg_dist_colocation is not 
synced SELECT * FROM pg_dist_colocation ORDER BY colocationid; colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- (0 rows) -- Make sure that truncate trigger has been set for the MX table on worker SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -318,19 +318,19 @@ CREATE TABLE mx_testing_schema_2.fk_test_2 (col1 int, col2 int, col3 text, FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1 (col1, col3)); SELECT create_distributed_table('mx_testing_schema.fk_test_1', 'col1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('mx_testing_schema_2.fk_test_2', 'col1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -338,7 +338,7 @@ SELECT start_metadata_sync_to_node('localhost', :worker_1_port); \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_testing_schema_2.fk_test_2'::regclass; Constraint | Definition ---------------------------+----------------------------------------------------------------------------- +--------------------------------------------------------------------- fk_test_2_col1_col2_fkey | FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1(col1, col3) (1 row) @@ -351,26 +351,26 @@ RESET citus.replication_model; \c - - - :master_port SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) \c - - - :worker_1_port SELECT * FROM pg_dist_local_group; groupid ---------- +--------------------------------------------------------------------- 1 (1 row) SELECT * FROM pg_dist_node ORDER BY nodeid; nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards ---------+---------+-----------+----------+----------+-------------+----------+-----------+----------------+----------------+------------------ +--------------------------------------------------------------------- 1 | 1 | localhost | 57637 | default | t | t | primary | default | t | t 2 | 2 | localhost | 57638 | default | f | t | primary | default | f | t 4 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t @@ -379,13 +379,13 @@ SELECT * FROM pg_dist_node ORDER BY nodeid; SELECT * FROM pg_dist_partition ORDER BY logicalrelid; logicalrelid | partmethod | partkey | colocationid | repmodel 
----------------------------------+------------+------------------------------------------------------------------------------------------------------------------------+--------------+---------- +--------------------------------------------------------------------- mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 0 | s (1 row) SELECT * FROM pg_dist_shard ORDER BY shardid; logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue ----------------------------------+---------+--------------+---------------+--------------- +--------------------------------------------------------------------- mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737 mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | -1073741825 mx_testing_schema.mx_test_table | 1310002 | t | -1073741824 | -536870913 @@ -398,7 +398,7 @@ SELECT * FROM pg_dist_shard ORDER BY shardid; SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; shardid | shardstate | shardlength | nodename | nodeport | placementid ----------+------------+-------------+-----------+----------+------------- +--------------------------------------------------------------------- 1310000 | 1 | 0 | localhost | 57637 | 100000 1310001 | 1 | 0 | localhost | 57638 | 100001 1310002 | 1 | 0 | localhost | 57637 | 100002 @@ -411,7 +411,7 @@ SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass; Column | Type | Modifiers ---------+---------+--------------------------------------------------------------------------------- +--------------------------------------------------------------------- col_1 | integer | col_2 | text | not null col_3 | bigint | not null default nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) @@ -420,20 +420,20 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_sch SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_testing_schema.mx_test_table_col_1_key'::regclass; Column | Type | Definition ---------+---------+------------ +--------------------------------------------------------------------- col_1 | integer | col_1 (1 row) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_testing_schema.mx_index'::regclass; Column | Type | Definition ---------+------+------------ +--------------------------------------------------------------------- col_2 | text | col_2 (1 row) SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -445,7 +445,7 @@ ERROR: start_metadata_sync_to_node cannot run inside a transaction block ROLLBACK; SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; hasmetadata -------------- +--------------------------------------------------------------------- f (1 row) @@ -455,20 +455,20 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) CREATE TABLE mx_query_test (a int, b text, c int); SELECT create_distributed_table('mx_query_test', 'a'); create_distributed_table 
--------------------------- +--------------------------------------------------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='mx_query_test'::regclass; repmodel ----------- +--------------------------------------------------------------------- s (1 row) @@ -480,7 +480,7 @@ INSERT INTO mx_query_test VALUES (5, 'five', 24); \c - - - :worker_1_port SELECT * FROM mx_query_test ORDER BY a; a | b | c ----+-------+---- +--------------------------------------------------------------------- 1 | one | 1 2 | two | 4 3 | three | 9 @@ -493,7 +493,7 @@ UPDATE mx_query_test SET c = 25 WHERE a = 5; \c - - - :master_port SELECT * FROM mx_query_test ORDER BY a; a | b | c ----+-------+---- +--------------------------------------------------------------------- 1 | one | 1 2 | two | 4 3 | three | 9 @@ -508,32 +508,32 @@ DROP TABLE mx_query_test; \c - - - :master_port SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_1_port; hasmetadata -------------- +--------------------------------------------------------------------- t (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_1_port; hasmetadata -------------- +--------------------------------------------------------------------- f (1 row) -- Test DDL propagation in MX tables SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -551,7 +551,7 @@ CREATE INDEX mx_index_2 ON mx_test_schema_2.mx_table_2 (col2); ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_fk_constraint FOREIGN KEY(col1) REFERENCES mx_test_schema_1.mx_table_1(col1); SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_1.mx_table_1'::regclass; Column | Type | Modifiers ---------+---------+----------- +--------------------------------------------------------------------- col1 | integer | col2 | text | (2 rows) @@ -559,20 +559,20 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_test_schema_1.mx_table_1_col1_key'::regclass; Column | Type | Definition ---------+---------+------------ +--------------------------------------------------------------------- col1 | integer | col1 (1 row) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_test_schema_1.mx_index_1'::regclass; Column | Type | Definition ---------+---------+------------ +--------------------------------------------------------------------- col1 | integer | col1 (1 row) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_2.mx_table_2'::regclass; Column | Type | Modifiers ---------+---------+----------- +--------------------------------------------------------------------- col1 | integer | col2 | text | (2 rows) @@ -580,25 +580,25 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_test_schema_2.mx_index_2'::regclass; Column | Type | 
Definition ---------+------+------------ +--------------------------------------------------------------------- col2 | text | col2 (1 row) SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_2.mx_table_2'::regclass; Constraint | Definition -------------------+----------------------------------------------------------------- +--------------------------------------------------------------------- mx_fk_constraint | FOREIGN KEY (col1) REFERENCES mx_test_schema_1.mx_table_1(col1) (1 row) SELECT create_distributed_table('mx_test_schema_1.mx_table_1', 'col1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('mx_test_schema_2.mx_table_2', 'col1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -613,7 +613,7 @@ WHERE ORDER BY logicalrelid; logicalrelid | repmodel ------------------------------+---------- +--------------------------------------------------------------------- mx_test_schema_1.mx_table_1 | s mx_test_schema_2.mx_table_2 | s (2 rows) @@ -629,7 +629,7 @@ WHERE ORDER BY logicalrelid, shardid; logicalrelid | shardid | nodename | nodeport ------------------------------+---------+-----------+---------- +--------------------------------------------------------------------- mx_test_schema_1.mx_table_1 | 1310020 | localhost | 57637 mx_test_schema_1.mx_table_1 | 1310021 | localhost | 57638 mx_test_schema_1.mx_table_1 | 1310022 | localhost | 57637 @@ -648,7 +648,7 @@ ORDER BY \dt mx_test_schema_?.mx_table_? List of relations Schema | Name | Type | Owner -------------------+------------+-------+---------- +--------------------------------------------------------------------- mx_test_schema_1 | mx_table_1 | table | postgres mx_test_schema_2 | mx_table_2 | table | postgres (2 rows) @@ -662,7 +662,7 @@ WHERE logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass; logicalrelid | repmodel ------------------------------+---------- +--------------------------------------------------------------------- mx_test_schema_1.mx_table_1 | s mx_test_schema_2.mx_table_2 | s (2 rows) @@ -678,7 +678,7 @@ WHERE ORDER BY logicalrelid, shardid; logicalrelid | shardid | nodename | nodeport ------------------------------+---------+-----------+---------- +--------------------------------------------------------------------- mx_test_schema_1.mx_table_1 | 1310020 | localhost | 57637 mx_test_schema_1.mx_table_1 | 1310021 | localhost | 57638 mx_test_schema_1.mx_table_1 | 1310022 | localhost | 57637 @@ -697,17 +697,17 @@ ORDER BY \d mx_test_schema_2.mx_table_2 SELECT * FROM pg_dist_partition; logicalrelid | partmethod | partkey | colocationid | repmodel ---------------+------------+---------+--------------+---------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM pg_dist_shard; logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue ---------------+---------+--------------+---------------+--------------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; shardid | shardstate | shardlength | nodename | nodeport | placementid ----------+------------+-------------+----------+----------+------------- +--------------------------------------------------------------------- (0 rows) -- 
Check that CREATE INDEX statement is propagated @@ -720,14 +720,14 @@ ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_table_2_col1_key UNIQU SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_test_schema_2.mx_index_3'::regclass; Column | Type | Definition ---------+---------+------------ +--------------------------------------------------------------------- col1 | integer | col1 (1 row) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_test_schema_2.mx_table_2_col1_key'::regclass; Column | Type | Definition ---------+---------+------------ +--------------------------------------------------------------------- col1 | integer | col1 (1 row) @@ -757,7 +757,7 @@ REFERENCES \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_1.mx_table_1'::regclass; Column | Type | Modifiers ---------+---------+----------- +--------------------------------------------------------------------- col1 | integer | col2 | text | col3 | integer | @@ -765,7 +765,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass; Constraint | Definition -------------------+----------------------------------------------------------------- +--------------------------------------------------------------------- mx_fk_constraint | FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1) (1 row) @@ -785,7 +785,7 @@ NOT VALID; \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass; Constraint | Definition ---------------------+----------------------------------------------------------------- +--------------------------------------------------------------------- mx_fk_constraint_2 | FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1) (1 row) @@ -799,14 +799,14 @@ SET citus.replication_model TO 'streaming'; CREATE TABLE mx_colocation_test_1 (a int); SELECT create_distributed_table('mx_colocation_test_1', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE mx_colocation_test_2 (a int); SELECT create_distributed_table('mx_colocation_test_2', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -820,7 +820,7 @@ WHERE OR logicalrelid = 'mx_colocation_test_2'::regclass ORDER BY logicalrelid; logicalrelid | colocationid -----------------------+-------------- +--------------------------------------------------------------------- mx_colocation_test_1 | 10000 mx_colocation_test_2 | 10000 (2 rows) @@ -844,7 +844,7 @@ WHERE -- Mark tables colocated and see the changes on the master and the worker SELECT mark_tables_colocated('mx_colocation_test_1', ARRAY['mx_colocation_test_2']); mark_tables_colocated ------------------------ +--------------------------------------------------------------------- (1 row) @@ -856,7 +856,7 @@ WHERE logicalrelid = 'mx_colocation_test_1'::regclass OR logicalrelid = 'mx_colocation_test_2'::regclass; logicalrelid | colocationid -----------------------+-------------- +--------------------------------------------------------------------- mx_colocation_test_1 | 10001 mx_colocation_test_2 | 10001 (2 rows) @@ -870,7 +870,7 @@ WHERE logicalrelid = 'mx_colocation_test_1'::regclass OR logicalrelid = 
'mx_colocation_test_2'::regclass; logicalrelid | colocationid -----------------------+-------------- +--------------------------------------------------------------------- mx_colocation_test_1 | 10001 mx_colocation_test_2 | 10001 (2 rows) @@ -892,13 +892,13 @@ SET citus.replication_model TO 'streaming'; CREATE TABLE mx_temp_drop_test (a int); SELECT create_distributed_table('mx_temp_drop_test', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_temp_drop_test'::regclass; logicalrelid | repmodel --------------------+---------- +--------------------------------------------------------------------- mx_temp_drop_test | s (1 row) @@ -906,13 +906,13 @@ DROP TABLE mx_temp_drop_test; CREATE TABLE mx_temp_drop_test (a int); SELECT create_distributed_table('mx_temp_drop_test', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_temp_drop_test'::regclass; logicalrelid | repmodel --------------------+---------- +--------------------------------------------------------------------- mx_temp_drop_test | s (1 row) @@ -924,13 +924,13 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) @@ -938,13 +938,13 @@ SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); CREATE TABLE mx_table_with_small_sequence(a int, b SERIAL, c SMALLSERIAL); SELECT create_distributed_table('mx_table_with_small_sequence', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -953,7 +953,7 @@ DROP TABLE mx_table_with_small_sequence; CREATE TABLE mx_table_with_small_sequence(a int, b SERIAL, c SMALLSERIAL); SELECT create_distributed_table('mx_table_with_small_sequence', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -967,13 +967,13 @@ SET citus.replication_model TO 'streaming'; CREATE TABLE mx_table_with_sequence(a int, b BIGSERIAL, c BIGSERIAL); SELECT create_distributed_table('mx_table_with_sequence', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass; Column | Type | Modifiers ---------+---------+-------------------------------------------------------------------- +--------------------------------------------------------------------- a | integer | b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass) c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass) @@ 
-982,14 +982,14 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_ \ds mx_table_with_sequence_b_seq List of relations Schema | Name | Type | Owner ---------+------------------------------+----------+---------- +--------------------------------------------------------------------- public | mx_table_with_sequence_b_seq | sequence | postgres (1 row) \ds mx_table_with_sequence_c_seq List of relations Schema | Name | Type | Owner ---------+------------------------------+----------+---------- +--------------------------------------------------------------------- public | mx_table_with_sequence_c_seq | sequence | postgres (1 row) @@ -997,7 +997,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_ \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass; Column | Type | Modifiers ---------+---------+-------------------------------------------------------------------- +--------------------------------------------------------------------- a | integer | b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass) c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass) @@ -1006,27 +1006,27 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_ \ds mx_table_with_sequence_b_seq List of relations Schema | Name | Type | Owner ---------+------------------------------+----------+---------- +--------------------------------------------------------------------- public | mx_table_with_sequence_b_seq | sequence | postgres (1 row) \ds mx_table_with_sequence_c_seq List of relations Schema | Name | Type | Owner ---------+------------------------------+----------+---------- +--------------------------------------------------------------------- public | mx_table_with_sequence_c_seq | sequence | postgres (1 row) -- Check that the sequences on the worker have their own space SELECT nextval('mx_table_with_sequence_b_seq'); nextval ------------------ +--------------------------------------------------------------------- 281474976710657 (1 row) SELECT nextval('mx_table_with_sequence_c_seq'); nextval ------------------ +--------------------------------------------------------------------- 281474976710657 (1 row) @@ -1034,20 +1034,20 @@ SELECT nextval('mx_table_with_sequence_c_seq'); \c - - - :master_port SELECT start_metadata_sync_to_node('localhost', :worker_2_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) \c - - - :worker_2_port SELECT groupid FROM pg_dist_local_group; groupid ---------- +--------------------------------------------------------------------- 2 (1 row) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass; Column | Type | Modifiers ---------+---------+-------------------------------------------------------------------- +--------------------------------------------------------------------- a | integer | b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass) c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass) @@ -1056,26 +1056,26 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_ \ds mx_table_with_sequence_b_seq List of relations Schema | Name | Type | Owner ---------+------------------------------+----------+---------- 
+--------------------------------------------------------------------- public | mx_table_with_sequence_b_seq | sequence | postgres (1 row) \ds mx_table_with_sequence_c_seq List of relations Schema | Name | Type | Owner ---------+------------------------------+----------+---------- +--------------------------------------------------------------------- public | mx_table_with_sequence_c_seq | sequence | postgres (1 row) SELECT nextval('mx_table_with_sequence_b_seq'); nextval ------------------ +--------------------------------------------------------------------- 562949953421313 (1 row) SELECT nextval('mx_table_with_sequence_c_seq'); nextval ------------------ +--------------------------------------------------------------------- 562949953421313 (1 row) @@ -1085,7 +1085,7 @@ INSERT INTO mx_table_with_small_sequence VALUES (2), (4); -- check our small sequence values SELECT a, b, c FROM mx_table_with_small_sequence ORDER BY a,b,c; a | b | c ----+-----------+------ +--------------------------------------------------------------------- 0 | 1 | 1 1 | 268435457 | 4097 2 | 536870913 | 8193 @@ -1099,13 +1099,13 @@ DROP TABLE mx_table_with_small_sequence, mx_table_with_sequence; \ds mx_table_with_sequence_b_seq List of relations Schema | Name | Type | Owner ---------+------+------+------- +--------------------------------------------------------------------- (0 rows) \ds mx_table_with_sequence_c_seq List of relations Schema | Name | Type | Owner ---------+------+------+------- +--------------------------------------------------------------------- (0 rows) -- Check that the sequences are dropped from the workers @@ -1114,13 +1114,13 @@ DROP TABLE mx_table_with_small_sequence, mx_table_with_sequence; \ds mx_table_with_sequence_b_seq List of relations Schema | Name | Type | Owner ---------+------+------+------- +--------------------------------------------------------------------- (0 rows) \ds mx_table_with_sequence_c_seq List of relations Schema | Name | Type | Owner ---------+------+------+------- +--------------------------------------------------------------------- (0 rows) -- Check that the sequences are dropped from the workers @@ -1128,13 +1128,13 @@ DROP TABLE mx_table_with_small_sequence, mx_table_with_sequence; \ds mx_table_with_sequence_b_seq List of relations Schema | Name | Type | Owner ---------+------+------+------- +--------------------------------------------------------------------- (0 rows) \ds mx_table_with_sequence_c_seq List of relations Schema | Name | Type | Owner ---------+------+------+------- +--------------------------------------------------------------------- (0 rows) -- Check that MX sequences play well with non-super users @@ -1149,7 +1149,7 @@ DELETE FROM pg_dist_partition; SELECT groupid AS old_worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) @@ -1172,27 +1172,27 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT create_distributed_table('mx_table', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) \c - postgres - :master_port SELECT master_add_node('localhost', :worker_2_port); master_add_node ------------------ +--------------------------------------------------------------------- 6 (1 row) SELECT start_metadata_sync_to_node('localhost', 
:worker_2_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) \c - mx_user - :worker_1_port SELECT nextval('mx_table_b_seq'); nextval ------------------ +--------------------------------------------------------------------- 281474976710657 (1 row) @@ -1200,7 +1200,7 @@ INSERT INTO mx_table (a) VALUES (37); INSERT INTO mx_table (a) VALUES (38); SELECT * FROM mx_table ORDER BY a; a | b -----+----------------- +--------------------------------------------------------------------- 37 | 281474976710658 38 | 281474976710659 (2 rows) @@ -1208,7 +1208,7 @@ SELECT * FROM mx_table ORDER BY a; \c - mx_user - :worker_2_port SELECT nextval('mx_table_b_seq'); nextval ------------------- +--------------------------------------------------------------------- 1125899906842625 (1 row) @@ -1216,7 +1216,7 @@ INSERT INTO mx_table (a) VALUES (39); INSERT INTO mx_table (a) VALUES (40); SELECT * FROM mx_table ORDER BY a; a | b -----+------------------ +--------------------------------------------------------------------- 37 | 281474976710658 38 | 281474976710659 39 | 1125899906842626 @@ -1245,7 +1245,7 @@ UPDATE pg_dist_placement \c - - - :master_port SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) @@ -1259,7 +1259,7 @@ DROP USER mx_user; CREATE TABLE mx_ref (col_1 int, col_2 text); SELECT create_reference_table('mx_ref'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -1267,14 +1267,14 @@ SELECT create_reference_table('mx_ref'); -- multiple colocation entries for reference tables SELECT count(*) FROM pg_dist_colocation WHERE distributioncolumntype = 0; count -------- +--------------------------------------------------------------------- 1 (1 row) \dt mx_ref List of relations Schema | Name | Type | Owner ---------+--------+-------+---------- +--------------------------------------------------------------------- public | mx_ref | table | postgres (1 row) @@ -1282,7 +1282,7 @@ SELECT count(*) FROM pg_dist_colocation WHERE distributioncolumntype = 0; \dt mx_ref List of relations Schema | Name | Type | Owner ---------+--------+-------+---------- +--------------------------------------------------------------------- public | mx_ref | table | postgres (1 row) @@ -1297,7 +1297,7 @@ WHERE ORDER BY nodeport; logicalrelid | partmethod | repmodel | shardid | placementid | nodename | nodeport ---------------+------------+----------+---------+-------------+-----------+---------- +--------------------------------------------------------------------- mx_ref | n | t | 1310072 | 100072 | localhost | 57637 mx_ref | n | t | 1310072 | 100073 | localhost | 57638 (2 rows) @@ -1309,7 +1309,7 @@ ALTER TABLE mx_ref ADD COLUMN col_3 NUMERIC DEFAULT 0; CREATE INDEX mx_ref_index ON mx_ref(col_1); SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass; Column | Type | Modifiers ---------+---------+----------- +--------------------------------------------------------------------- col_1 | integer | col_2 | text | col_3 | numeric | default 0 @@ -1318,14 +1318,14 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regcl SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_ref_index'::regclass; Column | Type | Definition 
---------+---------+------------ +--------------------------------------------------------------------- col_1 | integer | col_1 (1 row) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass; Column | Type | Modifiers ---------+---------+----------- +--------------------------------------------------------------------- col_1 | integer | col_2 | text | col_3 | numeric | default 0 @@ -1334,7 +1334,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regcl SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_ref_index'::regclass; Column | Type | Definition ---------+---------+------------ +--------------------------------------------------------------------- col_1 | integer | col_1 (1 row) @@ -1354,12 +1354,12 @@ LINE 2: relid = 'mx_ref_index'::regclass; ^ SELECT * FROM pg_dist_shard WHERE shardid=:ref_table_shardid; logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue ---------------+---------+--------------+---------------+--------------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM pg_dist_shard_placement WHERE shardid=:ref_table_shardid; shardid | shardstate | shardlength | nodename | nodeport | placementid ----------+------------+-------------+----------+----------+------------- +--------------------------------------------------------------------- (0 rows) -- Check that master_add_node propagates the metadata about new placements of a reference table @@ -1372,14 +1372,14 @@ DELETE FROM pg_dist_placement WHERE groupid = :old_worker_2_group; SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE mx_ref (col_1 int, col_2 text); SELECT create_reference_table('mx_ref'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -1387,7 +1387,7 @@ SELECT shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass; shardid | nodename | nodeport ----------+-----------+---------- +--------------------------------------------------------------------- 1310073 | localhost | 57637 (1 row) @@ -1396,7 +1396,7 @@ SELECT shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass; shardid | nodename | nodeport ----------+-----------+---------- +--------------------------------------------------------------------- 1310073 | localhost | 57637 (1 row) @@ -1404,7 +1404,7 @@ WHERE logicalrelid='mx_ref'::regclass; SELECT master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "mx_ref" to the node localhost:xxxxx master_add_node ------------------ +--------------------------------------------------------------------- 7 (1 row) @@ -1413,7 +1413,7 @@ FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass ORDER BY shardid, nodeport; shardid | nodename | nodeport ----------+-----------+---------- +--------------------------------------------------------------------- 1310073 | localhost | 57637 1310073 | localhost | 57638 (2 rows) @@ -1424,7 +1424,7 @@ FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass ORDER BY shardid, nodeport; shardid | nodename | nodeport ----------+-----------+---------- 
+--------------------------------------------------------------------- 1310073 | localhost | 57637 1310073 | localhost | 57638 (2 rows) @@ -1444,14 +1444,14 @@ UPDATE pg_dist_placement \c - - - :master_port select shouldhaveshards from pg_dist_node where nodeport = 8888; shouldhaveshards ------------------- +--------------------------------------------------------------------- t (1 row) \c - postgres - :worker_1_port select shouldhaveshards from pg_dist_node where nodeport = 8888; shouldhaveshards ------------------- +--------------------------------------------------------------------- t (1 row) @@ -1459,20 +1459,20 @@ select shouldhaveshards from pg_dist_node where nodeport = 8888; \c - - - :master_port SELECT * from master_set_node_property('localhost', 8888, 'shouldhaveshards', false); master_set_node_property --------------------------- +--------------------------------------------------------------------- (1 row) select shouldhaveshards from pg_dist_node where nodeport = 8888; shouldhaveshards ------------------- +--------------------------------------------------------------------- f (1 row) \c - postgres - :worker_1_port select shouldhaveshards from pg_dist_node where nodeport = 8888; shouldhaveshards ------------------- +--------------------------------------------------------------------- f (1 row) @@ -1480,20 +1480,20 @@ select shouldhaveshards from pg_dist_node where nodeport = 8888; \c - postgres - :master_port SELECT * from master_set_node_property('localhost', 8888, 'shouldhaveshards', true); master_set_node_property --------------------------- +--------------------------------------------------------------------- (1 row) select shouldhaveshards from pg_dist_node where nodeport = 8888; shouldhaveshards ------------------- +--------------------------------------------------------------------- t (1 row) \c - postgres - :worker_1_port select shouldhaveshards from pg_dist_node where nodeport = 8888; shouldhaveshards ------------------- +--------------------------------------------------------------------- t (1 row) @@ -1506,7 +1506,7 @@ ALTER SYSTEM SET citus.metadata_sync_interval TO 300000; ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 300000; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) @@ -1515,14 +1515,14 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE dist_table_1(a int); SELECT create_distributed_table('dist_table_1', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) UPDATE pg_dist_node SET metadatasynced=false WHERE nodeport=:worker_1_port; SELECT hasmetadata, metadatasynced FROM pg_dist_node WHERE nodeport=:worker_1_port; hasmetadata | metadatasynced --------------+---------------- +--------------------------------------------------------------------- t | f (1 row) @@ -1557,13 +1557,13 @@ HINT: If the node is up, wait until metadata gets synced to it and try again. 
SELECT nodeid AS worker_2_nodeid FROM pg_dist_node WHERE nodeport=:worker_2_port \gset SELECT master_update_node(:worker_2_nodeid, 'localhost', 4444); master_update_node --------------------- +--------------------------------------------------------------------- (1 row) SELECT master_update_node(:worker_2_nodeid, 'localhost', :worker_2_port); master_update_node --------------------- +--------------------------------------------------------------------- (1 row) @@ -1571,7 +1571,7 @@ ALTER SYSTEM SET citus.metadata_sync_interval TO DEFAULT; ALTER SYSTEM SET citus.metadata_sync_retry_interval TO DEFAULT; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) @@ -1579,13 +1579,13 @@ UPDATE pg_dist_node SET metadatasynced=true WHERE nodeport=:worker_1_port; -- Cleanup SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/multi_modifications.out b/src/test/regress/expected/multi_modifications.out index 8188853f8..15be73716 100644 --- a/src/test/regress/expected/multi_modifications.out +++ b/src/test/regress/expected/multi_modifications.out @@ -27,7 +27,7 @@ CREATE TABLE append_partitioned ( LIKE limit_orders ); SET citus.shard_count TO 2; SELECT create_distributed_table('limit_orders', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -35,13 +35,13 @@ SELECT create_distributed_table('multiple_hash', 'id', 'hash'); ERROR: column "id" of relation "multiple_hash" does not exist SELECT create_distributed_table('range_partitioned', 'id', 'range'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('append_partitioned', 'id', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -50,7 +50,7 @@ SET citus.shard_replication_factor TO 1; -- make a single shard that covers no partition values SELECT create_distributed_table('insufficient_shards', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -79,14 +79,14 @@ INSERT INTO limit_orders VALUES (32743, 'AAPL', 9580, '2004-10-19 10:23:54', 'bu 20.69); SELECT COUNT(*) FROM limit_orders WHERE id = 32743; count -------- +--------------------------------------------------------------------- 1 (1 row) -- basic single-row INSERT with RETURNING INSERT INTO limit_orders VALUES (32744, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69) RETURNING *; id | symbol | bidder_id | placed_at | kind | limit_price --------+--------+-----------+--------------------------+------+------------- +--------------------------------------------------------------------- 32744 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 (1 row) @@ -107,7 +107,7 @@ SELECT * FROM range_partitioned WHERE id = 32743; DEBUG: Creating router plan DEBUG: Plan is router executable id | symbol | bidder_id | placed_at | kind | limit_price 
--------+--------+-----------+--------------------------+------+------------- +--------------------------------------------------------------------- 32743 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 (1 row) @@ -115,7 +115,7 @@ SELECT * FROM append_partitioned WHERE id = 414123; DEBUG: Router planner does not support append-partitioned tables. DEBUG: Plan is router executable id | symbol | bidder_id | placed_at | kind | limit_price ---------+--------+-----------+--------------------------+------+------------- +--------------------------------------------------------------------- 414123 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 (1 row) @@ -135,7 +135,7 @@ INSERT INTO limit_orders VALUES (12756, 'MSFT', 10959, '2013-05-08 07:29:23', 's DEFAULT); SELECT COUNT(*) FROM limit_orders WHERE id = 12756; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -144,7 +144,7 @@ INSERT INTO limit_orders VALUES (430, upper('ibm'), 214, timestamp '2003-01-28 1 interval '5 hours', 'buy', sqrt(2)); SELECT COUNT(*) FROM limit_orders WHERE id = 430; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -189,7 +189,7 @@ INSERT INTO limit_orders VALUES (12037, 'GOOG', 5634, '2001-04-16 03:37:28', 'bu (12039, 'GOOG', 5634, '2001-04-18 03:37:28', 'buy', 1.50); SELECT COUNT(*) FROM limit_orders WHERE id BETWEEN 12037 AND 12039; count -------- +--------------------------------------------------------------------- 3 (1 row) @@ -199,7 +199,7 @@ INSERT INTO limit_orders VALUES (22037, 'GOOG', 5634, now(), 'buy', 0.50), (22039, 'GOOG', 5634, now(), 'buy', 1.50) RETURNING id; id -------- +--------------------------------------------------------------------- 22037 22038 22039 @@ -207,7 +207,7 @@ RETURNING id; SELECT COUNT(*) FROM limit_orders WHERE id BETWEEN 22037 AND 22039; count -------- +--------------------------------------------------------------------- 3 (1 row) @@ -217,7 +217,7 @@ INSERT INTO limit_orders VALUES (random() * 10 + 70000, 'GOOG', 5634, now(), 'bu (random() * 10 + 80090, 'GOOG', 5634, now(), 'buy', 1.50); SELECT COUNT(*) FROM limit_orders WHERE id BETWEEN 70000 AND 90000; count -------- +--------------------------------------------------------------------- 3 (1 row) @@ -228,27 +228,27 @@ INSERT INTO limit_orders SELECT * FROM deleted_orders; INSERT INTO limit_orders VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); SELECT COUNT(*) FROM limit_orders WHERE id = 246; count -------- +--------------------------------------------------------------------- 1 (1 row) DELETE FROM limit_orders WHERE id = 246; SELECT COUNT(*) FROM limit_orders WHERE id = 246; count -------- +--------------------------------------------------------------------- 0 (1 row) -- test simple DELETE with RETURNING DELETE FROM limit_orders WHERE id = 430 RETURNING *; id | symbol | bidder_id | placed_at | kind | limit_price ------+--------+-----------+--------------------------+------+----------------- +--------------------------------------------------------------------- 430 | IBM | 214 | Tue Jan 28 15:31:17 2003 | buy | 1.4142135623731 (1 row) SELECT COUNT(*) FROM limit_orders WHERE id = 430; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -256,14 +256,14 @@ SELECT COUNT(*) FROM limit_orders WHERE id = 430; INSERT INTO limit_orders VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); SELECT COUNT(*) FROM limit_orders WHERE id = 246; count -------- 
+--------------------------------------------------------------------- 1 (1 row) DELETE FROM limit_orders WHERE id = (2 * 123); SELECT COUNT(*) FROM limit_orders WHERE id = 246; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -284,7 +284,7 @@ ERROR: division by zero \set VERBOSITY default SELECT * FROM limit_orders WHERE id = 412; id | symbol | bidder_id | placed_at | kind | limit_price -----+--------+-----------+-----------+------+------------- +--------------------------------------------------------------------- (0 rows) INSERT INTO limit_orders VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); @@ -292,14 +292,14 @@ INSERT INTO limit_orders VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell' UPDATE limit_orders SET symbol = 'GM' WHERE id = 246; SELECT symbol FROM limit_orders WHERE id = 246; symbol --------- +--------------------------------------------------------------------- GM (1 row) -- simple UPDATE with RETURNING UPDATE limit_orders SET symbol = 'GM' WHERE id = 246 RETURNING *; id | symbol | bidder_id | placed_at | kind | limit_price ------+--------+-----------+--------------------------+------+------------- +--------------------------------------------------------------------- 246 | GM | 162 | Mon Jul 02 16:32:15 2007 | sell | 20.69 (1 row) @@ -307,14 +307,14 @@ UPDATE limit_orders SET symbol = 'GM' WHERE id = 246 RETURNING *; UPDATE limit_orders SET bidder_id = 6 * 3 WHERE id = 246; SELECT bidder_id FROM limit_orders WHERE id = 246; bidder_id ------------ +--------------------------------------------------------------------- 18 (1 row) -- expression UPDATE with RETURNING UPDATE limit_orders SET bidder_id = 6 * 5 WHERE id = 246 RETURNING *; id | symbol | bidder_id | placed_at | kind | limit_price ------+--------+-----------+--------------------------+------+------------- +--------------------------------------------------------------------- 246 | GM | 30 | Mon Jul 02 16:32:15 2007 | sell | 20.69 (1 row) @@ -322,14 +322,14 @@ UPDATE limit_orders SET bidder_id = 6 * 5 WHERE id = 246 RETURNING *; UPDATE limit_orders SET (kind, limit_price) = ('buy', DEFAULT) WHERE id = 246; SELECT kind, limit_price FROM limit_orders WHERE id = 246; kind | limit_price -------+------------- +--------------------------------------------------------------------- buy | 0.00 (1 row) -- multi-column UPDATE with RETURNING UPDATE limit_orders SET (kind, limit_price) = ('buy', 999) WHERE id = 246 RETURNING *; id | symbol | bidder_id | placed_at | kind | limit_price ------+--------+-----------+--------------------------+------+------------- +--------------------------------------------------------------------- 246 | GM | 30 | Mon Jul 02 16:32:15 2007 | buy | 999 (1 row) @@ -359,21 +359,21 @@ ALTER TABLE renamed_orders RENAME TO limit_orders_750000; \c - - - :worker_1_port SELECT count(*) FROM limit_orders_750000 WHERE id = 276; count -------- +--------------------------------------------------------------------- 0 (1 row) \c - - - :worker_2_port SELECT count(*) FROM limit_orders_750000 WHERE id = 276; count -------- +--------------------------------------------------------------------- 0 (1 row) \c - - - :master_port SELECT count(*) FROM limit_orders WHERE id = 276; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -384,7 +384,7 @@ WHERE sp.shardid = s.shardid AND sp.shardstate = 3 AND s.logicalrelid = 'limit_orders'::regclass; count -------- 
+--------------------------------------------------------------------- 0 (1 row) @@ -410,7 +410,7 @@ AND sp.nodeport = :worker_1_port AND sp.shardstate = 1 AND s.logicalrelid = 'limit_orders'::regclass; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -441,7 +441,7 @@ WITH deleted_orders AS (INSERT INTO limit_orders VALUES (399, 'PDR', 14, '2017-0 UPDATE limit_orders SET symbol = 'GM'; SELECT symbol, bidder_id FROM limit_orders WHERE id = 246; symbol | bidder_id ---------+----------- +--------------------------------------------------------------------- GM | 30 (1 row) @@ -453,14 +453,14 @@ UPDATE limit_orders SET bidder_id = bidder_id + 1 WHERE id = 246; UPDATE limit_orders SET symbol = LOWER(symbol) WHERE id = 246; SELECT symbol, bidder_id FROM limit_orders WHERE id = 246; symbol | bidder_id ---------+----------- +--------------------------------------------------------------------- gm | 247 (1 row) -- IMMUTABLE functions are allowed -- even in returning UPDATE limit_orders SET symbol = UPPER(symbol) WHERE id = 246 RETURNING id, LOWER(symbol), symbol; id | lower | symbol ------+-------+-------- +--------------------------------------------------------------------- 246 | gm | GM (1 row) @@ -490,7 +490,7 @@ SET array_of_values = stable_append(array_of_values, 3) WHERE id = 246; ERROR: STABLE functions used in UPDATE queries cannot be called with column references SELECT array_of_values FROM limit_orders WHERE id = 246; array_of_values ------------------ +--------------------------------------------------------------------- {1,2} (1 row) @@ -503,7 +503,7 @@ ERROR: null value in column "bidder_id" violates not-null constraint \set VERBOSITY default SELECT array_of_values FROM limit_orders WHERE id = 246; array_of_values ------------------ +--------------------------------------------------------------------- {1,2} (1 row) @@ -520,7 +520,7 @@ INSERT INTO multiple_hash VALUES ('0', '5'); INSERT INTO multiple_hash VALUES ('0', '6'); UPDATE multiple_hash SET data = data ||'-1' WHERE category = '0' RETURNING *; category | data -----------+------ +--------------------------------------------------------------------- 0 | 1-1 0 | 2-1 0 | 3-1 @@ -531,7 +531,7 @@ UPDATE multiple_hash SET data = data ||'-1' WHERE category = '0' RETURNING *; DELETE FROM multiple_hash WHERE category = '0' RETURNING *; category | data -----------+------ +--------------------------------------------------------------------- 0 | 1-1 0 | 2-1 0 | 3-1 @@ -556,7 +556,7 @@ INSERT INTO multiple_hash VALUES ('2', '3'); INSERT 0 1 INSERT INTO multiple_hash VALUES ('2', '3') RETURNING *; category | data -----------+------ +--------------------------------------------------------------------- 2 | 3 (1 row) @@ -571,7 +571,7 @@ UPDATE 3 -- three rows, with RETURNING UPDATE multiple_hash SET data = data ||'-2' WHERE category = '1' RETURNING category; category ----------- +--------------------------------------------------------------------- 1 1 1 @@ -581,7 +581,7 @@ UPDATE 3 -- check SELECT * FROM multiple_hash WHERE category = '1' ORDER BY category, data; category | data -----------+--------- +--------------------------------------------------------------------- 1 | 1-1-2-2 1 | 2-2-2 1 | 3-2-2 @@ -597,7 +597,7 @@ DELETE 3 -- three rows, with RETURNING DELETE FROM multiple_hash WHERE category = '1' RETURNING category; category ----------- +--------------------------------------------------------------------- 1 1 1 @@ -607,12 +607,12 @@ DELETE 3 -- check SELECT * FROM multiple_hash WHERE 
category = '1' ORDER BY category, data; category | data -----------+------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM multiple_hash WHERE category = '2' ORDER BY category, data; category | data -----------+------ +--------------------------------------------------------------------- (0 rows) -- verify interaction of default values, SERIAL, and RETURNING @@ -621,25 +621,25 @@ CREATE TABLE app_analytics_events (id serial, app_id integer, name text); SET citus.shard_count TO 4; SELECT create_distributed_table('app_analytics_events', 'app_id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) INSERT INTO app_analytics_events VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id; id ----- +--------------------------------------------------------------------- 1 (1 row) INSERT INTO app_analytics_events (app_id, name) VALUES (102, 'Wayz') RETURNING id; id ----- +--------------------------------------------------------------------- 2 (1 row) INSERT INTO app_analytics_events (app_id, name) VALUES (103, 'Mynt') RETURNING *; id | app_id | name -----+--------+------ +--------------------------------------------------------------------- 3 | 103 | Mynt (1 row) @@ -648,25 +648,25 @@ DROP TABLE app_analytics_events; CREATE TABLE app_analytics_events (id serial, app_id integer, name text); SELECT create_distributed_table('app_analytics_events', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) INSERT INTO app_analytics_events VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id; id ----- +--------------------------------------------------------------------- 1 (1 row) INSERT INTO app_analytics_events (app_id, name) VALUES (102, 'Wayz') RETURNING id; id ----- +--------------------------------------------------------------------- 2 (1 row) INSERT INTO app_analytics_events (app_id, name) VALUES (103, 'Mynt') RETURNING *; id | app_id | name -----+--------+------ +--------------------------------------------------------------------- 3 | 103 | Mynt (1 row) @@ -674,7 +674,7 @@ INSERT INTO app_analytics_events (app_id, name) VALUES (103, 'Mynt') RETURNING * INSERT INTO app_analytics_events (app_id, name) VALUES (104, 'Wayz'), (105, 'Mynt') RETURNING *; id | app_id | name -----+--------+------ +--------------------------------------------------------------------- 4 | 104 | Wayz 5 | 105 | Mynt (2 rows) @@ -682,7 +682,7 @@ VALUES (104, 'Wayz'), (105, 'Mynt') RETURNING *; INSERT INTO app_analytics_events (id, name) VALUES (DEFAULT, 'Foo'), (300, 'Wah') RETURNING *; id | app_id | name ------+--------+------ +--------------------------------------------------------------------- 6 | | Foo 300 | | Wah (2 rows) @@ -692,49 +692,49 @@ INSERT INTO app_analytics_events (id, name) VALUES (DEFAULT, $1 || '.1'), (400 , $1 || '.2') RETURNING *; EXECUTE prep('version-1'); id | app_id | name ------+--------+------------- +--------------------------------------------------------------------- 7 | | version-1.1 400 | | version-1.2 (2 rows) EXECUTE prep('version-2'); id | app_id | name ------+--------+------------- +--------------------------------------------------------------------- 8 | | version-2.1 400 | | version-2.2 (2 rows) EXECUTE prep('version-3'); id | app_id | name ------+--------+------------- +--------------------------------------------------------------------- 9 | | version-3.1 400 | | 
version-3.2 (2 rows) EXECUTE prep('version-4'); id | app_id | name ------+--------+------------- +--------------------------------------------------------------------- 10 | | version-4.1 400 | | version-4.2 (2 rows) EXECUTE prep('version-5'); id | app_id | name ------+--------+------------- +--------------------------------------------------------------------- 11 | | version-5.1 400 | | version-5.2 (2 rows) EXECUTE prep('version-6'); id | app_id | name ------+--------+------------- +--------------------------------------------------------------------- 12 | | version-6.1 400 | | version-6.2 (2 rows) SELECT * FROM app_analytics_events ORDER BY id, name; id | app_id | name ------+--------+----------------- +--------------------------------------------------------------------- 1 | 101 | Fauxkemon Geaux 2 | 102 | Wayz 3 | 103 | Mynt @@ -762,14 +762,14 @@ ALTER TABLE app_analytics_events DROP COLUMN app_id; INSERT INTO app_analytics_events (name) VALUES ('Wayz'), ('Mynt') RETURNING *; id | name -----+------ +--------------------------------------------------------------------- 13 | Wayz 14 | Mynt (2 rows) SELECT * FROM app_analytics_events ORDER BY id; id | name -----+------ +--------------------------------------------------------------------- 13 | Wayz 14 | Mynt (2 rows) @@ -779,7 +779,7 @@ DROP TABLE app_analytics_events; CREATE TABLE app_analytics_events (id int default 3, app_id integer, name text); SELECT create_distributed_table('app_analytics_events', 'name', colocate_with => 'none'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -787,14 +787,14 @@ ALTER TABLE app_analytics_events DROP COLUMN app_id; INSERT INTO app_analytics_events (name) VALUES ('Wayz'), ('Mynt') RETURNING *; id | name -----+------ +--------------------------------------------------------------------- 3 | Mynt 3 | Wayz (2 rows) SELECT * FROM app_analytics_events WHERE name = 'Wayz'; id | name -----+------ +--------------------------------------------------------------------- 3 | Wayz (1 row) @@ -803,21 +803,21 @@ DROP TABLE app_analytics_events; CREATE TABLE app_analytics_events (id serial, app_id integer, name text); SELECT create_reference_table('app_analytics_events'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) INSERT INTO app_analytics_events (app_id, name) VALUES (104, 'Wayz'), (105, 'Mynt') RETURNING *; id | app_id | name -----+--------+------ +--------------------------------------------------------------------- 1 | 104 | Wayz 2 | 105 | Mynt (2 rows) SELECT * FROM app_analytics_events ORDER BY id; id | app_id | name -----+--------+------ +--------------------------------------------------------------------- 1 | 104 | Wayz 2 | 105 | Mynt (2 rows) @@ -827,21 +827,21 @@ DROP TABLE app_analytics_events; CREATE TABLE app_analytics_events (id int, app_id serial, name text); SELECT create_distributed_table('app_analytics_events', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) INSERT INTO app_analytics_events (id, name) VALUES (99, 'Wayz'), (98, 'Mynt') RETURNING name, app_id; name | app_id -------+-------- +--------------------------------------------------------------------- Mynt | 2 Wayz | 1 (2 rows) SELECT * FROM app_analytics_events ORDER BY id; id | app_id | name -----+--------+------ 
+--------------------------------------------------------------------- 98 | 2 | Mynt 99 | 1 | Wayz (2 rows) @@ -857,13 +857,13 @@ CREATE TABLE summary_table ( uniques int); SELECT create_distributed_table('raw_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('summary_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -884,7 +884,7 @@ UPDATE summary_table SET uniques = 0 WHERE null; UPDATE summary_table SET uniques = 0 WHERE null > jsonb_build_array(); SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+---------------+-------+--------- +--------------------------------------------------------------------- 1 | | | | 2 | | | | (2 rows) @@ -895,7 +895,7 @@ UPDATE summary_table SET average_value = average_query.average FROM ( WHERE id = 1; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- +--------------------------------------------------------------------- 1 | | 200.0000000000000000 | | 2 | | | | (2 rows) @@ -906,7 +906,7 @@ UPDATE summary_table SET (min_value, average_value) = WHERE id = 2; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- +--------------------------------------------------------------------- 1 | | 200.0000000000000000 | | 2 | 400 | 450.0000000000000000 | | (2 rows) @@ -915,7 +915,7 @@ UPDATE summary_table SET min_value = 100 WHERE id IN (SELECT id FROM raw_table WHERE id = 1 and value > 100) AND id = 1; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- +--------------------------------------------------------------------- 1 | 100 | 200.0000000000000000 | | 2 | 400 | 450.0000000000000000 | | (2 rows) @@ -925,7 +925,7 @@ UPDATE summary_table SET uniques = 2 WHERE id IN (SELECT id FROM raw_table WHERE id = 1 and value IN (100, 200)); SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- +--------------------------------------------------------------------- 1 | 100 | 200.0000000000000000 | | 2 2 | 400 | 450.0000000000000000 | | (2 rows) @@ -935,7 +935,7 @@ UPDATE summary_table SET uniques = NULL WHERE min_value IN (SELECT value FROM raw_table WHERE id = 1) AND id = 1; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- +--------------------------------------------------------------------- 1 | 100 | 200.0000000000000000 | | 2 | 400 | 450.0000000000000000 | | (2 rows) @@ -951,7 +951,7 @@ UPDATE summary_table SET average_value = average_query.average FROM ( WHERE id = 1 AND id = 4; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- +--------------------------------------------------------------------- 1 | 100 | 200.0000000000000000 | | 2 | 400 | 450.0000000000000000 | | (2 rows) @@ -963,7 +963,7 @@ UPDATE summary_table SET average_value = average_query.average FROM ( WHERE id = 1; SELECT * FROM summary_table ORDER BY id; id 
| min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- +--------------------------------------------------------------------- 1 | 100 | | | 2 | 400 | 450.0000000000000000 | | (2 rows) @@ -995,7 +995,7 @@ WHERE summary_table.id = 1; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- +--------------------------------------------------------------------- 1 | 100 | | 4 | 2 2 | 400 | 450.0000000000000000 | | (2 rows) @@ -1005,7 +1005,7 @@ UPDATE summary_table SET count = count + 1 FROM raw_table WHERE raw_table.id = summary_table.id AND summary_table.id = 1; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- +--------------------------------------------------------------------- 1 | 100 | | 5 | 2 2 | 400 | 450.0000000000000000 | | (2 rows) @@ -1023,7 +1023,7 @@ EXECUTE prepared_update_with_subquery(10, 1); EXECUTE prepared_update_with_subquery(10, 1); SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- +--------------------------------------------------------------------- 1 | 100 | | 65 | 2 2 | 400 | 450.0000000000000000 | | (2 rows) @@ -1038,13 +1038,13 @@ CREATE TABLE reference_summary_table ( uniques int); SELECT create_reference_table('reference_raw_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('reference_summary_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -1053,7 +1053,7 @@ INSERT INTO reference_raw_table VALUES (1, 200); INSERT INTO reference_raw_table VALUES (1, 200); INSERT INTO reference_raw_table VALUES (1,300), (2, 400), (2,500) RETURNING *; id | value -----+------- +--------------------------------------------------------------------- 1 | 300 2 | 400 2 | 500 @@ -1063,7 +1063,7 @@ INSERT INTO reference_summary_table VALUES (1); INSERT INTO reference_summary_table VALUES (2); SELECT * FROM reference_summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+---------------+-------+--------- +--------------------------------------------------------------------- 1 | | | | 2 | | | | (2 rows) @@ -1082,7 +1082,7 @@ UPDATE reference_summary_table SET (min_value, average_value) = WHERE id = 2; SELECT * FROM reference_summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- +--------------------------------------------------------------------- 1 | | 200.0000000000000000 | | 2 | 400 | 450.0000000000000000 | | (2 rows) @@ -1093,7 +1093,7 @@ UPDATE reference_summary_table SET (count) = WHERE min_value = 400; SELECT * FROM reference_summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- +--------------------------------------------------------------------- 1 | | 200.0000000000000000 | | 2 | 400 | 450.0000000000000000 | 2 | (2 rows) @@ -1126,7 +1126,7 @@ WHERE id = 3; COMMIT; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- 
+--------------------------------------------------------------------- 1 | 100 | 200.0000000000000000 | 65 | 2 2 | 400 | 450.0000000000000000 | | 3 | | 150.0000000000000000 | | @@ -1144,7 +1144,7 @@ WHERE id = 4; COMMIT; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- +--------------------------------------------------------------------- 1 | 100 | 200.0000000000000000 | 65 | 2 2 | 400 | 450.0000000000000000 | | 3 | | 150.0000000000000000 | | @@ -1162,7 +1162,7 @@ WHERE id = 5; COMMIT; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- +--------------------------------------------------------------------- 1 | 100 | 200.0000000000000000 | 65 | 2 2 | 400 | 450.0000000000000000 | | 3 | | 150.0000000000000000 | | @@ -1181,7 +1181,7 @@ WHERE id = 6; COMMIT; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- +--------------------------------------------------------------------- 1 | 100 | 200.0000000000000000 | 65 | 2 2 | 400 | 450.0000000000000000 | | 3 | | 150.0000000000000000 | | @@ -1193,7 +1193,7 @@ SELECT * FROM summary_table ORDER BY id; -- test DELETE queries SELECT * FROM raw_table ORDER BY id, value; id | value -----+------- +--------------------------------------------------------------------- 1 | 100 1 | 200 1 | 200 @@ -1212,7 +1212,7 @@ DELETE FROM summary_table WHERE min_value IN (SELECT value FROM raw_table WHERE id = 1) AND id = 1; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- +--------------------------------------------------------------------- 2 | 400 | 450.0000000000000000 | | 3 | | 150.0000000000000000 | | 4 | | 150.0000000000000000 | | @@ -1225,7 +1225,7 @@ DELETE FROM summary_table USING raw_table WHERE summary_table.id = raw_table.id AND raw_table.id = 2; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- +--------------------------------------------------------------------- 3 | | 150.0000000000000000 | | 4 | | 150.0000000000000000 | | 5 | | 150.0000000000000000 | | @@ -1238,7 +1238,7 @@ DELETE FROM reference_summary_table USING raw_table ERROR: cannot perform select on a distributed table and modify a reference table SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- +--------------------------------------------------------------------- 3 | | 150.0000000000000000 | | 4 | | 150.0000000000000000 | | 5 | | 150.0000000000000000 | | @@ -1255,7 +1255,7 @@ DELETE FROM summary_table USING reference_raw_table COMMIT; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+----------------------+-------+--------- +--------------------------------------------------------------------- 3 | | 150.0000000000000000 | | 4 | | 150.0000000000000000 | | 5 | | 150.0000000000000000 | | @@ -1276,7 +1276,7 @@ EXECUTE prepared_delete_with_join(5); EXECUTE prepared_delete_with_join(6); SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques -----+-----------+---------------+-------+--------- 
+--------------------------------------------------------------------- (0 rows) -- we don't support subqueries in VALUES clause diff --git a/src/test/regress/expected/multi_modifying_xacts.out b/src/test/regress/expected/multi_modifying_xacts.out index cc9c3dea0..bad2b6eff 100644 --- a/src/test/regress/expected/multi_modifying_xacts.out +++ b/src/test/regress/expected/multi_modifying_xacts.out @@ -14,25 +14,25 @@ CREATE TABLE labs ( ); SELECT master_create_distributed_table('researchers', 'lab_id', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('researchers', 2, 2); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT master_create_distributed_table('labs', 'id', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('labs', 1, 1); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -50,7 +50,7 @@ INSERT INTO researchers VALUES (2, 1, 'John Backus'), (12, 1, 'Frances E. Allen' COMMIT; SELECT name FROM researchers WHERE lab_id = 1 AND id % 10 = 2; name ------------------- +--------------------------------------------------------------------- John Backus Frances E. Allen (2 rows) @@ -63,7 +63,7 @@ ROLLBACK; -- should have rolled everything back SELECT * FROM researchers WHERE id = 15 AND lab_id = 2; id | lab_id | name -----+--------+------ +--------------------------------------------------------------------- (0 rows) -- abort a modification @@ -72,7 +72,7 @@ DELETE FROM researchers WHERE lab_id = 1 AND id = 1; ABORT; SELECT name FROM researchers WHERE lab_id = 1 AND id = 1; name --------------- +--------------------------------------------------------------------- Donald Knuth (1 row) @@ -91,7 +91,7 @@ INSERT INTO researchers VALUES (6, 3, 'Ken Thompson'); COMMIT; SELECT name FROM researchers WHERE lab_id = 3 AND id = 6; name --------------- +--------------------------------------------------------------------- Ken Thompson (1 row) @@ -115,7 +115,7 @@ ROLLBACK TO hire_engelbart; COMMIT; SELECT name FROM researchers WHERE lab_id = 4; name ----------- +--------------------------------------------------------------------- Jim Gray (1 row) @@ -138,7 +138,7 @@ INSERT INTO labs VALUES (5, 'Los Alamos'); COMMIT; SELECT * FROM researchers, labs WHERE labs.id = researchers.lab_id AND researchers.lab_id = 5; id | lab_id | name | id | name -----+--------+-------------------+----+------------ +--------------------------------------------------------------------- 8 | 5 | Douglas Engelbart | 5 | Los Alamos (1 row) @@ -161,7 +161,7 @@ BEGIN; INSERT INTO labs VALUES (6, 'Bell Labs'); SELECT count(*) FROM researchers WHERE lab_id = 6; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -177,7 +177,7 @@ AND s.logicalrelid = 'researchers'::regclass; INSERT INTO labs VALUES (6, 'Bell Labs'); SELECT count(*) FROM researchers WHERE lab_id = 6; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -201,14 +201,14 @@ ABORT; -- but the DDL should correctly roll back SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.labs'::regclass; Column | Type | 
Modifiers ---------+--------+----------- +--------------------------------------------------------------------- id | bigint | not null name | text | not null (2 rows) SELECT * FROM labs WHERE id = 6; id | name -----+----------- +--------------------------------------------------------------------- 6 | Bell Labs (1 row) @@ -244,7 +244,7 @@ BEGIN; \copy labs from stdin delimiter ',' SELECT name FROM labs WHERE id = 10; name ----------------- +--------------------------------------------------------------------- Weyland-Yutani Weyland-Yutani (2 rows) @@ -258,7 +258,7 @@ BEGIN; COMMIT; SELECT name FROM labs WHERE id = 11 OR id = 12 ORDER BY id; name ----------------- +--------------------------------------------------------------------- Planet Express fsociety (2 rows) @@ -266,7 +266,7 @@ SELECT name FROM labs WHERE id = 11 OR id = 12 ORDER BY id; -- 1pc failure test SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -280,13 +280,13 @@ COMMIT; -- verify rollback SELECT * FROM researchers WHERE lab_id = 6; id | lab_id | name -----+--------+---------------- +--------------------------------------------------------------------- 9 | 6 | Leslie Lamport (1 row) SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -294,7 +294,7 @@ SELECT count(*) FROM pg_dist_transaction; SET citus.multi_shard_commit_protocol TO '2pc'; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -308,13 +308,13 @@ COMMIT; -- verify rollback SELECT * FROM researchers WHERE lab_id = 6; id | lab_id | name -----+--------+---------------- +--------------------------------------------------------------------- 9 | 6 | Leslie Lamport (1 row) SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -325,7 +325,7 @@ COMMIT; -- verify success SELECT * FROM researchers WHERE lab_id = 6; id | lab_id | name -----+--------+---------------------- +--------------------------------------------------------------------- 9 | 6 | Leslie Lamport 17 | 6 | 'Bjarne Stroustrup' 18 | 6 | 'Dennis Ritchie' @@ -334,7 +334,7 @@ SELECT * FROM researchers WHERE lab_id = 6; -- verify 2pc SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -351,7 +351,7 @@ SELECT * from run_command_on_workers('CREATE FUNCTION reject_large_id() RETURNS $rli$ LANGUAGE plpgsql;') ORDER BY nodeport; nodename | nodeport | success | result ------------+----------+---------+----------------- +--------------------------------------------------------------------- localhost | 57637 | t | CREATE FUNCTION localhost | 57638 | t | CREATE FUNCTION (2 rows) @@ -360,7 +360,7 @@ ORDER BY nodeport; SELECT * FROM run_command_on_placements('researchers', 'CREATE CONSTRAINT TRIGGER reject_large_researcher_id AFTER INSERT ON %s DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE reject_large_id()') ORDER BY nodeport, shardid; nodename | nodeport | shardid | success | result ------------+----------+---------+---------+---------------- +--------------------------------------------------------------------- localhost | 57637 | 1200000 | t | CREATE TRIGGER localhost | 57637 
| 1200001 | t | CREATE TRIGGER localhost | 57638 | 1200000 | t | CREATE TRIGGER @@ -386,7 +386,7 @@ ERROR: could not commit transaction on any active node -- verify everyhing including delete is rolled back SELECT * FROM researchers WHERE lab_id = 6; id | lab_id | name -----+--------+---------------------- +--------------------------------------------------------------------- 9 | 6 | Leslie Lamport 17 | 6 | 'Bjarne Stroustrup' 18 | 6 | 'Dennis Ritchie' @@ -396,7 +396,7 @@ SELECT * FROM researchers WHERE lab_id = 6; SELECT * from run_command_on_placements('researchers', 'drop trigger reject_large_researcher_id on %s') ORDER BY nodeport, shardid; nodename | nodeport | shardid | success | result ------------+----------+---------+---------+-------------- +--------------------------------------------------------------------- localhost | 57637 | 1200000 | t | DROP TRIGGER localhost | 57637 | 1200001 | t | DROP TRIGGER localhost | 57638 | 1200000 | t | DROP TRIGGER @@ -406,7 +406,7 @@ ORDER BY nodeport, shardid; SELECT * FROM run_command_on_workers('drop function reject_large_id()') ORDER BY nodeport; nodename | nodeport | success | result ------------+----------+---------+--------------- +--------------------------------------------------------------------- localhost | 57637 | t | DROP FUNCTION localhost | 57638 | t | DROP FUNCTION (2 rows) @@ -424,12 +424,12 @@ ABORT; BEGIN; SELECT lab_id FROM researchers WHERE lab_id = 1 AND id = 0; lab_id --------- +--------------------------------------------------------------------- (0 rows) SELECT lab_id FROM researchers WHERE lab_id = 2 AND id = 0; lab_id --------- +--------------------------------------------------------------------- (0 rows) ALTER TABLE researchers ADD COLUMN motto text; @@ -439,12 +439,12 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT lab_id FROM researchers WHERE lab_id = 1 AND id = 0; lab_id --------- +--------------------------------------------------------------------- (0 rows) SELECT lab_id FROM researchers WHERE lab_id = 2 AND id = 0; lab_id --------- +--------------------------------------------------------------------- (0 rows) ALTER TABLE researchers ADD COLUMN motto text; @@ -458,7 +458,7 @@ ROLLBACK; -- should have rolled everything back SELECT * FROM labs WHERE id = 12; id | name -----+---------- +--------------------------------------------------------------------- 12 | fsociety (1 row) @@ -469,13 +469,13 @@ CREATE TABLE objects ( ); SELECT master_create_distributed_table('objects', 'id', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('objects', 1, 2); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -490,7 +490,7 @@ COMMIT; -- data shouldn't have persisted... SELECT * FROM objects WHERE id = 1; id | name -----+------ +--------------------------------------------------------------------- (0 rows) -- and placements should still be healthy... 
@@ -501,7 +501,7 @@ WHERE sp.shardid = s.shardid AND sp.shardstate = 1 AND s.logicalrelid = 'objects'::regclass; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -532,12 +532,12 @@ COMMIT; -- so the data should noy be persisted SELECT * FROM objects WHERE id = 2; id | name -----+------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM labs WHERE id = 7; id | name -----+------ +--------------------------------------------------------------------- (0 rows) -- and none of placements should be inactive @@ -550,7 +550,7 @@ AND sp.nodeport = :worker_2_port AND sp.shardstate = 3 AND s.logicalrelid = 'objects'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -584,12 +584,12 @@ COMMIT; -- data should NOT be persisted SELECT * FROM objects WHERE id = 1; id | name -----+------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM labs WHERE id = 8; id | name -----+------ +--------------------------------------------------------------------- (0 rows) -- all placements should remain healthy @@ -601,7 +601,7 @@ AND sp.shardstate = 1 AND (s.logicalrelid = 'objects'::regclass OR s.logicalrelid = 'labs'::regclass); count -------- +--------------------------------------------------------------------- 3 (1 row) @@ -624,7 +624,7 @@ WARNING: failed to commit transaction on localhost:xxxxx -- data should be persisted SELECT * FROM objects WHERE id = 2; id | name -----+------ +--------------------------------------------------------------------- 2 | BAD (1 row) @@ -638,7 +638,7 @@ AND sp.nodeport = :worker_2_port AND sp.shardstate = 3 AND s.logicalrelid = 'objects'::regclass; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -672,12 +672,12 @@ ERROR: could not commit transaction on any active node -- data should NOT be persisted SELECT * FROM objects WHERE id = 1; id | name -----+------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM labs WHERE id = 8; id | name -----+------ +--------------------------------------------------------------------- (0 rows) -- all placements should remain healthy @@ -689,7 +689,7 @@ AND sp.shardstate = 1 AND (s.logicalrelid = 'objects'::regclass OR s.logicalrelid = 'labs'::regclass); count -------- +--------------------------------------------------------------------- 3 (1 row) @@ -710,13 +710,13 @@ WARNING: could not commit transaction for shard xxxxx on any active node -- data to objects should be persisted, but labs should not... 
SELECT * FROM objects WHERE id = 1; id | name -----+------- +--------------------------------------------------------------------- 1 | apple (1 row) SELECT * FROM labs WHERE id = 8; id | name -----+------ +--------------------------------------------------------------------- (0 rows) -- labs should be healthy, but one object placement shouldn't be @@ -729,7 +729,7 @@ AND (s.logicalrelid = 'objects'::regclass OR GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; logicalrelid | shardstate | count ---------------+------------+------- +--------------------------------------------------------------------- labs | 1 | 1 objects | 1 | 1 objects | 3 | 1 @@ -739,7 +739,7 @@ ORDER BY s.logicalrelid, sp.shardstate; CREATE TABLE append_researchers ( LIKE researchers ); SELECT master_create_distributed_table('append_researchers', 'id', 'append'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) @@ -759,7 +759,7 @@ INSERT INTO append_researchers VALUES (0, 0, 'John Backus'); COMMIT; SELECT * FROM append_researchers WHERE id = 0; id | lab_id | name -----+--------+------------- +--------------------------------------------------------------------- 0 | 0 | John Backus (1 row) @@ -769,7 +769,7 @@ DELETE FROM append_researchers WHERE id = 0; ROLLBACK; SELECT * FROM append_researchers WHERE id = 0; id | lab_id | name -----+--------+------------- +--------------------------------------------------------------------- 0 | 0 | John Backus (1 row) @@ -782,7 +782,7 @@ HINT: Make sure the value for partition column "id" falls into a single shard. ROLLBACK; SELECT * FROM append_researchers; id | lab_id | name -----+--------+------------- +--------------------------------------------------------------------- 0 | 0 | John Backus (1 row) @@ -791,7 +791,7 @@ SELECT * FROM append_researchers; CREATE TABLE reference_modifying_xacts (key int, value int); SELECT create_reference_table('reference_modifying_xacts'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -799,7 +799,7 @@ SELECT create_reference_table('reference_modifying_xacts'); INSERT INTO reference_modifying_xacts VALUES (1, 1); SELECT * FROM reference_modifying_xacts; key | value ------+------- +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -808,7 +808,7 @@ BEGIN; INSERT INTO reference_modifying_xacts VALUES (2, 2); SELECT * FROM reference_modifying_xacts; key | value ------+------- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -817,7 +817,7 @@ COMMIT; -- we should be able to see the insert outside of the transaction as well SELECT * FROM reference_modifying_xacts; key | value ------+------- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -827,7 +827,7 @@ BEGIN; INSERT INTO reference_modifying_xacts VALUES (3, 3); SELECT * FROM reference_modifying_xacts; key | value ------+------- +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -837,7 +837,7 @@ ROLLBACK; -- see that we've not inserted SELECT * FROM reference_modifying_xacts; key | value ------+------- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -893,7 +893,7 @@ AND s.logicalrelid = 'reference_modifying_xacts'::regclass GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, 
sp.shardstate; logicalrelid | shardstate | count ----------------------------+------------+------- +--------------------------------------------------------------------- reference_modifying_xacts | 1 | 2 (1 row) @@ -912,7 +912,7 @@ SET citus.shard_replication_factor = 1; CREATE TABLE hash_modifying_xacts (key int, value int); SELECT create_distributed_table('hash_modifying_xacts', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -956,7 +956,7 @@ COMMIT; -- ensure that the value didn't go into the reference table SELECT * FROM reference_modifying_xacts WHERE key = 55; key | value ------+------- +--------------------------------------------------------------------- (0 rows) -- now lets fail on of the workers for the hash distributed table table @@ -979,7 +979,7 @@ ERROR: illegal value -- ensure that the values didn't go into the reference table SELECT * FROM reference_modifying_xacts WHERE key = 12; key | value ------+------- +--------------------------------------------------------------------- (0 rows) -- all placements should be healthy @@ -992,7 +992,7 @@ AND (s.logicalrelid = 'reference_modifying_xacts'::regclass OR GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; logicalrelid | shardstate | count ----------------------------+------------+------- +--------------------------------------------------------------------- reference_modifying_xacts | 1 | 2 hash_modifying_xacts | 1 | 4 (2 rows) @@ -1016,17 +1016,17 @@ ERROR: illegal value COMMIT; SELECT * FROM hash_modifying_xacts WHERE key = 80; key | value ------+------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM reference_modifying_xacts WHERE key = 66; key | value ------+------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM reference_modifying_xacts WHERE key = 999; key | value ------+------- +--------------------------------------------------------------------- (0 rows) -- all placements should be healthy @@ -1039,7 +1039,7 @@ AND (s.logicalrelid = 'reference_modifying_xacts'::regclass OR GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; logicalrelid | shardstate | count ----------------------------+------------+------- +--------------------------------------------------------------------- reference_modifying_xacts | 1 | 2 hash_modifying_xacts | 1 | 4 (2 rows) @@ -1048,21 +1048,21 @@ ORDER BY s.logicalrelid, sp.shardstate; -- tables are done in 2PC SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) INSERT INTO reference_modifying_xacts VALUES (70, 70); SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 2 (1 row) -- reset the transactions table SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -1071,7 +1071,7 @@ INSERT INTO reference_modifying_xacts VALUES (71, 71); COMMIT; SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -1081,14 +1081,14 @@ SET citus.shard_replication_factor = 2; CREATE TABLE hash_modifying_xacts_second (key int, value int); SELECT 
create_distributed_table('hash_modifying_xacts_second', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) -- reset the transactions table SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -1098,35 +1098,35 @@ INSERT INTO reference_modifying_xacts VALUES (72, 3); COMMIT; SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 2 (1 row) -- reset the transactions table SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) DELETE FROM reference_modifying_xacts; SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 2 (1 row) -- reset the transactions table SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) UPDATE reference_modifying_xacts SET key = 10; SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -1147,7 +1147,7 @@ SET citus.next_shard_id TO 1200015; CREATE TABLE reference_failure_test (key int, value int); SELECT create_reference_table('reference_failure_test'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -1156,7 +1156,7 @@ SET citus.shard_count TO 4; CREATE TABLE numbers_hash_failure_test(key int, value int); SELECT create_distributed_table('numbers_hash_failure_test', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1165,7 +1165,7 @@ SELECT create_distributed_table('numbers_hash_failure_test', 'key'); \dt reference_failure_test_1200015 List of relations Schema | Name | Type | Owner ---------+--------------------------------+-------+----------- +--------------------------------------------------------------------- public | reference_failure_test_1200015 | table | test_user (1 row) @@ -1191,7 +1191,7 @@ COMMIT; SET client_min_messages to 'ERROR'; SELECT * FROM reference_failure_test; key | value ------+------- +--------------------------------------------------------------------- (0 rows) RESET client_min_messages; @@ -1204,7 +1204,7 @@ AND s.logicalrelid = 'reference_failure_test'::regclass GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; logicalrelid | shardstate | count -------------------------+------------+------- +--------------------------------------------------------------------- reference_failure_test | 1 | 2 (1 row) @@ -1218,7 +1218,7 @@ FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- +--------------------------------------------------------------------- 1200016 | 3 | localhost | 57637 1200016 | 1 | localhost | 57638 1200017 | 1 | localhost | 57637 @@ -1235,7 +1235,7 @@ SELECT count(*) FROM numbers_hash_failure_test; WARNING: connection error: localhost:xxxxx WARNING: 
connection error: localhost:xxxxx count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -1245,7 +1245,7 @@ FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- +--------------------------------------------------------------------- 1200016 | 1 | localhost | 57637 1200016 | 1 | localhost | 57638 1200017 | 1 | localhost | 57637 @@ -1266,7 +1266,7 @@ FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- +--------------------------------------------------------------------- 1200016 | 3 | localhost | 57637 1200016 | 1 | localhost | 57638 1200017 | 1 | localhost | 57637 @@ -1284,7 +1284,7 @@ FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- +--------------------------------------------------------------------- 1200016 | 3 | localhost | 57637 1200016 | 1 | localhost | 57638 1200017 | 1 | localhost | 57637 @@ -1300,7 +1300,7 @@ SELECT count(*) FROM numbers_hash_failure_test; WARNING: connection error: localhost:xxxxx WARNING: connection error: localhost:xxxxx count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -1318,7 +1318,7 @@ SET citus.next_placement_id TO 1200033; -- unbreak both nodes by renaming the user back to the original name SELECT * FROM run_command_on_workers('ALTER USER test_user_new RENAME TO test_user'); nodename | nodeport | success | result ------------+----------+---------+------------ +--------------------------------------------------------------------- localhost | 57637 | t | ALTER ROLE localhost | 57638 | t | ALTER ROLE (2 rows) @@ -1327,7 +1327,7 @@ DROP TABLE reference_modifying_xacts, hash_modifying_xacts, hash_modifying_xacts reference_failure_test, numbers_hash_failure_test; SELECT * FROM run_command_on_workers('DROP USER test_user'); nodename | nodeport | success | result ------------+----------+---------+----------- +--------------------------------------------------------------------- localhost | 57637 | t | DROP ROLE localhost | 57638 | t | DROP ROLE (2 rows) @@ -1343,7 +1343,7 @@ CREATE TABLE usergroups ( ); SELECT create_reference_table('usergroups'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -1353,7 +1353,7 @@ CREATE TABLE itemgroups ( ); SELECT create_reference_table('itemgroups'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -1364,7 +1364,7 @@ CREATE TABLE users ( ); SELECT create_distributed_table('users', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1375,7 +1375,7 @@ CREATE TABLE items ( ); SELECT create_distributed_table('items', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1389,7 +1389,7 @@ USING (shardid) ORDER BY id; id | shard_name | 
nodename | nodeport -----+---------------+-----------+---------- +--------------------------------------------------------------------- 1 | users_1200022 | localhost | 57637 2 | users_1200025 | localhost | 57638 3 | users_1200023 | localhost | 57638 @@ -1411,7 +1411,7 @@ INSERT INTO items VALUES (6, 'item-6'); END; SELECT user_id FROM items ORDER BY user_id; user_id ---------- +--------------------------------------------------------------------- 1 6 (2 rows) @@ -1426,13 +1426,13 @@ ROLLBACK; BEGIN; SELECT id FROM users WHERE id = 1; id ----- +--------------------------------------------------------------------- 1 (1 row) SELECT id FROM users WHERE id = 6; id ----- +--------------------------------------------------------------------- 6 (1 row) @@ -1443,13 +1443,13 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT id FROM users WHERE id = 1; id ----- +--------------------------------------------------------------------- 1 (1 row) SELECT id FROM users WHERE id = 6; id ----- +--------------------------------------------------------------------- 6 (1 row) @@ -1460,13 +1460,13 @@ BEGIN; ALTER TABLE items ADD COLUMN last_update timestamptz; SELECT id FROM users JOIN items ON (id = user_id) WHERE id = 1; id ----- +--------------------------------------------------------------------- 1 (1 row) SELECT id FROM users JOIN items ON (id = user_id) WHERE id = 6; id ----- +--------------------------------------------------------------------- 6 (1 row) @@ -1477,12 +1477,12 @@ BEGIN; -- now read from the reference table over each connection SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 2; user_id ---------- +--------------------------------------------------------------------- (0 rows) SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 3; user_id ---------- +--------------------------------------------------------------------- (0 rows) -- perform a DDL command on the reference table errors @@ -1497,12 +1497,12 @@ BEGIN; -- read from the reference table over each connection SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 2; user_id ---------- +--------------------------------------------------------------------- (0 rows) SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 3; user_id ---------- +--------------------------------------------------------------------- (0 rows) -- perform a DDL command on a co-located reference table @@ -1523,12 +1523,12 @@ BEGIN; DELETE FROM users; SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 1; user_id ---------- +--------------------------------------------------------------------- (0 rows) SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 6; user_id ---------- +--------------------------------------------------------------------- (0 rows) END; @@ -1540,14 +1540,14 @@ BEGIN; -- Uses first connection, which wrote the row with id = 2 SELECT * FROM users JOIN usergroups ON (user_group = gid) WHERE id = 2; id | name | user_group | gid | name -----+-------+------------+-----+------- +--------------------------------------------------------------------- 2 | onder | 2 | 2 | group (1 row) -- Should use second connection, which wrote the row with id = 4 SELECT * FROM users JOIN usergroups ON (user_group = gid) WHERE id = 4; id | name | user_group | gid | name -----+-------+------------+-----+------- +--------------------------------------------------------------------- 4 | 
murat | 2 | 2 | group (1 row) @@ -1566,7 +1566,7 @@ SELECT insert_abort(); ERROR: do not insert SELECT name FROM labs WHERE id = 1001; name ------- +--------------------------------------------------------------------- (0 rows) -- if function_opens_transaction-block is disabled the insert commits immediately @@ -1575,7 +1575,7 @@ SELECT insert_abort(); ERROR: do not insert SELECT name FROM labs WHERE id = 1001; name ---------------- +--------------------------------------------------------------------- Rollback Labs (1 row) diff --git a/src/test/regress/expected/multi_multiuser.out b/src/test/regress/expected/multi_multiuser.out index 7d01cc32f..2fba5a6a2 100644 --- a/src/test/regress/expected/multi_multiuser.out +++ b/src/test/regress/expected/multi_multiuser.out @@ -8,14 +8,14 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE test (id integer, val integer); SELECT create_distributed_table('test', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE test_coloc (id integer, val integer); SELECT create_distributed_table('test_coloc', 'id', colocate_with := 'test'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -23,7 +23,7 @@ SET citus.shard_count TO 1; CREATE TABLE singleshard (id integer, val integer); SELECT create_distributed_table('singleshard', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -124,34 +124,34 @@ SET ROLE full_access; EXECUTE prepare_insert(1); EXECUTE prepare_select; count -------- +--------------------------------------------------------------------- 1 (1 row) INSERT INTO test VALUES (2); SELECT count(*) FROM test; count -------- +--------------------------------------------------------------------- 2 (1 row) SELECT count(*) FROM test WHERE id = 1; count -------- +--------------------------------------------------------------------- 1 (1 row) SET citus.task_executor_type TO 'task-tracker'; SELECT count(*), min(current_user) FROM test; count | min --------+------------- +--------------------------------------------------------------------- 2 | full_access (1 row) -- test re-partition query (needs to transmit intermediate results) SELECT count(*) FROM test a JOIN test b ON (a.val = b.val) WHERE a.id = 1 AND b.id = 2; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -167,7 +167,7 @@ HINT: Run the command with a superuser. 
-- create a task that other users should not be able to inspect SELECT task_tracker_assign_task(1, 1, 'SELECT 1'); task_tracker_assign_task --------------------------- +--------------------------------------------------------------------- (1 row) @@ -176,14 +176,14 @@ SET ROLE read_access; -- should be allowed to run commands, as the current user SELECT result FROM run_command_on_workers($$SELECT current_user$$); result -------------- +--------------------------------------------------------------------- read_access read_access (2 rows) SELECT result FROM run_command_on_placements('test', $$SELECT current_user$$); result -------------- +--------------------------------------------------------------------- read_access read_access read_access @@ -192,7 +192,7 @@ SELECT result FROM run_command_on_placements('test', $$SELECT current_user$$); SELECT result FROM run_command_on_colocated_placements('test', 'test_coloc', $$SELECT current_user$$); result -------------- +--------------------------------------------------------------------- read_access read_access read_access @@ -203,7 +203,7 @@ EXECUTE prepare_insert(1); ERROR: permission denied for table test EXECUTE prepare_select; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -211,27 +211,27 @@ INSERT INTO test VALUES (2); ERROR: permission denied for table test SELECT count(*) FROM test; count -------- +--------------------------------------------------------------------- 2 (1 row) SELECT count(*) FROM test WHERE id = 1; count -------- +--------------------------------------------------------------------- 1 (1 row) SET citus.task_executor_type TO 'task-tracker'; SELECT count(*), min(current_user) FROM test; count | min --------+------------- +--------------------------------------------------------------------- 2 | read_access (1 row) -- test re-partition query (needs to transmit intermediate results) SELECT count(*) FROM test a JOIN test b ON (a.val = b.val) WHERE a.id = 1 AND b.id = 2; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -251,7 +251,7 @@ ERROR: must be owner of schema pg_merge_job_0001 BEGIN; SELECT lock_relation_if_exists('test', 'ACCESS SHARE'); lock_relation_if_exists -------------------------- +--------------------------------------------------------------------- t (1 row) @@ -286,13 +286,13 @@ RESET citus.task_executor_type; BEGIN; SELECT create_intermediate_result('topten', 'SELECT s FROM generate_series(1,10) s'); create_intermediate_result ----------------------------- +--------------------------------------------------------------------- 10 (1 row) SELECT * FROM read_intermediate_result('topten', 'binary'::citus_copy_format) AS res (s int) ORDER BY s; s ----- +--------------------------------------------------------------------- 1 2 3 @@ -334,20 +334,20 @@ CREATE TABLE my_table (id integer, val integer); RESET ROLE; SELECT create_distributed_table('my_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT result FROM run_command_on_workers($$SELECT tableowner FROM pg_tables WHERE tablename LIKE 'my_table_%' LIMIT 1$$); result -------------- +--------------------------------------------------------------------- full_access full_access (2 rows) SELECT task_tracker_cleanup_job(1); task_tracker_cleanup_job --------------------------- +--------------------------------------------------------------------- (1 row) @@ -359,13 
+359,13 @@ RESET ROLE; SELECT create_distributed_table('my_table_with_data', 'id'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM my_table_with_data; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -386,7 +386,7 @@ SET ROLE read_access; SELECT create_distributed_table('my_role_table_with_data', 'id'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -396,7 +396,7 @@ SELECT result FROM run_command_on_workers($cmd$ SELECT tableowner FROM pg_tables WHERE tablename LIKE 'my_role_table_with_data%' LIMIT 1; $cmd$); result ------------ +--------------------------------------------------------------------- some_role some_role (2 rows) @@ -433,46 +433,46 @@ ERROR: must be owner of function usage_access_func SET ROLE usage_access; SELECT create_distributed_function('usage_access_func(usage_access_type,int[])'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT typowner::regrole FROM pg_type WHERE typname = 'usage_access_type'; typowner --------------- +--------------------------------------------------------------------- usage_access (1 row) SELECT proowner::regrole FROM pg_proc WHERE proname = 'usage_access_func'; proowner --------------- +--------------------------------------------------------------------- usage_access (1 row) SELECT run_command_on_workers($$SELECT typowner::regrole FROM pg_type WHERE typname = 'usage_access_type'$$); run_command_on_workers ----------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,usage_access) (localhost,57638,t,usage_access) (2 rows) SELECT run_command_on_workers($$SELECT proowner::regrole FROM pg_proc WHERE proname = 'usage_access_func'$$); run_command_on_workers ----------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,usage_access) (localhost,57638,t,usage_access) (2 rows) SELECT wait_until_metadata_sync(); wait_until_metadata_sync --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE colocation_table(id text); SELECT create_distributed_table('colocation_table','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -483,13 +483,13 @@ CREATE FUNCTION usage_access_func_second(key int, variadic v int[]) RETURNS text LANGUAGE plpgsql AS 'begin return current_user; end;'; SELECT create_distributed_function('usage_access_func_second(int,int[])', '$1', colocate_with := 'colocation_table'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT usage_access_func_second(1, 2,3,4,5) FROM full_access_user_schema.t1 LIMIT 1; usage_access_func_second --------------------------- +--------------------------------------------------------------------- usage_access (1 row) @@ -501,26 +501,26 @@ CREATE FUNCTION usage_access_func_third(key int, variadic v int[]) RETURNS text -- show that the current user is a super user SELECT usesuper FROM pg_user where usename IN (SELECT current_user); 
usesuper ----------- +--------------------------------------------------------------------- t (1 row) -- superuser creates the distributed function that is owned by a regular user SELECT create_distributed_function('usage_access_func_third(int,int[])', '$1', colocate_with := 'colocation_table'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT proowner::regrole FROM pg_proc WHERE proname = 'usage_access_func_third'; proowner --------------- +--------------------------------------------------------------------- usage_access (1 row) SELECT run_command_on_workers($$SELECT proowner::regrole FROM pg_proc WHERE proname = 'usage_access_func_third'$$); run_command_on_workers ----------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,usage_access) (localhost,57638,t,usage_access) (2 rows) @@ -529,13 +529,13 @@ SELECT run_command_on_workers($$SELECT proowner::regrole FROM pg_proc WHERE pron -- that might change the test outputs, so we're just trying to be careful SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) @@ -544,7 +544,7 @@ RESET ROLE; SELECT create_distributed_table('full_access_user_schema.t1', 'id'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -557,7 +557,7 @@ SELECT result FROM run_command_on_workers($cmd$ LIMIT 1; $cmd$); result --------------- +--------------------------------------------------------------------- usage_access usage_access (2 rows) @@ -567,7 +567,7 @@ SET ROLE full_access; CREATE TABLE full_access_user_schema.t2(id int); SELECT create_distributed_table('full_access_user_schema.t2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -580,13 +580,13 @@ CREATE TABLE full_access_user_schema.r1(id int); SET LOCAL citus.shard_count TO 1; SELECT create_distributed_table('full_access_user_schema.r1', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT upgrade_to_reference_table('full_access_user_schema.r1'); upgrade_to_reference_table ----------------------------- +--------------------------------------------------------------------- (1 row) @@ -600,7 +600,7 @@ CREATE TABLE full_access_user_schema.r2(id int); SET LOCAL citus.shard_count TO 1; SELECT create_distributed_table('full_access_user_schema.r2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -614,7 +614,7 @@ RESET ROLE; -- the super user should be able SELECT upgrade_to_reference_table('full_access_user_schema.r2'); upgrade_to_reference_table ----------------------------- +--------------------------------------------------------------------- (1 row) @@ -627,7 +627,7 @@ SELECT result FROM run_command_on_workers($cmd$ LIMIT 1; $cmd$); result -------------- 
+--------------------------------------------------------------------- full_access full_access (2 rows) @@ -635,7 +635,7 @@ $cmd$); -- super user should be the only one being able to call worker_cleanup_job_schema_cache SELECT worker_cleanup_job_schema_cache(); worker_cleanup_job_schema_cache ---------------------------------- +--------------------------------------------------------------------- (1 row) @@ -657,7 +657,7 @@ RESET ROLE; SET ROLE full_access; SELECT worker_hash_partition_table(42,1,'SELECT a FROM generate_series(1,100) AS a', 'a', 23, ARRAY[-2147483648, -1073741824, 0, 1073741824]::int4[]); worker_hash_partition_table ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -679,7 +679,7 @@ ERROR: could not receive file "base/pgsql_job_cache/job_0042/task_000001/p_0000 SET ROLE full_access; SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port); worker_fetch_partition_file ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -698,7 +698,7 @@ SET ROLE full_access; -- not what we want SELECT task_tracker_assign_task(42, 1, 'SELECT 1'); task_tracker_assign_task --------------------------- +--------------------------------------------------------------------- (1 row) @@ -713,13 +713,13 @@ RESET ROLE; SELECT worker_merge_files_into_table(42, 1, ARRAY['a'], ARRAY['integer']); WARNING: Task file "task_000001.43115" does not have expected suffix ".10" worker_merge_files_into_table -------------------------------- +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_merge_job_0042.task_000001; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -727,13 +727,13 @@ DROP TABLE pg_merge_job_0042.task_000001; -- drop table so we can reuse the same SET ROLE full_access; SELECT worker_merge_files_into_table(42, 1, ARRAY['a'], ARRAY['integer']); worker_merge_files_into_table -------------------------------- +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_merge_job_0042.task_000001; count -------- +--------------------------------------------------------------------- 25 (1 row) @@ -755,19 +755,19 @@ SELECT worker_merge_files_and_run_query(42, 1, ); WARNING: Task file "task_000001.43115" does not have expected suffix ".10" worker_merge_files_and_run_query ----------------------------------- +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_merge_job_0042.task_000001_merge; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_merge_job_0042.task_000001; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -779,7 +779,7 @@ SELECT worker_merge_files_and_run_query(42, 1, 'CREATE TABLE task_000001 (a) AS SELECT sum(merge_column_0) FROM task_000001_merge' ); worker_merge_files_and_run_query ----------------------------------- +--------------------------------------------------------------------- (1 row) @@ -798,13 +798,13 @@ ERROR: permission denied to drop role CONTEXT: SQL statement "DROP USER usage_access" SELECT count(*) FROM pg_merge_job_0042.task_000001_merge; count -------- +--------------------------------------------------------------------- 25 (1 row) SELECT count(*) FROM pg_merge_job_0042.task_000001; count -------- 
+--------------------------------------------------------------------- 1 (1 row) diff --git a/src/test/regress/expected/multi_mx_add_coordinator.out b/src/test/regress/expected/multi_mx_add_coordinator.out index 3b41a445b..371db737b 100644 --- a/src/test/regress/expected/multi_mx_add_coordinator.out +++ b/src/test/regress/expected/multi_mx_add_coordinator.out @@ -9,7 +9,7 @@ SET client_min_messages TO WARNING; CREATE USER reprefuser WITH LOGIN; SELECT run_command_on_workers('CREATE USER reprefuser WITH LOGIN'); run_command_on_workers ------------------------------------ +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -18,28 +18,28 @@ SET citus.enable_alter_role_propagation TO ON; ALTER ROLE reprefuser WITH CREATEDB; SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) -- test that coordinator pg_dist_node entry is synced to the workers SELECT wait_until_metadata_sync(); wait_until_metadata_sync --------------------------- +--------------------------------------------------------------------- (1 row) SELECT verify_metadata('localhost', :worker_1_port), verify_metadata('localhost', :worker_2_port); verify_metadata | verify_metadata ------------------+----------------- +--------------------------------------------------------------------- t | t (1 row) CREATE TABLE ref(a int); SELECT create_reference_table('ref'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -49,14 +49,14 @@ SET citus.enable_alter_role_propagation TO ON; ALTER ROLE reprefuser WITH CREATEROLE; select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser'; rolcreatedb | rolcreaterole --------------+--------------- +--------------------------------------------------------------------- t | t (1 row) \c - - - :worker_2_port select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser'; rolcreatedb | rolcreaterole --------------+--------------- +--------------------------------------------------------------------- t | f (1 row) @@ -65,7 +65,7 @@ SET search_path TO mx_add_coordinator,public; SET client_min_messages TO WARNING; select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser'; rolcreatedb | rolcreaterole --------------+--------------- +--------------------------------------------------------------------- t | f (1 row) @@ -79,7 +79,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable LOG: executing the command locally: SELECT count(*) AS count FROM mx_add_coordinator.ref_7000000 ref count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -89,7 +89,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable LOG: executing the command locally: SELECT count(*) AS count FROM mx_add_coordinator.ref_7000000 ref count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -100,7 +100,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -109,7 +109,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable count -------- 
+--------------------------------------------------------------------- 0 (1 row) @@ -118,7 +118,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -144,7 +144,7 @@ ERROR: relation local_table is not distributed SET search_path TO mx_add_coordinator,public; SELECT * FROM ref ORDER BY a; a ---- +--------------------------------------------------------------------- 2 3 (2 rows) @@ -153,33 +153,33 @@ SELECT * FROM ref ORDER BY a; -- of multi_mx_transaction_recovery consistent. SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM run_command_on_workers('SELECT recover_prepared_transactions()'); count -------- +--------------------------------------------------------------------- 2 (1 row) SELECT master_remove_node('localhost', :master_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) -- test that coordinator pg_dist_node entry was removed from the workers SELECT wait_until_metadata_sync(); wait_until_metadata_sync --------------------------- +--------------------------------------------------------------------- (1 row) SELECT verify_metadata('localhost', :worker_1_port), verify_metadata('localhost', :worker_2_port); verify_metadata | verify_metadata ------------------+----------------- +--------------------------------------------------------------------- t | t (1 row) diff --git a/src/test/regress/expected/multi_mx_call.out b/src/test/regress/expected/multi_mx_call.out index b7c7d6725..8745fd6da 100644 --- a/src/test/regress/expected/multi_mx_call.out +++ b/src/test/regress/expected/multi_mx_call.out @@ -8,7 +8,7 @@ set citus.replication_model to 'statement'; create table mx_call_dist_table_replica(id int, val int); select create_distributed_table('mx_call_dist_table_replica', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -21,7 +21,7 @@ set citus.replication_model to 'streaming'; create table mx_call_dist_table_1(id int, val int); select create_distributed_table('mx_call_dist_table_1', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -29,7 +29,7 @@ insert into mx_call_dist_table_1 values (3,1),(4,5),(9,2),(6,5),(3,5); create table mx_call_dist_table_2(id int, val int); select create_distributed_table('mx_call_dist_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -37,7 +37,7 @@ insert into mx_call_dist_table_2 values (1,1),(1,2),(2,2),(3,3),(3,4); create table mx_call_dist_table_bigint(id bigint, val bigint); select create_distributed_table('mx_call_dist_table_bigint', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -45,7 +45,7 @@ insert into mx_call_dist_table_bigint values (1,1),(1,2),(2,2),(3,3),(3,4); create table mx_call_dist_table_ref(id int, val int); select create_reference_table('mx_call_dist_table_ref'); create_reference_table ------------------------- 
+--------------------------------------------------------------------- (1 row) @@ -54,7 +54,7 @@ create type mx_call_enum as enum ('A', 'S', 'D', 'F'); create table mx_call_dist_table_enum(id int, key mx_call_enum); select create_distributed_table('mx_call_dist_table_enum', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -86,45 +86,45 @@ END;$$; -- Test that undistributed procedures have no issue executing call multi_mx_call.mx_call_proc(2, 0); y ----- +--------------------------------------------------------------------- 29 (1 row) call multi_mx_call.mx_call_proc_custom_types('S', 'A'); x | y ----+--- +--------------------------------------------------------------------- F | S (1 row) -- Same for unqualified names call mx_call_proc(2, 0); y ----- +--------------------------------------------------------------------- 29 (1 row) call mx_call_proc_custom_types('S', 'A'); x | y ----+--- +--------------------------------------------------------------------- F | S (1 row) -- Mark both procedures as distributed ... select create_distributed_function('mx_call_proc(int,int)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) select create_distributed_function('mx_call_proc_bigint(bigint,bigint)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) select create_distributed_function('mx_call_proc_custom_types(mx_call_enum,mx_call_enum)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -140,68 +140,68 @@ DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment y ----- +--------------------------------------------------------------------- 29 (1 row) call mx_call_proc_bigint(4, 2); DEBUG: stored procedure does not have co-located tables y ---- +--------------------------------------------------------------------- 8 (1 row) call multi_mx_call.mx_call_proc_custom_types('S', 'A'); DEBUG: stored procedure does not have co-located tables x | y ----+--- +--------------------------------------------------------------------- F | S (1 row) -- Mark them as colocated with a table. Now we should route them to workers. 
select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass, 1); colocate_proc_with_table --------------------------- +--------------------------------------------------------------------- (1 row) select colocate_proc_with_table('mx_call_proc_bigint', 'mx_call_dist_table_bigint'::regclass, 1); colocate_proc_with_table --------------------------- +--------------------------------------------------------------------- (1 row) select colocate_proc_with_table('mx_call_proc_custom_types', 'mx_call_dist_table_enum'::regclass, 1); colocate_proc_with_table --------------------------- +--------------------------------------------------------------------- (1 row) call multi_mx_call.mx_call_proc(2, 0); DEBUG: pushing down the procedure y ----- +--------------------------------------------------------------------- 28 (1 row) call multi_mx_call.mx_call_proc_custom_types('S', 'A'); DEBUG: pushing down the procedure x | y ----+--- +--------------------------------------------------------------------- S | S (1 row) call mx_call_proc(2, 0); DEBUG: pushing down the procedure y ----- +--------------------------------------------------------------------- 28 (1 row) call mx_call_proc_custom_types('S', 'A'); DEBUG: pushing down the procedure x | y ----+--- +--------------------------------------------------------------------- S | S (1 row) @@ -209,7 +209,7 @@ DEBUG: pushing down the procedure call mx_call_proc_bigint(4, 2); DEBUG: pushing down the procedure y ---- +--------------------------------------------------------------------- 8 (1 row) @@ -224,7 +224,7 @@ DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment y ----- +--------------------------------------------------------------------- 29 (1 row) @@ -237,7 +237,7 @@ SET client_min_messages TO DEBUG1; call multi_mx_call.mx_call_proc_custom_types('S', 'A'); DEBUG: stored procedure does not have co-located tables x | y ----+--- +--------------------------------------------------------------------- F | S (1 row) @@ -245,7 +245,7 @@ DEBUG: stored procedure does not have co-located tables -- This also tests that we have cache invalidation for pg_dist_object updates select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass, -1); colocate_proc_with_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -258,13 +258,13 @@ DEBUG: Plan 15 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment y ----- +--------------------------------------------------------------------- 29 (1 row) select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass, 2); colocate_proc_with_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -277,14 +277,14 @@ DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" 
PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment y ----- +--------------------------------------------------------------------- 29 (1 row) -- We don't currently support colocating with reference tables select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_ref'::regclass, 1); colocate_proc_with_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -297,14 +297,14 @@ DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment y ----- +--------------------------------------------------------------------- 29 (1 row) -- We don't currently support colocating with replicated tables select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_replica'::regclass, 1); colocate_proc_with_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -317,7 +317,7 @@ DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment y ----- +--------------------------------------------------------------------- 29 (1 row) @@ -326,7 +326,7 @@ drop table mx_call_dist_table_replica; SET client_min_messages TO DEBUG1; select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass, 1); colocate_proc_with_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -348,7 +348,7 @@ select create_distributed_function('mx_call_proc_tx(int)', '$1', 'mx_call_dist_t DEBUG: switching to sequential query execution mode DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -356,7 +356,7 @@ CALL multi_mx_call.mx_call_proc_tx(20); DEBUG: pushing down the procedure SELECT id, val FROM mx_call_dist_table_1 ORDER BY id, val; id | val -----+----- +--------------------------------------------------------------------- 3 | 1 3 | 5 4 | 5 @@ -378,7 +378,7 @@ select create_distributed_function('mx_call_proc_raise(int)', '$1', 'mx_call_dis DEBUG: switching to sequential query execution mode DETAIL: A distributed function is created. 
To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -391,13 +391,13 @@ ERROR: error -- Test that we don't propagate to non-metadata worker nodes select stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) select stop_metadata_sync_to_node('localhost', :worker_2_port); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) @@ -410,20 +410,20 @@ DEBUG: Plan 28 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment y ----- +--------------------------------------------------------------------- 29 (1 row) SET client_min_messages TO NOTICE; select start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) select start_metadata_sync_to_node('localhost', :worker_2_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -442,7 +442,7 @@ SELECT create_distributed_function('mx_call_add(int,int)'); DEBUG: switching to sequential query execution mode DETAIL: A distributed function is created. 
To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -456,7 +456,7 @@ DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_ CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment y ----- +--------------------------------------------------------------------- 29 (1 row) @@ -464,7 +464,7 @@ PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment call multi_mx_call.mx_call_proc(multi_mx_call.mx_call_add(3, 4), 2); DEBUG: pushing down the procedure y ----- +--------------------------------------------------------------------- 33 (1 row) @@ -478,7 +478,7 @@ DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT (1 OPERATOR(pg_ CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment y ----- +--------------------------------------------------------------------- 27 (1 row) diff --git a/src/test/regress/expected/multi_mx_create_table.out b/src/test/regress/expected/multi_mx_create_table.out index 9ad854dd0..2969d6779 100644 --- a/src/test/regress/expected/multi_mx_create_table.out +++ b/src/test/regress/expected/multi_mx_create_table.out @@ -4,13 +4,13 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -146,7 +146,7 @@ CREATE TABLE nation_hash( SET citus.shard_count TO 16; SELECT create_distributed_table('nation_hash', 'n_nationkey'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -160,7 +160,7 @@ CREATE TABLE citus_mx_test_schema.nation_hash( ); SELECT create_distributed_table('nation_hash', 'n_nationkey'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -172,7 +172,7 @@ CREATE TABLE citus_mx_test_schema_join_1.nation_hash ( SET citus.shard_count TO 4; SELECT create_distributed_table('citus_mx_test_schema_join_1.nation_hash', 'n_nationkey'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -183,7 +183,7 @@ CREATE TABLE citus_mx_test_schema_join_1.nation_hash_2 ( n_comment varchar(152)); SELECT create_distributed_table('citus_mx_test_schema_join_1.nation_hash_2', 'n_nationkey'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -195,7 +195,7 @@ CREATE TABLE nation_hash ( n_comment varchar(152)); SELECT create_distributed_table('nation_hash', 'n_nationkey'); create_distributed_table --------------------------- 
+--------------------------------------------------------------------- (1 row) @@ -208,7 +208,7 @@ CREATE TABLE nation_hash_collation_search_path( ); SELECT create_distributed_table('nation_hash_collation_search_path', 'n_nationkey'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -222,7 +222,7 @@ CREATE TABLE citus_mx_test_schema.nation_hash_composite_types( ); SELECT create_distributed_table('citus_mx_test_schema.nation_hash_composite_types', 'n_nationkey'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -253,7 +253,7 @@ CREATE TABLE lineitem_mx ( SET citus.shard_count TO 16; SELECT create_distributed_table('lineitem_mx', 'l_orderkey'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -271,7 +271,7 @@ CREATE TABLE orders_mx ( PRIMARY KEY(o_orderkey) ); SELECT create_distributed_table('orders_mx', 'o_orderkey'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -286,7 +286,7 @@ CREATE TABLE customer_mx ( c_comment varchar(117) not null); SELECT create_reference_table('customer_mx'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -297,7 +297,7 @@ CREATE TABLE nation_mx ( n_comment varchar(152)); SELECT create_reference_table('nation_mx'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -313,7 +313,7 @@ CREATE TABLE part_mx ( p_comment varchar(23) not null); SELECT create_reference_table('part_mx'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -329,7 +329,7 @@ CREATE TABLE supplier_mx ); SELECT create_reference_table('supplier_mx'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -341,7 +341,7 @@ CREATE TABLE mx_ddl_table ( SET citus.shard_count TO 4; SELECT create_distributed_table('mx_ddl_table', 'key', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -359,7 +359,7 @@ CREATE TABLE limit_orders_mx ( SET citus.shard_count TO 2; SELECT create_distributed_table('limit_orders_mx', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -370,7 +370,7 @@ CREATE TABLE multiple_hash_mx ( ); SELECT create_distributed_table('multiple_hash_mx', 'category'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -378,7 +378,7 @@ SET citus.shard_count TO 4; CREATE TABLE app_analytics_events_mx (id bigserial, app_id integer, name text); SELECT create_distributed_table('app_analytics_events_mx', 'app_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -390,7 +390,7 @@ CREATE TABLE researchers_mx ( SET citus.shard_count TO 2; SELECT create_distributed_table('researchers_mx', 'lab_id'); create_distributed_table --------------------------- 
+--------------------------------------------------------------------- (1 row) @@ -401,7 +401,7 @@ CREATE TABLE labs_mx ( SET citus.shard_count TO 1; SELECT create_distributed_table('labs_mx', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -412,7 +412,7 @@ CREATE TABLE objects_mx ( ); SELECT create_distributed_table('objects_mx', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -427,14 +427,14 @@ CREATE TABLE articles_single_shard_hash_mx (LIKE articles_hash_mx); SET citus.shard_count TO 2; SELECT create_distributed_table('articles_hash_mx', 'author_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SET citus.shard_count TO 1; SELECT create_distributed_table('articles_single_shard_hash_mx', 'author_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -442,7 +442,7 @@ SET citus.shard_count TO 4; CREATE TABLE company_employees_mx (company_id int, employee_id int, manager_id int); SELECT create_distributed_table('company_employees_mx', 'company_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -453,7 +453,7 @@ SELECT logicalrelid, colocationid, shard_count, partmethod, repmodel FROM pg_dist_partition NATURAL JOIN shard_counts ORDER BY colocationid, logicalrelid; logicalrelid | colocationid | shard_count | partmethod | repmodel ---------------------------------------------------------+--------------+-------------+------------+---------- +--------------------------------------------------------------------- citus_mx_test_schema_join_1.nation_hash | 1390002 | 4 | h | s citus_mx_test_schema_join_1.nation_hash_2 | 1390002 | 4 | h | s citus_mx_test_schema_join_2.nation_hash | 1390002 | 4 | h | s diff --git a/src/test/regress/expected/multi_mx_ddl.out b/src/test/regress/expected/multi_mx_ddl.out index da543acb6..d55fd1359 100644 --- a/src/test/regress/expected/multi_mx_ddl.out +++ b/src/test/regress/expected/multi_mx_ddl.out @@ -1,7 +1,7 @@ -- Tests related to distributed DDL commands on mx cluster SELECT * FROM mx_ddl_table ORDER BY key; key | value ------+------- +--------------------------------------------------------------------- 1 | 10 2 | 11 3 | 21 @@ -25,7 +25,7 @@ ALTER TABLE mx_ddl_table ALTER COLUMN version SET NOT NULL; -- See that the changes are applied on coordinator, worker tables and shards SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; Column | Type | Modifiers ----------+---------+-------------------- +--------------------------------------------------------------------- key | integer | not null value | integer | version | integer | not null default 1 @@ -34,7 +34,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table': SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE relname LIKE 'ddl_test%_index'; relname | Column | Type | Definition ----------------------------+--------+---------+------------ +--------------------------------------------------------------------- ddl_test_index | value | integer | value ddl_test_concurrent_index | value | integer | value (2 rows) @@ -44,7 +44,7 @@ SELECT "relname", "Column", "Type", "Definition" 
FROM index_attrs WHERE SET citus.override_table_visibility TO FALSE; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; Column | Type | Modifiers ----------+---------+-------------------- +--------------------------------------------------------------------- key | integer | not null value | integer | version | integer | not null default 1 @@ -53,14 +53,14 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table': SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE relname LIKE 'ddl_test%_index'; relname | Column | Type | Definition ----------------------------+--------+---------+------------ +--------------------------------------------------------------------- ddl_test_index | value | integer | value ddl_test_concurrent_index | value | integer | value (2 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220088'::regclass; Column | Type | Modifiers ----------+---------+-------------------- +--------------------------------------------------------------------- key | integer | not null value | integer | version | integer | not null default 1 @@ -69,7 +69,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1 SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE relname LIKE 'ddl_test%_index_1220088'; relname | Column | Type | Definition ------------------------------------+--------+---------+------------ +--------------------------------------------------------------------- ddl_test_index_1220088 | value | integer | value ddl_test_concurrent_index_1220088 | value | integer | value (2 rows) @@ -79,7 +79,7 @@ SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE SET citus.override_table_visibility TO FALSE; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; Column | Type | Modifiers ----------+---------+-------------------- +--------------------------------------------------------------------- key | integer | not null value | integer | version | integer | not null default 1 @@ -88,14 +88,14 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table': SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE relname LIKE 'ddl_test%_index'; relname | Column | Type | Definition ----------------------------+--------+---------+------------ +--------------------------------------------------------------------- ddl_test_index | value | integer | value ddl_test_concurrent_index | value | integer | value (2 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220089'::regclass; Column | Type | Modifiers ----------+---------+-------------------- +--------------------------------------------------------------------- key | integer | not null value | integer | version | integer | not null default 1 @@ -104,7 +104,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1 SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE relname LIKE 'ddl_test%_index_1220089'; relname | Column | Type | Definition ------------------------------------+--------+---------+------------ +--------------------------------------------------------------------- ddl_test_index_1220089 | value | integer | value ddl_test_concurrent_index_1220089 | value | integer | value (2 rows) @@ -119,7 +119,7 @@ INSERT INTO mx_ddl_table VALUES (78, 83, 2.1); \c - - - :worker_1_port SELECT * FROM mx_ddl_table 
ORDER BY key; key | value | version ------+-------+--------- +--------------------------------------------------------------------- 1 | 10 | 0 2 | 11 | 0 3 | 21 | 0 @@ -147,7 +147,7 @@ ALTER TABLE mx_ddl_table DROP COLUMN version; -- See that the changes are applied on coordinator, worker tables and shards SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; Column | Type | Modifiers ---------+---------+----------- +--------------------------------------------------------------------- key | integer | not null value | integer | (2 rows) @@ -155,13 +155,13 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table': \di ddl_test*_index List of relations Schema | Name | Type | Owner | Table ---------+------+------+-------+------- +--------------------------------------------------------------------- (0 rows) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; Column | Type | Modifiers ---------+---------+----------- +--------------------------------------------------------------------- key | integer | not null value | integer | (2 rows) @@ -169,12 +169,12 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table': \di ddl_test*_index List of relations Schema | Name | Type | Owner | Table ---------+------+------+-------+------- +--------------------------------------------------------------------- (0 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220088'::regclass; Column | Type | Modifiers ---------+---------+----------- +--------------------------------------------------------------------- key | integer | not null value | integer | (2 rows) @@ -182,13 +182,13 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1 \di ddl_test*_index_1220088 List of relations Schema | Name | Type | Owner | Table ---------+------+------+-------+------- +--------------------------------------------------------------------- (0 rows) \c - - - :worker_2_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; Column | Type | Modifiers ---------+---------+----------- +--------------------------------------------------------------------- key | integer | not null value | integer | (2 rows) @@ -196,12 +196,12 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table': \di ddl_test*_index List of relations Schema | Name | Type | Owner | Table ---------+------+------+-------+------- +--------------------------------------------------------------------- (0 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220089'::regclass; Column | Type | Modifiers ---------+---------+----------- +--------------------------------------------------------------------- key | integer | not null value | integer | (2 rows) @@ -209,7 +209,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1 \di ddl_test*_index_1220089 List of relations Schema | Name | Type | Owner | Table ---------+------+------+-------+------- +--------------------------------------------------------------------- (0 rows) -- Show that DDL commands are done within a two-phase commit transaction @@ -223,7 +223,7 @@ SET citus.replication_model TO streaming; CREATE TABLE mx_sequence(key INT, value BIGSERIAL); SELECT create_distributed_table('mx_sequence', 'key'); create_distributed_table --------------------------- 
+--------------------------------------------------------------------- (1 row) @@ -236,7 +236,7 @@ SELECT last_value AS worker_2_lastval FROM mx_sequence_value_seq \gset -- which can change depending on the tests which have run before this one SELECT :worker_1_lastval = :worker_2_lastval; ?column? ----------- +--------------------------------------------------------------------- f (1 row) diff --git a/src/test/regress/expected/multi_mx_function_call_delegation.out b/src/test/regress/expected/multi_mx_function_call_delegation.out index bd20fe4d9..d8b44ff3e 100644 --- a/src/test/regress/expected/multi_mx_function_call_delegation.out +++ b/src/test/regress/expected/multi_mx_function_call_delegation.out @@ -7,7 +7,7 @@ SET citus.replication_model TO 'statement'; create table mx_call_dist_table_replica(id int, val int); select create_distributed_table('mx_call_dist_table_replica', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -20,7 +20,7 @@ SET citus.replication_model TO 'streaming'; create table mx_call_dist_table_1(id int, val int); select create_distributed_table('mx_call_dist_table_1', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -28,7 +28,7 @@ insert into mx_call_dist_table_1 values (3,1),(4,5),(9,2),(6,5),(3,5); create table mx_call_dist_table_2(id int, val int); select create_distributed_table('mx_call_dist_table_2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -36,7 +36,7 @@ insert into mx_call_dist_table_2 values (1,1),(1,2),(2,2),(3,3),(3,4); create table mx_call_dist_table_bigint(id bigint, val bigint); select create_distributed_table('mx_call_dist_table_bigint', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -44,7 +44,7 @@ insert into mx_call_dist_table_bigint values (1,1),(1,2),(2,2),(3,3),(3,4); create table mx_call_dist_table_ref(id int, val int); select create_reference_table('mx_call_dist_table_ref'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -53,7 +53,7 @@ create type mx_call_enum as enum ('A', 'S', 'D', 'F'); create table mx_call_dist_table_enum(id int, key mx_call_enum); select create_distributed_table('mx_call_dist_table_enum', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -88,19 +88,19 @@ END;$$; -- Test that undistributed functions have no issue executing select multi_mx_function_call_delegation.mx_call_func(2, 0); mx_call_func --------------- +--------------------------------------------------------------------- 29 (1 row) select multi_mx_function_call_delegation.mx_call_func_custom_types('S', 'A'); mx_call_func_custom_types ---------------------------- +--------------------------------------------------------------------- (F,S) (1 row) select squares(4); squares ---------- +--------------------------------------------------------------------- (1,1) (2,4) (3,9) @@ -110,32 +110,32 @@ select squares(4); -- Same for unqualified name select mx_call_func(2, 0); mx_call_func --------------- +--------------------------------------------------------------------- 29 (1 row) -- Mark both functions as distributed ... 
select create_distributed_function('mx_call_func(int,int)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) select create_distributed_function('mx_call_func_bigint(bigint,bigint)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) select create_distributed_function('mx_call_func_custom_types(mx_call_enum,mx_call_enum)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) select create_distributed_function('squares(int)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -151,67 +151,67 @@ DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment mx_call_func --------------- +--------------------------------------------------------------------- 29 (1 row) select multi_mx_function_call_delegation.mx_call_func_bigint(4, 2); DEBUG: function does not have co-located tables mx_call_func_bigint ---------------------- +--------------------------------------------------------------------- 8 (1 row) select mx_call_func_custom_types('S', 'A'); DEBUG: function does not have co-located tables mx_call_func_custom_types ---------------------------- +--------------------------------------------------------------------- (F,S) (1 row) -- Mark them as colocated with a table. Now we should route them to workers. 
select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass, 1); colocate_proc_with_table --------------------------- +--------------------------------------------------------------------- (1 row) select colocate_proc_with_table('mx_call_func_bigint', 'mx_call_dist_table_bigint'::regclass, 1); colocate_proc_with_table --------------------------- +--------------------------------------------------------------------- (1 row) select colocate_proc_with_table('mx_call_func_custom_types', 'mx_call_dist_table_enum'::regclass, 1); colocate_proc_with_table --------------------------- +--------------------------------------------------------------------- (1 row) select colocate_proc_with_table('squares', 'mx_call_dist_table_2'::regclass, 0); colocate_proc_with_table --------------------------- +--------------------------------------------------------------------- (1 row) select mx_call_func(2, 0); DEBUG: pushing down the function call mx_call_func --------------- +--------------------------------------------------------------------- 28 (1 row) select mx_call_func_bigint(4, 2); DEBUG: pushing down the function call mx_call_func_bigint ---------------------- +--------------------------------------------------------------------- 8 (1 row) select mx_call_func_custom_types('S', 'A'); DEBUG: pushing down the function call mx_call_func_custom_types ---------------------------- +--------------------------------------------------------------------- (S,S) (1 row) @@ -221,14 +221,14 @@ ERROR: input of anonymous composite types is not implemented select multi_mx_function_call_delegation.mx_call_func(2, 0); DEBUG: pushing down the function call mx_call_func --------------- +--------------------------------------------------------------------- 28 (1 row) select multi_mx_function_call_delegation.mx_call_func_custom_types('S', 'A'); DEBUG: pushing down the function call mx_call_func_custom_types ---------------------------- +--------------------------------------------------------------------- (S,S) (1 row) @@ -243,7 +243,7 @@ DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment mx_call_func --------------- +--------------------------------------------------------------------- 29 (1 row) @@ -256,7 +256,7 @@ SET client_min_messages TO DEBUG1; select mx_call_func_custom_types('S', 'A'); DEBUG: function does not have co-located tables mx_call_func_custom_types ---------------------------- +--------------------------------------------------------------------- (F,S) (1 row) @@ -264,7 +264,7 @@ DEBUG: function does not have co-located tables -- This also tests that we have cache invalidation for pg_dist_object updates select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass, -1); colocate_proc_with_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -277,13 +277,13 @@ DEBUG: Plan 15 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at 
assignment mx_call_func --------------- +--------------------------------------------------------------------- 29 (1 row) select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass, 2); colocate_proc_with_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -296,14 +296,14 @@ DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment mx_call_func --------------- +--------------------------------------------------------------------- 29 (1 row) -- We don't currently support colocating with reference tables select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_ref'::regclass, 1); colocate_proc_with_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -316,14 +316,14 @@ DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment mx_call_func --------------- +--------------------------------------------------------------------- 29 (1 row) -- We don't currently support colocating with replicated tables select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_replica'::regclass, 1); colocate_proc_with_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -336,7 +336,7 @@ DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment mx_call_func --------------- +--------------------------------------------------------------------- 29 (1 row) @@ -345,7 +345,7 @@ drop table mx_call_dist_table_replica; SET client_min_messages TO DEBUG1; select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass, 1); colocate_proc_with_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -366,7 +366,7 @@ END;$$; -- before distribution ... select mx_call_func_tbl(10); mx_call_func_tbl ------------------- +--------------------------------------------------------------------- (10,-1) (11,4) (2 rows) @@ -376,14 +376,14 @@ select create_distributed_function('mx_call_func_tbl(int)', '$1', 'mx_call_dist_ DEBUG: switching to sequential query execution mode DETAIL: A distributed function is created. 
To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) select mx_call_func_tbl(20); DEBUG: pushing down the function call mx_call_func_tbl ------------------- +--------------------------------------------------------------------- (20,-1) (21,4) (2 rows) @@ -399,7 +399,7 @@ select create_distributed_function('mx_call_func_raise(int)', '$1', 'mx_call_dis DEBUG: switching to sequential query execution mode DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -415,7 +415,7 @@ SET client_min_messages TO ERROR; CREATE TABLE test (x int primary key); SELECT create_distributed_table('test','x'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -432,7 +432,7 @@ END; $function$; SELECT create_distributed_function('delegated_function(int)', 'a'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -449,7 +449,7 @@ DEBUG: not pushing down function calls in CTEs or Subqueries DEBUG: generating subplan 31_1 for subquery SELECT multi_mx_function_call_delegation.delegated_function(4) AS delegated_function DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT x FROM multi_mx_function_call_delegation.test WHERE (NOT (EXISTS (SELECT intermediate_result.delegated_function FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(delegated_function integer)))) x ---- +--------------------------------------------------------------------- (0 rows) WITH r AS ( @@ -460,7 +460,7 @@ DEBUG: not pushing down function calls in CTEs or Subqueries DEBUG: generating subplan 34_2 for subquery SELECT (count(*) OPERATOR(pg_catalog.=) 0) FROM (SELECT intermediate_result.delegated_function FROM read_intermediate_result('34_1'::text, 'binary'::citus_copy_format) intermediate_result(delegated_function integer)) r DEBUG: Plan 34 query after replacing subqueries and CTEs: SELECT x FROM multi_mx_function_call_delegation.test WHERE (SELECT intermediate_result."?column?" FROM read_intermediate_result('34_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" 
boolean)) x ---- +--------------------------------------------------------------------- (0 rows) WITH r AS ( @@ -473,7 +473,7 @@ DEBUG: not pushing down function calls in CTEs or Subqueries DEBUG: generating subplan 38_2 for CTE t: SELECT count(*) AS c FROM (SELECT intermediate_result.delegated_function FROM read_intermediate_result('38_1'::text, 'binary'::citus_copy_format) intermediate_result(delegated_function integer)) r DEBUG: Plan 38 query after replacing subqueries and CTEs: SELECT test.x, t.c FROM multi_mx_function_call_delegation.test, (SELECT intermediate_result.c FROM read_intermediate_result('38_2'::text, 'binary'::citus_copy_format) intermediate_result(c bigint)) t WHERE (t.c OPERATOR(pg_catalog.=) 0) x | c ----+--- +--------------------------------------------------------------------- (0 rows) WITH r AS ( @@ -489,19 +489,19 @@ DEBUG: not pushing down function calls in CTEs or Subqueries DEBUG: generating subplan 42_3 for CTE t: SELECT count(*) AS c FROM (SELECT intermediate_result.delegated_function FROM read_intermediate_result('42_2'::text, 'binary'::citus_copy_format) intermediate_result(delegated_function integer)) s DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT test.x, r.count, t.c FROM multi_mx_function_call_delegation.test, (SELECT intermediate_result.count FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) r, (SELECT intermediate_result.c FROM read_intermediate_result('42_3'::text, 'binary'::citus_copy_format) intermediate_result(c bigint)) t WHERE (t.c OPERATOR(pg_catalog.=) 0) x | count | c ----+-------+--- +--------------------------------------------------------------------- (0 rows) -- Test that we don't propagate to non-metadata worker nodes select stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) select stop_metadata_sync_to_node('localhost', :worker_2_port); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) @@ -514,20 +514,20 @@ DEBUG: Plan 47 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment mx_call_func --------------- +--------------------------------------------------------------------- 29 (1 row) SET client_min_messages TO NOTICE; select start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) select start_metadata_sync_to_node('localhost', :worker_2_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -547,7 +547,7 @@ SELECT create_distributed_function('mx_call_add(int,int)', '$1'); DEBUG: switching to sequential query execution mode DETAIL: A distributed function is created. 
To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -561,7 +561,7 @@ DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT (9 OPERATOR(pg_ CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment mx_call_func --------------- +--------------------------------------------------------------------- 35 (1 row) @@ -575,7 +575,7 @@ DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT (1 OPERATOR(pg_ CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment mx_call_func --------------- +--------------------------------------------------------------------- 27 (1 row) @@ -588,13 +588,13 @@ DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_ CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment y ----- +--------------------------------------------------------------------- 29 (1 row) select mx_call_func(2, 0) from mx_call_dist_table_1; mx_call_func --------------- +--------------------------------------------------------------------- 28 28 28 @@ -614,7 +614,7 @@ DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT (1 OPERATOR(pg_ CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment mx_call_func --------------- +--------------------------------------------------------------------- (0 rows) select mx_call_func(2, 0), mx_call_func(0, 2); @@ -631,7 +631,7 @@ DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT (1 OPERATOR(pg CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment mx_call_func | mx_call_func ---------------+-------------- +--------------------------------------------------------------------- 29 | 27 (1 row) @@ -641,7 +641,7 @@ CONTEXT: SQL statement "SELECT mx_call_func_tbl(40)" PL/pgSQL function inline_code_block line 1 at PERFORM SELECT * FROM mx_call_dist_table_1 WHERE id >= 40 ORDER BY id, val; id | val -----+----- +--------------------------------------------------------------------- 40 | -1 41 | 4 (2 rows) @@ -651,42 +651,42 @@ PREPARE call_plan (int, int) AS SELECT mx_call_func($1, $2); EXECUTE call_plan(2, 0); DEBUG: pushing down the function call mx_call_func --------------- +--------------------------------------------------------------------- 28 (1 row) EXECUTE call_plan(2, 0); DEBUG: pushing down the 
function call mx_call_func --------------- +--------------------------------------------------------------------- 28 (1 row) EXECUTE call_plan(2, 0); DEBUG: pushing down the function call mx_call_func --------------- +--------------------------------------------------------------------- 28 (1 row) EXECUTE call_plan(2, 0); DEBUG: pushing down the function call mx_call_func --------------- +--------------------------------------------------------------------- 28 (1 row) EXECUTE call_plan(2, 0); DEBUG: pushing down the function call mx_call_func --------------- +--------------------------------------------------------------------- 28 (1 row) EXECUTE call_plan(2, 0); DEBUG: pushing down the function call mx_call_func --------------- +--------------------------------------------------------------------- 28 (1 row) diff --git a/src/test/regress/expected/multi_mx_hide_shard_names.out b/src/test/regress/expected/multi_mx_hide_shard_names.out index 409d97fff..6cc614d54 100644 --- a/src/test/regress/expected/multi_mx_hide_shard_names.out +++ b/src/test/regress/expected/multi_mx_hide_shard_names.out @@ -15,7 +15,7 @@ WHERE proname LIKE '%table_is_visible%' ORDER BY 1; proname | proisstrict | proretset | provolatile | proparallel | pronargs | pronargdefaults | prorettype | proargtypes | proacl -------------------------+-------------+-----------+-------------+-------------+----------+-----------------+------------+-------------+-------- +--------------------------------------------------------------------- citus_table_is_visible | t | f | s | s | 1 | 0 | 16 | 26 | pg_table_is_visible | t | f | s | s | 1 | 0 | 16 | 26 | (2 rows) @@ -27,20 +27,20 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) CREATE TABLE test_table(id int, time date); SELECT create_distributed_table('test_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -48,12 +48,12 @@ SELECT create_distributed_table('test_table', 'id'); -- any shards on the coordinator as expected SELECT * FROM citus_shards_on_worker; Schema | Name | Type | Owner ---------+------+------+------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM citus_shard_indexes_on_worker; Schema | Name | Type | Owner | Table ---------+------+------+-------+------- +--------------------------------------------------------------------- (0 rows) -- now show that we see the shards, but not the @@ -62,14 +62,14 @@ SELECT * FROM citus_shard_indexes_on_worker; SET search_path TO 'mx_hide_shard_names'; SELECT * FROM citus_shards_on_worker ORDER BY 2; Schema | Name | Type | Owner ----------------------+--------------------+-------+---------- +--------------------------------------------------------------------- mx_hide_shard_names | test_table_1130000 | table | postgres mx_hide_shard_names | test_table_1130002 | table | postgres (2 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; Schema | Name | Type | Owner | Table ---------+------+------+-------+------- 
+--------------------------------------------------------------------- (0 rows) -- also show that nested calls to pg_table_is_visible works fine @@ -85,7 +85,7 @@ SELECT LIMIT 1)); pg_table_is_visible ---------------------- +--------------------------------------------------------------------- f (1 row) @@ -99,14 +99,14 @@ CREATE INDEX test_index ON mx_hide_shard_names.test_table(id); SET search_path TO 'mx_hide_shard_names'; SELECT * FROM citus_shards_on_worker ORDER BY 2; Schema | Name | Type | Owner ----------------------+--------------------+-------+---------- +--------------------------------------------------------------------- mx_hide_shard_names | test_table_1130000 | table | postgres mx_hide_shard_names | test_table_1130002 | table | postgres (2 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; Schema | Name | Type | Owner | Table ----------------------+--------------------+-------+----------+-------------------- +--------------------------------------------------------------------- mx_hide_shard_names | test_index_1130000 | index | postgres | test_table_1130000 mx_hide_shard_names | test_index_1130002 | index | postgres | test_table_1130002 (2 rows) @@ -115,21 +115,21 @@ SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; -- know the name of the tables SELECT count(*) FROM test_table_1130000; count -------- +--------------------------------------------------------------------- 0 (1 row) -- disable the config so that table becomes visible SELECT pg_table_is_visible('test_table_1130000'::regclass); pg_table_is_visible ---------------------- +--------------------------------------------------------------------- f (1 row) SET citus.override_table_visibility TO FALSE; SELECT pg_table_is_visible('test_table_1130000'::regclass); pg_table_is_visible ---------------------- +--------------------------------------------------------------------- t (1 row) @@ -144,7 +144,7 @@ SET citus.replication_model TO 'streaming'; CREATE TABLE test_table_102008(id int, time date); SELECT create_distributed_table('test_table_102008', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -157,7 +157,7 @@ SET search_path TO 'mx_hide_shard_names'; CREATE TABLE test_table_2_1130000(id int, time date); SELECT * FROM citus_shards_on_worker ORDER BY 2; Schema | Name | Type | Owner ----------------------+---------------------------+-------+---------- +--------------------------------------------------------------------- mx_hide_shard_names | test_table_102008_1130004 | table | postgres mx_hide_shard_names | test_table_102008_1130006 | table | postgres mx_hide_shard_names | test_table_1130000 | table | postgres @@ -167,7 +167,7 @@ SELECT * FROM citus_shards_on_worker ORDER BY 2; \d List of relations Schema | Name | Type | Owner ----------------------+----------------------+-------+---------- +--------------------------------------------------------------------- mx_hide_shard_names | test_table | table | postgres mx_hide_shard_names | test_table_102008 | table | postgres mx_hide_shard_names | test_table_2_1130000 | table | postgres @@ -183,7 +183,7 @@ SET citus.replication_model TO 'streaming'; CREATE TABLE test_table(id int, time date); SELECT create_distributed_table('test_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -192,7 +192,7 @@ CREATE INDEX test_index ON mx_hide_shard_names_2.test_table(id); SET 
search_path TO 'mx_hide_shard_names'; SELECT * FROM citus_shards_on_worker ORDER BY 2; Schema | Name | Type | Owner ----------------------+---------------------------+-------+---------- +--------------------------------------------------------------------- mx_hide_shard_names | test_table_102008_1130004 | table | postgres mx_hide_shard_names | test_table_102008_1130006 | table | postgres mx_hide_shard_names | test_table_1130000 | table | postgres @@ -201,7 +201,7 @@ SELECT * FROM citus_shards_on_worker ORDER BY 2; SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; Schema | Name | Type | Owner | Table ----------------------+--------------------+-------+----------+-------------------- +--------------------------------------------------------------------- mx_hide_shard_names | test_index_1130000 | index | postgres | test_table_1130000 mx_hide_shard_names | test_index_1130002 | index | postgres | test_table_1130002 (2 rows) @@ -209,14 +209,14 @@ SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; SET search_path TO 'mx_hide_shard_names_2'; SELECT * FROM citus_shards_on_worker ORDER BY 2; Schema | Name | Type | Owner ------------------------+--------------------+-------+---------- +--------------------------------------------------------------------- mx_hide_shard_names_2 | test_table_1130008 | table | postgres mx_hide_shard_names_2 | test_table_1130010 | table | postgres (2 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; Schema | Name | Type | Owner | Table ------------------------+--------------------+-------+----------+-------------------- +--------------------------------------------------------------------- mx_hide_shard_names_2 | test_index_1130008 | index | postgres | test_table_1130008 mx_hide_shard_names_2 | test_index_1130010 | index | postgres | test_table_1130010 (2 rows) @@ -224,12 +224,12 @@ SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; SET search_path TO 'mx_hide_shard_names_2, mx_hide_shard_names'; SELECT * FROM citus_shards_on_worker ORDER BY 2; Schema | Name | Type | Owner ---------+------+------+------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; Schema | Name | Type | Owner | Table ---------+------+------+-------+------- +--------------------------------------------------------------------- (0 rows) -- now try very long table names @@ -245,7 +245,7 @@ CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 ( col2 integer not null); SELECT create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -253,7 +253,7 @@ SELECT create_distributed_table('too_long_12345678901234567890123456789012345678 SET search_path TO 'mx_hide_shard_names_3'; SELECT * FROM citus_shards_on_worker ORDER BY 2; Schema | Name | Type | Owner ------------------------+-----------------------------------------------------------------+-------+---------- +--------------------------------------------------------------------- mx_hide_shard_names_3 | too_long_1234567890123456789012345678901234567_e0119164_1130012 | table | postgres mx_hide_shard_names_3 | too_long_1234567890123456789012345678901234567_e0119164_1130014 | table | postgres (2 rows) @@ -261,7 +261,7 @@ SELECT * FROM citus_shards_on_worker ORDER BY 2; \d List of relations Schema | Name | Type | Owner 
------------------------+-------------------------------------------------------------+-------+---------- +--------------------------------------------------------------------- mx_hide_shard_names_3 | too_long_12345678901234567890123456789012345678901234567890 | table | postgres (1 row) @@ -277,7 +277,7 @@ CREATE INDEX "MyTenantIndex" ON "CiTuS.TeeN"."TeeNTabLE.1!?!"("TeNANt_Id"); -- create distributed table with weird names SELECT create_distributed_table('"CiTuS.TeeN"."TeeNTabLE.1!?!"', 'TeNANt_Id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -285,14 +285,14 @@ SELECT create_distributed_table('"CiTuS.TeeN"."TeeNTabLE.1!?!"', 'TeNANt_Id'); SET search_path TO "CiTuS.TeeN"; SELECT * FROM citus_shards_on_worker ORDER BY 2; Schema | Name | Type | Owner -------------+------------------------+-------+---------- +--------------------------------------------------------------------- CiTuS.TeeN | TeeNTabLE.1!?!_1130016 | table | postgres CiTuS.TeeN | TeeNTabLE.1!?!_1130018 | table | postgres (2 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; Schema | Name | Type | Owner | Table -------------+-----------------------+-------+----------+------------------------ +--------------------------------------------------------------------- CiTuS.TeeN | MyTenantIndex_1130016 | index | postgres | TeeNTabLE.1!?!_1130016 CiTuS.TeeN | MyTenantIndex_1130018 | index | postgres | TeeNTabLE.1!?!_1130018 (2 rows) @@ -300,14 +300,14 @@ SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; \d List of relations Schema | Name | Type | Owner -------------+----------------+-------+---------- +--------------------------------------------------------------------- CiTuS.TeeN | TeeNTabLE.1!?! | table | postgres (1 row) \di List of relations Schema | Name | Type | Owner | Table -------------+---------------+-------+----------+---------------- +--------------------------------------------------------------------- CiTuS.TeeN | MyTenantIndex | index | postgres | TeeNTabLE.1!?! 
(1 row) @@ -319,7 +319,7 @@ SET search_path TO 'mx_hide_shard_names'; \d List of relations Schema | Name | Type | Owner ----------------------+-------------------+-------+---------- +--------------------------------------------------------------------- mx_hide_shard_names | test_table | table | postgres mx_hide_shard_names | test_table_102008 | table | postgres (2 rows) @@ -327,7 +327,7 @@ SET search_path TO 'mx_hide_shard_names'; \di List of relations Schema | Name | Type | Owner | Table ----------------------+------------+-------+----------+------------ +--------------------------------------------------------------------- mx_hide_shard_names | test_index | index | postgres | test_table (1 row) diff --git a/src/test/regress/expected/multi_mx_metadata.out b/src/test/regress/expected/multi_mx_metadata.out index 4bab9a2af..5af03cee6 100644 --- a/src/test/regress/expected/multi_mx_metadata.out +++ b/src/test/regress/expected/multi_mx_metadata.out @@ -3,7 +3,7 @@ ALTER SYSTEM SET citus.recover_2pc_interval TO -1; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) @@ -33,35 +33,35 @@ SET citus.replication_model TO streaming; SET citus.shard_count TO 4; SELECT create_distributed_table('distributed_mx_table', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) -- Verify that we've logged commit records SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 2 (1 row) -- Confirm that the metadata transactions have been committed SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) -- Verify that the commit records have been removed SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 0 (1 row) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass; Column | Type | Modifiers -----------+--------+------------------------------------------------------------------------- +--------------------------------------------------------------------- key | text | not null value | jsonb | some_val | bigint | not null default nextval('distributed_mx_table_some_val_seq'::regclass) @@ -70,35 +70,35 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'distributed_mx_table_pkey'::regclass; Column | Type | Definition ---------+------+------------ +--------------------------------------------------------------------- key | text | key (1 row) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'distributed_mx_table_value_idx'::regclass; Column | Type | Definition ---------+------+------------ +--------------------------------------------------------------------- value | text | value (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid = 'distributed_mx_table'::regclass; repmodel ----------- +--------------------------------------------------------------------- s (1 row) SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'distributed_mx_table'::regclass; count -------- 
+--------------------------------------------------------------------- 4 (1 row) \c - - - :worker_2_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass; Column | Type | Modifiers -----------+--------+------------------------------------------------------------------------- +--------------------------------------------------------------------- key | text | not null value | jsonb | some_val | bigint | not null default nextval('distributed_mx_table_some_val_seq'::regclass) @@ -107,28 +107,28 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'distributed_mx_table_pkey'::regclass; Column | Type | Definition ---------+------+------------ +--------------------------------------------------------------------- key | text | key (1 row) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'distributed_mx_table_value_idx'::regclass; Column | Type | Definition ---------+------+------------ +--------------------------------------------------------------------- value | text | value (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid = 'distributed_mx_table'::regclass; repmodel ----------- +--------------------------------------------------------------------- s (1 row) SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'distributed_mx_table'::regclass; count -------- +--------------------------------------------------------------------- 4 (1 row) @@ -143,7 +143,7 @@ CREATE TABLE should_not_exist ( ); SELECT create_distributed_table('should_not_exist', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -152,7 +152,7 @@ ABORT; \c - - - :worker_1_port SELECT count(*) FROM pg_tables WHERE tablename = 'should_not_exist'; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -167,7 +167,7 @@ CREATE TABLE should_not_exist ( ); SELECT create_distributed_table('should_not_exist', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -185,7 +185,7 @@ CREATE TABLE objects_for_xacts ( ); SELECT create_distributed_table('objects_for_xacts', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -195,14 +195,14 @@ COMMIT; SELECT repmodel FROM pg_dist_partition WHERE logicalrelid = 'citus_mx_schema_for_xacts.objects_for_xacts'::regclass; repmodel ----------- +--------------------------------------------------------------------- s (1 row) SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'citus_mx_schema_for_xacts.objects_for_xacts'::regclass; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -222,7 +222,7 @@ CREATE TABLE objects_for_xacts2 ( ); SELECT create_distributed_table('objects_for_xacts2', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -230,7 +230,7 @@ ROLLBACK; -- show that the table not exists on the coordinator SELECT count(*) FROM pg_tables WHERE tablename = 'objects_for_xacts2' and schemaname = 'citus_mx_schema_for_xacts'; count -------- 
+--------------------------------------------------------------------- 0 (1 row) @@ -238,14 +238,14 @@ SELECT count(*) FROM pg_tables WHERE tablename = 'objects_for_xacts2' and schema -- the distributed table not exists on the worker node SELECT count(*) FROM pg_tables WHERE tablename = 'objects_for_xacts2' and schemaname = 'citus_mx_schema_for_xacts'; count -------- +--------------------------------------------------------------------- 0 (1 row) -- shard also does not exist since we create shards in a transaction SELECT count(*) FROM pg_tables WHERE tablename LIKE 'objects_for_xacts2_%' and schemaname = 'citus_mx_schema_for_xacts'; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -256,7 +256,7 @@ HINT: Connect to the coordinator and run it again. -- Ensure pg_dist_transaction is empty for test SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -278,13 +278,13 @@ INSERT INTO pg_dist_transaction VALUES (:worker_1_group, 'citus_0_should_commit' INSERT INTO pg_dist_transaction VALUES (:worker_1_group, 'citus_0_should_be_forgotten'); SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 3 (1 row) SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -292,13 +292,13 @@ SELECT count(*) FROM pg_dist_transaction; \c - - - :worker_1_port SELECT count(*) FROM pg_tables WHERE tablename = 'should_abort'; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit'; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -308,7 +308,7 @@ NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
SELECT run_command_on_workers($$CREATE USER no_access_mx;$$); run_command_on_workers ------------------------------------ +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -346,7 +346,7 @@ ERROR: must be owner of the object CONTEXT: PL/pgSQL function raise_failed_aclcheck(text) line 6 at RAISE SELECT master_drop_sequences(ARRAY['non_existing_schema.distributed_mx_table_some_val_seq']); master_drop_sequences ------------------------ +--------------------------------------------------------------------- (1 row) @@ -356,7 +356,7 @@ SELECT master_drop_sequences(ARRAY['public.']); ERROR: invalid name syntax SELECT master_drop_sequences(ARRAY['public.distributed_mx_table_some_val_seq_not_existing']); master_drop_sequences ------------------------ +--------------------------------------------------------------------- (1 row) @@ -366,7 +366,7 @@ DROP TABLE unrelated_table; -- doesn't error out but it has no effect, so no need to error out SELECT master_drop_sequences(NULL); master_drop_sequences ------------------------ +--------------------------------------------------------------------- (1 row) @@ -374,7 +374,7 @@ SELECT master_drop_sequences(NULL); -- finally make sure that the sequence remains SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass; Column | Type | Modifiers -----------+--------+------------------------------------------------------------------------- +--------------------------------------------------------------------- key | text | not null value | jsonb | some_val | bigint | not null default nextval('distributed_mx_table_some_val_seq'::regclass) @@ -405,7 +405,7 @@ SELECT raise_failed_aclcheck($$ SELECT master_drop_sequences(ARRAY['public.distributed_mx_table_some_val_seq']); $$); raise_failed_aclcheck ------------------------ +--------------------------------------------------------------------- (1 row) @@ -422,7 +422,7 @@ DROP TABLE unrelated_table; -- finally make sure that the sequence remains SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass; Column | Type | Modifiers -----------+--------+------------------------------------------------------------------------- +--------------------------------------------------------------------- key | text | not null value | jsonb | some_val | bigint | not null default nextval('distributed_mx_table_some_val_seq'::regclass) @@ -433,7 +433,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx ALTER SYSTEM RESET citus.recover_2pc_interval; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/multi_mx_modifications.out b/src/test/regress/expected/multi_mx_modifications.out index 1bb6b7a60..b1604c8d9 100644 --- a/src/test/regress/expected/multi_mx_modifications.out +++ b/src/test/regress/expected/multi_mx_modifications.out @@ -7,7 +7,7 @@ INSERT INTO limit_orders_mx VALUES (32743, 'AAPL', 9580, '2004-10-19 10:23:54', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32743; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -17,7 +17,7 @@ INSERT INTO limit_orders_mx VALUES (32744, 'AAPL', 9580, '2004-10-19 10:23:54', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32744; count -------- 
+--------------------------------------------------------------------- 1 (1 row) @@ -28,14 +28,14 @@ INSERT INTO limit_orders_mx VALUES (32745, 'AAPL', 9580, '2004-10-19 10:23:54', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32745; count -------- +--------------------------------------------------------------------- 1 (1 row) -- and see all the inserted rows SELECT * FROM limit_orders_mx ORDER BY 1; id | symbol | bidder_id | placed_at | kind | limit_price --------+--------+-----------+--------------------------+------+------------- +--------------------------------------------------------------------- 32743 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 32744 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 32745 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 @@ -44,7 +44,7 @@ SELECT * FROM limit_orders_mx ORDER BY 1; -- basic single-row INSERT with RETURNING INSERT INTO limit_orders_mx VALUES (32746, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69) RETURNING *; id | symbol | bidder_id | placed_at | kind | limit_price --------+--------+-----------+--------------------------+------+------------- +--------------------------------------------------------------------- 32746 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 (1 row) @@ -53,7 +53,7 @@ INSERT INTO limit_orders_mx VALUES (12756, 'MSFT', 10959, '2013-05-08 07:29:23', DEFAULT); SELECT * FROM limit_orders_mx WHERE id = 12756; id | symbol | bidder_id | placed_at | kind | limit_price --------+--------+-----------+--------------------------+------+------------- +--------------------------------------------------------------------- 12756 | MSFT | 10959 | Wed May 08 07:29:23 2013 | sell | 0.00 (1 row) @@ -62,7 +62,7 @@ INSERT INTO limit_orders_mx VALUES (430, upper('ibm'), 214, timestamp '2003-01-2 interval '5 hours', 'buy', sqrt(2)); SELECT * FROM limit_orders_mx WHERE id = 430; id | symbol | bidder_id | placed_at | kind | limit_price ------+--------+-----------+--------------------------+------+----------------- +--------------------------------------------------------------------- 430 | IBM | 214 | Tue Jan 28 15:31:17 2003 | buy | 1.4142135623731 (1 row) @@ -112,27 +112,27 @@ INSERT INTO limit_orders_mx SELECT * FROM deleted_orders; INSERT INTO limit_orders_mx VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246; count -------- +--------------------------------------------------------------------- 1 (1 row) DELETE FROM limit_orders_mx WHERE id = 246; SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246; count -------- +--------------------------------------------------------------------- 0 (1 row) -- test simple DELETE with RETURNING DELETE FROM limit_orders_mx WHERE id = 430 RETURNING *; id | symbol | bidder_id | placed_at | kind | limit_price ------+--------+-----------+--------------------------+------+----------------- +--------------------------------------------------------------------- 430 | IBM | 214 | Tue Jan 28 15:31:17 2003 | buy | 1.4142135623731 (1 row) SELECT COUNT(*) FROM limit_orders_mx WHERE id = 430; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -140,14 +140,14 @@ SELECT COUNT(*) FROM limit_orders_mx WHERE id = 430; INSERT INTO limit_orders_mx VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246; count -------- +--------------------------------------------------------------------- 1 (1 row) DELETE FROM 
limit_orders_mx WHERE id = (2 * 123); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -170,14 +170,14 @@ INSERT INTO limit_orders_mx VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'se UPDATE limit_orders_mx SET symbol = 'GM' WHERE id = 246; SELECT symbol FROM limit_orders_mx WHERE id = 246; symbol --------- +--------------------------------------------------------------------- GM (1 row) -- simple UPDATE with RETURNING UPDATE limit_orders_mx SET symbol = 'GM' WHERE id = 246 RETURNING *; id | symbol | bidder_id | placed_at | kind | limit_price ------+--------+-----------+--------------------------+------+------------- +--------------------------------------------------------------------- 246 | GM | 162 | Mon Jul 02 16:32:15 2007 | sell | 20.69 (1 row) @@ -185,14 +185,14 @@ UPDATE limit_orders_mx SET symbol = 'GM' WHERE id = 246 RETURNING *; UPDATE limit_orders_mx SET bidder_id = 6 * 3 WHERE id = 246; SELECT bidder_id FROM limit_orders_mx WHERE id = 246; bidder_id ------------ +--------------------------------------------------------------------- 18 (1 row) -- expression UPDATE with RETURNING UPDATE limit_orders_mx SET bidder_id = 6 * 5 WHERE id = 246 RETURNING *; id | symbol | bidder_id | placed_at | kind | limit_price ------+--------+-----------+--------------------------+------+------------- +--------------------------------------------------------------------- 246 | GM | 30 | Mon Jul 02 16:32:15 2007 | sell | 20.69 (1 row) @@ -200,14 +200,14 @@ UPDATE limit_orders_mx SET bidder_id = 6 * 5 WHERE id = 246 RETURNING *; UPDATE limit_orders_mx SET (kind, limit_price) = ('buy', DEFAULT) WHERE id = 246; SELECT kind, limit_price FROM limit_orders_mx WHERE id = 246; kind | limit_price -------+------------- +--------------------------------------------------------------------- buy | 0.00 (1 row) -- multi-column UPDATE with RETURNING UPDATE limit_orders_mx SET (kind, limit_price) = ('buy', 999) WHERE id = 246 RETURNING *; id | symbol | bidder_id | placed_at | kind | limit_price ------+--------+-----------+--------------------------+------+------------- +--------------------------------------------------------------------- 246 | GM | 30 | Mon Jul 02 16:32:15 2007 | buy | 999 (1 row) @@ -231,7 +231,7 @@ WITH deleted_orders AS (INSERT INTO limit_orders_mx VALUES (399, 'PDR', 14, '201 UPDATE limit_orders_mx SET symbol = 'GM'; SELECT symbol, bidder_id FROM limit_orders_mx WHERE id = 246; symbol | bidder_id ---------+----------- +--------------------------------------------------------------------- GM | 30 (1 row) @@ -243,14 +243,14 @@ UPDATE limit_orders_mx SET bidder_id = bidder_id + 1 WHERE id = 246; UPDATE limit_orders_mx SET symbol = LOWER(symbol) WHERE id = 246; SELECT symbol, bidder_id FROM limit_orders_mx WHERE id = 246; symbol | bidder_id ---------+----------- +--------------------------------------------------------------------- gm | 247 (1 row) -- IMMUTABLE functions are allowed -- even in returning UPDATE limit_orders_mx SET symbol = UPPER(symbol) WHERE id = 246 RETURNING id, LOWER(symbol), symbol; id | lower | symbol ------+-------+-------- +--------------------------------------------------------------------- 246 | gm | GM (1 row) @@ -277,7 +277,7 @@ SET array_of_values = stable_append_mx(array_of_values, 3) WHERE id = 246; ERROR: STABLE functions used in UPDATE queries cannot be called with column references SELECT array_of_values FROM limit_orders_mx WHERE id = 246; array_of_values 
------------------ +--------------------------------------------------------------------- {1,2} (1 row) @@ -288,7 +288,7 @@ UPDATE limit_orders_mx SET bidder_id = temp_strict_func(1, null) WHERE id = 246; ERROR: null value in column "bidder_id" violates not-null constraint SELECT array_of_values FROM limit_orders_mx WHERE id = 246; array_of_values ------------------ +--------------------------------------------------------------------- {1,2} (1 row) @@ -312,7 +312,7 @@ INSERT INTO multiple_hash_mx VALUES ('0', '5'); INSERT INTO multiple_hash_mx VALUES ('0', '6'); UPDATE multiple_hash_mx SET data = data ||'-1' WHERE category = '0' RETURNING *; category | data -----------+------ +--------------------------------------------------------------------- 0 | 1-1 0 | 2-1 0 | 3-1 @@ -323,7 +323,7 @@ UPDATE multiple_hash_mx SET data = data ||'-1' WHERE category = '0' RETURNING *; DELETE FROM multiple_hash_mx WHERE category = '0' RETURNING *; category | data -----------+------ +--------------------------------------------------------------------- 0 | 1-1 0 | 2-1 0 | 3-1 @@ -348,7 +348,7 @@ INSERT INTO multiple_hash_mx VALUES ('2', '3'); INSERT 0 1 INSERT INTO multiple_hash_mx VALUES ('2', '3') RETURNING *; category | data -----------+------ +--------------------------------------------------------------------- 2 | 3 (1 row) @@ -363,7 +363,7 @@ UPDATE 3 -- three rows, with RETURNING UPDATE multiple_hash_mx SET data = data ||'-2' WHERE category = '1' RETURNING category; category ----------- +--------------------------------------------------------------------- 1 1 1 @@ -373,7 +373,7 @@ UPDATE 3 -- check SELECT * FROM multiple_hash_mx WHERE category = '1' ORDER BY category, data; category | data -----------+--------- +--------------------------------------------------------------------- 1 | 1-1-2-2 1 | 2-2-2 1 | 3-2-2 @@ -389,7 +389,7 @@ DELETE 3 -- three rows, with RETURNING DELETE FROM multiple_hash_mx WHERE category = '1' RETURNING category; category ----------- +--------------------------------------------------------------------- 1 1 1 @@ -399,12 +399,12 @@ DELETE 3 -- check SELECT * FROM multiple_hash_mx WHERE category = '1' ORDER BY category, data; category | data -----------+------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM multiple_hash_mx WHERE category = '2' ORDER BY category, data; category | data -----------+------ +--------------------------------------------------------------------- (0 rows) --- INSERT ... SELECT ... 
FROM commands are supported from workers @@ -431,32 +431,32 @@ SELECT last_value FROM app_analytics_events_mx_id_seq \gset ALTER SEQUENCE app_analytics_events_mx_id_seq NO MINVALUE NO MAXVALUE; SELECT setval('app_analytics_events_mx_id_seq'::regclass, 3940649673949184); setval ------------------- +--------------------------------------------------------------------- 3940649673949184 (1 row) INSERT INTO app_analytics_events_mx VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id; id ------------------- +--------------------------------------------------------------------- 3940649673949185 (1 row) INSERT INTO app_analytics_events_mx (app_id, name) VALUES (102, 'Wayz') RETURNING id; id ------------------- +--------------------------------------------------------------------- 3940649673949186 (1 row) INSERT INTO app_analytics_events_mx (app_id, name) VALUES (103, 'Mynt') RETURNING *; id | app_id | name -------------------+--------+------ +--------------------------------------------------------------------- 3940649673949187 | 103 | Mynt (1 row) -- clean up SELECT setval('app_analytics_events_mx_id_seq'::regclass, :last_value); setval ------------------- +--------------------------------------------------------------------- 4503599627370497 (1 row) diff --git a/src/test/regress/expected/multi_mx_modifications_to_reference_tables.out b/src/test/regress/expected/multi_mx_modifications_to_reference_tables.out index b8febf421..7b6cd15bf 100644 --- a/src/test/regress/expected/multi_mx_modifications_to_reference_tables.out +++ b/src/test/regress/expected/multi_mx_modifications_to_reference_tables.out @@ -9,34 +9,34 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) CREATE TABlE ref_table(id int, value_1 int); SELECT create_reference_table('ref_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABlE ref_table_2(id int, value_1 int); SELECT create_reference_table('ref_table_2'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE test_table_1(id int, value_1 int); SELECT create_distributed_table('test_table_1', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -47,28 +47,28 @@ SET search_path TO 'mx_modify_reference_table'; INSERT INTO ref_table VALUES(1,1),(2,2); SELECT SUM(value_1) FROM ref_table; sum ------ +--------------------------------------------------------------------- 3 (1 row) UPDATE ref_table SET value_1 = 1 WHERE id = 2; SELECT SUM(value_1) FROM ref_table; sum ------ +--------------------------------------------------------------------- 2 (1 row) DELETE FROM ref_table; SELECT SUM(value_1) FROM ref_table; sum ------ +--------------------------------------------------------------------- (1 row) COPY ref_table FROM STDIN DELIMITER ','; SELECT SUM(value_1) FROM ref_table; sum ------ +--------------------------------------------------------------------- 3 (1 row) @@ -76,7 +76,7 @@ SELECT 
SUM(value_1) FROM ref_table; -- It has been started to be supported on MX nodes with DML operations. SELECT * FROM ref_table FOR UPDATE; id | value_1 -----+--------- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -86,14 +86,14 @@ SELECT * FROM ref_table FOR UPDATE; INSERT INTO ref_table SELECT * FROM test_table_1; SELECT SUM(value_1) FROM ref_table; sum ------ +--------------------------------------------------------------------- 14 (1 row) INSERT INTO ref_table_2 SELECT * FROM ref_table; SELECT SUM(value_1) FROM ref_table_2; sum ------ +--------------------------------------------------------------------- 14 (1 row) @@ -102,13 +102,13 @@ SELECT SUM(value_1) FROM ref_table_2; SET search_path TO 'mx_modify_reference_table'; SELECT SUM(value_1) FROM ref_table; sum ------ +--------------------------------------------------------------------- 14 (1 row) SELECT SUM(value_1) FROM ref_table_2; sum ------ +--------------------------------------------------------------------- 14 (1 row) @@ -118,35 +118,35 @@ SELECT SUM(value_1) FROM ref_table_2; INSERT INTO ref_table VALUES(1,1),(2,2); SELECT SUM(value_1) FROM ref_table; sum ------ +--------------------------------------------------------------------- 17 (1 row) UPDATE ref_table SET value_1 = 1 WHERE id = 2; SELECT SUM(value_1) FROM ref_table; sum ------ +--------------------------------------------------------------------- 15 (1 row) COPY ref_table FROM STDIN DELIMITER ','; SELECT SUM(value_1) FROM ref_table; sum ------ +--------------------------------------------------------------------- 18 (1 row) INSERT INTO ref_table SELECT * FROM test_table_1; SELECT SUM(value_1) FROM ref_table; sum ------ +--------------------------------------------------------------------- 29 (1 row) INSERT INTO ref_table_2 SELECT * FROM ref_table; SELECT SUM(value_1) FROM ref_table_2; sum ------ +--------------------------------------------------------------------- 43 (1 row) diff --git a/src/test/regress/expected/multi_mx_modifying_xacts.out b/src/test/regress/expected/multi_mx_modifying_xacts.out index a0b85db5c..ff1937679 100644 --- a/src/test/regress/expected/multi_mx_modifying_xacts.out +++ b/src/test/regress/expected/multi_mx_modifying_xacts.out @@ -14,7 +14,7 @@ INSERT INTO researchers_mx VALUES (2, 1, 'John Backus'); COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2; name -------------- +--------------------------------------------------------------------- John Backus (1 row) @@ -26,7 +26,7 @@ INSERT INTO researchers_mx VALUES (2, 1, 'John Backus Worker 1'); COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2; name ----------------------- +--------------------------------------------------------------------- John Backus Worker 1 (1 row) @@ -38,7 +38,7 @@ INSERT INTO researchers_mx VALUES (2, 1, 'John Backus Worker 2'); COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2; name ----------------------- +--------------------------------------------------------------------- John Backus Worker 2 (1 row) @@ -49,7 +49,7 @@ DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 1; ABORT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1; name --------------- +--------------------------------------------------------------------- Donald Knuth (1 row) @@ -60,7 +60,7 @@ DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 1; ABORT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1; name --------------- 
+--------------------------------------------------------------------- Donald Knuth (1 row) @@ -71,7 +71,7 @@ DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 1; ABORT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1; name --------------- +--------------------------------------------------------------------- Donald Knuth (1 row) @@ -85,7 +85,7 @@ INSERT INTO researchers_mx VALUES (6, 3, 'Ken Thompson'); COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 3 AND id = 6; name --------------- +--------------------------------------------------------------------- Ken Thompson (1 row) @@ -109,7 +109,7 @@ ROLLBACK TO hire_engelbart; COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 4; name ----------- +--------------------------------------------------------------------- Jim Gray (1 row) @@ -131,7 +131,7 @@ INSERT INTO labs_mx VALUES (5, 'Los Alamos'); COMMIT; SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id and researchers_mx.lab_id = 5;; id | lab_id | name | id | name -----+--------+-------------------+----+------------ +--------------------------------------------------------------------- 8 | 5 | Douglas Engelbart | 5 | Los Alamos (1 row) @@ -150,7 +150,7 @@ INSERT INTO labs_mx VALUES (5, 'Los Alamos'); COMMIT; SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id and researchers_mx.lab_id = 5; id | lab_id | name | id | name -----+--------+-------------------+----+------------ +--------------------------------------------------------------------- 8 | 5 | Douglas Engelbart | 5 | Los Alamos 8 | 5 | Douglas Engelbart | 5 | Los Alamos 8 | 5 | Douglas Engelbart | 5 | Los Alamos @@ -172,7 +172,7 @@ SET LOCAL citus.enable_local_execution TO off; INSERT INTO labs_mx VALUES (6, 'Bell labs_mx'); SELECT count(*) FROM researchers_mx WHERE lab_id = 6; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -188,7 +188,7 @@ BEGIN; \copy labs_mx from stdin delimiter ',' SELECT name FROM labs_mx WHERE id = 10; name ------------------- +--------------------------------------------------------------------- Weyland-Yutani-1 Weyland-Yutani-2 (2 rows) @@ -206,7 +206,7 @@ COMMIT; -- data shouldn't have persisted... SELECT * FROM objects_mx WHERE id = 1; id | name -----+------ +--------------------------------------------------------------------- (0 rows) -- same test on the second worker node @@ -222,7 +222,7 @@ COMMIT; -- data shouldn't have persisted... 
SELECT * FROM objects_mx WHERE id = 1; id | name -----+------ +--------------------------------------------------------------------- (0 rows) -- create trigger on one worker to reject certain values @@ -250,12 +250,12 @@ COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 2; id | name -----+------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM labs_mx WHERE id = 7; id | name -----+------ +--------------------------------------------------------------------- (0 rows) -- same failure test from worker 2 @@ -269,12 +269,12 @@ COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 2; id | name -----+------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM labs_mx WHERE id = 7; id | name -----+------ +--------------------------------------------------------------------- (0 rows) \c - - - :worker_1_port @@ -297,12 +297,12 @@ COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 1; id | name -----+------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM labs_mx WHERE id = 8; id | name -----+------ +--------------------------------------------------------------------- (0 rows) -- same test from the other worker @@ -319,12 +319,12 @@ COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 1; id | name -----+------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM labs_mx WHERE id = 8; id | name -----+------ +--------------------------------------------------------------------- (0 rows) -- what if the failures happen at COMMIT time? @@ -349,12 +349,12 @@ ERROR: could not commit transaction on any active node -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 2; id | name -----+------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM labs_mx WHERE id = 7; id | name -----+------ +--------------------------------------------------------------------- (0 rows) DROP TRIGGER reject_bad_mx ON labs_mx_1220102; @@ -377,12 +377,12 @@ ERROR: could not commit transaction on any active node -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 1; id | name -----+------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM labs_mx WHERE id = 8; id | name -----+------ +--------------------------------------------------------------------- (0 rows) -- what if one shard (objects_mx) succeeds but another (labs_mx) completely fails? 
@@ -402,11 +402,11 @@ ERROR: could not commit transaction on any active node -- no data should persists SELECT * FROM objects_mx WHERE id = 1; id | name -----+------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM labs_mx WHERE id = 8; id | name -----+------ +--------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_mx_node_metadata.out b/src/test/regress/expected/multi_mx_node_metadata.out index 39d707118..0f05bc090 100644 --- a/src/test/regress/expected/multi_mx_node_metadata.out +++ b/src/test/regress/expected/multi_mx_node_metadata.out @@ -23,7 +23,7 @@ $$; SELECT master_add_node('localhost', :worker_1_port) As nodeid_1 \gset SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; nodeid | nodename | nodeport | hasmetadata | metadatasynced ---------+-----------+----------+-------------+---------------- +--------------------------------------------------------------------- 2 | localhost | 57637 | f | f (1 row) @@ -31,14 +31,14 @@ SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node CREATE TABLE ref_table(a int primary key); SELECT create_reference_table('ref_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE dist_table_1(a int primary key, b int references ref_table(a)); SELECT create_distributed_table('dist_table_1', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -46,51 +46,51 @@ SELECT create_distributed_table('dist_table_1', 'a'); SELECT 1 FROM master_update_node((SELECT nodeid FROM pg_dist_node), 'localhost', :worker_2_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; nodeid | nodename | nodeport | hasmetadata | metadatasynced ---------+-----------+----------+-------------+---------------- +--------------------------------------------------------------------- 2 | localhost | 57638 | f | f (1 row) -- start syncing metadata to the node SELECT 1 FROM start_metadata_sync_to_node('localhost', :worker_2_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; nodeid | nodename | nodeport | hasmetadata | metadatasynced ---------+-----------+----------+-------------+---------------- +--------------------------------------------------------------------- 2 | localhost | 57638 | t | t (1 row) --------------------------------------------------------------------------- +--------------------------------------------------------------------- -- Test that maintenance daemon syncs after master_update_node --------------------------------------------------------------------------- +--------------------------------------------------------------------- -- Update the node again. We do this as epeatable read, so we just see the -- changes by master_update_node(). This is to avoid inconsistent results -- if the maintenance daemon does the metadata sync too fast. 
BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ; SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; nodeid | nodename | nodeport | hasmetadata | metadatasynced ---------+-----------+----------+-------------+---------------- +--------------------------------------------------------------------- 2 | localhost | 57638 | t | t (1 row) SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; nodeid | nodename | nodeport | hasmetadata | metadatasynced ---------+-----------+----------+-------------+---------------- +--------------------------------------------------------------------- 2 | localhost | 57637 | t | f (1 row) @@ -99,19 +99,19 @@ END; -- check if metadata is synced again SELECT wait_until_metadata_sync(); wait_until_metadata_sync --------------------------- +--------------------------------------------------------------------- (1 row) SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node; nodeid | hasmetadata | metadatasynced ---------+-------------+---------------- +--------------------------------------------------------------------- 2 | t | t (1 row) SELECT verify_metadata('localhost', :worker_1_port); verify_metadata ------------------ +--------------------------------------------------------------------- t (1 row) @@ -120,19 +120,19 @@ SELECT verify_metadata('localhost', :worker_1_port); BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ; SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; nodeid | nodename | nodeport | hasmetadata | metadatasynced ---------+-----------+----------+-------------+---------------- +--------------------------------------------------------------------- 2 | localhost | 57637 | t | t (1 row) SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; nodeid | nodename | nodeport | hasmetadata | metadatasynced ---------+-----------+----------+-------------+---------------- +--------------------------------------------------------------------- 2 | localhost | 12345 | t | f (1 row) @@ -140,43 +140,43 @@ END; -- maintenace daemon metadata sync should fail, because node is still unwriteable. SELECT wait_until_metadata_sync(); wait_until_metadata_sync --------------------------- +--------------------------------------------------------------------- (1 row) SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node; nodeid | hasmetadata | metadatasynced ---------+-------------+---------------- +--------------------------------------------------------------------- 2 | t | f (1 row) -- update it back to :worker_1_port, now metadata should be synced SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) SELECT wait_until_metadata_sync(); wait_until_metadata_sync --------------------------- +--------------------------------------------------------------------- (1 row) SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node; nodeid | hasmetadata | metadatasynced ---------+-------------+---------------- +--------------------------------------------------------------------- 2 | t | t (1 row) --------------------------------------------------------------------------- +--------------------------------------------------------------------- -- Test updating a node when another node is in readonly-mode --------------------------------------------------------------------------- +--------------------------------------------------------------------- SELECT master_add_node('localhost', :worker_2_port) AS nodeid_2 \gset NOTICE: Replicating reference table "ref_table" to the node localhost:xxxxx SELECT 1 FROM start_metadata_sync_to_node('localhost', :worker_2_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -184,14 +184,14 @@ SELECT 1 FROM start_metadata_sync_to_node('localhost', :worker_2_port); CREATE TABLE dist_table_2(a int); SELECT create_distributed_table('dist_table_2', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) INSERT INTO dist_table_2 SELECT i FROM generate_series(1, 100) i; SELECT mark_node_readonly('localhost', :worker_2_port, TRUE); mark_node_readonly --------------------- +--------------------------------------------------------------------- t (1 row) @@ -199,13 +199,13 @@ SELECT mark_node_readonly('localhost', :worker_2_port, TRUE); BEGIN; SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; nodeid | hasmetadata | metadatasynced ---------+-------------+---------------- +--------------------------------------------------------------------- 2 | t | f 3 | t | f (2 rows) @@ -215,13 +215,13 @@ COMMIT; -- we shouldn't see the warnings. SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 23456); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; nodeid | hasmetadata | metadatasynced ---------+-------------+---------------- +--------------------------------------------------------------------- 2 | t | f 3 | t | f (2 rows) @@ -229,20 +229,20 @@ SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; -- Make the node writeable. 
SELECT mark_node_readonly('localhost', :worker_2_port, FALSE); mark_node_readonly --------------------- +--------------------------------------------------------------------- t (1 row) SELECT wait_until_metadata_sync(); wait_until_metadata_sync --------------------------- +--------------------------------------------------------------------- (1 row) -- Mark the node readonly again, so the following master_update_node warns SELECT mark_node_readonly('localhost', :worker_2_port, TRUE); mark_node_readonly --------------------- +--------------------------------------------------------------------- t (1 row) @@ -250,56 +250,56 @@ SELECT mark_node_readonly('localhost', :worker_2_port, TRUE); BEGIN; SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM dist_table_2; count -------- +--------------------------------------------------------------------- 100 (1 row) END; SELECT wait_until_metadata_sync(); wait_until_metadata_sync --------------------------- +--------------------------------------------------------------------- (1 row) -- Make the node writeable. SELECT mark_node_readonly('localhost', :worker_2_port, FALSE); mark_node_readonly --------------------- +--------------------------------------------------------------------- t (1 row) SELECT wait_until_metadata_sync(); wait_until_metadata_sync --------------------------- +--------------------------------------------------------------------- (1 row) SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT verify_metadata('localhost', :worker_1_port), verify_metadata('localhost', :worker_2_port); verify_metadata | verify_metadata ------------------+----------------- +--------------------------------------------------------------------- t | t (1 row) --------------------------------------------------------------------------- +--------------------------------------------------------------------- -- Test that master_update_node rolls back properly --------------------------------------------------------------------------- +--------------------------------------------------------------------- BEGIN; SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -307,17 +307,17 @@ ROLLBACK; SELECT verify_metadata('localhost', :worker_1_port), verify_metadata('localhost', :worker_2_port); verify_metadata | verify_metadata ------------------+----------------- +--------------------------------------------------------------------- t | t (1 row) --------------------------------------------------------------------------- +--------------------------------------------------------------------- -- Test that master_update_node can appear in a prepared transaction. --------------------------------------------------------------------------- +--------------------------------------------------------------------- BEGIN; SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345); ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) @@ -325,13 +325,13 @@ PREPARE TRANSACTION 'tx01'; COMMIT PREPARED 'tx01'; SELECT wait_until_metadata_sync(); wait_until_metadata_sync --------------------------- +--------------------------------------------------------------------- (1 row) SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; nodeid | hasmetadata | metadatasynced ---------+-------------+---------------- +--------------------------------------------------------------------- 2 | t | f 3 | t | t (2 rows) @@ -339,7 +339,7 @@ SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; BEGIN; SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -347,13 +347,13 @@ PREPARE TRANSACTION 'tx01'; COMMIT PREPARED 'tx01'; SELECT wait_until_metadata_sync(); wait_until_metadata_sync --------------------------- +--------------------------------------------------------------------- (1 row) SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; nodeid | hasmetadata | metadatasynced ---------+-------------+---------------- +--------------------------------------------------------------------- 2 | t | t 3 | t | t (2 rows) @@ -361,52 +361,52 @@ SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; SELECT verify_metadata('localhost', :worker_1_port), verify_metadata('localhost', :worker_2_port); verify_metadata | verify_metadata ------------------+----------------- +--------------------------------------------------------------------- t | t (1 row) --------------------------------------------------------------------------- +--------------------------------------------------------------------- -- Test that changes in isactive is propagated to the metadata nodes --------------------------------------------------------------------------- +--------------------------------------------------------------------- -- Don't drop the reference table so it has shards on the nodes being disabled DROP TABLE dist_table_1, dist_table_2; SELECT 1 FROM master_disable_node('localhost', :worker_2_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT verify_metadata('localhost', :worker_1_port); verify_metadata ------------------ +--------------------------------------------------------------------- t (1 row) SELECT 1 FROM master_activate_node('localhost', :worker_2_port); NOTICE: Replicating reference table "ref_table" to the node localhost:xxxxx ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) SELECT verify_metadata('localhost', :worker_1_port); verify_metadata ------------------ +--------------------------------------------------------------------- t (1 row) ------------------------------------------------------------------------------------- +--------------------------------------------------------------------- -- Test master_disable_node() when the node that is being disabled is actually down ------------------------------------------------------------------------------------- +--------------------------------------------------------------------- SELECT master_update_node(:nodeid_2, 'localhost', 1); master_update_node --------------------- +--------------------------------------------------------------------- (1 row) SELECT wait_until_metadata_sync(); wait_until_metadata_sync --------------------------- +--------------------------------------------------------------------- (1 row) @@ -420,60 +420,60 @@ HINT: If you are using MX, try stop_metadata_sync_to_node(hostname, port) for n -- try again after stopping metadata sync SELECT stop_metadata_sync_to_node('localhost', 1); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) SELECT 1 FROM master_disable_node('localhost', 1); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT verify_metadata('localhost', :worker_1_port); verify_metadata ------------------ +--------------------------------------------------------------------- t (1 row) SELECT master_update_node(:nodeid_2, 'localhost', :worker_2_port); master_update_node --------------------- +--------------------------------------------------------------------- (1 row) SELECT wait_until_metadata_sync(); wait_until_metadata_sync --------------------------- +--------------------------------------------------------------------- (1 row) SELECT 1 FROM master_activate_node('localhost', :worker_2_port); NOTICE: Replicating reference table "ref_table" to the node localhost:xxxxx ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT verify_metadata('localhost', :worker_1_port); verify_metadata ------------------ +--------------------------------------------------------------------- t (1 row) ------------------------------------------------------------------------------------- +--------------------------------------------------------------------- -- Test master_disable_node() when the other node is down ------------------------------------------------------------------------------------- +--------------------------------------------------------------------- -- node 1 is down. 
SELECT master_update_node(:nodeid_1, 'localhost', 1); master_update_node --------------------- +--------------------------------------------------------------------- (1 row) SELECT wait_until_metadata_sync(); wait_until_metadata_sync --------------------------- +--------------------------------------------------------------------- (1 row) @@ -487,39 +487,39 @@ HINT: If you are using MX, try stop_metadata_sync_to_node(hostname, port) for n -- try again after stopping metadata sync SELECT stop_metadata_sync_to_node('localhost', 1); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) SELECT 1 FROM master_disable_node('localhost', :worker_2_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) -- bring up node 1 SELECT master_update_node(:nodeid_1, 'localhost', :worker_1_port); master_update_node --------------------- +--------------------------------------------------------------------- (1 row) SELECT wait_until_metadata_sync(); wait_until_metadata_sync --------------------------- +--------------------------------------------------------------------- (1 row) SELECT 1 FROM master_activate_node('localhost', :worker_2_port); NOTICE: Replicating reference table "ref_table" to the node localhost:xxxxx ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT verify_metadata('localhost', :worker_1_port); verify_metadata ------------------ +--------------------------------------------------------------------- t (1 row) @@ -528,7 +528,7 @@ DROP TABLE ref_table; TRUNCATE pg_dist_colocation; SELECT count(*) FROM (SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node) t; count -------- +--------------------------------------------------------------------- 2 (1 row) diff --git a/src/test/regress/expected/multi_mx_partitioning.out b/src/test/regress/expected/multi_mx_partitioning.out index 3554bb303..8d7a22f7c 100644 --- a/src/test/regress/expected/multi_mx_partitioning.out +++ b/src/test/regress/expected/multi_mx_partitioning.out @@ -8,7 +8,7 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -28,7 +28,7 @@ SELECT create_distributed_table('partitioning_test', 'id'); NOTICE: Copying data from local table... NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -36,7 +36,7 @@ NOTICE: Copying data from local table... 
\c - - - :worker_1_port SELECT * FROM partitioning_test ORDER BY 1; id | time -----+------------ +--------------------------------------------------------------------- 1 | 06-06-2009 2 | 07-07-2010 3 | 09-09-2009 @@ -52,7 +52,7 @@ WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; logicalrelid ------------------------- +--------------------------------------------------------------------- partitioning_test partitioning_test_2009 partitioning_test_2010 @@ -67,7 +67,7 @@ GROUP BY ORDER BY 1,2; logicalrelid | count -------------------------+------- +--------------------------------------------------------------------- partitioning_test | 4 partitioning_test_2009 | 4 partitioning_test_2010 | 4 @@ -76,7 +76,7 @@ ORDER BY -- see from MX node, partitioning hierarchy is built SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1; inhrelid ------------------------- +--------------------------------------------------------------------- partitioning_test_2009 partitioning_test_2010 (2 rows) @@ -96,7 +96,7 @@ WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2011') ORDER BY 1; logicalrelid ------------------------- +--------------------------------------------------------------------- partitioning_test partitioning_test_2011 (2 rows) @@ -110,7 +110,7 @@ GROUP BY ORDER BY 1,2; logicalrelid | count -------------------------+------- +--------------------------------------------------------------------- partitioning_test | 4 partitioning_test_2011 | 4 (2 rows) @@ -118,7 +118,7 @@ ORDER BY -- see from MX node, partitioning hierarchy is built SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1; inhrelid ------------------------- +--------------------------------------------------------------------- partitioning_test_2009 partitioning_test_2010 partitioning_test_2011 @@ -144,7 +144,7 @@ WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2012') ORDER BY 1; logicalrelid ------------------------- +--------------------------------------------------------------------- partitioning_test partitioning_test_2012 (2 rows) @@ -158,7 +158,7 @@ GROUP BY ORDER BY 1,2; logicalrelid | count -------------------------+------- +--------------------------------------------------------------------- partitioning_test | 4 partitioning_test_2012 | 4 (2 rows) @@ -166,7 +166,7 @@ ORDER BY -- see from MX node, see the data is loaded to shards SELECT * FROM partitioning_test ORDER BY 1; id | time -----+------------ +--------------------------------------------------------------------- 1 | 06-06-2009 2 | 07-07-2010 3 | 09-09-2009 @@ -178,7 +178,7 @@ SELECT * FROM partitioning_test ORDER BY 1; -- see from MX node, partitioning hierarchy is built SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1; inhrelid ------------------------- +--------------------------------------------------------------------- partitioning_test_2009 partitioning_test_2010 partitioning_test_2011 @@ -192,7 +192,7 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE partitioning_test_2013(id int, time date); SELECT create_distributed_table('partitioning_test_2013', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -204,7 +204,7 @@ ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2013 FOR VALUES \c - - - :worker_1_port SELECT * 
FROM partitioning_test ORDER BY 1; id | time -----+------------ +--------------------------------------------------------------------- 1 | 06-06-2009 2 | 07-07-2010 3 | 09-09-2009 @@ -218,7 +218,7 @@ SELECT * FROM partitioning_test ORDER BY 1; -- see from MX node, partitioning hierarchy is built SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1; inhrelid ------------------------- +--------------------------------------------------------------------- partitioning_test_2009 partitioning_test_2010 partitioning_test_2011 @@ -233,7 +233,7 @@ ALTER TABLE partitioning_test DETACH PARTITION partitioning_test_2009; \c - - - :worker_1_port SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1; inhrelid ------------------------- +--------------------------------------------------------------------- partitioning_test_2010 partitioning_test_2011 partitioning_test_2012 @@ -248,19 +248,19 @@ HINT: Connect to the coordinator and run it again. -- make sure we can repeatedly call start_metadata_sync_to_node SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -279,7 +279,7 @@ SET SEARCH_PATH TO partition_test; CREATE TABLE partition_parent_table(a int, b int, c int) PARTITION BY RANGE (b); SELECT create_distributed_table('partition_parent_table', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/multi_mx_reference_table.out b/src/test/regress/expected/multi_mx_reference_table.out index 6dc428df8..bb7c1abbf 100644 --- a/src/test/regress/expected/multi_mx_reference_table.out +++ b/src/test/regress/expected/multi_mx_reference_table.out @@ -3,7 +3,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000; CREATE TABLE reference_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -15,14 +15,14 @@ INSERT INTO reference_table_test VALUES (5, 5.0, '5', '2016-12-05'); -- SELECT .. FOR UPDATE should work on coordinator (takes lock on first worker) SELECT value_1, value_2 FROM reference_table_test ORDER BY value_1, value_2 LIMIT 1 FOR UPDATE; value_1 | value_2 ----------+--------- +--------------------------------------------------------------------- 1 | 1 (1 row) BEGIN; SELECT value_1, value_2 FROM reference_table_test ORDER BY value_1, value_2 LIMIT 1 FOR UPDATE; value_1 | value_2 ----------+--------- +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -31,14 +31,14 @@ END; -- SELECT .. 
FOR UPDATE should work on first worker (takes lock on self) SELECT value_1, value_2 FROM reference_table_test ORDER BY value_1, value_2 LIMIT 1 FOR UPDATE; value_1 | value_2 ----------+--------- +--------------------------------------------------------------------- 1 | 1 (1 row) BEGIN; SELECT value_1, value_2 FROM reference_table_test ORDER BY value_1, value_2 LIMIT 1 FOR UPDATE; value_1 | value_2 ----------+--------- +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -49,7 +49,7 @@ SELECT FROM reference_table_test; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 @@ -64,7 +64,7 @@ FROM WHERE value_1 = 1; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) @@ -76,7 +76,7 @@ FROM ORDER BY 2 ASC LIMIT 3; value_1 | value_2 ----------+--------- +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -91,7 +91,7 @@ WHERE ORDER BY 2 LIMIT 3; value_1 | value_3 ----------+--------- +--------------------------------------------------------------------- 4 | 4 5 | 5 (2 rows) @@ -104,7 +104,7 @@ ORDER BY 2 ASC LIMIT 2; value_1 | ?column? ----------+---------- +--------------------------------------------------------------------- 1 | 15 2 | 30 (2 rows) @@ -116,7 +116,7 @@ FROM ORDER BY 2 ASC LIMIT 2 OFFSET 2; value_1 | ?column? ----------+---------- +--------------------------------------------------------------------- 3 | 45 4 | 60 (2 rows) @@ -128,7 +128,7 @@ FROM WHERE value_2 = 2 OR value_2 = 3; value_2 | value_4 ----------+-------------------------- +--------------------------------------------------------------------- 2 | Fri Dec 02 00:00:00 2016 3 | Sat Dec 03 00:00:00 2016 (2 rows) @@ -140,7 +140,7 @@ FROM WHERE value_2 = 2 AND value_2 = 3; value_2 | value_4 ----------+--------- +--------------------------------------------------------------------- (0 rows) SELECT @@ -150,7 +150,7 @@ FROM WHERE value_3 = '2' OR value_1 = 3; value_2 | value_4 ----------+-------------------------- +--------------------------------------------------------------------- 2 | Fri Dec 02 00:00:00 2016 3 | Sat Dec 03 00:00:00 2016 (2 rows) @@ -165,7 +165,7 @@ WHERE ) AND FALSE; value_2 | value_4 ----------+--------- +--------------------------------------------------------------------- (0 rows) SELECT @@ -182,7 +182,7 @@ WHERE ) AND value_1 < 3; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (2 rows) @@ -197,7 +197,7 @@ WHERE '1', '2' ); value_4 --------------------------- +--------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 Fri Dec 02 00:00:00 2016 (2 rows) @@ -212,7 +212,7 @@ WHERE '5', '2' ); date_part ------------ +--------------------------------------------------------------------- 2 5 (2 rows) @@ -224,7 +224,7 @@ FROM WHERE value_2 <= 2 AND value_2 >= 4; value_4 ---------- +--------------------------------------------------------------------- (0 rows) SELECT @@ -234,7 +234,7 @@ FROM WHERE value_2 <= 20 AND value_2 >= 4; value_4 
--------------------------- +--------------------------------------------------------------------- Sun Dec 04 00:00:00 2016 Mon Dec 05 00:00:00 2016 (2 rows) @@ -246,7 +246,7 @@ FROM WHERE value_2 >= 5 AND value_2 <= random(); value_4 ---------- +--------------------------------------------------------------------- (0 rows) SELECT @@ -256,7 +256,7 @@ FROM WHERE value_4 BETWEEN '2016-12-01' AND '2016-12-03'; value_1 ---------- +--------------------------------------------------------------------- 1 2 3 @@ -269,7 +269,7 @@ FROM WHERE FALSE; value_1 ---------- +--------------------------------------------------------------------- (0 rows) SELECT @@ -279,7 +279,7 @@ FROM WHERE int4eq(1, 2); value_1 ---------- +--------------------------------------------------------------------- (0 rows) -- rename output name and do some operations @@ -288,7 +288,7 @@ SELECT FROM reference_table_test; id | age -----+----- +--------------------------------------------------------------------- 1 | 15 2 | 30 3 | 45 @@ -303,7 +303,7 @@ SELECT FROM some_data; value_2 | value_4 ----------+-------------------------- +--------------------------------------------------------------------- 3 | Sat Dec 03 00:00:00 2016 4 | Sun Dec 04 00:00:00 2016 5 | Mon Dec 05 00:00:00 2016 @@ -313,7 +313,7 @@ FROM WITH some_data AS ( SELECT value_2, value_4 FROM reference_table_test WHERE value_2 >=3) SELECT * FROM reference_table_test ORDER BY 1 LIMIT 1; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) @@ -325,7 +325,7 @@ FROM WHERE value_1 = 1; value_1 | value_2 | value_3 | value_4 | position ----------+---------+---------+--------------------------+---------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 | 3 (1 row) @@ -336,7 +336,7 @@ FROM WHERE value_1 = 1 OR value_1 = 2; value_1 | value_2 | value_3 | value_4 | position ----------+---------+---------+--------------------------+---------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 | 3 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 | 3 (2 rows) @@ -349,7 +349,7 @@ SELECT * FROM ( ) AS combination ORDER BY value_1; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 (2 rows) @@ -361,7 +361,7 @@ SELECT * FROM ( ) AS combination ORDER BY value_1; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) @@ -372,7 +372,7 @@ SELECT * FROM ( ) AS combination ORDER BY value_1; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+--------- +--------------------------------------------------------------------- (0 rows) -- to make the tests more interested for aggregation tests, ingest some more data @@ -393,7 +393,7 @@ HAVING ORDER BY 1; value_4 | sum ---------------------------+----- +--------------------------------------------------------------------- Fri Dec 02 00:00:00 2016 | 4 Sat Dec 03 00:00:00 2016 | 6 Sun Dec 04 00:00:00 2016 | 4 @@ -410,7 +410,7 @@ GROUP BY GROUPING sets ((value_4), (value_3)) ORDER BY 1, 2, 3; value_4 | value_3 | sum 
---------------------------+---------+----- +--------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 | | 2 Fri Dec 02 00:00:00 2016 | | 4 Sat Dec 03 00:00:00 2016 | | 6 @@ -431,7 +431,7 @@ FROM ORDER BY 1; value_4 --------------------------- +--------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 Fri Dec 02 00:00:00 2016 Sat Dec 03 00:00:00 2016 @@ -445,7 +445,7 @@ SELECT FROM reference_table_test; value_4 | rank ---------------------------+------ +--------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 | 1 Thu Dec 01 00:00:00 2016 | 1 Fri Dec 02 00:00:00 2016 | 1 @@ -462,7 +462,7 @@ SELECT FROM reference_table_test; value_4 | avg ---------------------------+------------------------ +--------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 | 1.00000000000000000000 Thu Dec 01 00:00:00 2016 | 1.00000000000000000000 Fri Dec 02 00:00:00 2016 | 2.0000000000000000 @@ -485,7 +485,7 @@ SELECT FROM reference_table_test; c ---- +--------------------------------------------------------------------- 3 (1 row) @@ -506,7 +506,7 @@ SELECT ORDER BY 1; value_1 | c ----------+--- +--------------------------------------------------------------------- 1 | 0 2 | 0 3 | 1 @@ -518,7 +518,7 @@ SELECT BEGIN; SELECT * FROM reference_table_test; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 @@ -531,7 +531,7 @@ SELECT * FROM reference_table_test; SELECT * FROM reference_table_test WHERE value_1 = 1; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (2 rows) @@ -546,13 +546,13 @@ DECLARE test_cursor CURSOR FOR ORDER BY value_1; FETCH test_cursor; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) FETCH ALL test_cursor; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 @@ -560,12 +560,12 @@ FETCH ALL test_cursor; FETCH test_cursor; -- fetch one row after the last value_1 | value_2 | value_3 | value_4 ----------+---------+---------+--------- +--------------------------------------------------------------------- (0 rows) FETCH BACKWARD test_cursor; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (1 row) @@ -581,14 +581,14 @@ CREATE TEMP TABLE temp_reference_test as CREATE TABLE reference_table_test_second (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_second'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE reference_table_test_third (value_1 int, value_2 
float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_third'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -602,14 +602,14 @@ INSERT INTO reference_table_test_third VALUES (5, 5.0, '5', '2016-12-05'); -- SELECT .. FOR UPDATE should work on second worker (takes lock on first worker) SELECT value_1, value_2 FROM reference_table_test ORDER BY value_1, value_2 LIMIT 1 FOR UPDATE; value_1 | value_2 ----------+--------- +--------------------------------------------------------------------- 1 | 1 (1 row) BEGIN; SELECT value_1, value_2 FROM reference_table_test ORDER BY value_1, value_2 LIMIT 1 FOR UPDATE; value_1 | value_2 ----------+--------- +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -624,7 +624,7 @@ WHERE ORDER BY 1; value_1 ---------- +--------------------------------------------------------------------- 1 2 3 @@ -639,7 +639,7 @@ WHERE ORDER BY 1; value_1 ---------- +--------------------------------------------------------------------- 4 5 (2 rows) @@ -653,7 +653,7 @@ WHERE ORDER BY 1; value_1 ---------- +--------------------------------------------------------------------- (0 rows) -- join on different columns and different data types via casts @@ -666,7 +666,7 @@ WHERE ORDER BY 1; value_1 ---------- +--------------------------------------------------------------------- 1 2 3 @@ -681,7 +681,7 @@ WHERE ORDER BY 1; value_1 ---------- +--------------------------------------------------------------------- 1 2 3 @@ -696,7 +696,7 @@ WHERE ORDER BY 1; value_1 ---------- +--------------------------------------------------------------------- 1 2 3 @@ -715,7 +715,7 @@ WHERE ORDER BY 1; value_1 ---------- +--------------------------------------------------------------------- 3 (1 row) @@ -729,7 +729,7 @@ WHERE ORDER BY 1; value_1 ---------- +--------------------------------------------------------------------- 3 (1 row) @@ -742,7 +742,7 @@ FROM ORDER BY 1; value_1 ---------- +--------------------------------------------------------------------- 3 (1 row) @@ -755,7 +755,7 @@ FROM ORDER BY 1; value_1 ---------- +--------------------------------------------------------------------- 1 2 3 @@ -771,7 +771,7 @@ FROM ORDER BY 1; value_1 ---------- +--------------------------------------------------------------------- 3 (2 rows) @@ -783,14 +783,14 @@ SET citus.replication_model TO streaming; CREATE TABLE colocated_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_distributed_table('colocated_table_test', 'value_1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE colocated_table_test_2 (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_distributed_table('colocated_table_test_2', 'value_1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -813,7 +813,7 @@ WHERE ORDER BY 1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ] value_1 ---------- +--------------------------------------------------------------------- 1 2 (2 rows) @@ -827,7 +827,7 @@ WHERE ORDER BY 1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ] value_2 ---------- +--------------------------------------------------------------------- 1 2 (2 rows) @@ -841,7 +841,7 @@ 
WHERE ORDER BY 1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ] value_2 ---------- +--------------------------------------------------------------------- 1 2 (2 rows) @@ -856,7 +856,7 @@ WHERE ORDER BY colocated_table_test.value_2; LOG: join order: [ "colocated_table_test_2" ][ cartesian product reference join "reference_table_test" ][ dual partition join "colocated_table_test" ] value_2 ---------- +--------------------------------------------------------------------- 1 1 2 @@ -873,7 +873,7 @@ WHERE ORDER BY 1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ local partition join "colocated_table_test_2" ] value_2 ---------- +--------------------------------------------------------------------- 1 2 (2 rows) @@ -888,7 +888,7 @@ WHERE ORDER BY 1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ dual partition join "colocated_table_test_2" ] value_2 ---------- +--------------------------------------------------------------------- 1 2 (2 rows) @@ -902,7 +902,7 @@ WHERE ORDER BY 1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ dual partition join "colocated_table_test_2" ] value_2 ---------- +--------------------------------------------------------------------- 1 2 (2 rows) diff --git a/src/test/regress/expected/multi_mx_repartition_udt_prepare.out b/src/test/regress/expected/multi_mx_repartition_udt_prepare.out index b8760055d..ba6a2e269 100644 --- a/src/test/regress/expected/multi_mx_repartition_udt_prepare.out +++ b/src/test/regress/expected/multi_mx_repartition_udt_prepare.out @@ -128,14 +128,14 @@ SET citus.replication_model TO streaming; SET citus.shard_count TO 3; SELECT create_distributed_table('repartition_udt', 'pk'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SET citus.shard_count TO 5; SELECT create_distributed_table('repartition_udt_other', 'pk'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -158,7 +158,7 @@ SET citus.task_executor_type = 'task-tracker'; SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.pk = repartition_udt_other.pk; pk | udtcol | txtcol | pk | udtcol | txtcol -----+--------+--------+----+--------+-------- +--------------------------------------------------------------------- (0 rows) -- Query that should result in a repartition join on UDT column. 
@@ -168,7 +168,7 @@ EXPLAIN SELECT * FROM repartition_udt JOIN repartition_udt_other WHERE repartition_udt.pk > 1; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] QUERY PLAN --------------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) Task Count: 4 Tasks Shown: None, not supported for re-partition queries @@ -186,7 +186,7 @@ SELECT * FROM repartition_udt JOIN repartition_udt_other ORDER BY repartition_udt.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] pk | udtcol | txtcol | pk | udtcol | txtcol -----+--------+--------+----+--------+-------- +--------------------------------------------------------------------- 2 | (1,2) | foo | 8 | (1,2) | foo 3 | (1,3) | foo | 9 | (1,3) | foo 4 | (2,1) | foo | 10 | (2,1) | foo diff --git a/src/test/regress/expected/multi_mx_repartition_udt_w1.out b/src/test/regress/expected/multi_mx_repartition_udt_w1.out index 934d94725..ba8450bc9 100644 --- a/src/test/regress/expected/multi_mx_repartition_udt_w1.out +++ b/src/test/regress/expected/multi_mx_repartition_udt_w1.out @@ -12,7 +12,7 @@ SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.pk = repartition_udt_other.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] pk | udtcol | txtcol | pk | udtcol | txtcol -----+--------+--------+----+--------+-------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM repartition_udt JOIN repartition_udt_other @@ -21,7 +21,7 @@ SELECT * FROM repartition_udt JOIN repartition_udt_other ORDER BY repartition_udt.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] pk | udtcol | txtcol | pk | udtcol | txtcol -----+--------+--------+----+--------+-------- +--------------------------------------------------------------------- 2 | (1,2) | foo | 8 | (1,2) | foo 3 | (1,3) | foo | 9 | (1,3) | foo 4 | (2,1) | foo | 10 | (2,1) | foo diff --git a/src/test/regress/expected/multi_mx_repartition_udt_w2.out b/src/test/regress/expected/multi_mx_repartition_udt_w2.out index c35b003ff..22ec5ddec 100644 --- a/src/test/regress/expected/multi_mx_repartition_udt_w2.out +++ b/src/test/regress/expected/multi_mx_repartition_udt_w2.out @@ -12,7 +12,7 @@ SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.pk = repartition_udt_other.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] pk | udtcol | txtcol | pk | udtcol | txtcol -----+--------+--------+----+--------+-------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM repartition_udt JOIN repartition_udt_other @@ -21,7 +21,7 @@ SELECT * FROM repartition_udt JOIN repartition_udt_other ORDER BY repartition_udt.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] pk | udtcol | txtcol | pk | udtcol | txtcol -----+--------+--------+----+--------+-------- +--------------------------------------------------------------------- 2 | (1,2) | foo | 8 | (1,2) | foo 3 | (1,3) | foo | 9 | (1,3) | foo 4 | (2,1) | foo | 10 | (2,1) | foo diff --git a/src/test/regress/expected/multi_mx_router_planner.out b/src/test/regress/expected/multi_mx_router_planner.out index 885ab82ba..12c238777 100644 --- a/src/test/regress/expected/multi_mx_router_planner.out +++ 
b/src/test/regress/expected/multi_mx_router_planner.out @@ -74,7 +74,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 id | author_id | title | word_count -----+-----------+-----------+------------ +--------------------------------------------------------------------- 50 | 10 | anjanette | 19519 (1 row) @@ -84,7 +84,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 title ------------- +--------------------------------------------------------------------- aggrandize absentness andelee @@ -100,7 +100,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 title | word_count -------------+------------ +--------------------------------------------------------------------- anjanette | 19519 aggrandize | 17277 attemper | 14976 @@ -117,7 +117,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 title | id ----------+---- +--------------------------------------------------------------------- aruru | 5 adversa | 15 (2 rows) @@ -130,7 +130,7 @@ SELECT title, author_id FROM articles_hash_mx DEBUG: Creating router plan DEBUG: Plan is router executable title | author_id --------------+----------- +--------------------------------------------------------------------- aseptic | 7 auriga | 7 arsenous | 7 @@ -149,7 +149,7 @@ SELECT title, author_id FROM articles_hash_mx DEBUG: Creating router plan DEBUG: Plan is router executable title | author_id --------------+----------- +--------------------------------------------------------------------- aseptic | 7 agatized | 8 auriga | 7 @@ -172,7 +172,7 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash_mx DEBUG: Creating router plan DEBUG: Plan is router executable author_id | corpus_size ------------+------------- +--------------------------------------------------------------------- 10 | 59955 8 | 55410 7 | 36756 @@ -189,7 +189,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 author_id | corpus_size ------------+------------- +--------------------------------------------------------------------- 1 | 35894 (1 row) @@ -198,7 +198,7 @@ DETAIL: distribution column value: 1 SELECT * FROM articles_hash_mx WHERE author_id <= 1; DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -210,7 +210,7 @@ SELECT * FROM articles_hash_mx WHERE author_id IN (1, 3); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 3 | 3 | asternal | 10480 11 | 1 | alamo | 1347 @@ -230,7 +230,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id ----- +--------------------------------------------------------------------- 1 11 21 @@ -245,7 +245,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 title --------------- +--------------------------------------------------------------------- arsenous alamo arcading @@ -261,7 +261,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: 
distribution column value: 1 id | author_id | id | title -----+-----------+----+-------------- +--------------------------------------------------------------------- 1 | 1 | 1 | arsenous 11 | 1 | 11 | alamo 21 | 1 | 21 | arcading @@ -275,7 +275,7 @@ SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | id | title -----+-----------+----+------- +--------------------------------------------------------------------- (0 rows) -- CTE joins on different workers are supported because they are both planned recursively @@ -295,7 +295,7 @@ DEBUG: Plan 66 query after replacing subqueries and CTEs: SELECT id_author.id, DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | id | title -----+-----------+----+------- +--------------------------------------------------------------------- (0 rows) -- recursive CTEs are supported when filtered on partition column @@ -347,7 +347,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 company_id | employee_id | manager_id | level -------------+-------------+------------+------- +--------------------------------------------------------------------- 1 | 1 | 0 | 1 1 | 2 | 1 | 2 1 | 3 | 1 | 2 @@ -393,7 +393,7 @@ SELECT DEBUG: Creating router plan DEBUG: Plan is router executable id | subtitle | count -----+----------+------- +--------------------------------------------------------------------- 1 | | 1 3 | | 1 11 | | 1 @@ -430,7 +430,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count | position -----+-----------+--------------+------------+---------- +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 | 3 11 | 1 | alamo | 1347 | 3 21 | 1 | arcading | 5890 | 3 @@ -442,7 +442,7 @@ SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1 o DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count | position -----+-----------+--------------+------------+---------- +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 | 3 3 | 3 | asternal | 10480 | 3 11 | 1 | alamo | 1347 | 3 @@ -460,7 +460,7 @@ SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1 o DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 5 id | author_id | title | word_count | position -----+-----------+------------+------------+---------- +--------------------------------------------------------------------- 12 | 2 | archiblast | 18185 | 3 42 | 2 | ausable | 15885 | 3 2 | 2 | abducing | 13642 | 3 @@ -479,7 +479,7 @@ DEBUG: Plan 85 query after replacing subqueries and CTEs: SELECT articles_hash_ DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 5 id | word_count -----+------------ +--------------------------------------------------------------------- 50 | 19519 14 | 19094 48 | 18610 @@ -499,7 +499,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | word_count -----+------------ +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -522,7 +522,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count 
-----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -537,7 +537,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -552,7 +552,7 @@ SELECT * WHERE author_id = 1 OR author_id = 18; DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -568,7 +568,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 article_id | random_value -------------+-------------- +--------------------------------------------------------------------- 1 | 9572 11 | 14817 21 | 123690 @@ -585,7 +585,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 first_author | second_word_count ---------------+------------------- +--------------------------------------------------------------------- 10 | 17277 10 | 1820 10 | 6363 @@ -601,7 +601,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 first_author | second_word_count ---------------+------------------- +--------------------------------------------------------------------- 10 | 19519 10 | 19519 10 | 19519 @@ -625,7 +625,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 first_author | second_word_count ---------------+------------------- +--------------------------------------------------------------------- (0 rows) -- single shard select with limit is router plannable @@ -637,7 +637,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -653,7 +653,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 (2 rows) @@ -669,7 +669,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 31 | 1 | athwartships | 7271 21 | 1 | arcading | 5890 (2 rows) @@ -684,7 +684,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id ----- +--------------------------------------------------------------------- 1 11 21 @@ -701,7 +701,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id ----- +--------------------------------------------------------------------- 1 11 21 @@ -717,7 +717,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column 
value: 2 avg --------------------- +--------------------------------------------------------------------- 12356.400000000000 (1 row) @@ -730,7 +730,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 max | min | sum | cnt --------+------+-------+----- +--------------------------------------------------------------------- 18185 | 2728 | 61782 | 5 (1 row) @@ -743,7 +743,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 max -------- +--------------------------------------------------------------------- 11814 (1 row) @@ -757,7 +757,7 @@ ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 3 | 3 | asternal | 10480 11 | 1 | alamo | 1347 @@ -776,7 +776,7 @@ UNION DEBUG: Creating router plan DEBUG: Plan is router executable left ------- +--------------------------------------------------------------------- a (1 row) @@ -786,7 +786,7 @@ INTERSECT DEBUG: Creating router plan DEBUG: Plan is router executable left ------- +--------------------------------------------------------------------- a (1 row) @@ -799,7 +799,7 @@ ORDER BY 1; DEBUG: Creating router plan DEBUG: Plan is router executable left ------- +--------------------------------------------------------------------- al ar at @@ -816,7 +816,7 @@ DEBUG: generating subplan 110_1 for subquery SELECT id, author_id, title, word_ DEBUG: generating subplan 110_2 for subquery SELECT id, author_id, title, word_count FROM public.articles_hash_mx WHERE (author_id OPERATOR(pg_catalog.=) 2) DEBUG: Plan 110 query after replacing subqueries and CTEs: SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('110_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer) UNION SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('110_2'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer) ORDER BY 1, 2 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 2 | 2 | abducing | 13642 11 | 1 | alamo | 1347 @@ -837,7 +837,7 @@ ORDER BY 1, 2 LIMIT 5; DEBUG: push down of limit count: 5 id | author_id | title | word_count -----+-----------+------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 2 | 2 | abducing | 13642 11 | 1 | alamo | 1347 @@ -858,7 +858,7 @@ SELECT * WHERE author_id >= 1 AND author_id <= 3 ORDER BY 1,2,3,4; id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 2 | 2 | abducing | 13642 3 | 3 | asternal | 10480 @@ -888,7 +888,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | 
alamo | 1347 21 | 1 | arcading | 5890 @@ -902,7 +902,7 @@ SELECT * WHERE author_id = 1 or id = 1; DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -918,7 +918,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 41 | 1 | aznavour | 11814 (2 rows) @@ -931,7 +931,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) -- not router plannable due to function call on the right side @@ -940,7 +940,7 @@ SELECT * WHERE author_id = (random()::int * 0 + 1); DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -954,7 +954,7 @@ SELECT * WHERE author_id = 1 or id = 1; DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -970,7 +970,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -984,7 +984,7 @@ SELECT * WHERE 1 = abs(author_id); DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -998,7 +998,7 @@ SELECT * WHERE author_id = abs(author_id - 2); DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1014,7 +1014,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 (1 row) @@ -1024,7 +1024,7 @@ SELECT * WHERE (author_id = 1) is true; DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1040,7 +1040,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column 
value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1056,7 +1056,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 (2 rows) @@ -1069,7 +1069,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 31 | 1 | athwartships | 7271 (2 rows) @@ -1082,7 +1082,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 31 | 1 | athwartships | 7271 (2 rows) @@ -1095,7 +1095,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1111,7 +1111,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 41 | 1 | aznavour | 11814 @@ -1125,7 +1125,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 prev | title | word_count -----------+----------+------------ +--------------------------------------------------------------------- | afrasia | 864 afrasia | adversa | 3164 adversa | antehall | 7707 @@ -1141,7 +1141,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 prev | title | word_count -----------+----------+------------ +--------------------------------------------------------------------- aminate | aruru | 11389 antehall | aminate | 9089 adversa | antehall | 7707 @@ -1156,7 +1156,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | min -----+----- +--------------------------------------------------------------------- 11 | 11 21 | 11 31 | 11 @@ -1171,7 +1171,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | word_count | avg -----+------------+----------------------- +--------------------------------------------------------------------- 11 | 1347 | 1347.0000000000000000 21 | 5890 | 3618.5000000000000000 31 | 7271 | 4836.0000000000000000 @@ -1186,7 +1186,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 word_count | rank -------------+------ +--------------------------------------------------------------------- 1347 | 1 5890 | 2 7271 | 3 @@ -1225,7 +1225,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 c ---- 
+--------------------------------------------------------------------- 5 (1 row) @@ -1246,7 +1246,7 @@ SELECT ORDER BY c; DEBUG: Router planner cannot handle multi-shard select queries c ---- +--------------------------------------------------------------------- 4 5 5 @@ -1269,7 +1269,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1290,19 +1290,19 @@ DEBUG: Plan is router executable DETAIL: distribution column value: 1 FETCH test_cursor; id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 (1 row) FETCH test_cursor; id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- 11 | 1 | alamo | 1347 (1 row) FETCH BACKWARD test_cursor; id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 (1 row) @@ -1338,7 +1338,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 count | count --------+------- +--------------------------------------------------------------------- 5 | 1 (1 row) @@ -1348,7 +1348,7 @@ SELECT count(*), count(*) FILTER (WHERE id < 3) WHERE author_id = 1 or author_id = 2; DEBUG: Router planner cannot handle multi-shard select queries count | count --------+------- +--------------------------------------------------------------------- 10 | 2 (1 row) @@ -1362,7 +1362,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1380,7 +1380,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1410,7 +1410,7 @@ CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash_mx ah WHERE author_id = 1" PL/pgSQL function author_articles_max_id() line 5 at SQL statement author_articles_max_id ------------------------- +--------------------------------------------------------------------- 41 (1 row) @@ -1438,7 +1438,7 @@ CONTEXT: SQL statement "SELECT ah.id, ah.word_count WHERE author_id = 1" PL/pgSQL function author_articles_id_word_count() line 4 at RETURN QUERY id | word_count -----+------------ +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1454,7 +1454,7 @@ DEBUG: Plan is router executable DETAIL: distribution column value: 1 SELECT * FROM mv_articles_hash_mx; id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1488,7 +1488,7 @@ DEBUG: Creating router plan DEBUG: Plan is router 
executable DETAIL: distribution column value: 1 id ----- +--------------------------------------------------------------------- 1 11 21 diff --git a/src/test/regress/expected/multi_mx_schema_support.out b/src/test/regress/expected/multi_mx_schema_support.out index afb5f687e..5a9427f1c 100644 --- a/src/test/regress/expected/multi_mx_schema_support.out +++ b/src/test/regress/expected/multi_mx_schema_support.out @@ -6,7 +6,7 @@ -- test very basic queries SELECT * FROM nation_hash ORDER BY n_nationkey LIMIT 4; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special @@ -15,7 +15,7 @@ SELECT * FROM nation_hash ORDER BY n_nationkey LIMIT 4; SELECT * FROM citus_mx_test_schema.nation_hash ORDER BY n_nationkey LIMIT 4; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special @@ -31,18 +31,18 @@ DECLARE test_cursor CURSOR FOR WHERE n_nationkey = 1; FETCH test_cursor; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) FETCH test_cursor; n_nationkey | n_name | n_regionkey | n_comment --------------+--------+-------------+----------- +--------------------------------------------------------------------- (0 rows) FETCH BACKWARD test_cursor; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) @@ -56,18 +56,18 @@ DECLARE test_cursor CURSOR FOR WHERE n_nationkey = 1; FETCH test_cursor; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. 
bold requests alon (1 row) FETCH test_cursor; n_nationkey | n_name | n_regionkey | n_comment --------------+--------+-------------+----------- +--------------------------------------------------------------------- (0 rows) FETCH BACKWARD test_cursor; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) @@ -78,7 +78,7 @@ INSERT INTO citus_mx_test_schema.nation_hash(n_nationkey, n_name, n_regionkey) V -- verify insertion SELECT * FROM citus_mx_test_schema.nation_hash WHERE n_nationkey = 100; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+----------- +--------------------------------------------------------------------- 100 | TURKEY | 3 | (1 row) @@ -88,7 +88,7 @@ INSERT INTO nation_hash(n_nationkey, n_name, n_regionkey) VALUES (101, 'GERMANY' -- verify insertion SELECT * FROM nation_hash WHERE n_nationkey = 101; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+----------- +--------------------------------------------------------------------- 101 | GERMANY | 3 | (1 row) @@ -98,7 +98,7 @@ SET search_path TO public; -- UDF in public, table in a schema other than public, search_path is not set SELECT simpleTestFunction(n_nationkey)::int FROM citus_mx_test_schema.nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5; simpletestfunction --------------------- +--------------------------------------------------------------------- 152 151 37 @@ -110,7 +110,7 @@ SELECT simpleTestFunction(n_nationkey)::int FROM citus_mx_test_schema.nation_has SET search_path TO citus_mx_test_schema; SELECT public.simpleTestFunction(n_nationkey)::int FROM citus_mx_test_schema.nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5; simpletestfunction --------------------- +--------------------------------------------------------------------- 152 151 37 @@ -122,7 +122,7 @@ SELECT public.simpleTestFunction(n_nationkey)::int FROM citus_mx_test_schema.nat SET search_path TO public; SELECT citus_mx_test_schema.simpleTestFunction2(n_nationkey)::int FROM citus_mx_test_schema.nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5; simpletestfunction2 ---------------------- +--------------------------------------------------------------------- 152 151 37 @@ -134,7 +134,7 @@ SELECT citus_mx_test_schema.simpleTestFunction2(n_nationkey)::int FROM citus_mx_ SET search_path TO citus_mx_test_schema; SELECT simpleTestFunction2(n_nationkey)::int FROM nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5; simpletestfunction2 ---------------------- +--------------------------------------------------------------------- 152 151 37 @@ -147,7 +147,7 @@ SET search_path TO public; -- test with search_path is not set SELECT * FROM citus_mx_test_schema.nation_hash WHERE n_nationkey OPERATOR(citus_mx_test_schema.===) 1; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. 
bold requests alon (1 row) @@ -155,13 +155,13 @@ SELECT * FROM citus_mx_test_schema.nation_hash WHERE n_nationkey OPERATOR(citus SET search_path TO citus_mx_test_schema; SELECT * FROM nation_hash WHERE n_nationkey OPERATOR(===) 1; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) SELECT * FROM citus_mx_test_schema.nation_hash_collation_search_path ORDER BY 1; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special @@ -172,7 +172,7 @@ SELECT * FROM citus_mx_test_schema.nation_hash_collation_search_path ORDER BY 1; SELECT n_comment FROM citus_mx_test_schema.nation_hash_collation_search_path ORDER BY n_comment COLLATE citus_mx_test_schema.english; n_comment -------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- al foxes promise slyly according to the regular accounts. bold requests alon eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold haggle. carefully final deposits detect slyly agai @@ -184,7 +184,7 @@ SELECT n_comment FROM citus_mx_test_schema.nation_hash_collation_search_path ORD SET search_path TO citus_mx_test_schema; SELECT * FROM nation_hash_collation_search_path ORDER BY 1 DESC; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- 5 | ETHIOPIA | 0 | ven packages wake quickly. regu 4 | EGYPT | 4 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold @@ -195,7 +195,7 @@ SELECT * FROM nation_hash_collation_search_path ORDER BY 1 DESC; SELECT n_comment FROM nation_hash_collation_search_path ORDER BY n_comment COLLATE english; n_comment -------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- al foxes promise slyly according to the regular accounts. bold requests alon eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold haggle. 
carefully final deposits detect slyly agai @@ -206,7 +206,7 @@ SELECT n_comment FROM nation_hash_collation_search_path ORDER BY n_comment COLLA SELECT * FROM citus_mx_test_schema.nation_hash_composite_types WHERE test_col = '(a,a)'::citus_mx_test_schema.new_composite_type ORDER BY 1::int DESC; n_nationkey | n_name | n_regionkey | n_comment | test_col --------------+---------------------------+-------------+----------------------------------------------------+---------- +--------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai | (a,a) (1 row) @@ -214,7 +214,7 @@ SELECT * FROM citus_mx_test_schema.nation_hash_composite_types WHERE test_col = SET search_path TO citus_mx_test_schema; SELECT * FROM nation_hash_composite_types WHERE test_col = '(a,a)'::new_composite_type ORDER BY 1::int DESC; n_nationkey | n_name | n_regionkey | n_comment | test_col --------------+---------------------------+-------------+----------------------------------------------------+---------- +--------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai | (a,a) (1 row) @@ -229,7 +229,7 @@ FROM WHERE n1.n_nationkey = n2.n_nationkey; count -------- +--------------------------------------------------------------------- 25 (1 row) @@ -244,7 +244,7 @@ FROM WHERE n1.n_nationkey = n2.n_nationkey; count -------- +--------------------------------------------------------------------- 25 (1 row) @@ -259,7 +259,7 @@ FROM WHERE n1.n_nationkey = n2.n_nationkey; count -------- +--------------------------------------------------------------------- 25 (1 row) @@ -274,7 +274,7 @@ FROM WHERE n1.n_nationkey = n2.n_nationkey; count -------- +--------------------------------------------------------------------- 25 (1 row) @@ -291,7 +291,7 @@ FROM WHERE n1.n_nationkey = n2.n_regionkey; count -------- +--------------------------------------------------------------------- 25 (1 row) @@ -306,7 +306,7 @@ FROM WHERE n1.n_nationkey = n2.n_regionkey; count -------- +--------------------------------------------------------------------- 25 (1 row) @@ -321,7 +321,7 @@ FROM WHERE n1.n_nationkey = n2.n_regionkey; count -------- +--------------------------------------------------------------------- 25 (1 row) @@ -337,7 +337,7 @@ FROM WHERE n1.n_regionkey = n2.n_regionkey; count -------- +--------------------------------------------------------------------- 125 (1 row) @@ -352,7 +352,7 @@ FROM WHERE n1.n_regionkey = n2.n_regionkey; count -------- +--------------------------------------------------------------------- 125 (1 row) @@ -367,7 +367,7 @@ FROM WHERE n1.n_regionkey = n2.n_regionkey; count -------- +--------------------------------------------------------------------- 125 (1 row) @@ -389,7 +389,7 @@ SET search_path TO mx_ddl_schema_1; CREATE TABLE table_1 (key int PRIMARY KEY, value text); SELECT create_distributed_table('table_1', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -400,7 +400,7 @@ SET search_path TO mx_ddl_schema_1, mx_ddl_schema_2; CREATE TABLE mx_ddl_schema_2.table_2 (key int PRIMARY KEY, value text); SELECT create_distributed_table('mx_ddl_schema_2.table_2', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -410,7 +410,7 @@ SET search_path TO "CiTuS.TeAeN"; CREATE TABLE "TeeNTabLE.1!?!"(id 
int, "TeNANt_Id" int); SELECT create_distributed_table('"TeeNTabLE.1!?!"', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/multi_mx_tpch_query1.out b/src/test/regress/expected/multi_mx_tpch_query1.out index 423be6399..9247272ab 100644 --- a/src/test/regress/expected/multi_mx_tpch_query1.out +++ b/src/test/regress/expected/multi_mx_tpch_query1.out @@ -26,7 +26,7 @@ ORDER BY l_returnflag, l_linestatus; l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order ---------------+--------------+-----------+----------------+----------------+------------------+---------------------+--------------------+------------------------+------------- +--------------------------------------------------------------------- A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 N | O | 149778.00 | 224706948.16 | 213634857.6854 | 222134071.929801 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 5883 @@ -58,7 +58,7 @@ ORDER BY l_returnflag, l_linestatus; l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order ---------------+--------------+-----------+----------------+----------------+------------------+---------------------+--------------------+------------------------+------------- +--------------------------------------------------------------------- A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 N | O | 149778.00 | 224706948.16 | 213634857.6854 | 222134071.929801 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 5883 @@ -90,7 +90,7 @@ ORDER BY l_returnflag, l_linestatus; l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order ---------------+--------------+-----------+----------------+----------------+------------------+---------------------+--------------------+------------------------+------------- +--------------------------------------------------------------------- A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 N | O | 149778.00 | 224706948.16 | 213634857.6854 | 222134071.929801 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 5883 diff --git a/src/test/regress/expected/multi_mx_tpch_query10.out b/src/test/regress/expected/multi_mx_tpch_query10.out index fdd243e15..40eb3f63c 100644 --- a/src/test/regress/expected/multi_mx_tpch_query10.out +++ b/src/test/regress/expected/multi_mx_tpch_query10.out @@ -37,7 +37,7 @@ ORDER BY revenue DESC LIMIT 20; c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment 
------------+--------------------+-------------+-----------+---------------------------+---------------------------------------+-----------------+--------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- 436 | Customer#000000436 | 255187.7382 | 5896.87 | ROMANIA | 4DCNzAT842cVYTcaUS94kR0QXHSRM5oco0D6Z | 29-927-687-6390 | olites engage carefully. slyly ironic asymptotes about the ironi 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole @@ -95,7 +95,7 @@ ORDER BY revenue DESC LIMIT 20; c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment ------------+--------------------+-------------+-----------+---------------------------+---------------------------------------+-----------------+--------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- 436 | Customer#000000436 | 255187.7382 | 5896.87 | ROMANIA | 4DCNzAT842cVYTcaUS94kR0QXHSRM5oco0D6Z | 29-927-687-6390 | olites engage carefully. slyly ironic asymptotes about the ironi 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole @@ -153,7 +153,7 @@ ORDER BY revenue DESC LIMIT 20; c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment ------------+--------------------+-------------+-----------+---------------------------+---------------------------------------+-----------------+--------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- 436 | Customer#000000436 | 255187.7382 | 5896.87 | ROMANIA | 4DCNzAT842cVYTcaUS94kR0QXHSRM5oco0D6Z | 29-927-687-6390 | olites engage carefully. slyly ironic asymptotes about the ironi 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. 
regular foxes cajole diff --git a/src/test/regress/expected/multi_mx_tpch_query12.out b/src/test/regress/expected/multi_mx_tpch_query12.out index 013135b27..0c0a8c0e2 100644 --- a/src/test/regress/expected/multi_mx_tpch_query12.out +++ b/src/test/regress/expected/multi_mx_tpch_query12.out @@ -33,7 +33,7 @@ GROUP BY ORDER BY l_shipmode; l_shipmode | high_line_count | low_line_count -------------+-----------------+---------------- +--------------------------------------------------------------------- MAIL | 11 | 15 SHIP | 11 | 19 (2 rows) @@ -70,7 +70,7 @@ GROUP BY ORDER BY l_shipmode; l_shipmode | high_line_count | low_line_count -------------+-----------------+---------------- +--------------------------------------------------------------------- MAIL | 11 | 15 SHIP | 11 | 19 (2 rows) @@ -107,7 +107,7 @@ GROUP BY ORDER BY l_shipmode; l_shipmode | high_line_count | low_line_count -------------+-----------------+---------------- +--------------------------------------------------------------------- MAIL | 11 | 15 SHIP | 11 | 19 (2 rows) diff --git a/src/test/regress/expected/multi_mx_tpch_query14.out b/src/test/regress/expected/multi_mx_tpch_query14.out index f613da09b..f39cd6296 100644 --- a/src/test/regress/expected/multi_mx_tpch_query14.out +++ b/src/test/regress/expected/multi_mx_tpch_query14.out @@ -18,7 +18,7 @@ WHERE AND l_shipdate >= date '1995-09-01' AND l_shipdate < date '1995-09-01' + interval '1' year; promo_revenue ---------------------- +--------------------------------------------------------------------- 32.1126387112005225 (1 row) @@ -39,7 +39,7 @@ WHERE AND l_shipdate >= date '1995-09-01' AND l_shipdate < date '1995-09-01' + interval '1' year; promo_revenue ---------------------- +--------------------------------------------------------------------- 32.1126387112005225 (1 row) @@ -60,7 +60,7 @@ WHERE AND l_shipdate >= date '1995-09-01' AND l_shipdate < date '1995-09-01' + interval '1' year; promo_revenue ---------------------- +--------------------------------------------------------------------- 32.1126387112005225 (1 row) diff --git a/src/test/regress/expected/multi_mx_tpch_query19.out b/src/test/regress/expected/multi_mx_tpch_query19.out index ee1295dee..789cd86af 100644 --- a/src/test/regress/expected/multi_mx_tpch_query19.out +++ b/src/test/regress/expected/multi_mx_tpch_query19.out @@ -35,7 +35,7 @@ WHERE AND l_shipinstruct = 'DELIVER IN PERSON' ); revenue -------------- +--------------------------------------------------------------------- 144747.0857 (1 row) @@ -73,7 +73,7 @@ WHERE AND l_shipinstruct = 'DELIVER IN PERSON' ); revenue -------------- +--------------------------------------------------------------------- 144747.0857 (1 row) @@ -111,7 +111,7 @@ WHERE AND l_shipinstruct = 'DELIVER IN PERSON' ); revenue -------------- +--------------------------------------------------------------------- 144747.0857 (1 row) diff --git a/src/test/regress/expected/multi_mx_tpch_query3.out b/src/test/regress/expected/multi_mx_tpch_query3.out index 45559e6b0..d7e3a78b8 100644 --- a/src/test/regress/expected/multi_mx_tpch_query3.out +++ b/src/test/regress/expected/multi_mx_tpch_query3.out @@ -27,7 +27,7 @@ ORDER BY revenue DESC, o_orderdate; l_orderkey | revenue | o_orderdate | o_shippriority -------------+-------------+-------------+---------------- +--------------------------------------------------------------------- 1637 | 268170.6408 | 02-08-1995 | 0 9696 | 252014.5497 | 02-20-1995 | 0 10916 | 242749.1996 | 03-11-1995 | 0 @@ -71,7 +71,7 @@ ORDER BY revenue DESC, 
o_orderdate; l_orderkey | revenue | o_orderdate | o_shippriority -------------+-------------+-------------+---------------- +--------------------------------------------------------------------- 1637 | 268170.6408 | 02-08-1995 | 0 9696 | 252014.5497 | 02-20-1995 | 0 10916 | 242749.1996 | 03-11-1995 | 0 @@ -115,7 +115,7 @@ ORDER BY revenue DESC, o_orderdate; l_orderkey | revenue | o_orderdate | o_shippriority -------------+-------------+-------------+---------------- +--------------------------------------------------------------------- 1637 | 268170.6408 | 02-08-1995 | 0 9696 | 252014.5497 | 02-20-1995 | 0 10916 | 242749.1996 | 03-11-1995 | 0 diff --git a/src/test/regress/expected/multi_mx_tpch_query6.out b/src/test/regress/expected/multi_mx_tpch_query6.out index 8bdc4c86e..9899ace97 100644 --- a/src/test/regress/expected/multi_mx_tpch_query6.out +++ b/src/test/regress/expected/multi_mx_tpch_query6.out @@ -14,7 +14,7 @@ WHERE and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; revenue -------------- +--------------------------------------------------------------------- 243277.7858 (1 row) @@ -31,7 +31,7 @@ WHERE and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; revenue -------------- +--------------------------------------------------------------------- 243277.7858 (1 row) @@ -48,7 +48,7 @@ WHERE and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; revenue -------------- +--------------------------------------------------------------------- 243277.7858 (1 row) diff --git a/src/test/regress/expected/multi_mx_tpch_query7.out b/src/test/regress/expected/multi_mx_tpch_query7.out index ac2e0b2e5..4bc98ca05 100644 --- a/src/test/regress/expected/multi_mx_tpch_query7.out +++ b/src/test/regress/expected/multi_mx_tpch_query7.out @@ -44,7 +44,7 @@ ORDER BY cust_nation, l_year; supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+----------- +--------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) @@ -91,7 +91,7 @@ ORDER BY cust_nation, l_year; supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+----------- +--------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) @@ -138,7 +138,7 @@ ORDER BY cust_nation, l_year; supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+----------- +--------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) diff --git a/src/test/regress/expected/multi_mx_tpch_query7_nested.out b/src/test/regress/expected/multi_mx_tpch_query7_nested.out index 9271fb615..c8ebfce7b 100644 --- a/src/test/regress/expected/multi_mx_tpch_query7_nested.out +++ b/src/test/regress/expected/multi_mx_tpch_query7_nested.out @@ -53,7 +53,7 @@ ORDER BY cust_nation, l_year; supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+----------- +--------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) @@ -109,7 +109,7 @@ ORDER BY cust_nation, l_year; supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+----------- +--------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) @@ -165,7 +165,7 
@@ ORDER BY cust_nation, l_year; supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+----------- +--------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) diff --git a/src/test/regress/expected/multi_mx_transaction_recovery.out b/src/test/regress/expected/multi_mx_transaction_recovery.out index 1e49ad878..47b6aeaad 100644 --- a/src/test/regress/expected/multi_mx_transaction_recovery.out +++ b/src/test/regress/expected/multi_mx_transaction_recovery.out @@ -5,7 +5,7 @@ SET citus.replication_model TO streaming; CREATE TABLE test_recovery (x text); SELECT create_distributed_table('test_recovery', 'x'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -14,7 +14,7 @@ SELECT create_distributed_table('test_recovery', 'x'); ALTER SYSTEM SET citus.recover_2pc_interval TO -1; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) @@ -22,13 +22,13 @@ SET citus.multi_shard_commit_protocol TO '2pc'; -- Ensure pg_dist_transaction is empty for test SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -37,7 +37,7 @@ SELECT count(*) FROM pg_dist_transaction; -- accordingly. SELECT * FROM pg_dist_local_group; groupid ---------- +--------------------------------------------------------------------- 14 (1 row) @@ -61,33 +61,33 @@ INSERT INTO pg_dist_transaction VALUES (14, 'citus_14_should_be_forgotten'); INSERT INTO pg_dist_transaction VALUES (122, 'citus_122_should_do_nothing'); SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 3 (1 row) -- delete the citus_122_should_do_nothing transaction DELETE FROM pg_dist_transaction WHERE gid = 'citus_122_should_do_nothing' RETURNING *; groupid | gid ----------+----------------------------- +--------------------------------------------------------------------- 122 | citus_122_should_do_nothing (1 row) ROLLBACK PREPARED 'citus_122_should_do_nothing'; SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'table_should_abort'; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'table_should_commit'; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -95,7 +95,7 @@ SELECT count(*) FROM pg_tables WHERE tablename = 'table_should_commit'; INSERT INTO test_recovery VALUES ('hello'); SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -107,13 +107,13 @@ INSERT INTO test_recovery VALUES ('world'); COMMIT; SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 2 (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- 
+--------------------------------------------------------------------- 0 (1 row) @@ -127,13 +127,13 @@ INSERT INTO test_recovery VALUES ('world'); COMMIT; SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -141,13 +141,13 @@ SELECT recover_prepared_transactions(); INSERT INTO test_recovery (x) SELECT 'hello-'||s FROM generate_series(1,100) s; SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 4 (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -155,7 +155,7 @@ SELECT recover_prepared_transactions(); COPY test_recovery (x) FROM STDIN CSV; SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 3 (1 row) @@ -163,27 +163,27 @@ SELECT count(*) FROM pg_dist_transaction; ALTER SYSTEM SET citus.recover_2pc_interval TO 10; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) -- Sleep 1 second to give Valgrind enough time to clear transactions SELECT pg_sleep(1); pg_sleep ----------- +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 0 (1 row) ALTER SYSTEM RESET citus.recover_2pc_interval; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/multi_mx_truncate_from_worker.out b/src/test/regress/expected/multi_mx_truncate_from_worker.out index 705968b22..00f96a56f 100644 --- a/src/test/regress/expected/multi_mx_truncate_from_worker.out +++ b/src/test/regress/expected/multi_mx_truncate_from_worker.out @@ -8,14 +8,14 @@ SET citus.replication_model TO streaming; CREATE TABLE "refer'ence_table"(id int PRIMARY KEY); SELECT create_reference_table('refer''ence_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE on_update_fkey_table(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('on_update_fkey_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -26,7 +26,7 @@ INSERT INTO on_update_fkey_table SELECT i, i % 100 FROM generate_series(0, 1000 TRUNCATE on_update_fkey_table; SELECT count(*) FROM on_update_fkey_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -37,13 +37,13 @@ TRUNCATE "refer'ence_table" CASCADE; NOTICE: truncate cascades to table "on_update_fkey_table" SELECT count(*) FROM on_update_fkey_table; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM "refer'ence_table"; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -56,7 +56,7 @@ BEGIN; TRUNCATE on_update_fkey_table; SELECT count(*) FROM 
on_update_fkey_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -67,7 +67,7 @@ SET search_path TO 'truncate_from_workers'; TRUNCATE on_update_fkey_table; SELECT count(*) FROM on_update_fkey_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -78,13 +78,13 @@ TRUNCATE "refer'ence_table" CASCADE; NOTICE: truncate cascades to table "on_update_fkey_table" SELECT count(*) FROM on_update_fkey_table; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM "refer'ence_table"; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -115,14 +115,14 @@ BEGIN; INSERT INTO on_update_fkey_table SELECT i, i % 100 FROM generate_series(0, 1000) i; SELECT count(*) FROM on_update_fkey_table; count -------- +--------------------------------------------------------------------- 1001 (1 row) TRUNCATE on_update_fkey_table; SELECT count(*) FROM on_update_fkey_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -138,7 +138,7 @@ BEGIN; -- should fail since the schema is not provided SELECT lock_relation_if_exists('on_update_fkey_table', 'ACCESS SHARE'); lock_relation_if_exists -------------------------- +--------------------------------------------------------------------- f (1 row) @@ -148,7 +148,7 @@ BEGIN; SET search_path TO 'truncate_from_workers'; SELECT lock_relation_if_exists('on_update_fkey_table', 'ACCESS SHARE'); lock_relation_if_exists -------------------------- +--------------------------------------------------------------------- t (1 row) @@ -157,7 +157,7 @@ BEGIN; -- should return false since there is no such table SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_tableXXX', 'ACCESS SHARE'); lock_relation_if_exists -------------------------- +--------------------------------------------------------------------- f (1 row) @@ -171,62 +171,62 @@ BEGIN; -- test all lock levels SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'ACCESS SHARE'); lock_relation_if_exists -------------------------- +--------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'ROW SHARE'); lock_relation_if_exists -------------------------- +--------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'ROW EXCLUSIVE'); lock_relation_if_exists -------------------------- +--------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'SHARE UPDATE EXCLUSIVE'); lock_relation_if_exists -------------------------- +--------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'SHARE'); lock_relation_if_exists -------------------------- +--------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'SHARE ROW EXCLUSIVE'); lock_relation_if_exists -------------------------- +--------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'SHARE ROW EXCLUSIVE'); 
lock_relation_if_exists -------------------------- +--------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'EXCLUSIVE'); lock_relation_if_exists -------------------------- +--------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'ACCESS EXCLUSIVE'); lock_relation_if_exists -------------------------- +--------------------------------------------------------------------- t (1 row) -- see them all SELECT relation::regclass, mode FROM pg_locks WHERE pid = pg_backend_pid() AND relation = 'truncate_from_workers.on_update_fkey_table'::regclass ORDER BY 2 DESC; relation | mode ---------------------------------------------+-------------------------- +--------------------------------------------------------------------- truncate_from_workers.on_update_fkey_table | ShareUpdateExclusiveLock truncate_from_workers.on_update_fkey_table | ShareRowExclusiveLock truncate_from_workers.on_update_fkey_table | ShareLock diff --git a/src/test/regress/expected/multi_name_lengths.out b/src/test/regress/expected/multi_name_lengths.out index 6c422f5e1..e884550ab 100644 --- a/src/test/regress/expected/multi_name_lengths.out +++ b/src/test/regress/expected/multi_name_lengths.out @@ -10,13 +10,13 @@ CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 ( col2 integer not null); SELECT master_create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345678901234567890', '2', '2'); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -24,7 +24,7 @@ SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345 \dt too_long_* List of relations Schema | Name | Type | Owner ---------+-----------------------------------------------------------------+-------+---------- +--------------------------------------------------------------------- public | too_long_12345678901234567890123456789012345678_e0119164_225000 | table | postgres public | too_long_12345678901234567890123456789012345678_e0119164_225001 | table | postgres (2 rows) @@ -35,7 +35,7 @@ SET citus.shard_replication_factor TO 2; -- Verify that the UDF works and rejects bad arguments. 
SELECT shard_name(NULL, 666666); shard_name ------------- +--------------------------------------------------------------------- (1 row) @@ -43,13 +43,13 @@ SELECT shard_name(0, 666666); ERROR: object_name does not reference a valid relation SELECT shard_name('too_long_12345678901234567890123456789012345678901234567890'::regclass, 666666); shard_name ------------------------------------------------------------------ +--------------------------------------------------------------------- too_long_12345678901234567890123456789012345678_e0119164_666666 (1 row) SELECT shard_name('too_long_12345678901234567890123456789012345678901234567890'::regclass, NULL); shard_name ------------- +--------------------------------------------------------------------- (1 row) @@ -64,7 +64,7 @@ CREATE TABLE name_lengths ( ); SELECT create_distributed_table('name_lengths', 'col1', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -83,7 +83,7 @@ ERROR: cannot create constraint without a name on a distributed table \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.name_lengths_225002'::regclass ORDER BY 1 DESC, 2 DESC; Column | Type | Modifiers ---------------------------------------------------------------+------------------+----------- +--------------------------------------------------------------------- int_col_12345678901234567890123456789012345678901234567890 | integer | default 1 float_col_12345678901234567890123456789012345678901234567890 | double precision | date_col_12345678901234567890123456789012345678901234567890 | date | @@ -103,7 +103,7 @@ ALTER TABLE name_lengths ADD CONSTRAINT nl_checky_123456789012345678901234567890 \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.name_lengths_225002'::regclass ORDER BY 1 DESC, 2 DESC; Constraint | Definition ------------------------------------------------------------------+------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- nl_checky_1234567890123456789012345678901234567_b16df46d_225002 | CHECK (date_col_12345678901234567890123456789012345678901234567890 >= '01-01-2014'::date) (1 row) @@ -121,7 +121,7 @@ CREATE INDEX tmp_idx_12345678901234567890123456789012345678901234567890 ON name_ SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE relname LIKE 'tmp_idx_%' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC; relname | Column | Type | Definition ------------------------------------------------------------------+--------+---------+------------ +--------------------------------------------------------------------- tmp_idx_123456789012345678901234567890123456789_5e470afa_225003 | col2 | integer | col2 tmp_idx_123456789012345678901234567890123456789_5e470afa_225002 | col2 | integer | col2 (2 rows) @@ -135,7 +135,7 @@ NOTICE: identifier "tmp_idx_123456789012345678901234567890123456789012345678901 SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE relname LIKE 'tmp_idx_%' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC; relname | Column | Type | Definition ------------------------------------------------------------------+--------+---------+------------ +--------------------------------------------------------------------- tmp_idx_123456789012345678901234567890123456789_5e470afa_225003 | col2 | integer | col2 
tmp_idx_123456789012345678901234567890123456789_5e470afa_225002 | col2 | integer | col2 tmp_idx_123456789012345678901234567890123456789_599636aa_225003 | col2 | integer | col2 @@ -155,7 +155,7 @@ CREATE TABLE sneaky_name_lengths ( ); SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -168,25 +168,25 @@ CREATE TABLE sneaky_name_lengths ( \di public.sneaky_name_lengths* List of relations Schema | Name | Type | Owner | Table ---------+-----------------------------------------------------------------+-------+----------+--------------------- +--------------------------------------------------------------------- public | sneaky_name_lengths_int_col_1234567890123456789012345678901_key | index | postgres | sneaky_name_lengths (1 row) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_name_lengths'::regclass ORDER BY 1 DESC, 2 DESC; Constraint | Definition ------------------------------------------------------------+------------------------------------------------------------------------------ +--------------------------------------------------------------------- checky_12345678901234567890123456789012345678901234567890 | CHECK (int_col_123456789012345678901234567890123456789012345678901234 > 100) (1 row) SELECT master_create_distributed_table('sneaky_name_lengths', 'int_col_123456789012345678901234567890123456789012345678901234', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2'); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -194,13 +194,13 @@ SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2'); \di public.sneaky*225006 List of relations Schema | Name | Type | Owner | Table ---------+-----------------------------------------------------------------+-------+----------+---------------------------- +--------------------------------------------------------------------- public | sneaky_name_lengths_int_col_1234567890123456789_6402d2cd_225006 | index | postgres | sneaky_name_lengths_225006 (1 row) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_name_lengths_225006'::regclass ORDER BY 1 DESC, 2 DESC; Constraint | Definition ------------------------------------------------------------+------------------------------------------------------------------------------ +--------------------------------------------------------------------- checky_12345678901234567890123456789012345678901234567890 | CHECK (int_col_123456789012345678901234567890123456789012345678901234 > 100) (1 row) @@ -217,7 +217,7 @@ CREATE TABLE sneaky_name_lengths ( ); SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -225,7 +225,7 @@ SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash'); \di unique*225008 List of relations Schema | Name | Type | Owner | Table ---------+-----------------------------------------------------------------+-------+----------+---------------------------- +--------------------------------------------------------------------- public | 
unique_1234567890123456789012345678901234567890_a5986f27_225008 | index | postgres | sneaky_name_lengths_225008 (1 row) @@ -240,7 +240,7 @@ CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 ( col2 integer not null); SELECT create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -248,7 +248,7 @@ SELECT create_distributed_table('too_long_12345678901234567890123456789012345678 \dt *225000000000* List of relations Schema | Name | Type | Owner ---------+-----------------------------------------------------------------+-------+---------- +--------------------------------------------------------------------- public | too_long_1234567890123456789012345678901_e0119164_2250000000000 | table | postgres public | too_long_1234567890123456789012345678901_e0119164_2250000000001 | table | postgres (2 rows) @@ -263,7 +263,7 @@ CREATE TABLE U&"elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E col2 integer not null); SELECT create_distributed_table(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!', 'col1', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -272,7 +272,7 @@ SELECT shard_name(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B FROM pg_dist_shard WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!'::regclass; shard_name ---------------------------------------------------- +--------------------------------------------------------------------- "elephant_слонслонслонсло_c8b737c2_2250000000002" (1 row) @@ -280,7 +280,7 @@ WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!0 \dt public.elephant_* List of relations Schema | Name | Type | Owner ---------+-------------------------------------------------+-------+---------- +--------------------------------------------------------------------- public | elephant_слонслонслонсло_c8b737c2_2250000000002 | table | postgres public | elephant_слонслонслонсло_c8b737c2_2250000000003 | table | postgres (2 rows) @@ -288,7 +288,7 @@ WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!0 \di public.elephant_* List of relations Schema | Name | Type | Owner | Table ---------+-------------------------------------------------+-------+----------+------------------------------------------------- +--------------------------------------------------------------------- public | elephant_слонслонслонсло_14d34928_2250000000002 | index | postgres | elephant_слонслонслонсло_c8b737c2_2250000000002 public | elephant_слонслонслонсло_14d34928_2250000000003 | index | postgres | elephant_слонслонслонсло_c8b737c2_2250000000003 (2 rows) @@ -303,7 +303,7 @@ CREATE TABLE multi_name_lengths.too_long_123456789012345678901234567890123456789 col2 integer not null); SELECT create_distributed_table('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -311,7 +311,7 @@ SELECT 
shard_name('multi_name_lengths.too_long_123456789012345678901234567890123 FROM pg_dist_shard WHERE logicalrelid = 'multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890'::regclass; shard_name ------------------------------------------------------------------------------------- +--------------------------------------------------------------------- multi_name_lengths.too_long_1234567890123456789012345678901_e0119164_2250000000004 (1 row) diff --git a/src/test/regress/expected/multi_name_resolution.out b/src/test/regress/expected/multi_name_resolution.out index f4ba9d226..71b91e08d 100644 --- a/src/test/regress/expected/multi_name_resolution.out +++ b/src/test/regress/expected/multi_name_resolution.out @@ -9,13 +9,13 @@ create table namenest1 (id integer primary key, user_id integer); create table namenest2 (id integer primary key, value_2 integer); select * from create_distributed_table('namenest1', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) select * from create_reference_table('namenest2'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -32,7 +32,7 @@ FROM ( ) AS join_alias(id_deep) WHERE bar.id_deep = join_alias.id_deep; r ---- +--------------------------------------------------------------------- (0 rows) DROP SCHEMA multi_name_resolution CASCADE; diff --git a/src/test/regress/expected/multi_null_minmax_value_pruning.out b/src/test/regress/expected/multi_null_minmax_value_pruning.out index 986422381..c114acebf 100644 --- a/src/test/regress/expected/multi_null_minmax_value_pruning.out +++ b/src/test/regress/expected/multi_null_minmax_value_pruning.out @@ -13,13 +13,13 @@ SET citus.log_multi_join_order to true; SET citus.enable_repartition_joins to ON; SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000; shardminvalue | shardmaxvalue ----------------+--------------- +--------------------------------------------------------------------- 1 | 5986 (1 row) SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001; shardminvalue | shardmaxvalue ----------------+--------------- +--------------------------------------------------------------------- 8997 | 14947 (1 row) @@ -34,7 +34,7 @@ CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE st LOG: join order: [ "lineitem" ] CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE statement coordinator_plan ------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 2 (2 rows) @@ -47,7 +47,7 @@ LOG: join order: [ "lineitem" ][ local partition join "orders" ] DEBUG: join prunable for intervals [1,5986] and [8997,14947] DEBUG: join prunable for intervals [8997,14947] and [1,5986] QUERY PLAN --------------------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) Task Count: 2 @@ -83,7 +83,7 @@ CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE st LOG: join order: [ "lineitem" ] CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE statement coordinator_plan ------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 2 (2 rows) @@ 
-122,7 +122,7 @@ DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 QUERY PLAN -------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) Task Count: 4 @@ -148,7 +148,7 @@ CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE st LOG: join order: [ "lineitem" ] CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE statement coordinator_plan ------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 2 (2 rows) @@ -187,7 +187,7 @@ DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 QUERY PLAN -------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) Task Count: 4 @@ -215,7 +215,7 @@ CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE st DEBUG: Plan is router executable CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE statement coordinator_plan ------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 (2 rows) @@ -254,7 +254,7 @@ DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 QUERY PLAN -------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) Task Count: 4 diff --git a/src/test/regress/expected/multi_orderby_limit_pushdown.out b/src/test/regress/expected/multi_orderby_limit_pushdown.out index db1a315a2..b79b799f8 100644 --- a/src/test/regress/expected/multi_orderby_limit_pushdown.out +++ b/src/test/regress/expected/multi_orderby_limit_pushdown.out @@ -10,7 +10,7 @@ GROUP BY user_id ORDER BY avg(value_1) DESC LIMIT 5; user_id | avg ----------+-------------------- +--------------------------------------------------------------------- 1 | 3.2857142857142857 4 | 2.7391304347826087 5 | 2.6538461538461538 @@ -24,7 +24,7 @@ GROUP BY user_id ORDER BY avg(value_1) DESC LIMIT 1; user_id | avg ----------+-------------------- +--------------------------------------------------------------------- 1 | 3.2857142857142857 (1 row) @@ -35,7 +35,7 @@ GROUP BY user_id ORDER BY avg(value_1) DESC LIMIT 1; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit (cost=0.00..0.00 rows=0 width=0) -> Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: remote_scan.avg DESC @@ -58,7 +58,7 @@ GROUP BY user_id ORDER BY avg(value_1) + 1 DESC LIMIT 1; user_id | ?column? ----------+-------------------- +--------------------------------------------------------------------- 1 | 4.2857142857142857 (1 row) @@ -68,7 +68,7 @@ GROUP BY user_id ORDER BY avg(value_1) + 1 DESC LIMIT 1; user_id | avg ----------+-------------------- +--------------------------------------------------------------------- 1 | 3.2857142857142857 (1 row) @@ -78,7 +78,7 @@ GROUP BY user_id ORDER BY 2 DESC LIMIT 1; user_id | ?column? 
----------+--------------------- +--------------------------------------------------------------------- 5 | 65.6538461538461538 (1 row) @@ -87,7 +87,7 @@ FROM users_table GROUP BY user_id ORDER BY 2 DESC; user_id | ?column? ----------+--------------------- +--------------------------------------------------------------------- 5 | 28.6538461538461538 4 | 25.7391304347826087 2 | 20.3333333333333333 @@ -102,7 +102,7 @@ FROM users_table GROUP BY user_id ORDER BY 2 DESC; QUERY PLAN ---------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: remote_scan."?column?" DESC -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) @@ -121,7 +121,7 @@ GROUP BY user_id ORDER BY 2 DESC LIMIT 1; user_id | ?column? ----------+--------------------- +--------------------------------------------------------------------- 5 | 28.6538461538461538 (1 row) @@ -131,7 +131,7 @@ GROUP BY user_id ORDER BY 2 DESC LIMIT 1; user_id | ?column? ----------+---------- +--------------------------------------------------------------------- 5 | 132 (1 row) @@ -141,7 +141,7 @@ GROUP BY user_id ORDER BY sum(value_2) DESC LIMIT 1; user_id | ?column? ----------+---------- +--------------------------------------------------------------------- 5 | 132 (1 row) @@ -151,7 +151,7 @@ GROUP BY user_id ORDER BY 2 DESC, 1 DESC LIMIT 2; user_id | ?column? ----------+---------- +--------------------------------------------------------------------- 2 | 25 6 | 20 (2 rows) @@ -162,7 +162,7 @@ GROUP BY user_id ORDER BY 2 DESC, 1 LIMIT 2; user_id | ?column? ----------+---------- +--------------------------------------------------------------------- 2 | 100 3 | 100 (2 rows) @@ -173,7 +173,7 @@ GROUP BY user_id ORDER BY 2 DESC LIMIT 2; user_id | sum ----------+----- +--------------------------------------------------------------------- 5 | 132 4 | 113 (2 rows) @@ -184,7 +184,7 @@ GROUP BY user_id ORDER BY 2 DESC LIMIT 2; user_id | ?column? ----------+---------- +--------------------------------------------------------------------- 6 | 238 1 | 232 (2 rows) @@ -195,7 +195,7 @@ GROUP BY user_id ORDER BY (10000 / (sum(value_1 + value_2))) DESC LIMIT 2; user_id | sum ----------+----- +--------------------------------------------------------------------- 6 | 42 1 | 43 (2 rows) @@ -206,7 +206,7 @@ GROUP BY user_id ORDER BY (10000 / (sum(value_1 + value_2))) DESC LIMIT 2; user_id ---------- +--------------------------------------------------------------------- 6 1 (2 rows) @@ -218,7 +218,7 @@ GROUP BY user_id ORDER BY (10000 / (sum(value_1 + value_2))) DESC LIMIT 2; QUERY PLAN ---------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.worker_column_2 DESC @@ -240,7 +240,7 @@ FROM users_table ORDER BY 1 DESC LIMIT 2; ?column? 
----------- +--------------------------------------------------------------------- 19 (1 row) @@ -250,7 +250,7 @@ GROUP BY user_id ORDER BY user_id * avg(value_1) DESC LIMIT 2; user_id | avg ----------+-------------------- +--------------------------------------------------------------------- 5 | 2.6538461538461538 6 | 2.1000000000000000 (2 rows) @@ -261,7 +261,7 @@ GROUP BY user_id ORDER BY user_id * avg(value_1 + value_2) DESC LIMIT 2; user_id | avg ----------+-------------------- +--------------------------------------------------------------------- 5 | 2.6538461538461538 6 | 2.1000000000000000 (2 rows) @@ -272,7 +272,7 @@ GROUP BY user_id ORDER BY sum(value_1) DESC LIMIT 2; user_id ---------- +--------------------------------------------------------------------- 5 4 (2 rows) @@ -284,7 +284,7 @@ GROUP BY user_id ORDER BY sum(value_1) DESC LIMIT 2; QUERY PLAN ---------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.worker_column_2 DESC @@ -308,7 +308,7 @@ GROUP BY ut.user_id ORDER BY MAX(et.time), AVG(ut.value_1) LIMIT 5; user_id | avg ----------+-------------------- +--------------------------------------------------------------------- 6 | 2.1000000000000000 2 | 2.7777777777777778 5 | 2.4230769230769231 @@ -324,7 +324,7 @@ GROUP BY ut.user_id ORDER BY MAX(et.time), AVG(ut.value_1) LIMIT 5; QUERY PLAN -------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.worker_column_3, remote_scan.worker_column_4 @@ -353,7 +353,7 @@ GROUP BY ut.user_id ORDER BY avg(ut.value_2) DESC, AVG(et.value_2) LIMIT 5; user_id | avg ----------+-------------------- +--------------------------------------------------------------------- 3 | 1.8947368421052632 1 | 2.4615384615384615 2 | 2.0000000000000000 @@ -368,7 +368,7 @@ GROUP BY ut.user_id ORDER BY 2, AVG(ut.value_1), 1 DESC LIMIT 2; user_id | count ----------+------- +--------------------------------------------------------------------- 1 | 4 6 | 5 (2 rows) @@ -381,7 +381,7 @@ GROUP BY ut.user_id ORDER BY 2, AVG(ut.value_1), 1 DESC LIMIT 5; QUERY PLAN ------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.count, remote_scan.worker_column_3, remote_scan.user_id DESC diff --git a/src/test/regress/expected/multi_partition_pruning.out b/src/test/regress/expected/multi_partition_pruning.out index f56698c7f..6b0e9d3d8 100644 --- a/src/test/regress/expected/multi_partition_pruning.out +++ b/src/test/regress/expected/multi_partition_pruning.out @@ -7,7 +7,7 @@ SET citus.next_shard_id TO 770000; -- Adding additional l_orderkey = 1 to make this query not router executable SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1 ORDER BY 1,2; l_orderkey | l_linenumber | l_shipdate -------------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | 03-13-1996 1 | 2 | 04-12-1996 1 | 3 | 01-29-1996 @@ -29,21 +29,21 @@ SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 903 -- non-deterministic debug messages. To avoid this chain, we use l_linenumber. 
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; sum | avg --------+-------------------- +--------------------------------------------------------------------- 17999 | 3.0189533713518953 (1 row) SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE (l_orderkey < 4000 OR l_orderkey > 9030); sum | avg --------+-------------------- +--------------------------------------------------------------------- 30184 | 3.0159872102318145 (1 row) -- The following query should prune out all shards and return empty results SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE l_orderkey > 20000; sum | avg ------+----- +--------------------------------------------------------------------- | (1 row) @@ -59,7 +59,7 @@ CREATE TABLE varchar_partitioned_table ); SELECT create_distributed_table('varchar_partitioned_table', 'varchar_column', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -87,7 +87,7 @@ CREATE TABLE array_partitioned_table ); SELECT create_distributed_table('array_partitioned_table', 'array_column', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -123,7 +123,7 @@ CREATE TABLE composite_partitioned_table ); SELECT create_distributed_table('composite_partitioned_table', 'composite_column', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -151,7 +151,7 @@ SET client_min_messages TO ERROR; EXPLAIN (COSTS OFF) SELECT count(*) FROM varchar_partitioned_table WHERE varchar_column = 'BA2'; QUERY PLAN -------------------------------------------------- +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) Task Count: 1 @@ -164,7 +164,7 @@ EXPLAIN (COSTS OFF) SELECT count(*) FROM array_partitioned_table WHERE array_column > '{BA1000U2AMO4ZGX, BZZXSP27F21T6}'; QUERY PLAN -------------------------------------------------- +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) Task Count: 1 @@ -177,7 +177,7 @@ EXPLAIN (COSTS OFF) SELECT count(*) FROM composite_partitioned_table WHERE composite_column < '(b,5,c)'::composite_type; QUERY PLAN -------------------------------------------------- +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) Task Count: 1 diff --git a/src/test/regress/expected/multi_partitioning.out b/src/test/regress/expected/multi_partitioning.out index 152870c4b..98c0dae41 100644 --- a/src/test/regress/expected/multi_partitioning.out +++ b/src/test/regress/expected/multi_partitioning.out @@ -30,7 +30,7 @@ SELECT create_distributed_table('partitioning_test', 'id'); NOTICE: Copying data from local table... NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -38,14 +38,14 @@ SELECT create_distributed_table('partitioning_hash_test', 'id'); NOTICE: Copying data from local table... NOTICE: Copying data from local table... 
create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) -- see the data is loaded to shards SELECT * FROM partitioning_test ORDER BY 1; id | time -----+------------ +--------------------------------------------------------------------- 1 | 06-06-2009 2 | 07-07-2010 3 | 09-09-2009 @@ -54,7 +54,7 @@ SELECT * FROM partitioning_test ORDER BY 1; SELECT * FROM partitioning_hash_test ORDER BY 1; id | subid -----+------- +--------------------------------------------------------------------- 1 | 2 2 | 13 3 | 7 @@ -70,7 +70,7 @@ WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; logicalrelid ------------------------- +--------------------------------------------------------------------- partitioning_test partitioning_test_2009 partitioning_test_2010 @@ -85,7 +85,7 @@ GROUP BY ORDER BY 1,2; logicalrelid | count -------------------------+------- +--------------------------------------------------------------------- partitioning_test | 4 partitioning_test_2009 | 4 partitioning_test_2010 | 4 @@ -99,7 +99,7 @@ WHERE logicalrelid IN ('partitioning_hash_test', 'partitioning_hash_test_0', 'partitioning_hash_test_1') ORDER BY 1; logicalrelid --------------------------- +--------------------------------------------------------------------- partitioning_hash_test partitioning_hash_test_0 partitioning_hash_test_1 @@ -114,7 +114,7 @@ GROUP BY ORDER BY 1,2; logicalrelid | count ---------------------------+------- +--------------------------------------------------------------------- partitioning_hash_test | 4 partitioning_hash_test_0 | 4 partitioning_hash_test_1 | 4 @@ -131,7 +131,7 @@ WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2011') ORDER BY 1; logicalrelid ------------------------- +--------------------------------------------------------------------- partitioning_test partitioning_test_2011 (2 rows) @@ -145,7 +145,7 @@ GROUP BY ORDER BY 1,2; logicalrelid | count -------------------------+------- +--------------------------------------------------------------------- partitioning_test | 4 partitioning_test_2011 | 4 (2 rows) @@ -166,7 +166,7 @@ WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2012') ORDER BY 1; logicalrelid ------------------------- +--------------------------------------------------------------------- partitioning_test partitioning_test_2012 (2 rows) @@ -180,7 +180,7 @@ GROUP BY ORDER BY 1,2; logicalrelid | count -------------------------+------- +--------------------------------------------------------------------- partitioning_test | 4 partitioning_test_2012 | 4 (2 rows) @@ -203,7 +203,7 @@ INSERT INTO partitioning_hash_test VALUES (9, 12); -- see the data is loaded to shards SELECT * FROM partitioning_test ORDER BY 1; id | time -----+------------ +--------------------------------------------------------------------- 1 | 06-06-2009 2 | 07-07-2010 3 | 09-09-2009 @@ -214,7 +214,7 @@ SELECT * FROM partitioning_test ORDER BY 1; SELECT * FROM partitioning_hash_test ORDER BY 1; id | subid -----+------- +--------------------------------------------------------------------- 1 | 2 2 | 13 3 | 7 @@ -227,7 +227,7 @@ SELECT * FROM partitioning_hash_test ORDER BY 1; CREATE TABLE partitioning_test_2013(id int, time date); SELECT create_distributed_table('partitioning_test_2013', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -238,7 +238,7 @@ ALTER 
TABLE partitioning_test ATTACH PARTITION partitioning_test_2013 FOR VALUES -- see the data is loaded to shards SELECT * FROM partitioning_test ORDER BY 1; id | time -----+------------ +--------------------------------------------------------------------- 1 | 06-06-2009 2 | 07-07-2010 3 | 09-09-2009 @@ -270,7 +270,7 @@ DROP TABLE partitioning_test_failure_2009; CREATE TABLE partitioning_test_failure_2009(id int, time date); SELECT create_distributed_table('partitioning_test_failure_2009', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -287,7 +287,7 @@ DETAIL: Relation "partitioning_test_failure_2009" is partitioned table itself a DROP TABLE partitioning_test_failure_2009; SELECT create_distributed_table('partitioning_test_failure', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -305,7 +305,7 @@ COPY partitioning_test_2009 FROM STDIN WITH CSV; -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id >= 9 ORDER BY 1; id | time -----+------------ +--------------------------------------------------------------------- 9 | 01-01-2009 10 | 01-01-2010 11 | 01-01-2011 @@ -326,7 +326,7 @@ INSERT INTO partitioning_test VALUES(20, '2010-02-02'); -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id >= 15 ORDER BY 1; id | time -----+------------ +--------------------------------------------------------------------- 15 | 02-01-2009 16 | 02-01-2010 17 | 02-01-2011 @@ -343,7 +343,7 @@ INSERT INTO partitioning_test_2012 SELECT * FROM partitioning_test WHERE time >= -- see the data is loaded to shards (rows in the given range should be duplicated) SELECT * FROM partitioning_test WHERE time >= '2011-01-01' AND time < '2013-01-01' ORDER BY 1; id | time -----+------------ +--------------------------------------------------------------------- 5 | 06-06-2012 5 | 06-06-2012 6 | 07-07-2012 @@ -366,7 +366,7 @@ UPDATE partitioning_test_2013 SET time = '2013-08-08' WHERE id = 8; -- see the data is updated SELECT * FROM partitioning_test WHERE id = 7 OR id = 8 ORDER BY 1; id | time -----+------------ +--------------------------------------------------------------------- 7 | 07-07-2013 8 | 08-08-2013 (2 rows) @@ -393,7 +393,7 @@ WHERE -- see the data is updated SELECT * FROM partitioning_test WHERE id = 1 OR id = 2 ORDER BY 1; id | time -----+------------ +--------------------------------------------------------------------- 1 | 06-07-2009 2 | 07-07-2010 (2 rows) @@ -406,7 +406,7 @@ DELETE FROM partitioning_test_2010 WHERE id = 10; -- see the data is deleted SELECT * FROM partitioning_test WHERE id = 9 OR id = 10 ORDER BY 1; id | time -----+------ +--------------------------------------------------------------------- (0 rows) -- create default partition @@ -414,7 +414,7 @@ CREATE TABLE partitioning_test_default PARTITION OF partitioning_test DEFAULT; \d+ partitioning_test Partitioned table "public.partitioning_test" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- +--------------------------------------------------------------------- id | integer | | | | plain | | time | date | | | | plain | | Partition key: RANGE ("time") @@ -430,14 +430,14 @@ INSERT INTO partitioning_test VALUES(22, '2015-04-02'); -- see they are inserted into default partition SELECT * FROM 
partitioning_test WHERE id > 20 ORDER BY 1, 2; id | time -----+------------ +--------------------------------------------------------------------- 21 | 02-02-2014 22 | 04-02-2015 (2 rows) SELECT * FROM partitioning_test_default ORDER BY 1, 2; id | time -----+------------ +--------------------------------------------------------------------- 21 | 02-02-2014 22 | 04-02-2015 (2 rows) @@ -456,14 +456,14 @@ END; -- see data is in the table, but some moved out from default partition SELECT * FROM partitioning_test WHERE id > 20 ORDER BY 1, 2; id | time -----+------------ +--------------------------------------------------------------------- 21 | 02-02-2014 22 | 04-02-2015 (2 rows) SELECT * FROM partitioning_test_default ORDER BY 1, 2; id | time -----+------------ +--------------------------------------------------------------------- 22 | 04-02-2015 (1 row) @@ -472,7 +472,7 @@ UPDATE partitioning_test SET time = time + INTERVAL '1 day'; -- see rows are UPDATED SELECT * FROM partitioning_test ORDER BY 1; id | time -----+------------ +--------------------------------------------------------------------- 1 | 06-08-2009 2 | 07-08-2010 3 | 09-10-2009 @@ -506,7 +506,7 @@ UPDATE partitioning_test_2009 SET time = time + INTERVAL '1 day'; -- see rows are UPDATED SELECT * FROM partitioning_test_2009 ORDER BY 1; id | time -----+------------ +--------------------------------------------------------------------- 1 | 06-09-2009 3 | 09-11-2009 13 | 01-04-2009 @@ -534,7 +534,7 @@ CREATE INDEX CONCURRENTLY partitioned_2010_index ON partitioning_test_2010(id); -- see index is created SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'partitioning_test_%' ORDER BY indexname; tablename | indexname ----------------------------+---------------------------------- +--------------------------------------------------------------------- partitioning_test_2010 | partitioned_2010_index partitioning_test_2009 | partitioning_2009_index partitioning_test_2009 | partitioning_test_2009_id_idx @@ -565,7 +565,7 @@ CREATE INDEX non_distributed_partitioned_table_index ON non_distributed_partitio -- see index is created SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'non_distributed_partitioned_table_%' ORDER BY indexname; tablename | indexname --------------------------------------+------------------------------------------- +--------------------------------------------------------------------- non_distributed_partitioned_table_1 | non_distributed_partitioned_table_1_a_idx (1 row) @@ -573,7 +573,7 @@ SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'non_distribute DROP INDEX non_distributed_partitioned_table_index; SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'non_distributed%' ORDER BY indexname; tablename | indexname ------------+----------- +--------------------------------------------------------------------- (0 rows) -- test add COLUMN @@ -585,7 +585,7 @@ ERROR: cannot add column to a partition -- see additional column is created SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass ORDER BY 1; name | type -------------+--------- +--------------------------------------------------------------------- id | integer new_column | integer time | date @@ -593,7 +593,7 @@ SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass O SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test_2010'::regclass ORDER BY 1; name | type -------------+--------- 
+--------------------------------------------------------------------- id | integer new_column | integer time | date @@ -617,7 +617,7 @@ WHERE table_name = 'partitioning_test_2009' AND constraint_name = 'partitioning_2009_primary'; table_name | constraint_name | constraint_type -------------------------+---------------------------+----------------- +--------------------------------------------------------------------- partitioning_test_2009 | partitioning_2009_primary | PRIMARY KEY (1 row) @@ -635,7 +635,7 @@ WHERE constraint_type = 'PRIMARY KEY' ORDER BY 1; table_name | constraint_name | constraint_type ---------------------------+-------------------------------+----------------- +--------------------------------------------------------------------- partitioning_hash_test | partitioning_hash_primary | PRIMARY KEY partitioning_hash_test_0 | partitioning_hash_test_0_pkey | PRIMARY KEY partitioning_hash_test_1 | partitioning_hash_test_1_pkey | PRIMARY KEY @@ -655,7 +655,7 @@ ALTER TABLE partitioning_test_2012 ADD CONSTRAINT partitioning_2012_foreign FORE -- see FOREIGN KEY is created SELECT "Constraint" FROM table_fkeys WHERE relid = 'partitioning_test_2012'::regclass ORDER BY 1; Constraint ---------------------------- +--------------------------------------------------------------------- partitioning_2012_foreign (1 row) @@ -664,12 +664,12 @@ DELETE FROM partitioning_test_2009 WHERE id = 5; -- see that element is deleted from both partitions SELECT * FROM partitioning_test_2009 WHERE id = 5 ORDER BY 1; id | time | new_column -----+------+------------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM partitioning_test_2012 WHERE id = 5 ORDER BY 1; id | time | new_column -----+------+------------ +--------------------------------------------------------------------- (0 rows) -- test DETACH partition @@ -677,14 +677,14 @@ ALTER TABLE partitioning_test DETACH PARTITION partitioning_test_2009; -- see DETACHed partitions content is not accessible from partitioning_test; SELECT * FROM partitioning_test WHERE time >= '2009-01-01' AND time < '2010-01-01' ORDER BY 1; id | time | new_column -----+------+------------ +--------------------------------------------------------------------- (0 rows) -- delete from default partition DELETE FROM partitioning_test WHERE time >= '2015-01-01'; SELECT * FROM partitioning_test_default; id | time | new_column -----+------+------------ +--------------------------------------------------------------------- (0 rows) -- create a reference table for foreign key test @@ -693,7 +693,7 @@ INSERT INTO partitioning_test_reference SELECT a, a FROM generate_series(1, 50) SELECT create_reference_table('partitioning_test_reference'); NOTICE: Copying data from local table... 
create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -701,7 +701,7 @@ ALTER TABLE partitioning_test ADD CONSTRAINT partitioning_reference_fkey FOREIGN CREATE TABLE partitioning_test_foreign_key(id int PRIMARY KEY, value int); SELECT create_distributed_table('partitioning_test_foreign_key', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -716,7 +716,7 @@ WHERE ORDER BY 1,2; table_name | constraint_name | constraint_type ---------------------------+--------------------------------+----------------- +--------------------------------------------------------------------- partitioning_hash_test | partitioning_reference_fk_test | FOREIGN KEY partitioning_hash_test_0 | partitioning_reference_fk_test | FOREIGN KEY partitioning_hash_test_1 | partitioning_reference_fk_test | FOREIGN KEY @@ -745,7 +745,7 @@ SELECT right(table_name, 7)::int as shardid, * FROM ( $$) ) w ORDER BY 1, 2, 3, 4; shardid | table_name | constraint_name | constraint_type ----------+----------------------------------+----------------------------------------+----------------- +--------------------------------------------------------------------- 1660012 | partitioning_hash_test_1660012 | partitioning_reference_fk_test_1660012 | FOREIGN KEY 1660013 | partitioning_hash_test_1660013 | partitioning_reference_fk_test_1660013 | FOREIGN KEY 1660014 | partitioning_hash_test_1660014 | partitioning_reference_fk_test_1660014 | FOREIGN KEY @@ -770,7 +770,7 @@ DROP TYPE foreign_key_details; SET citus.shard_replication_factor TO 1; SELECT * FROM partitioning_test WHERE id = 11 or id = 12; id | time | new_column -----+------------+------------ +--------------------------------------------------------------------- 11 | 01-02-2011 | 11 | 01-02-2011 | 12 | 01-02-2012 | @@ -780,7 +780,7 @@ SELECT * FROM partitioning_test WHERE id = 11 or id = 12; DELETE FROM partitioning_test_reference WHERE id = 11 or id = 12; SELECT * FROM partitioning_hash_test ORDER BY 1, 2; id | subid -----+------- +--------------------------------------------------------------------- 1 | 2 2 | 13 3 | 7 @@ -793,12 +793,12 @@ DELETE FROM partitioning_test_foreign_key WHERE id = 2 OR id = 9; -- see data is deleted from referencing table SELECT * FROM partitioning_test WHERE id = 11 or id = 12; id | time | new_column -----+------+------------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM partitioning_hash_test ORDER BY 1, 2; id | subid -----+------- +--------------------------------------------------------------------- 1 | 2 3 | 7 4 | 4 @@ -814,7 +814,7 @@ ALTER TABLE partitioning_test ADD newer_column int; -- see additional column is created SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass ORDER BY 1; name | type ---------------+--------- +--------------------------------------------------------------------- id | integer new_column | integer newer_column | integer @@ -825,7 +825,7 @@ ROLLBACK; -- see rollback is successful SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass ORDER BY 1; name | type -------------+--------- +--------------------------------------------------------------------- id | integer new_column | integer time | date @@ -837,19 +837,19 @@ COPY partitioning_test FROM STDIN WITH CSV; -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id = 22 ORDER BY 1; id | time | 
new_column -----+------------+------------ +--------------------------------------------------------------------- 22 | 01-01-2010 | 22 (1 row) SELECT * FROM partitioning_test WHERE id = 23 ORDER BY 1; id | time | new_column -----+------------+------------ +--------------------------------------------------------------------- 23 | 01-01-2011 | 23 (1 row) SELECT * FROM partitioning_test WHERE id = 24 ORDER BY 1; id | time | new_column -----+------------+------------ +--------------------------------------------------------------------- 24 | 01-01-2013 | 24 (1 row) @@ -857,7 +857,7 @@ ROLLBACK; -- see rollback is successful SELECT * FROM partitioning_test WHERE id >= 22 ORDER BY 1; id | time | new_column -----+------+------------ +--------------------------------------------------------------------- (0 rows) -- DML in transaction @@ -867,7 +867,7 @@ INSERT INTO partitioning_test VALUES(25, '2010-02-02'); -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1; id | time | new_column -----+------------+------------ +--------------------------------------------------------------------- 25 | 02-02-2010 | (1 row) @@ -876,7 +876,7 @@ INSERT INTO partitioning_test SELECT * FROM partitioning_test WHERE id = 25; -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1; id | time | new_column -----+------------+------------ +--------------------------------------------------------------------- 25 | 02-02-2010 | 25 | 02-02-2010 | (2 rows) @@ -886,7 +886,7 @@ UPDATE partitioning_test SET time = '2010-10-10' WHERE id = 25; -- see the data is updated SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1; id | time | new_column -----+------------+------------ +--------------------------------------------------------------------- 25 | 10-10-2010 | 25 | 10-10-2010 | (2 rows) @@ -899,7 +899,7 @@ COPY partitioning_test_2010 FROM STDIN WITH CSV; -- see the data is loaded to shards (we should see 4 rows with same content) SELECT * FROM partitioning_test WHERE id = 26 ORDER BY 1; id | time | new_column -----+------------+------------ +--------------------------------------------------------------------- 26 | 02-02-2010 | 26 26 | 02-02-2010 | 26 26 | 02-02-2010 | 26 @@ -910,7 +910,7 @@ ROLLBACK; -- see rollback is successful SELECT * FROM partitioning_test WHERE id = 26 ORDER BY 1; id | time | new_column -----+------+------------ +--------------------------------------------------------------------- (0 rows) -- DETACH and DROP in a transaction @@ -921,7 +921,7 @@ COMMIT; -- see DROPed partitions content is not accessible SELECT * FROM partitioning_test WHERE time >= '2011-01-01' AND time < '2012-01-01' ORDER BY 1; id | time | new_column -----+------+------------ +--------------------------------------------------------------------- (0 rows) -- @@ -933,7 +933,7 @@ TRUNCATE partitioning_test_2012; -- see partition is TRUNCATEd SELECT * FROM partitioning_test_2012 ORDER BY 1; id | time | new_column -----+------+------------ +--------------------------------------------------------------------- (0 rows) -- test TRUNCATE partitioned table @@ -941,7 +941,7 @@ TRUNCATE partitioning_test; -- see partitioned table is TRUNCATEd SELECT * FROM partitioning_test ORDER BY 1; id | time | new_column -----+------+------------ +--------------------------------------------------------------------- (0 rows) -- test DROP @@ -951,7 +951,7 @@ DROP TABLE partitioning_test_2010; -- see DROPped partitions content is not accessible from partitioning_test; SELECT * 
FROM partitioning_test WHERE time >= '2010-01-01' AND time < '2011-01-01' ORDER BY 1; id | time | new_column -----+------+------------ +--------------------------------------------------------------------- (0 rows) -- test DROP partitioned table @@ -960,7 +960,7 @@ DROP TABLE partitioning_test_reference; -- dropping the parent should CASCADE to the children as well SELECT table_name FROM information_schema.tables WHERE table_name LIKE 'partitioning_test%' ORDER BY 1; table_name -------------------------------- +--------------------------------------------------------------------- partitioning_test_2009 partitioning_test_failure partitioning_test_foreign_key @@ -971,13 +971,13 @@ CREATE TABLE partitioned_users_table (user_id int, time timestamp, value_1 int, CREATE TABLE partitioned_events_table (user_id int, time timestamp, event_type int, value_2 int, value_3 float, value_4 bigint) PARTITION BY RANGE (time); SELECT create_distributed_table('partitioned_users_table', 'user_id', colocate_with => 'users_table'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('partitioned_events_table', 'user_id', colocate_with => 'events_table'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1032,7 +1032,7 @@ FROM GROUP BY types ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 43 1 | 44 2 | 8 @@ -1111,7 +1111,7 @@ GROUP BY ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 367 2 | 360 3 | 57 @@ -1125,7 +1125,7 @@ CREATE TABLE list_partitioned_events_table_2014_01_11_15 PARTITION OF list_parti -- test distributing partitioned table colocated with another partitioned table SELECT create_distributed_table('list_partitioned_events_table', 'user_id', colocate_with => 'partitioned_events_table'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1182,7 +1182,7 @@ count(*) AS cnt, "generated_group_field" cnt DESC, generated_group_field ASC LIMIT 10; cnt | generated_group_field -------+----------------------- +--------------------------------------------------------------------- 1851 | 1 1077 | 4 963 | 2 @@ -1199,7 +1199,7 @@ CREATE TABLE multi_column_partitioning(c1 int, c2 int) PARTITION BY RANGE (c1, c CREATE TABLE multi_column_partitioning_0_0_10_0 PARTITION OF multi_column_partitioning FOR VALUES FROM (0, 0) TO (10, 0); SELECT create_distributed_table('multi_column_partitioning', 'c1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1224,7 +1224,7 @@ CONTEXT: while executing command on localhost:xxxxx -- see data is loaded to multi-column partitioned table SELECT * FROM multi_column_partitioning ORDER BY 1, 2; c1 | c2 -----+----- +--------------------------------------------------------------------- 1 | 1 5 | -5 11 | -11 @@ -1241,7 +1241,7 @@ CREATE TABLE partitioning_locks_2010 PARTITION OF partitioning_locks FOR VALUES -- distribute partitioned table SELECT create_distributed_table('partitioning_locks', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1249,12 
+1249,12 @@ SELECT create_distributed_table('partitioning_locks', 'id'); BEGIN; SELECT * FROM partitioning_locks WHERE id = 1 ORDER BY 1, 2; id | ref_id | time -----+--------+------ +--------------------------------------------------------------------- (0 rows) SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; relation | locktype | mode --------------------------+----------+----------------- +--------------------------------------------------------------------- partitioning_locks | relation | AccessShareLock partitioning_locks_2009 | relation | AccessShareLock partitioning_locks_2010 | relation | AccessShareLock @@ -1265,12 +1265,12 @@ COMMIT; BEGIN; SELECT * FROM partitioning_locks ORDER BY 1, 2; id | ref_id | time -----+--------+------ +--------------------------------------------------------------------- (0 rows) SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; relation | locktype | mode --------------------------+----------+----------------- +--------------------------------------------------------------------- partitioning_locks | relation | AccessShareLock partitioning_locks_2009 | relation | AccessShareLock partitioning_locks_2010 | relation | AccessShareLock @@ -1282,12 +1282,12 @@ SET citus.task_executor_type TO 'task-tracker'; BEGIN; SELECT * FROM partitioning_locks AS pl1 JOIN partitioning_locks AS pl2 ON pl1.id = pl2.ref_id ORDER BY 1, 2; id | ref_id | time | id | ref_id | time -----+--------+------+----+--------+------ +--------------------------------------------------------------------- (0 rows) SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; relation | locktype | mode --------------------------+----------+----------------- +--------------------------------------------------------------------- partitioning_locks | relation | AccessShareLock partitioning_locks_2009 | relation | AccessShareLock partitioning_locks_2010 | relation | AccessShareLock @@ -1300,7 +1300,7 @@ BEGIN; INSERT INTO partitioning_locks VALUES(1, 1, '2009-01-01'); SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; relation | locktype | mode --------------------------+----------+------------------ +--------------------------------------------------------------------- partitioning_locks | relation | AccessShareLock partitioning_locks | relation | RowExclusiveLock partitioning_locks_2009 | relation | AccessShareLock @@ -1315,7 +1315,7 @@ BEGIN; UPDATE partitioning_locks SET time = '2009-02-01' WHERE id = 1; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; relation | locktype | mode --------------------------+----------+------------------ +--------------------------------------------------------------------- partitioning_locks | relation | AccessShareLock partitioning_locks | relation | RowExclusiveLock partitioning_locks_2009 | relation | AccessShareLock @@ -1330,7 +1330,7 @@ BEGIN; DELETE FROM partitioning_locks WHERE id = 1; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 
1, 2, 3; relation | locktype | mode --------------------------+----------+------------------ +--------------------------------------------------------------------- partitioning_locks | relation | AccessShareLock partitioning_locks | relation | RowExclusiveLock partitioning_locks_2009 | relation | AccessShareLock @@ -1344,7 +1344,7 @@ COMMIT; CREATE TABLE partitioning_locks_for_select(id int, ref_id int, time date); SELECT create_distributed_table('partitioning_locks_for_select', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1352,7 +1352,7 @@ BEGIN; INSERT INTO partitioning_locks SELECT * FROM partitioning_locks_for_select; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; relation | locktype | mode --------------------------------+----------+------------------ +--------------------------------------------------------------------- partitioning_locks | relation | AccessShareLock partitioning_locks | relation | RowExclusiveLock partitioning_locks_2009 | relation | AccessShareLock @@ -1368,7 +1368,7 @@ BEGIN; INSERT INTO partitioning_locks SELECT * FROM partitioning_locks_for_select LIMIT 5; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; relation | locktype | mode --------------------------------+----------+------------------ +--------------------------------------------------------------------- partitioning_locks | relation | AccessShareLock partitioning_locks | relation | RowExclusiveLock partitioning_locks_2009 | relation | RowExclusiveLock @@ -1382,7 +1382,7 @@ BEGIN; UPDATE partitioning_locks SET time = '2009-03-01'; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; relation | locktype | mode --------------------------+----------+------------------ +--------------------------------------------------------------------- partitioning_locks | relation | AccessShareLock partitioning_locks | relation | RowExclusiveLock partitioning_locks_2009 | relation | AccessShareLock @@ -1397,7 +1397,7 @@ BEGIN; ALTER TABLE partitioning_locks ADD COLUMN new_column int; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; relation | locktype | mode --------------------------+----------+--------------------- +--------------------------------------------------------------------- partitioning_locks | relation | AccessExclusiveLock partitioning_locks | relation | AccessShareLock partitioning_locks_2009 | relation | AccessExclusiveLock @@ -1412,7 +1412,7 @@ BEGIN; TRUNCATE partitioning_locks; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; relation | locktype | mode --------------------------+----------+--------------------- +--------------------------------------------------------------------- partitioning_locks | relation | AccessExclusiveLock partitioning_locks | relation | AccessShareLock partitioning_locks_2009 | relation | AccessExclusiveLock @@ -1442,7 +1442,7 @@ WHERE ORDER BY 1, 2, 3; logicalrelid | locktype | mode 
--------------------------+----------+-------------------------- +--------------------------------------------------------------------- partitioning_locks | advisory | ShareUpdateExclusiveLock partitioning_locks | advisory | ShareUpdateExclusiveLock partitioning_locks | advisory | ShareUpdateExclusiveLock @@ -1476,7 +1476,7 @@ WHERE ORDER BY 1, 2, 3; logicalrelid | locktype | mode --------------------------+----------+----------- +--------------------------------------------------------------------- partitioning_locks_2009 | advisory | ShareLock partitioning_locks_2009 | advisory | ShareLock partitioning_locks_2009 | advisory | ShareLock @@ -1502,7 +1502,7 @@ WHERE ORDER BY 1, 2, 3; logicalrelid | locktype | mode --------------------------+----------+-------------------------- +--------------------------------------------------------------------- partitioning_locks | advisory | ShareUpdateExclusiveLock partitioning_locks | advisory | ShareUpdateExclusiveLock partitioning_locks | advisory | ShareUpdateExclusiveLock @@ -1525,48 +1525,48 @@ CREATE TABLE partitioning_hash_join_test_1 PARTITION OF partitioning_hash_join_t CREATE TABLE partitioning_hash_join_test_2 PARTITION OF partitioning_hash_join_test FOR VALUES WITH (MODULUS 3, REMAINDER 2); SELECT create_distributed_table('partitioning_hash_join_test', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT success FROM run_command_on_workers('alter system set enable_mergejoin to off'); success ---------- +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('alter system set enable_nestloop to off'); success ---------- +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('alter system set enable_indexscan to off'); success ---------- +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('alter system set enable_indexonlyscan to off'); success ---------- +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('alter system set enable_partitionwise_join to off'); success ---------- +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('select pg_reload_conf()'); success ---------- +--------------------------------------------------------------------- t t (2 rows) @@ -1574,7 +1574,7 @@ SELECT success FROM run_command_on_workers('select pg_reload_conf()'); EXPLAIN (COSTS OFF) SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id, subid); QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 @@ -1598,21 +1598,21 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id, -- set partition-wise join on and parallel to off SELECT success FROM run_command_on_workers('alter system set max_parallel_workers_per_gather = 0'); success ---------- +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('alter system set 
enable_partitionwise_join to on'); success ---------- +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('select pg_reload_conf()'); success ---------- +--------------------------------------------------------------------- t t (2 rows) @@ -1622,7 +1622,7 @@ ANALYZE partitioning_hash_test, partitioning_hash_join_test; EXPLAIN (COSTS OFF) SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id, subid); QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 @@ -1652,7 +1652,7 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id, EXPLAIN (COSTS OFF) SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id); QUERY PLAN ---------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 @@ -1674,49 +1674,49 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id) -- reset partition-wise join SELECT success FROM run_command_on_workers('alter system reset enable_partitionwise_join'); success ---------- +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('alter system reset enable_mergejoin'); success ---------- +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('alter system reset enable_nestloop'); success ---------- +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('alter system reset enable_indexscan'); success ---------- +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('alter system reset enable_indexonlyscan'); success ---------- +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('alter system reset max_parallel_workers_per_gather'); success ---------- +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('select pg_reload_conf()'); success ---------- +--------------------------------------------------------------------- t t (2 rows) @@ -1737,7 +1737,7 @@ CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time); CREATE TABLE partitioning_test_2009 PARTITION OF partitioning_test FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); SELECT create_distributed_table('partitioning_test', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1747,7 +1747,7 @@ CREATE SCHEMA partitioning_schema; CREATE TABLE partitioning_schema."schema-test"(id int, time date) PARTITION BY RANGE (time); SELECT create_distributed_table('partitioning_schema."schema-test"', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1762,7 +1762,7 @@ WHERE logicalrelid IN 
('partitioning_schema."schema-test"'::regclass, 'partitioning_schema."schema-test_2009"'::regclass) ORDER BY 1; logicalrelid ----------------------------------------- +--------------------------------------------------------------------- partitioning_schema."schema-test" partitioning_schema."schema-test_2009" (2 rows) @@ -1778,7 +1778,7 @@ GROUP BY ORDER BY 1,2; logicalrelid | count -----------------------------------------+------- +--------------------------------------------------------------------- partitioning_schema."schema-test" | 4 partitioning_schema."schema-test_2009" | 4 (2 rows) @@ -1788,7 +1788,7 @@ DROP TABLE partitioning_schema."schema-test"; CREATE TABLE partitioning_schema."schema-test"(id int, time date) PARTITION BY RANGE (time); SELECT create_distributed_table('partitioning_schema."schema-test"', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1802,7 +1802,7 @@ WHERE logicalrelid IN ('partitioning_schema."schema-test"'::regclass, 'partitioning_schema."schema-test_2009"'::regclass) ORDER BY 1; logicalrelid ----------------------------------------- +--------------------------------------------------------------------- partitioning_schema."schema-test" partitioning_schema."schema-test_2009" (2 rows) @@ -1818,7 +1818,7 @@ GROUP BY ORDER BY 1,2; logicalrelid | count -----------------------------------------+------- +--------------------------------------------------------------------- partitioning_schema."schema-test" | 4 partitioning_schema."schema-test_2009" | 4 (2 rows) @@ -1829,7 +1829,7 @@ CREATE TABLE partitioning_schema."schema-test"(id int, time date) PARTITION BY R SET search_path = partitioning_schema; SELECT create_distributed_table('"schema-test"', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1843,7 +1843,7 @@ WHERE logicalrelid IN ('partitioning_schema."schema-test"'::regclass, 'partitioning_schema."schema-test_2009"'::regclass) ORDER BY 1; logicalrelid --------------------- +--------------------------------------------------------------------- "schema-test" "schema-test_2009" (2 rows) @@ -1859,7 +1859,7 @@ GROUP BY ORDER BY 1,2; logicalrelid | count ---------------------+------- +--------------------------------------------------------------------- "schema-test" | 4 "schema-test_2009" | 4 (2 rows) @@ -1869,14 +1869,14 @@ ORDER BY CREATE TABLE reference_table(id int PRIMARY KEY); SELECT create_reference_table('reference_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE reference_table_2(id int PRIMARY KEY); SELECT create_reference_table('reference_table_2'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -1888,25 +1888,25 @@ CREATE TABLE partitioning_test_2011 (LIKE partitioning_test); -- distributing partitioning_test will also distribute partitioning_test_2008 SELECT create_distributed_table('partitioning_test', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('partitioning_test_2009', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT 
create_distributed_table('partitioning_test_2010', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('partitioning_test_2011', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/multi_partitioning_utils.out b/src/test/regress/expected/multi_partitioning_utils.out index 9ef8f9016..7588140e8 100644 --- a/src/test/regress/expected/multi_partitioning_utils.out +++ b/src/test/regress/expected/multi_partitioning_utils.out @@ -75,21 +75,21 @@ CREATE TABLE date_partitioned_table(id int, time date) PARTITION BY RANGE (time) -- we should be able to get the partitioning information even if there are no partitions SELECT generate_partition_information('date_partitioned_table'); generate_partition_information --------------------------------- +--------------------------------------------------------------------- RANGE ("time") (1 row) -- we should be able to drop and re-create the partitioned table using the command that Citus generate SELECT drop_and_recreate_partitioned_table('date_partitioned_table'); drop_and_recreate_partitioned_table -------------------------------------- +--------------------------------------------------------------------- (1 row) -- we should also be able to see the PARTITION BY ... for the parent table SELECT master_get_table_ddl_events('date_partitioned_table'); master_get_table_ddl_events ---------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- CREATE TABLE public.date_partitioned_table (id integer, "time" date) PARTITION BY RANGE ("time") ALTER TABLE public.date_partitioned_table OWNER TO postgres (2 rows) @@ -100,20 +100,20 @@ CREATE TABLE date_partition_2007 PARTITION OF date_partitioned_table FOR VALUES -- we should be able to get the partitioning information after the partitions are created SELECT generate_partition_information('date_partitioned_table'); generate_partition_information --------------------------------- +--------------------------------------------------------------------- RANGE ("time") (1 row) -- lets get the attach partition commands SELECT generate_alter_table_attach_partition_command('date_partition_2006'); generate_alter_table_attach_partition_command ------------------------------------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- ALTER TABLE public.date_partitioned_table ATTACH PARTITION public.date_partition_2006 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007'); (1 row) SELECT generate_alter_table_attach_partition_command('date_partition_2007'); generate_alter_table_attach_partition_command ------------------------------------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- ALTER TABLE public.date_partitioned_table ATTACH PARTITION public.date_partition_2007 FOR VALUES FROM ('01-01-2007') TO ('01-01-2008'); (1 row) @@ -121,7 +121,7 @@ SELECT generate_alter_table_attach_partition_command('date_partition_2007'); \d+ date_partitioned_table Table "public.date_partitioned_table" Column | Type | Collation | Nullable | Default | Storage | Stats 
target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- +--------------------------------------------------------------------- id | integer | | | | plain | | time | date | | | | plain | | Partition key: RANGE ("time") @@ -130,7 +130,7 @@ Partitions: date_partition_2006 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007') SELECT detach_and_attach_partition('date_partition_2007', 'date_partitioned_table'); detach_and_attach_partition ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -138,7 +138,7 @@ SELECT detach_and_attach_partition('date_partition_2007', 'date_partitioned_tabl \d+ date_partitioned_table Table "public.date_partitioned_table" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- +--------------------------------------------------------------------- id | integer | | | | plain | | time | date | | | | plain | | Partition key: RANGE ("time") @@ -154,7 +154,7 @@ SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_ referenced_shard:=100, referenced_schema_name:='public', command:='ALTER TABLE date_partitioned_table ATTACH PARTITION date_partition_2007 FOR VALUES FROM (''2007-01-01'') TO (''2008-01-02'')' ); worker_apply_inter_shard_ddl_command --------------------------------------- +--------------------------------------------------------------------- (1 row) @@ -162,7 +162,7 @@ SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_ \d+ date_partitioned_table_100 Table "public.date_partitioned_table_100" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- +--------------------------------------------------------------------- id | integer | | | | plain | | time | date | | | | plain | | Partition key: RANGE ("time") @@ -171,7 +171,7 @@ Partitions: date_partition_2007_100 FOR VALUES FROM ('01-01-2007') TO ('01-02-20 -- Citus can also get the DDL events for the partitions as regular tables SELECT master_get_table_ddl_events('date_partition_2007_100'); master_get_table_ddl_events ------------------------------------------------------------------------ +--------------------------------------------------------------------- CREATE TABLE public.date_partition_2007_100 (id integer, "time" date) ALTER TABLE public.date_partition_2007_100 OWNER TO postgres (2 rows) @@ -181,7 +181,7 @@ SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_ referenced_shard:=100, referenced_schema_name:='public', command:='ALTER TABLE date_partitioned_table DETACH PARTITION date_partition_2007' ); worker_apply_inter_shard_ddl_command --------------------------------------- +--------------------------------------------------------------------- (1 row) @@ -189,7 +189,7 @@ SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_ \d+ date_partitioned_table_100 Table "public.date_partitioned_table_100" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- +--------------------------------------------------------------------- id | integer | | | | plain | | time | date | | | | plain | | Partition key: RANGE ("time") @@ -206,14 +206,14 
@@ CREATE TABLE partition_child_2_schema.child_2 (id int NOT NULL, time date ); -- we should be able to get the partitioning information even if there are no partitions SELECT generate_partition_information('partition_parent_schema.parent_table'); generate_partition_information --------------------------------- +--------------------------------------------------------------------- RANGE ("time") (1 row) -- we should be able to drop and re-create the partitioned table using the command that Citus generate SELECT drop_and_recreate_partitioned_table('partition_parent_schema.parent_table'); drop_and_recreate_partitioned_table -------------------------------------- +--------------------------------------------------------------------- (1 row) @@ -222,21 +222,21 @@ SET search_path = 'partition_parent_schema'; ALTER TABLE parent_table ATTACH PARTITION partition_child_2_schema.child_2 FOR VALUES FROM ('2006-01-01') TO ('2007-01-01'); SELECT public.generate_partition_information('parent_table'); generate_partition_information --------------------------------- +--------------------------------------------------------------------- RANGE ("time") (1 row) -- lets get the attach partition commands SELECT public.generate_alter_table_attach_partition_command('partition_child_1_schema.child_1'); generate_alter_table_attach_partition_command ------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- ALTER TABLE partition_parent_schema.parent_table ATTACH PARTITION partition_child_1_schema.child_1 FOR VALUES FROM ('01-01-2009') TO ('01-02-2010'); (1 row) SET search_path = 'partition_child_2_schema'; SELECT public.generate_alter_table_attach_partition_command('child_2'); generate_alter_table_attach_partition_command ------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- ALTER TABLE partition_parent_schema.parent_table ATTACH PARTITION partition_child_2_schema.child_2 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007'); (1 row) @@ -245,7 +245,7 @@ SET search_path = 'partition_parent_schema'; \d+ parent_table Table "partition_parent_schema.parent_table" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- +--------------------------------------------------------------------- id | integer | | not null | | plain | | time | date | | | now() | plain | | Partition key: RANGE ("time") @@ -254,7 +254,7 @@ Partitions: partition_child_1_schema.child_1 FOR VALUES FROM ('01-01-2009') TO ( SELECT public.detach_and_attach_partition('partition_child_1_schema.child_1', 'parent_table'); detach_and_attach_partition ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -262,7 +262,7 @@ SELECT public.detach_and_attach_partition('partition_child_1_schema.child_1', 'p \d+ parent_table Table "partition_parent_schema.parent_table" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- +--------------------------------------------------------------------- id | integer | | not null | | plain | | time | date | | 
| now() | plain | | Partition key: RANGE ("time") @@ -279,7 +279,7 @@ ERROR: "child_1" is not a parent table -- now pring the partitions SELECT public.print_partitions('parent_table'); print_partitions ------------------- +--------------------------------------------------------------------- child_1,child_2 (1 row) @@ -310,20 +310,20 @@ CREATE TABLE multi_column_partition_2( -- partitioning information SELECT generate_partition_information('multi_column_partitioned'); generate_partition_information ------------------------------------------------------ +--------------------------------------------------------------------- RANGE (a, (((a + b) + 1)), some_function(upper(c))) (1 row) SELECT master_get_table_ddl_events('multi_column_partitioned'); master_get_table_ddl_events ------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- CREATE TABLE public.multi_column_partitioned (a integer, b integer, c text) PARTITION BY RANGE (a, (((a + b) + 1)), public.some_function(upper(c))) ALTER TABLE public.multi_column_partitioned OWNER TO postgres (2 rows) SELECT drop_and_recreate_partitioned_table('multi_column_partitioned'); drop_and_recreate_partitioned_table -------------------------------------- +--------------------------------------------------------------------- (1 row) @@ -331,20 +331,20 @@ SELECT drop_and_recreate_partitioned_table('multi_column_partitioned'); ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_column_partition_1 FOR VALUES FROM (1, 10, '250') TO (1, 20, '250'); SELECT generate_alter_table_attach_partition_command('multi_column_partition_1'); generate_alter_table_attach_partition_command ------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- ALTER TABLE public.multi_column_partitioned ATTACH PARTITION public.multi_column_partition_1 FOR VALUES FROM (1, 10, '250') TO (1, 20, '250'); (1 row) ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_column_partition_2 FOR VALUES FROM (10, 1000, '2500') TO (MAXVALUE, MAXVALUE, MAXVALUE); SELECT generate_alter_table_attach_partition_command('multi_column_partition_2'); generate_alter_table_attach_partition_command --------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- ALTER TABLE public.multi_column_partitioned ATTACH PARTITION public.multi_column_partition_2 FOR VALUES FROM (10, 1000, '2500') TO (MAXVALUE, MAXVALUE, MAXVALUE); (1 row) SELECT generate_alter_table_detach_partition_command('multi_column_partition_2'); generate_alter_table_detach_partition_command ---------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- ALTER TABLE IF EXISTS public.multi_column_partitioned DETACH PARTITION public.multi_column_partition_2; (1 row) @@ -352,27 +352,27 @@ SELECT generate_alter_table_detach_partition_command('multi_column_partition_2') CREATE TABLE list_partitioned (col1 NUMERIC, col2 NUMERIC, col3 VARCHAR(10)) PARTITION BY LIST (col1) ; SELECT generate_partition_information('list_partitioned'); 
generate_partition_information --------------------------------- +--------------------------------------------------------------------- LIST (col1) (1 row) SELECT master_get_table_ddl_events('list_partitioned'); master_get_table_ddl_events -------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- CREATE TABLE public.list_partitioned (col1 numeric, col2 numeric, col3 character varying(10)) PARTITION BY LIST (col1) ALTER TABLE public.list_partitioned OWNER TO postgres (2 rows) SELECT drop_and_recreate_partitioned_table('list_partitioned'); drop_and_recreate_partitioned_table -------------------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE list_partitioned_1 PARTITION OF list_partitioned FOR VALUES IN (100, 101, 102, 103, 104); SELECT generate_alter_table_attach_partition_command('list_partitioned_1'); generate_alter_table_attach_partition_command ------------------------------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- ALTER TABLE public.list_partitioned ATTACH PARTITION public.list_partitioned_1 FOR VALUES IN ('100', '101', '102', '103', '104'); (1 row) @@ -388,7 +388,7 @@ CREATE TABLE capitals ( -- returns true since capitals inherits from cities SELECT table_inherits('capitals'); table_inherits ----------------- +--------------------------------------------------------------------- t (1 row) @@ -396,14 +396,14 @@ SELECT table_inherits('capitals'); -- returns false since the hierarcy is formed via partitioning SELECT table_inherits('date_partition_2006'); table_inherits ----------------- +--------------------------------------------------------------------- f (1 row) -- returns true since cities inherited by capitals SELECT table_inherited('cities'); table_inherited ------------------ +--------------------------------------------------------------------- t (1 row) @@ -411,7 +411,7 @@ SELECT table_inherited('cities'); -- returns false since the hierarcy is formed via partitioning SELECT table_inherited('date_partitioned_table'); table_inherited ------------------ +--------------------------------------------------------------------- f (1 row) diff --git a/src/test/regress/expected/multi_prepare_plsql.out b/src/test/regress/expected/multi_prepare_plsql.out index 0e168bbdc..d2f9c99c2 100644 --- a/src/test/regress/expected/multi_prepare_plsql.out +++ b/src/test/regress/expected/multi_prepare_plsql.out @@ -142,111 +142,111 @@ SET client_min_messages TO INFO; -- now, run PL/pgsql functions SELECT plpgsql_test_1(); plpgsql_test_1 ----------------- +--------------------------------------------------------------------- 2985 (1 row) SELECT plpgsql_test_2(); plpgsql_test_2 ----------------- +--------------------------------------------------------------------- 12000 (1 row) SELECT plpgsql_test_3(); plpgsql_test_3 ----------------- +--------------------------------------------------------------------- 1956 (1 row) SELECT plpgsql_test_4(); plpgsql_test_4 ----------------- +--------------------------------------------------------------------- 7806 (1 row) SELECT plpgsql_test_5(); plpgsql_test_5 ----------------- +--------------------------------------------------------------------- 39 (1 row) -- run PL/pgsql functions with different parameters SELECT plpgsql_test_6(155); 
plpgsql_test_6 ----------------- +--------------------------------------------------------------------- 11813 (1 row) SELECT plpgsql_test_6(1555); plpgsql_test_6 ----------------- +--------------------------------------------------------------------- 10185 (1 row) SELECT plpgsql_test_7('UNITED KINGDOM', 'CHINA'); plpgsql_test_7 ----------------------------------------- +--------------------------------------------------------------------- ("UNITED KINGDOM",CHINA,1996,18560.22) (1 row) SELECT plpgsql_test_7('FRANCE', 'GERMANY'); plpgsql_test_7 ---------------------------------- +--------------------------------------------------------------------- (GERMANY,FRANCE,1995,2399.2948) (1 row) -- now, PL/pgsql functions with random order SELECT plpgsql_test_6(155); plpgsql_test_6 ----------------- +--------------------------------------------------------------------- 11813 (1 row) SELECT plpgsql_test_3(); plpgsql_test_3 ----------------- +--------------------------------------------------------------------- 1956 (1 row) SELECT plpgsql_test_7('FRANCE', 'GERMANY'); plpgsql_test_7 ---------------------------------- +--------------------------------------------------------------------- (GERMANY,FRANCE,1995,2399.2948) (1 row) SELECT plpgsql_test_5(); plpgsql_test_5 ----------------- +--------------------------------------------------------------------- 39 (1 row) SELECT plpgsql_test_1(); plpgsql_test_1 ----------------- +--------------------------------------------------------------------- 2985 (1 row) SELECT plpgsql_test_6(1555); plpgsql_test_6 ----------------- +--------------------------------------------------------------------- 10185 (1 row) SELECT plpgsql_test_4(); plpgsql_test_4 ----------------- +--------------------------------------------------------------------- 7806 (1 row) SELECT plpgsql_test_7('UNITED KINGDOM', 'CHINA'); plpgsql_test_7 ----------------------------------------- +--------------------------------------------------------------------- ("UNITED KINGDOM",CHINA,1996,18560.22) (1 row) SELECT plpgsql_test_2(); plpgsql_test_2 ----------------- +--------------------------------------------------------------------- 12000 (1 row) @@ -256,26 +256,26 @@ RESET citus.task_executor_type; -- now, run PL/pgsql functions SELECT plpgsql_test_1(); plpgsql_test_1 ----------------- +--------------------------------------------------------------------- 2985 (1 row) SELECT plpgsql_test_2(); plpgsql_test_2 ----------------- +--------------------------------------------------------------------- 12000 (1 row) -- run PL/pgsql functions with different parameters SELECT plpgsql_test_6(155); plpgsql_test_6 ----------------- +--------------------------------------------------------------------- 11813 (1 row) SELECT plpgsql_test_6(1555); plpgsql_test_6 ----------------- +--------------------------------------------------------------------- 10185 (1 row) @@ -287,7 +287,7 @@ CREATE TABLE plpgsql_table ( SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('plpgsql_table','key','hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -299,37 +299,37 @@ $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT no_parameter_insert(); no_parameter_insert ---------------------- +--------------------------------------------------------------------- (1 row) SELECT no_parameter_insert(); no_parameter_insert ---------------------- 
+--------------------------------------------------------------------- (1 row) SELECT no_parameter_insert(); no_parameter_insert ---------------------- +--------------------------------------------------------------------- (1 row) SELECT no_parameter_insert(); no_parameter_insert ---------------------- +--------------------------------------------------------------------- (1 row) SELECT no_parameter_insert(); no_parameter_insert ---------------------- +--------------------------------------------------------------------- (1 row) SELECT no_parameter_insert(); no_parameter_insert ---------------------- +--------------------------------------------------------------------- (1 row) @@ -342,37 +342,37 @@ $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT single_parameter_insert(1); single_parameter_insert -------------------------- +--------------------------------------------------------------------- (1 row) SELECT single_parameter_insert(2); single_parameter_insert -------------------------- +--------------------------------------------------------------------- (1 row) SELECT single_parameter_insert(3); single_parameter_insert -------------------------- +--------------------------------------------------------------------- (1 row) SELECT single_parameter_insert(4); single_parameter_insert -------------------------- +--------------------------------------------------------------------- (1 row) SELECT single_parameter_insert(5); single_parameter_insert -------------------------- +--------------------------------------------------------------------- (1 row) SELECT single_parameter_insert(6); single_parameter_insert -------------------------- +--------------------------------------------------------------------- (1 row) @@ -385,37 +385,37 @@ $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT double_parameter_insert(1, 10); double_parameter_insert -------------------------- +--------------------------------------------------------------------- (1 row) SELECT double_parameter_insert(2, 20); double_parameter_insert -------------------------- +--------------------------------------------------------------------- (1 row) SELECT double_parameter_insert(3, 30); double_parameter_insert -------------------------- +--------------------------------------------------------------------- (1 row) SELECT double_parameter_insert(4, 40); double_parameter_insert -------------------------- +--------------------------------------------------------------------- (1 row) SELECT double_parameter_insert(5, 50); double_parameter_insert -------------------------- +--------------------------------------------------------------------- (1 row) SELECT double_parameter_insert(6, 60); double_parameter_insert -------------------------- +--------------------------------------------------------------------- (1 row) @@ -428,44 +428,44 @@ $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT non_partition_parameter_insert(10); non_partition_parameter_insert --------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_insert(20); non_partition_parameter_insert --------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_insert(30); non_partition_parameter_insert --------------------------------- +--------------------------------------------------------------------- (1 row) SELECT 
non_partition_parameter_insert(40); non_partition_parameter_insert --------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_insert(50); non_partition_parameter_insert --------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_insert(60); non_partition_parameter_insert --------------------------------- +--------------------------------------------------------------------- (1 row) -- check inserted values SELECT * FROM plpgsql_table ORDER BY key, value; key | value ------+------- +--------------------------------------------------------------------- 0 | 10 0 | 20 0 | 30 @@ -513,42 +513,42 @@ $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT router_partition_column_select(1); router_partition_column_select --------------------------------- +--------------------------------------------------------------------- (1,10) (1,) (2 rows) SELECT router_partition_column_select(2); router_partition_column_select --------------------------------- +--------------------------------------------------------------------- (2,20) (2,) (2 rows) SELECT router_partition_column_select(3); router_partition_column_select --------------------------------- +--------------------------------------------------------------------- (3,30) (3,) (2 rows) SELECT router_partition_column_select(4); router_partition_column_select --------------------------------- +--------------------------------------------------------------------- (4,40) (4,) (2 rows) SELECT router_partition_column_select(5); router_partition_column_select --------------------------------- +--------------------------------------------------------------------- (5,50) (5,) (2 rows) SELECT router_partition_column_select(6); router_partition_column_select --------------------------------- +--------------------------------------------------------------------- (6,60) (6,) (2 rows) @@ -574,37 +574,37 @@ $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT router_non_partition_column_select(10); router_non_partition_column_select ------------------------------------- +--------------------------------------------------------------------- (0,10) (1 row) SELECT router_non_partition_column_select(20); router_non_partition_column_select ------------------------------------- +--------------------------------------------------------------------- (0,20) (1 row) SELECT router_non_partition_column_select(30); router_non_partition_column_select ------------------------------------- +--------------------------------------------------------------------- (0,30) (1 row) SELECT router_non_partition_column_select(40); router_non_partition_column_select ------------------------------------- +--------------------------------------------------------------------- (0,40) (1 row) SELECT router_non_partition_column_select(50); router_non_partition_column_select ------------------------------------- +--------------------------------------------------------------------- (0,50) (1 row) SELECT router_non_partition_column_select(60); router_non_partition_column_select ------------------------------------- +--------------------------------------------------------------------- (0,60) (1 row) @@ -629,42 +629,42 @@ $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT real_time_non_partition_column_select(10); 
real_time_non_partition_column_select ---------------------------------------- +--------------------------------------------------------------------- (0,10) (1,10) (2 rows) SELECT real_time_non_partition_column_select(20); real_time_non_partition_column_select ---------------------------------------- +--------------------------------------------------------------------- (0,20) (2,20) (2 rows) SELECT real_time_non_partition_column_select(30); real_time_non_partition_column_select ---------------------------------------- +--------------------------------------------------------------------- (0,30) (3,30) (2 rows) SELECT real_time_non_partition_column_select(40); real_time_non_partition_column_select ---------------------------------------- +--------------------------------------------------------------------- (0,40) (4,40) (2 rows) SELECT real_time_non_partition_column_select(50); real_time_non_partition_column_select ---------------------------------------- +--------------------------------------------------------------------- (0,50) (5,50) (2 rows) SELECT real_time_non_partition_column_select(60); real_time_non_partition_column_select ---------------------------------------- +--------------------------------------------------------------------- (0,60) (6,60) (2 rows) @@ -690,7 +690,7 @@ $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT real_time_partition_column_select(1); real_time_partition_column_select ------------------------------------ +--------------------------------------------------------------------- (0,10) (1,10) (1,) @@ -698,7 +698,7 @@ SELECT real_time_partition_column_select(1); SELECT real_time_partition_column_select(2); real_time_partition_column_select ------------------------------------ +--------------------------------------------------------------------- (0,10) (1,10) (2,20) @@ -707,7 +707,7 @@ SELECT real_time_partition_column_select(2); SELECT real_time_partition_column_select(3); real_time_partition_column_select ------------------------------------ +--------------------------------------------------------------------- (0,10) (1,10) (3,30) @@ -716,7 +716,7 @@ SELECT real_time_partition_column_select(3); SELECT real_time_partition_column_select(4); real_time_partition_column_select ------------------------------------ +--------------------------------------------------------------------- (0,10) (1,10) (4,40) @@ -725,7 +725,7 @@ SELECT real_time_partition_column_select(4); SELECT real_time_partition_column_select(5); real_time_partition_column_select ------------------------------------ +--------------------------------------------------------------------- (0,10) (1,10) (5,50) @@ -734,7 +734,7 @@ SELECT real_time_partition_column_select(5); SELECT real_time_partition_column_select(6); real_time_partition_column_select ------------------------------------ +--------------------------------------------------------------------- (0,10) (1,10) (6,60) @@ -763,42 +763,42 @@ $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT task_tracker_non_partition_column_select(10); task_tracker_non_partition_column_select ------------------------------------------- +--------------------------------------------------------------------- (0,10) (1,10) (2 rows) SELECT task_tracker_non_partition_column_select(20); task_tracker_non_partition_column_select ------------------------------------------- +--------------------------------------------------------------------- (0,20) (2,20) (2 rows) SELECT 
task_tracker_non_partition_column_select(30); task_tracker_non_partition_column_select ------------------------------------------- +--------------------------------------------------------------------- (0,30) (3,30) (2 rows) SELECT task_tracker_non_partition_column_select(40); task_tracker_non_partition_column_select ------------------------------------------- +--------------------------------------------------------------------- (0,40) (4,40) (2 rows) SELECT task_tracker_non_partition_column_select(50); task_tracker_non_partition_column_select ------------------------------------------- +--------------------------------------------------------------------- (0,50) (5,50) (2 rows) SELECT real_time_non_partition_column_select(60); real_time_non_partition_column_select ---------------------------------------- +--------------------------------------------------------------------- (0,60) (6,60) (2 rows) @@ -824,7 +824,7 @@ $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT task_tracker_partition_column_select(1); task_tracker_partition_column_select --------------------------------------- +--------------------------------------------------------------------- (0,10) (1,10) (1,) @@ -832,7 +832,7 @@ SELECT task_tracker_partition_column_select(1); SELECT task_tracker_partition_column_select(2); task_tracker_partition_column_select --------------------------------------- +--------------------------------------------------------------------- (0,10) (1,10) (2,20) @@ -841,7 +841,7 @@ SELECT task_tracker_partition_column_select(2); SELECT task_tracker_partition_column_select(3); task_tracker_partition_column_select --------------------------------------- +--------------------------------------------------------------------- (0,10) (1,10) (3,30) @@ -850,7 +850,7 @@ SELECT task_tracker_partition_column_select(3); SELECT task_tracker_partition_column_select(4); task_tracker_partition_column_select --------------------------------------- +--------------------------------------------------------------------- (0,10) (1,10) (4,40) @@ -859,7 +859,7 @@ SELECT task_tracker_partition_column_select(4); SELECT task_tracker_partition_column_select(5); task_tracker_partition_column_select --------------------------------------- +--------------------------------------------------------------------- (0,10) (1,10) (5,50) @@ -868,7 +868,7 @@ SELECT task_tracker_partition_column_select(5); SELECT task_tracker_partition_column_select(6); task_tracker_partition_column_select --------------------------------------- +--------------------------------------------------------------------- (0,10) (1,10) (6,60) @@ -885,37 +885,37 @@ $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT partition_parameter_update(1, 11); partition_parameter_update ----------------------------- +--------------------------------------------------------------------- (1 row) SELECT partition_parameter_update(2, 21); partition_parameter_update ----------------------------- +--------------------------------------------------------------------- (1 row) SELECT partition_parameter_update(3, 31); partition_parameter_update ----------------------------- +--------------------------------------------------------------------- (1 row) SELECT partition_parameter_update(4, 41); partition_parameter_update ----------------------------- +--------------------------------------------------------------------- (1 row) SELECT partition_parameter_update(5, 51); partition_parameter_update 
----------------------------- +--------------------------------------------------------------------- (1 row) SELECT partition_parameter_update(6, 61); partition_parameter_update ----------------------------- +--------------------------------------------------------------------- (1 row) @@ -927,44 +927,44 @@ $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT non_partition_parameter_update(10, 12); non_partition_parameter_update --------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_update(20, 22); non_partition_parameter_update --------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_update(30, 32); non_partition_parameter_update --------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_update(40, 42); non_partition_parameter_update --------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_update(50, 52); non_partition_parameter_update --------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_update(60, 62); non_partition_parameter_update --------------------------------- +--------------------------------------------------------------------- (1 row) -- check table after updates SELECT * FROM plpgsql_table ORDER BY key, value; key | value ------+------- +--------------------------------------------------------------------- 0 | 12 0 | 22 0 | 32 @@ -1000,37 +1000,37 @@ $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT partition_parameter_delete(1, 11); partition_parameter_delete ----------------------------- +--------------------------------------------------------------------- (1 row) SELECT partition_parameter_delete(2, 21); partition_parameter_delete ----------------------------- +--------------------------------------------------------------------- (1 row) SELECT partition_parameter_delete(3, 31); partition_parameter_delete ----------------------------- +--------------------------------------------------------------------- (1 row) SELECT partition_parameter_delete(4, 41); partition_parameter_delete ----------------------------- +--------------------------------------------------------------------- (1 row) SELECT partition_parameter_delete(5, 51); partition_parameter_delete ----------------------------- +--------------------------------------------------------------------- (1 row) SELECT partition_parameter_delete(6, 61); partition_parameter_delete ----------------------------- +--------------------------------------------------------------------- (1 row) @@ -1042,44 +1042,44 @@ $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT non_partition_parameter_delete(12); non_partition_parameter_delete --------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_delete(22); non_partition_parameter_delete --------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_delete(32); non_partition_parameter_delete --------------------------------- +--------------------------------------------------------------------- (1 
row) SELECT non_partition_parameter_delete(42); non_partition_parameter_delete --------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_delete(52); non_partition_parameter_delete --------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_delete(62); non_partition_parameter_delete --------------------------------- +--------------------------------------------------------------------- (1 row) -- check table after deletes SELECT * FROM plpgsql_table ORDER BY key, value; key | value ------+------- +--------------------------------------------------------------------- 0 | 0 | 0 | @@ -1092,7 +1092,7 @@ SELECT * FROM plpgsql_table ORDER BY key, value; CREATE TABLE execute_parameter_test (key int, val date); SELECT create_distributed_table('execute_parameter_test', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1113,7 +1113,7 @@ CREATE TABLE func_parameter_test ( ); SELECT create_distributed_table('func_parameter_test', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1136,43 +1136,43 @@ $BODY$ LANGUAGE plpgsql; SELECT insert_with_max('key'); insert_with_max ------------------ +--------------------------------------------------------------------- (1 row) SELECT insert_with_max('key'); insert_with_max ------------------ +--------------------------------------------------------------------- (1 row) SELECT insert_with_max('key'); insert_with_max ------------------ +--------------------------------------------------------------------- (1 row) SELECT insert_with_max('key'); insert_with_max ------------------ +--------------------------------------------------------------------- (1 row) SELECT insert_with_max('key'); insert_with_max ------------------ +--------------------------------------------------------------------- (1 row) SELECT insert_with_max('key'); insert_with_max ------------------ +--------------------------------------------------------------------- (1 row) SELECT key, seq FROM func_parameter_test ORDER BY seq; key | seq ------+----- +--------------------------------------------------------------------- key | 1 key | 2 key | 3 @@ -1188,7 +1188,7 @@ SET citus.multi_shard_commit_protocol TO '2pc'; CREATE TABLE prepare_ddl (x int, y int); SELECT create_distributed_table('prepare_ddl', 'x'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1202,13 +1202,13 @@ END; $BODY$ LANGUAGE plpgsql; SELECT ddl_in_plpgsql(); ddl_in_plpgsql ----------------- +--------------------------------------------------------------------- (1 row) SELECT ddl_in_plpgsql(); ddl_in_plpgsql ----------------- +--------------------------------------------------------------------- (1 row) @@ -1225,7 +1225,7 @@ CREATE SCHEMA otherschema; SET search_path TO otherschema, public; SELECT ddl_in_plpgsql(); ddl_in_plpgsql ----------------- +--------------------------------------------------------------------- (1 row) @@ -1235,20 +1235,20 @@ DROP INDEX prepared_index; CREATE TABLE prepare_ddl (x int, y int); SELECT create_distributed_table('prepare_ddl', 'x'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) 
SELECT ddl_in_plpgsql(); ddl_in_plpgsql ----------------- +--------------------------------------------------------------------- (1 row) -- verify the index is created in the correct schema SELECT schemaname, indexrelname FROM pg_stat_all_indexes WHERE indexrelname = 'prepared_index'; schemaname | indexrelname --------------+---------------- +--------------------------------------------------------------------- otherschema | prepared_index (1 row) @@ -1265,13 +1265,13 @@ END; $BODY$ LANGUAGE plpgsql; SELECT copy_in_plpgsql(); copy_in_plpgsql ------------------ +--------------------------------------------------------------------- (1 row) SELECT copy_in_plpgsql(); copy_in_plpgsql ------------------ +--------------------------------------------------------------------- (1 row) @@ -1286,13 +1286,13 @@ END; $BODY$ LANGUAGE plpgsql; SELECT local_copy_in_plpgsql(); local_copy_in_plpgsql ------------------------ +--------------------------------------------------------------------- (1 row) SELECT local_copy_in_plpgsql(); local_copy_in_plpgsql ------------------------ +--------------------------------------------------------------------- (1 row) @@ -1310,7 +1310,7 @@ END; $function$; SELECT type_ddl_plpgsql(); type_ddl_plpgsql ------------------- +--------------------------------------------------------------------- (1 row) @@ -1318,14 +1318,14 @@ SELECT type_ddl_plpgsql(); CREATE TYPE prepare_ddl_type AS (x int, y int); SELECT type_ddl_plpgsql(); type_ddl_plpgsql ------------------- +--------------------------------------------------------------------- (1 row) -- find all renamed types to verify the schema name didn't leak, nor a crash happened SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup'; nspname | typname --------------+------------------------- +--------------------------------------------------------------------- public | prepare_ddl_type_backup otherschema | prepare_ddl_type_backup (2 rows) diff --git a/src/test/regress/expected/multi_prepare_sql.out b/src/test/regress/expected/multi_prepare_sql.out index 3ded28e5a..c5e3adcae 100644 --- a/src/test/regress/expected/multi_prepare_sql.out +++ b/src/test/regress/expected/multi_prepare_sql.out @@ -104,111 +104,111 @@ SET client_min_messages TO INFO; -- execute prepared statements EXECUTE prepared_test_1; count -------- +--------------------------------------------------------------------- 2985 (1 row) EXECUTE prepared_test_2; count -------- +--------------------------------------------------------------------- 12000 (1 row) EXECUTE prepared_test_3; count -------- +--------------------------------------------------------------------- 1956 (1 row) EXECUTE prepared_test_4; count -------- +--------------------------------------------------------------------- 7806 (1 row) EXECUTE prepared_test_5; count -------- +--------------------------------------------------------------------- 39 (1 row) -- execute prepared statements with different parameters EXECUTE prepared_test_6(155); count -------- +--------------------------------------------------------------------- 11813 (1 row) EXECUTE prepared_test_6(1555); count -------- +--------------------------------------------------------------------- 10185 (1 row) EXECUTE prepared_test_7('UNITED KINGDOM', 'CHINA'); supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+------------ +--------------------------------------------------------------------- UNITED 
KINGDOM | CHINA | 1996 | 18560.2200 (1 row) EXECUTE prepared_test_7('FRANCE', 'GERMANY'); supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+----------- +--------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) -- now, execute prepared statements with random order EXECUTE prepared_test_6(155); count -------- +--------------------------------------------------------------------- 11813 (1 row) EXECUTE prepared_test_3; count -------- +--------------------------------------------------------------------- 1956 (1 row) EXECUTE prepared_test_7('FRANCE', 'GERMANY'); supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+----------- +--------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) EXECUTE prepared_test_5; count -------- +--------------------------------------------------------------------- 39 (1 row) EXECUTE prepared_test_1; count -------- +--------------------------------------------------------------------- 2985 (1 row) EXECUTE prepared_test_6(1555); count -------- +--------------------------------------------------------------------- 10185 (1 row) EXECUTE prepared_test_4; count -------- +--------------------------------------------------------------------- 7806 (1 row) EXECUTE prepared_test_7('UNITED KINGDOM', 'CHINA'); supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+------------ +--------------------------------------------------------------------- UNITED KINGDOM | CHINA | 1996 | 18560.2200 (1 row) EXECUTE prepared_test_2; count -------- +--------------------------------------------------------------------- 12000 (1 row) @@ -216,7 +216,7 @@ EXECUTE prepared_test_2; CREATE TEMP TABLE prepared_sql_test_7 AS EXECUTE prepared_test_7('UNITED KINGDOM', 'CHINA'); SELECT * from prepared_sql_test_7; supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+------------ +--------------------------------------------------------------------- UNITED KINGDOM | CHINA | 1996 | 18560.2200 (1 row) @@ -225,26 +225,26 @@ RESET citus.task_executor_type; -- execute prepared statements EXECUTE prepared_test_1; count -------- +--------------------------------------------------------------------- 2985 (1 row) EXECUTE prepared_test_2; count -------- +--------------------------------------------------------------------- 12000 (1 row) -- execute prepared statements with different parameters EXECUTE prepared_test_6(155); count -------- +--------------------------------------------------------------------- 11813 (1 row) EXECUTE prepared_test_6(1555); count -------- +--------------------------------------------------------------------- 10185 (1 row) @@ -262,7 +262,7 @@ CREATE TABLE router_executor_table ( SET citus.shard_count TO 2; SELECT create_distributed_table('router_executor_table', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -277,7 +277,7 @@ EXECUTE prepared_insert('comment-5', '(5, 50)'); EXECUTE prepared_insert('comment-6', '(6, 60)'); SELECT * FROM router_executor_table ORDER BY comment; id | comment | stats -----+-----------+-------- +--------------------------------------------------------------------- 1 | comment-1 | (1,10) 1 | comment-2 | (2,20) 1 | comment-3 
| (3,30) @@ -292,37 +292,37 @@ PREPARE prepared_select(integer, integer) AS WHERE id = 1 AND stats = ROW($1, $2)::test_composite_type; EXECUTE prepared_select(1, 10); count -------- +--------------------------------------------------------------------- 1 (1 row) EXECUTE prepared_select(2, 20); count -------- +--------------------------------------------------------------------- 1 (1 row) EXECUTE prepared_select(3, 30); count -------- +--------------------------------------------------------------------- 1 (1 row) EXECUTE prepared_select(4, 40); count -------- +--------------------------------------------------------------------- 1 (1 row) EXECUTE prepared_select(5, 50); count -------- +--------------------------------------------------------------------- 1 (1 row) EXECUTE prepared_select(6, 60); count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -349,7 +349,7 @@ SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('prepare_table','key','hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -401,7 +401,7 @@ EXECUTE prepared_non_partition_parameter_insert(60); -- check inserted values SELECT * FROM prepare_table ORDER BY key, value; key | value ------+------- +--------------------------------------------------------------------- 0 | 10 0 | 20 0 | 30 @@ -455,42 +455,42 @@ PREPARE prepared_router_partition_column_select(int) AS value; EXECUTE prepared_router_partition_column_select(1); key | value ------+------- +--------------------------------------------------------------------- 1 | 10 1 | (2 rows) EXECUTE prepared_router_partition_column_select(2); key | value ------+------- +--------------------------------------------------------------------- 2 | 20 2 | (2 rows) EXECUTE prepared_router_partition_column_select(3); key | value ------+------- +--------------------------------------------------------------------- 3 | 30 3 | (2 rows) EXECUTE prepared_router_partition_column_select(4); key | value ------+------- +--------------------------------------------------------------------- 4 | 40 4 | (2 rows) EXECUTE prepared_router_partition_column_select(5); key | value ------+------- +--------------------------------------------------------------------- 5 | 50 5 | (2 rows) EXECUTE prepared_router_partition_column_select(6); key | value ------+------- +--------------------------------------------------------------------- 6 | 60 6 | (2 rows) @@ -509,37 +509,37 @@ PREPARE prepared_router_non_partition_column_select(int) AS value; EXECUTE prepared_router_non_partition_column_select(10); key | value ------+------- +--------------------------------------------------------------------- 0 | 10 (1 row) EXECUTE prepared_router_non_partition_column_select(20); key | value ------+------- +--------------------------------------------------------------------- 0 | 20 (1 row) EXECUTE prepared_router_non_partition_column_select(30); key | value ------+------- +--------------------------------------------------------------------- 0 | 30 (1 row) EXECUTE prepared_router_non_partition_column_select(40); key | value ------+------- +--------------------------------------------------------------------- 0 | 40 (1 row) EXECUTE prepared_router_non_partition_column_select(50); key | value ------+------- +--------------------------------------------------------------------- 0 | 50 (1 row) EXECUTE prepared_router_non_partition_column_select(60); 
key | value ------+------- +--------------------------------------------------------------------- 0 | 60 (1 row) @@ -557,42 +557,42 @@ PREPARE prepared_real_time_non_partition_column_select(int) AS value; EXECUTE prepared_real_time_non_partition_column_select(10); key | value ------+------- +--------------------------------------------------------------------- 0 | 10 1 | 10 (2 rows) EXECUTE prepared_real_time_non_partition_column_select(20); key | value ------+------- +--------------------------------------------------------------------- 0 | 20 2 | 20 (2 rows) EXECUTE prepared_real_time_non_partition_column_select(30); key | value ------+------- +--------------------------------------------------------------------- 0 | 30 3 | 30 (2 rows) EXECUTE prepared_real_time_non_partition_column_select(40); key | value ------+------- +--------------------------------------------------------------------- 0 | 40 4 | 40 (2 rows) EXECUTE prepared_real_time_non_partition_column_select(50); key | value ------+------- +--------------------------------------------------------------------- 0 | 50 5 | 50 (2 rows) EXECUTE prepared_real_time_non_partition_column_select(60); key | value ------+------- +--------------------------------------------------------------------- 0 | 60 6 | 60 (2 rows) @@ -611,7 +611,7 @@ PREPARE prepared_real_time_partition_column_select(int) AS value; EXECUTE prepared_real_time_partition_column_select(1); key | value ------+------- +--------------------------------------------------------------------- 0 | 10 1 | 10 1 | @@ -619,7 +619,7 @@ EXECUTE prepared_real_time_partition_column_select(1); EXECUTE prepared_real_time_partition_column_select(2); key | value ------+------- +--------------------------------------------------------------------- 0 | 10 1 | 10 2 | 20 @@ -628,7 +628,7 @@ EXECUTE prepared_real_time_partition_column_select(2); EXECUTE prepared_real_time_partition_column_select(3); key | value ------+------- +--------------------------------------------------------------------- 0 | 10 1 | 10 3 | 30 @@ -637,7 +637,7 @@ EXECUTE prepared_real_time_partition_column_select(3); EXECUTE prepared_real_time_partition_column_select(4); key | value ------+------- +--------------------------------------------------------------------- 0 | 10 1 | 10 4 | 40 @@ -646,7 +646,7 @@ EXECUTE prepared_real_time_partition_column_select(4); EXECUTE prepared_real_time_partition_column_select(5); key | value ------+------- +--------------------------------------------------------------------- 0 | 10 1 | 10 5 | 50 @@ -655,7 +655,7 @@ EXECUTE prepared_real_time_partition_column_select(5); EXECUTE prepared_real_time_partition_column_select(6); key | value ------+------- +--------------------------------------------------------------------- 0 | 10 1 | 10 6 | 60 @@ -677,42 +677,42 @@ PREPARE prepared_task_tracker_non_partition_column_select(int) AS value; EXECUTE prepared_task_tracker_non_partition_column_select(10); key | value ------+------- +--------------------------------------------------------------------- 0 | 10 1 | 10 (2 rows) EXECUTE prepared_task_tracker_non_partition_column_select(20); key | value ------+------- +--------------------------------------------------------------------- 0 | 20 2 | 20 (2 rows) EXECUTE prepared_task_tracker_non_partition_column_select(30); key | value ------+------- +--------------------------------------------------------------------- 0 | 30 3 | 30 (2 rows) EXECUTE prepared_task_tracker_non_partition_column_select(40); key | value ------+------- 
+--------------------------------------------------------------------- 0 | 40 4 | 40 (2 rows) EXECUTE prepared_task_tracker_non_partition_column_select(50); key | value ------+------- +--------------------------------------------------------------------- 0 | 50 5 | 50 (2 rows) EXECUTE prepared_task_tracker_non_partition_column_select(60); key | value ------+------- +--------------------------------------------------------------------- 0 | 60 6 | 60 (2 rows) @@ -731,7 +731,7 @@ PREPARE prepared_task_tracker_partition_column_select(int) AS value; EXECUTE prepared_task_tracker_partition_column_select(1); key | value ------+------- +--------------------------------------------------------------------- 0 | 10 1 | 10 1 | @@ -739,7 +739,7 @@ EXECUTE prepared_task_tracker_partition_column_select(1); EXECUTE prepared_task_tracker_partition_column_select(2); key | value ------+------- +--------------------------------------------------------------------- 0 | 10 1 | 10 2 | 20 @@ -748,7 +748,7 @@ EXECUTE prepared_task_tracker_partition_column_select(2); EXECUTE prepared_task_tracker_partition_column_select(3); key | value ------+------- +--------------------------------------------------------------------- 0 | 10 1 | 10 3 | 30 @@ -757,7 +757,7 @@ EXECUTE prepared_task_tracker_partition_column_select(3); EXECUTE prepared_task_tracker_partition_column_select(4); key | value ------+------- +--------------------------------------------------------------------- 0 | 10 1 | 10 4 | 40 @@ -766,7 +766,7 @@ EXECUTE prepared_task_tracker_partition_column_select(4); EXECUTE prepared_task_tracker_partition_column_select(5); key | value ------+------- +--------------------------------------------------------------------- 0 | 10 1 | 10 5 | 50 @@ -775,7 +775,7 @@ EXECUTE prepared_task_tracker_partition_column_select(5); EXECUTE prepared_task_tracker_partition_column_select(6); key | value ------+------- +--------------------------------------------------------------------- 0 | 10 1 | 10 6 | 60 @@ -805,7 +805,7 @@ EXECUTE prepared_non_partition_parameter_update(60, 62); -- check after updates SELECT * FROM prepare_table ORDER BY key, value; key | value ------+------- +--------------------------------------------------------------------- 0 | 12 0 | 22 0 | 32 @@ -853,7 +853,7 @@ EXECUTE prepared_non_partition_parameter_delete(62); -- check after deletes SELECT * FROM prepare_table ORDER BY key, value; key | value ------+------- +--------------------------------------------------------------------- 0 | 0 | 0 | @@ -871,7 +871,7 @@ CREATE TABLE prepare_func_table ( ); SELECT create_distributed_table('prepare_func_table', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -887,7 +887,7 @@ EXECUTE prepared_function_evaluation_insert(5); EXECUTE prepared_function_evaluation_insert(6); SELECT key, value1 FROM prepare_func_table ORDER BY key; key | value1 ------+-------- +--------------------------------------------------------------------- 2 | 0 3 | 0 4 | 0 @@ -908,7 +908,7 @@ EXECUTE wrapped_parameter_evaluation('key', ARRAY['value']); EXECUTE wrapped_parameter_evaluation('key', ARRAY['value']); SELECT key, value2 FROM prepare_func_table; key | value2 ------+-------- +--------------------------------------------------------------------- key | value key | value key | value @@ -925,7 +925,7 @@ CREATE TABLE text_partition_column_table ( ); SELECT create_distributed_table('text_partition_column_table', 'key'); create_distributed_table 
--------------------------- +--------------------------------------------------------------------- (1 row) @@ -939,7 +939,7 @@ EXECUTE prepared_relabel_insert('test'); EXECUTE prepared_relabel_insert('test'); SELECT key, value FROM text_partition_column_table ORDER BY key; key | value -------+------- +--------------------------------------------------------------------- test | 1 test | 1 test | 1 @@ -955,7 +955,7 @@ SELECT run_command_on_workers($$ CREATE DOMAIN test_key AS text CHECK(VALUE ~ '^test-\d$') $$); run_command_on_workers -------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"CREATE DOMAIN") (localhost,57638,t,"CREATE DOMAIN") (2 rows) @@ -966,7 +966,7 @@ CREATE TABLE domain_partition_column_table ( ); SELECT create_distributed_table('domain_partition_column_table', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -980,7 +980,7 @@ EXECUTE prepared_coercion_to_domain_insert('test-5'); EXECUTE prepared_coercion_to_domain_insert('test-6'); SELECT key, value FROM domain_partition_column_table ORDER BY key; key | value ---------+------- +--------------------------------------------------------------------- test-1 | 1 test-2 | 1 test-3 | 1 @@ -1002,7 +1002,7 @@ CREATE TABLE http_request ( ); SELECT create_distributed_table('http_request', 'site_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1021,7 +1021,7 @@ EXECUTE foo; EXECUTE foo; SELECT count(distinct ingest_time) FROM http_request WHERE site_id = 1; count -------- +--------------------------------------------------------------------- 6 (1 row) @@ -1045,7 +1045,7 @@ SET citus.shard_count TO 2; SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('test_table', 'test_id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1055,7 +1055,7 @@ SELECT create_distributed_table('test_table', 'test_id', 'hash'); SELECT count(*) FROM test_table HAVING COUNT(*) = immutable_bleat('replanning'); NOTICE: replanning count -------- +--------------------------------------------------------------------- (0 rows) --prepared statement @@ -1063,12 +1063,12 @@ PREPARE countsome AS SELECT count(*) FROM test_table HAVING COUNT(*) = immutable EXECUTE countsome; -- should indicate planning NOTICE: replanning count -------- +--------------------------------------------------------------------- (0 rows) EXECUTE countsome; -- no replanning count -------- +--------------------------------------------------------------------- (0 rows) -- invalidate half of the placements using SQL, should invalidate via trigger @@ -1079,12 +1079,12 @@ WHERE shardid IN ( EXECUTE countsome; -- should indicate replanning NOTICE: replanning count -------- +--------------------------------------------------------------------- (0 rows) EXECUTE countsome; -- no replanning count -------- +--------------------------------------------------------------------- (0 rows) -- repair shards, should invalidate via master_metadata_utility.c @@ -1094,7 +1094,7 @@ WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_table'::regclass) AND nodeport = :worker_1_port; master_copy_shard_placement ------------------------------ +--------------------------------------------------------------------- (2 rows) 
@@ -1102,12 +1102,12 @@ WHERE shardid IN ( EXECUTE countsome; -- should indicate replanning NOTICE: replanning count -------- +--------------------------------------------------------------------- (0 rows) EXECUTE countsome; -- no replanning count -------- +--------------------------------------------------------------------- (0 rows) -- reset diff --git a/src/test/regress/expected/multi_prune_shard_list.out b/src/test/regress/expected/multi_prune_shard_list.out index a2a34a582..20ebd7cfe 100644 --- a/src/test/regress/expected/multi_prune_shard_list.out +++ b/src/test/regress/expected/multi_prune_shard_list.out @@ -36,63 +36,63 @@ CREATE TABLE pruning ( species text, last_pruned date, plant_id integer ); SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('pruning', 'species', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) -- with no values, expect all shards SELECT prune_using_no_values('pruning'); prune_using_no_values -------------------------------- +--------------------------------------------------------------------- {800000,800001,800002,800003} (1 row) -- with a single value, expect a single shard SELECT prune_using_single_value('pruning', 'tomato'); prune_using_single_value --------------------------- +--------------------------------------------------------------------- {800002} (1 row) -- null values should result in no pruning SELECT prune_using_single_value('pruning', NULL); prune_using_single_value -------------------------------- +--------------------------------------------------------------------- {800000,800001,800002,800003} (1 row) -- build an OR clause and expect more than one sahrd SELECT prune_using_either_value('pruning', 'tomato', 'petunia'); prune_using_either_value --------------------------- +--------------------------------------------------------------------- {800002,800001} (1 row) -- an AND clause with values on different shards returns no shards SELECT prune_using_both_values('pruning', 'tomato', 'petunia'); prune_using_both_values -------------------------- +--------------------------------------------------------------------- {} (1 row) -- even if both values are on the same shard, a value can't be equal to two others SELECT prune_using_both_values('pruning', 'tomato', 'rose'); prune_using_both_values -------------------------- +--------------------------------------------------------------------- {} (1 row) -- unit test of the equality expression generation code SELECT debug_equality_expression('pruning'); debug_equality_expression --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- {OPEXPR :opno 98 :opfuncid 67 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 100 :args ({VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} {CONST :consttype 25 :consttypmod -1 :constcollid 100 :constlen -1 :constbyval false :constisnull true :location -1 :constvalue <>}) :location -1} (1 row) -- print the initial ordering of shard intervals SELECT print_sorted_shard_intervals('pruning'); 
print_sorted_shard_intervals -------------------------------- +--------------------------------------------------------------------- {800000,800001,800002,800003} (1 row) @@ -106,32 +106,32 @@ UPDATE pg_dist_shard set shardminvalue = -1073741824 WHERE shardid = 800001; CREATE TABLE pruning_range ( species text, last_pruned date, plant_id integer ); SELECT create_distributed_table('pruning_range', 'species', 'range'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) -- create worker shards SELECT master_create_empty_shard('pruning_range'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 800004 (1 row) SELECT master_create_empty_shard('pruning_range'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 800005 (1 row) SELECT master_create_empty_shard('pruning_range'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 800006 (1 row) SELECT master_create_empty_shard('pruning_range'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 800007 (1 row) @@ -143,7 +143,7 @@ UPDATE pg_dist_shard SET shardminvalue = 'g', shardmaxvalue = 'h' WHERE shardid -- print the ordering of shard intervals with range partitioning as well SELECT print_sorted_shard_intervals('pruning_range'); print_sorted_shard_intervals -------------------------------- +--------------------------------------------------------------------- {800004,800005,800006,800007} (1 row) @@ -151,7 +151,7 @@ SELECT print_sorted_shard_intervals('pruning_range'); UPDATE pg_dist_shard set shardminvalue = NULL WHERE shardid = 800005; SELECT print_sorted_shard_intervals('pruning_range'); print_sorted_shard_intervals -------------------------------- +--------------------------------------------------------------------- {800004,800006,800007,800005} (1 row) @@ -159,7 +159,7 @@ SELECT print_sorted_shard_intervals('pruning_range'); UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 800006; SELECT print_sorted_shard_intervals('pruning_range'); print_sorted_shard_intervals -------------------------------- +--------------------------------------------------------------------- {800004,800007,800005,800006} (1 row) @@ -167,7 +167,7 @@ SELECT print_sorted_shard_intervals('pruning_range'); UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 800004; SELECT print_sorted_shard_intervals('pruning_range'); print_sorted_shard_intervals -------------------------------- +--------------------------------------------------------------------- {800007,800004,800005,800006} (1 row) @@ -175,7 +175,7 @@ SELECT print_sorted_shard_intervals('pruning_range'); UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 800007; SELECT print_sorted_shard_intervals('pruning_range'); print_sorted_shard_intervals -------------------------------- +--------------------------------------------------------------------- {800004,800005,800006,800007} (1 row) @@ -188,7 +188,7 @@ CREATE TABLE coerce_hash ( ); SELECT create_distributed_table('coerce_hash', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -197,14 +197,14 @@ INSERT INTO 
coerce_hash VALUES (1, 'test value'); -- SELECT with same type as partition column SELECT * FROM coerce_hash WHERE id = 1::bigint; id | value -----+------------ +--------------------------------------------------------------------- 1 | test value (1 row) -- SELECT with similar type to partition column SELECT * FROM coerce_hash WHERE id = 1; id | value -----+------------ +--------------------------------------------------------------------- 1 | test value (1 row) @@ -217,13 +217,13 @@ SELECT * FROM coerce_hash WHERE id = 1; -- test now, but if the old behavior is restored, it should crash again. SELECT * FROM coerce_hash WHERE id = 1.0; id | value -----+------------ +--------------------------------------------------------------------- 1 | test value (1 row) SELECT * FROM coerce_hash WHERE id = 1.0::numeric; id | value -----+------------ +--------------------------------------------------------------------- 1 | test value (1 row) diff --git a/src/test/regress/expected/multi_query_directory_cleanup.out b/src/test/regress/expected/multi_query_directory_cleanup.out index 5d3e38f5e..70259e929 100644 --- a/src/test/regress/expected/multi_query_directory_cleanup.out +++ b/src/test/regress/expected/multi_query_directory_cleanup.out @@ -18,7 +18,7 @@ with silence as ( select count(*) * 0 zero from silence; zero ------- +--------------------------------------------------------------------- 0 (1 row) @@ -28,61 +28,61 @@ BEGIN; -- number of jobs executed prior to running this test. SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT pg_ls_dir('base/pgsql_job_cache'); pg_ls_dir ------------ +--------------------------------------------------------------------- (0 rows) COMMIT; SELECT pg_ls_dir('base/pgsql_job_cache'); pg_ls_dir ------------ +--------------------------------------------------------------------- (0 rows) BEGIN; SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT pg_ls_dir('base/pgsql_job_cache'); pg_ls_dir ------------ +--------------------------------------------------------------------- (0 rows) ROLLBACK; SELECT pg_ls_dir('base/pgsql_job_cache'); pg_ls_dir ------------ +--------------------------------------------------------------------- (0 rows) -- Test that multiple job directories are all cleaned up correctly, @@ -92,146 +92,146 @@ BEGIN; DECLARE c_00 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_00; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_01 CURSOR 
FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_01; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_02 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_02; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_03 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_03; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_04 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_04; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_05 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_05; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_06 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_06; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_07 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_07; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_08 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_08; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_09 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_09; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_10 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_10; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_11 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_11; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_12 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_12; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_13 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_13; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_14 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_14; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_15 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_15; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_16 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_16; revenue ---------------- 
+--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_17 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_17; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_18 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_18; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_19 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_19; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f; f ---- +--------------------------------------------------------------------- (0 rows) -- close first, 17th (first after re-alloc) and last cursor. @@ -240,12 +240,12 @@ CLOSE c_16; CLOSE c_19; SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f; f ---- +--------------------------------------------------------------------- (0 rows) ROLLBACK; SELECT pg_ls_dir('base/pgsql_job_cache'); pg_ls_dir ------------ +--------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_query_directory_cleanup_0.out b/src/test/regress/expected/multi_query_directory_cleanup_0.out index ed98c36b4..1af0a6514 100644 --- a/src/test/regress/expected/multi_query_directory_cleanup_0.out +++ b/src/test/regress/expected/multi_query_directory_cleanup_0.out @@ -18,7 +18,7 @@ with silence as ( select count(*) * 0 zero from silence; zero ------- +--------------------------------------------------------------------- 0 (1 row) @@ -28,61 +28,61 @@ BEGIN; -- number of jobs executed prior to running this test. 
SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT pg_ls_dir('base/pgsql_job_cache'); pg_ls_dir ------------ +--------------------------------------------------------------------- (0 rows) COMMIT; SELECT pg_ls_dir('base/pgsql_job_cache'); pg_ls_dir ------------ +--------------------------------------------------------------------- (0 rows) BEGIN; SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT pg_ls_dir('base/pgsql_job_cache'); pg_ls_dir ------------ +--------------------------------------------------------------------- (0 rows) ROLLBACK; SELECT pg_ls_dir('base/pgsql_job_cache'); pg_ls_dir ------------ +--------------------------------------------------------------------- (0 rows) -- Test that multiple job directories are all cleaned up correctly, @@ -92,146 +92,146 @@ BEGIN; DECLARE c_00 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_00; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_01 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_01; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_02 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_02; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_03 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_03; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_04 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_04; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_05 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_05; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_06 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_06; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_07 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_07; revenue ---------------- 
+--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_08 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_08; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_09 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_09; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_10 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_10; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_11 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_11; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_12 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_12; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_13 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_13; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_14 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_14; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_15 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_15; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_16 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_16; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_17 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_17; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_18 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_18; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_19 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_19; revenue ---------------- +--------------------------------------------------------------------- 22770844.7654 (1 row) SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f; f ------------------ +--------------------------------------------------------------------- master_job_0007 master_job_0008 master_job_0009 @@ -260,7 +260,7 @@ CLOSE c_16; CLOSE c_19; SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f; f ------------------ +--------------------------------------------------------------------- master_job_0008 master_job_0009 master_job_0010 @@ -283,6 +283,6 @@ SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f; ROLLBACK; SELECT pg_ls_dir('base/pgsql_job_cache'); pg_ls_dir ------------ +--------------------------------------------------------------------- (0 rows) diff --git 
a/src/test/regress/expected/multi_read_from_secondaries.out b/src/test/regress/expected/multi_read_from_secondaries.out index 31572c204..6df549840 100644 --- a/src/test/regress/expected/multi_read_from_secondaries.out +++ b/src/test/regress/expected/multi_read_from_secondaries.out @@ -9,13 +9,13 @@ DETAIL: citus.use_secondary_nodes is set to 'always' \c "dbname=regression options='-c\ citus.use_secondary_nodes=never'" SELECT create_distributed_table('dest_table', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('source_table', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -25,7 +25,7 @@ INSERT INTO source_table (a, b) VALUES (10, 10); -- simluate actually having secondary nodes SELECT nodeid, groupid, nodename, nodeport, noderack, isactive, noderole, nodecluster FROM pg_dist_node; nodeid | groupid | nodename | nodeport | noderack | isactive | noderole | nodecluster ---------+---------+-----------+----------+----------+----------+----------+------------- +--------------------------------------------------------------------- 1 | 1 | localhost | 57637 | default | t | primary | default 2 | 2 | localhost | 57638 | default | t | primary | default (2 rows) @@ -39,14 +39,14 @@ DETAIL: citus.use_secondary_nodes is set to 'always' -- router selects are allowed SELECT a FROM dest_table WHERE a = 1 ORDER BY 1; a ---- +--------------------------------------------------------------------- 1 (1 row) -- real-time selects are also allowed SELECT a FROM dest_table ORDER BY 1; a ---- +--------------------------------------------------------------------- 1 2 (2 rows) @@ -71,7 +71,7 @@ DEBUG: generating subplan 4_1 for CTE cte: SELECT DISTINCT dest_table.a FROM pu DEBUG: generating subplan 4_2 for subquery SELECT a FROM (SELECT intermediate_result.a FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer)) cte ORDER BY a DESC LIMIT 5 DEBUG: Plan 4 query after replacing subqueries and CTEs: SELECT a FROM (SELECT intermediate_result.a FROM read_intermediate_result('4_2'::text, 'binary'::citus_copy_format) intermediate_result(a integer)) foo ORDER BY a a ---- +--------------------------------------------------------------------- (0 rows) SET client_min_messages TO DEFAULT; diff --git a/src/test/regress/expected/multi_real_time_transaction.out b/src/test/regress/expected/multi_real_time_transaction.out index f14d0feea..3c25cb7db 100644 --- a/src/test/regress/expected/multi_real_time_transaction.out +++ b/src/test/regress/expected/multi_real_time_transaction.out @@ -8,7 +8,7 @@ SET citus.shard_replication_factor to 1; CREATE TABLE test_table(id int, col_1 int, col_2 text); SELECT create_distributed_table('test_table','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -16,7 +16,7 @@ SELECT create_distributed_table('test_table','id'); CREATE TABLE co_test_table(id int, col_1 int, col_2 text); SELECT create_distributed_table('co_test_table','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -24,7 +24,7 @@ SELECT create_distributed_table('co_test_table','id'); CREATE TABLE ref_test_table(id int, col_1 int, col_2 text); SELECT create_reference_table('ref_test_table'); 
create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -33,14 +33,14 @@ SELECT create_reference_table('ref_test_table'); BEGIN; SELECT COUNT(*) FROM test_table; count -------- +--------------------------------------------------------------------- 6 (1 row) INSERT INTO test_table VALUES(7,8,'gg'); SELECT COUNT(*) FROM test_table; count -------- +--------------------------------------------------------------------- 7 (1 row) @@ -49,14 +49,14 @@ ROLLBACK; BEGIN; SELECT COUNT(*) FROM test_table; count -------- +--------------------------------------------------------------------- 6 (1 row) INSERT INTO test_table VALUES (7,8,'gg'),(8,9,'hh'),(9,10,'ii'); SELECT COUNT(*) FROM test_table; count -------- +--------------------------------------------------------------------- 9 (1 row) @@ -65,14 +65,14 @@ ROLLBACK; BEGIN; SELECT COUNT(*) FROM test_table; count -------- +--------------------------------------------------------------------- 6 (1 row) INSERT INTO test_table SELECT * FROM co_test_table; SELECT COUNT(*) FROM test_table; count -------- +--------------------------------------------------------------------- 12 (1 row) @@ -81,14 +81,14 @@ ROLLBACK; BEGIN; SELECT COUNT(*) FROM test_table; count -------- +--------------------------------------------------------------------- 6 (1 row) \COPY test_table FROM stdin delimiter ','; SELECT COUNT(*) FROM test_table; count -------- +--------------------------------------------------------------------- 9 (1 row) @@ -97,7 +97,7 @@ ROLLBACK; BEGIN; SELECT SUM(col_1) FROM test_table; sum ------ +--------------------------------------------------------------------- 27 (1 row) @@ -105,7 +105,7 @@ UPDATE test_table SET col_1 = 0 WHERE id = 2; DELETE FROM test_table WHERE id = 3; SELECT SUM(col_1) FROM test_table; sum ------ +--------------------------------------------------------------------- 20 (1 row) @@ -114,14 +114,14 @@ ROLLBACK; BEGIN; SELECT SUM(col_1) FROM test_table; sum ------ +--------------------------------------------------------------------- 27 (1 row) UPDATE test_table SET col_1 = 5; SELECT SUM(col_1) FROM test_table; sum ------ +--------------------------------------------------------------------- 30 (1 row) @@ -130,7 +130,7 @@ ROLLBACK; BEGIN; SELECT SUM(col_1) FROM test_table; sum ------ +--------------------------------------------------------------------- 27 (1 row) @@ -143,7 +143,7 @@ WHERE AND test_table.id = 1; SELECT SUM(col_1) FROM test_table; sum ------ +--------------------------------------------------------------------- 29 (1 row) @@ -161,14 +161,14 @@ SELECT create_distributed_table('partitioning_test', 'id'); NOTICE: Copying data from local table... NOTICE: Copying data from local table... 
create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) BEGIN; SELECT COUNT(*) FROM partitioning_test; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -176,7 +176,7 @@ INSERT INTO partitioning_test_2009 VALUES (3, '2009-09-09'); INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03'); SELECT COUNT(*) FROM partitioning_test; count -------- +--------------------------------------------------------------------- 4 (1 row) @@ -187,14 +187,14 @@ BEGIN; CREATE TABLE test_table_inn(id int, num_1 int); SELECT create_distributed_table('test_table_inn','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) INSERT INTO test_table_inn VALUES(1,3),(4,5),(6,7); SELECT COUNT(*) FROM test_table_inn; count -------- +--------------------------------------------------------------------- 3 (1 row) @@ -204,7 +204,7 @@ COMMIT; BEGIN; SELECT COUNT(*) FROM test_table; count -------- +--------------------------------------------------------------------- 6 (1 row) @@ -212,7 +212,7 @@ CREATE INDEX tt_ind_1 ON test_table(col_1); ALTER TABLE test_table ADD CONSTRAINT num_check CHECK (col_1 < 50); SELECT COUNT(*) FROM test_table; count -------- +--------------------------------------------------------------------- 6 (1 row) @@ -220,7 +220,7 @@ ROLLBACK; -- We don't get a distributed transaction id outside a transaction block SELECT (get_current_transaction_id()).transaction_number > 0 FROM test_table LIMIT 1; ?column? ----------- +--------------------------------------------------------------------- f (1 row) @@ -228,7 +228,7 @@ SELECT (get_current_transaction_id()).transaction_number > 0 FROM test_table LIM BEGIN; SELECT (get_current_transaction_id()).transaction_number > 0 FROM test_table LIMIT 1; ?column? 
----------- +--------------------------------------------------------------------- t (1 row) @@ -245,7 +245,7 @@ END; $BODY$ LANGUAGE plpgsql; $$); run_command_on_master_and_workers ------------------------------------ +--------------------------------------------------------------------- (1 row) @@ -253,14 +253,14 @@ $$); BEGIN; SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 6 (1 row) -- Sneakily insert directly into shards SELECT insert_row_test(pg_typeof(test_table)::name) FROM test_table; insert_row_test ------------------ +--------------------------------------------------------------------- t t t @@ -271,14 +271,14 @@ SELECT insert_row_test(pg_typeof(test_table)::name) FROM test_table; SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 12 (1 row) ABORT; SELECT count(*) FROM test_table; count -------- +--------------------------------------------------------------------- 6 (1 row) @@ -289,7 +289,7 @@ BEGIN; DELETE FROM test_table where id = 1 or id = 3; SELECT * FROM co_test_table; id | col_1 | col_2 -----+-------+-------- +--------------------------------------------------------------------- 2 | 30 | 'bb10' (1 row) @@ -300,7 +300,7 @@ SET client_min_messages TO ERROR; alter system set deadlock_timeout TO '250ms'; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) @@ -349,7 +349,7 @@ ROLLBACK; CREATE USER rls_user; SELECT run_command_on_workers('CREATE USER rls_user'); run_command_on_workers ------------------------------------ +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) @@ -358,14 +358,14 @@ GRANT ALL ON SCHEMA multi_real_time_transaction TO rls_user; GRANT ALL ON ALL TABLES IN SCHEMA multi_real_time_transaction TO rls_user; SELECT run_command_on_workers('GRANT ALL ON SCHEMA multi_real_time_transaction TO rls_user'); run_command_on_workers ---------------------------- +--------------------------------------------------------------------- (localhost,57637,t,GRANT) (localhost,57638,t,GRANT) (2 rows) SELECT run_command_on_workers('GRANT ALL ON ALL TABLES IN SCHEMA multi_real_time_transaction TO rls_user'); run_command_on_workers ---------------------------- +--------------------------------------------------------------------- (localhost,57637,t,GRANT) (localhost,57638,t,GRANT) (2 rows) @@ -382,7 +382,7 @@ SET search_path = 'multi_real_time_transaction'; -- shouldn't see all rows because of RLS SELECT COUNT(*) FROM test_table; count -------- +--------------------------------------------------------------------- 4 (1 row) @@ -391,7 +391,7 @@ BEGIN; SET LOCAL app.show_rows TO TRUE; SELECT COUNT(*) FROM test_table; count -------- +--------------------------------------------------------------------- 4 (1 row) @@ -400,7 +400,7 @@ SET LOCAL citus.propagate_set_commands TO 'local'; SET LOCAL app.show_rows TO TRUE; SELECT COUNT(*) FROM test_table; count -------- +--------------------------------------------------------------------- 6 (1 row) @@ -408,14 +408,14 @@ SAVEPOINT disable_rls; SET LOCAL app.show_rows TO FALSE; SELECT COUNT(*) FROM test_table; count -------- +--------------------------------------------------------------------- 4 (1 row) ROLLBACK TO SAVEPOINT disable_rls; SELECT COUNT(*) FROM test_table; count -------- 
+--------------------------------------------------------------------- 6 (1 row) @@ -424,7 +424,7 @@ SET LOCAL app.show_rows TO FALSE; RELEASE SAVEPOINT disable_rls_for_real; SELECT COUNT(*) FROM test_table; count -------- +--------------------------------------------------------------------- 4 (1 row) @@ -437,7 +437,7 @@ SET ROLE rls_user; SET search_path = 'multi_real_time_transaction'; SELECT * FROM co_test_table ORDER BY id, col_1; id | col_1 | col_2 -----+-------+-------- +--------------------------------------------------------------------- 1 | 2 | 'cc2' 1 | 20 | 'aa10' 2 | 30 | 'bb10' @@ -451,14 +451,14 @@ SET search_path = 'multi_real_time_transaction'; -- shard xxxxx contains data from tenant id 1 SELECT * FROM co_test_table_1610004 ORDER BY id, col_1; id | col_1 | col_2 -----+-------+-------- +--------------------------------------------------------------------- 1 | 2 | 'cc2' 1 | 20 | 'aa10' (2 rows) SELECT * FROM co_test_table_1610006 ORDER BY id, col_1; id | col_1 | col_2 -----+-------+------- +--------------------------------------------------------------------- (0 rows) \c - - - :worker_2_port @@ -466,7 +466,7 @@ SET search_path = 'multi_real_time_transaction'; -- shard xxxxx contains data from tenant id 3 SELECT * FROM co_test_table_1610005 ORDER BY id, col_1; id | col_1 | col_2 -----+-------+-------- +--------------------------------------------------------------------- 3 | 4 | 'cc1' 3 | 5 | 'cc2' 3 | 40 | 'cc10' @@ -475,7 +475,7 @@ SELECT * FROM co_test_table_1610005 ORDER BY id, col_1; -- shard xxxxx contains data from tenant id 2 SELECT * FROM co_test_table_1610007 ORDER BY id, col_1; id | col_1 | col_2 -----+-------+-------- +--------------------------------------------------------------------- 2 | 30 | 'bb10' (1 row) @@ -489,7 +489,7 @@ SET citus.enable_ddl_propagation to on; SELECT run_command_on_shards('co_test_table', $cmd$CREATE POLICY filter_by_tenant_id ON %s TO PUBLIC USING (id = ANY(string_to_array(current_setting('app.tenant_id'), ',')::int[]));$cmd$); run_command_on_shards ------------------------------ +--------------------------------------------------------------------- (1610004,t,"CREATE POLICY") (1610005,t,"CREATE POLICY") (1610006,t,"CREATE POLICY") @@ -502,7 +502,7 @@ ALTER TABLE co_test_table ENABLE ROW LEVEL SECURITY; SET citus.enable_ddl_propagation to on; SELECT run_command_on_shards('co_test_table','ALTER TABLE %s ENABLE ROW LEVEL SECURITY;'); run_command_on_shards ---------------------------- +--------------------------------------------------------------------- (1610004,t,"ALTER TABLE") (1610005,t,"ALTER TABLE") (1610006,t,"ALTER TABLE") @@ -518,7 +518,7 @@ SET LOCAL citus.propagate_set_commands TO 'local'; SET LOCAL app.tenant_id TO 1; SELECT * FROM co_test_table ORDER BY id, col_1; id | col_1 | col_2 -----+-------+-------- +--------------------------------------------------------------------- 1 | 2 | 'cc2' 1 | 20 | 'aa10' (2 rows) @@ -527,7 +527,7 @@ SAVEPOINT disable_rls; SET LOCAL app.tenant_id TO 3; SELECT * FROM co_test_table ORDER BY id, col_1; id | col_1 | col_2 -----+-------+-------- +--------------------------------------------------------------------- 3 | 4 | 'cc1' 3 | 5 | 'cc2' 3 | 40 | 'cc10' @@ -536,7 +536,7 @@ SELECT * FROM co_test_table ORDER BY id, col_1; ROLLBACK TO SAVEPOINT disable_rls; SELECT * FROM co_test_table ORDER BY id, col_1; id | col_1 | col_2 -----+-------+-------- +--------------------------------------------------------------------- 1 | 2 | 'cc2' 1 | 20 | 'aa10' (2 rows) @@ -546,7 +546,7 @@ SET LOCAL app.tenant_id 
TO 3; RELEASE SAVEPOINT disable_rls_for_real; SELECT * FROM co_test_table ORDER BY id, col_1; id | col_1 | col_2 -----+-------+-------- +--------------------------------------------------------------------- 3 | 4 | 'cc1' 3 | 5 | 'cc2' 3 | 40 | 'cc10' @@ -558,7 +558,7 @@ RELEASE SAVEPOINT disable_rls; SET LOCAL app.tenant_id TO '1,3'; SELECT * FROM co_test_table ORDER BY id, col_1; id | col_1 | col_2 -----+-------+-------- +--------------------------------------------------------------------- 1 | 2 | 'cc2' 1 | 20 | 'aa10' 3 | 4 | 'cc1' @@ -574,7 +574,7 @@ ALTER TABLE co_test_table DISABLE ROW LEVEL SECURITY; SET citus.enable_ddl_propagation to on; SELECT run_command_on_shards('co_test_table','ALTER TABLE %s DISABLE ROW LEVEL SECURITY;'); run_command_on_shards ---------------------------- +--------------------------------------------------------------------- (1610004,t,"ALTER TABLE") (1610005,t,"ALTER TABLE") (1610006,t,"ALTER TABLE") @@ -586,7 +586,7 @@ DROP POLICY filter_by_tenant_id ON co_test_table; SET citus.enable_ddl_propagation to on; SELECT run_command_on_shards('co_test_table', 'DROP POLICY filter_by_tenant_id ON %s;'); run_command_on_shards ---------------------------- +--------------------------------------------------------------------- (1610004,t,"DROP POLICY") (1610005,t,"DROP POLICY") (1610006,t,"DROP POLICY") @@ -599,7 +599,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT id, pg_advisory_lock(15) FROM test_table ORDER BY 1 DESC; id | pg_advisory_lock -----+------------------ +--------------------------------------------------------------------- 6 | 5 | 4 | @@ -613,7 +613,7 @@ SET client_min_messages TO DEFAULT; alter system set deadlock_timeout TO DEFAULT; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) @@ -624,7 +624,7 @@ SET citus.select_opens_transaction_block TO off; -- still holds the advisory locks since the sessions are still active SELECT id, pg_advisory_xact_lock(16) FROM test_table ORDER BY id; id | pg_advisory_xact_lock -----+----------------------- +--------------------------------------------------------------------- 1 | 2 | 3 | diff --git a/src/test/regress/expected/multi_reference_table.out b/src/test/regress/expected/multi_reference_table.out index ce740f8b4..23f0c8f10 100644 --- a/src/test/regress/expected/multi_reference_table.out +++ b/src/test/regress/expected/multi_reference_table.out @@ -6,7 +6,7 @@ INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); SELECT create_reference_table('reference_table_test'); NOTICE: Copying data from local table... 
create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -18,7 +18,7 @@ FROM WHERE logicalrelid = 'reference_table_test'::regclass; partmethod | partkeyisnull | repmodel -------------+---------------+---------- +--------------------------------------------------------------------- n | t | t (1 row) @@ -30,7 +30,7 @@ FROM WHERE logicalrelid = 'reference_table_test'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- +--------------------------------------------------------------------- 1250000 | t | t (1 row) @@ -44,14 +44,14 @@ WHERE GROUP BY shardid ORDER BY shardid; shardid | all_placements_healthy | replicated_to_all ----------+------------------------+------------------- +--------------------------------------------------------------------- 1250000 | t | t (1 row) -- check whether data was copied into distributed table SELECT * FROM reference_table_test; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) @@ -69,7 +69,7 @@ SELECT FROM reference_table_test; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 @@ -84,7 +84,7 @@ FROM WHERE value_1 = 1; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) @@ -96,7 +96,7 @@ FROM ORDER BY 2 ASC LIMIT 3; value_1 | value_2 ----------+--------- +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -111,7 +111,7 @@ WHERE ORDER BY 2 LIMIT 3; value_1 | value_3 ----------+--------- +--------------------------------------------------------------------- 4 | 4 5 | 5 (2 rows) @@ -124,7 +124,7 @@ ORDER BY 2 ASC LIMIT 2; value_1 | ?column? ----------+---------- +--------------------------------------------------------------------- 1 | 15 2 | 30 (2 rows) @@ -136,7 +136,7 @@ FROM ORDER BY 2 ASC LIMIT 2 OFFSET 2; value_1 | ?column? 
----------+---------- +--------------------------------------------------------------------- 3 | 45 4 | 60 (2 rows) @@ -148,7 +148,7 @@ FROM WHERE value_2 = 2 OR value_2 = 3; value_2 | value_4 ----------+-------------------------- +--------------------------------------------------------------------- 2 | Fri Dec 02 00:00:00 2016 3 | Sat Dec 03 00:00:00 2016 (2 rows) @@ -160,7 +160,7 @@ FROM WHERE value_2 = 2 AND value_2 = 3; value_2 | value_4 ----------+--------- +--------------------------------------------------------------------- (0 rows) SELECT @@ -170,7 +170,7 @@ FROM WHERE value_3 = '2' OR value_1 = 3; value_2 | value_4 ----------+-------------------------- +--------------------------------------------------------------------- 2 | Fri Dec 02 00:00:00 2016 3 | Sat Dec 03 00:00:00 2016 (2 rows) @@ -185,7 +185,7 @@ WHERE ) AND FALSE; value_2 | value_4 ----------+--------- +--------------------------------------------------------------------- (0 rows) SELECT @@ -202,7 +202,7 @@ WHERE ) AND value_1 < 3; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (2 rows) @@ -217,7 +217,7 @@ WHERE '1', '2' ); value_4 --------------------------- +--------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 Fri Dec 02 00:00:00 2016 (2 rows) @@ -232,7 +232,7 @@ WHERE '5', '2' ); date_part ------------ +--------------------------------------------------------------------- 2 5 (2 rows) @@ -244,7 +244,7 @@ FROM WHERE value_2 <= 2 AND value_2 >= 4; value_4 ---------- +--------------------------------------------------------------------- (0 rows) SELECT @@ -254,7 +254,7 @@ FROM WHERE value_2 <= 20 AND value_2 >= 4; value_4 --------------------------- +--------------------------------------------------------------------- Sun Dec 04 00:00:00 2016 Mon Dec 05 00:00:00 2016 (2 rows) @@ -266,7 +266,7 @@ FROM WHERE value_2 >= 5 AND value_2 <= random(); value_4 ---------- +--------------------------------------------------------------------- (0 rows) SELECT @@ -276,7 +276,7 @@ FROM WHERE value_4 BETWEEN '2016-12-01' AND '2016-12-03'; value_1 ---------- +--------------------------------------------------------------------- 1 2 3 @@ -289,7 +289,7 @@ FROM WHERE FALSE; value_1 ---------- +--------------------------------------------------------------------- (0 rows) SELECT @@ -299,7 +299,7 @@ FROM WHERE int4eq(1, 2); value_1 ---------- +--------------------------------------------------------------------- (0 rows) -- rename output name and do some operations @@ -308,7 +308,7 @@ SELECT FROM reference_table_test; id | age -----+----- +--------------------------------------------------------------------- 1 | 15 2 | 30 3 | 45 @@ -323,7 +323,7 @@ SELECT FROM some_data; value_2 | value_4 ----------+-------------------------- +--------------------------------------------------------------------- 3 | Sat Dec 03 00:00:00 2016 4 | Sun Dec 04 00:00:00 2016 5 | Mon Dec 05 00:00:00 2016 @@ -333,7 +333,7 @@ FROM WITH some_data AS ( SELECT value_2, value_4 FROM reference_table_test WHERE value_2 >=3) SELECT * FROM reference_table_test ORDER BY 1 LIMIT 1; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) @@ -345,7 +345,7 @@ FROM WHERE value_1 = 1; value_1 
| value_2 | value_3 | value_4 | position ----------+---------+---------+--------------------------+---------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 | 3 (1 row) @@ -356,7 +356,7 @@ FROM WHERE value_1 = 1 OR value_1 = 2; value_1 | value_2 | value_3 | value_4 | position ----------+---------+---------+--------------------------+---------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 | 3 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 | 3 (2 rows) @@ -369,7 +369,7 @@ SELECT * FROM ( ) AS combination ORDER BY value_1; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 (2 rows) @@ -381,7 +381,7 @@ SELECT * FROM ( ) AS combination ORDER BY value_1; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) @@ -392,7 +392,7 @@ SELECT * FROM ( ) AS combination ORDER BY value_1; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+--------- +--------------------------------------------------------------------- (0 rows) -- to make the tests more interested for aggregation tests, ingest some more data @@ -411,7 +411,7 @@ HAVING ORDER BY 1; value_4 | sum ---------------------------+----- +--------------------------------------------------------------------- Fri Dec 02 00:00:00 2016 | 4 Sat Dec 03 00:00:00 2016 | 6 Sun Dec 04 00:00:00 2016 | 4 @@ -428,7 +428,7 @@ GROUP BY GROUPING sets ((value_4), (value_3)) ORDER BY 1, 2, 3; value_4 | value_3 | sum ---------------------------+---------+----- +--------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 | | 2 Fri Dec 02 00:00:00 2016 | | 4 Sat Dec 03 00:00:00 2016 | | 6 @@ -449,7 +449,7 @@ FROM ORDER BY 1; value_4 --------------------------- +--------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 Fri Dec 02 00:00:00 2016 Sat Dec 03 00:00:00 2016 @@ -463,7 +463,7 @@ SELECT FROM reference_table_test; value_4 | rank ---------------------------+------ +--------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 | 1 Thu Dec 01 00:00:00 2016 | 1 Fri Dec 02 00:00:00 2016 | 1 @@ -480,7 +480,7 @@ SELECT FROM reference_table_test; value_4 | avg ---------------------------+------------------------ +--------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 | 1.00000000000000000000 Thu Dec 01 00:00:00 2016 | 1.00000000000000000000 Fri Dec 02 00:00:00 2016 | 2.0000000000000000 @@ -503,7 +503,7 @@ SELECT FROM reference_table_test; c ---- +--------------------------------------------------------------------- 3 (1 row) @@ -524,7 +524,7 @@ SELECT ORDER BY 1; value_1 | c ----------+--- +--------------------------------------------------------------------- 1 | 0 2 | 0 3 | 1 @@ -536,7 +536,7 @@ SELECT BEGIN; SELECT * FROM reference_table_test; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 @@ -549,7 +549,7 @@ SELECT * FROM 
reference_table_test; SELECT * FROM reference_table_test WHERE value_1 = 1; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (2 rows) @@ -564,13 +564,13 @@ DECLARE test_cursor CURSOR FOR ORDER BY value_1; FETCH test_cursor; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) FETCH ALL test_cursor; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 @@ -578,12 +578,12 @@ FETCH ALL test_cursor; FETCH test_cursor; -- fetch one row after the last value_1 | value_2 | value_3 | value_4 ----------+---------+---------+--------- +--------------------------------------------------------------------- (0 rows) FETCH BACKWARD test_cursor; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (1 row) @@ -598,14 +598,14 @@ CREATE TEMP TABLE temp_reference_test as CREATE TABLE reference_table_test_second (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_second'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE reference_table_test_third (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_third'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -625,7 +625,7 @@ WHERE ORDER BY 1; value_1 ---------- +--------------------------------------------------------------------- 1 2 3 @@ -640,7 +640,7 @@ WHERE ORDER BY 1; value_1 ---------- +--------------------------------------------------------------------- 4 5 (2 rows) @@ -654,7 +654,7 @@ WHERE ORDER BY 1; value_1 ---------- +--------------------------------------------------------------------- (0 rows) -- join on different columns and different data types via casts @@ -667,7 +667,7 @@ WHERE ORDER BY 1; value_1 ---------- +--------------------------------------------------------------------- 1 2 3 @@ -682,7 +682,7 @@ WHERE ORDER BY 1; value_1 ---------- +--------------------------------------------------------------------- 1 2 3 @@ -697,7 +697,7 @@ WHERE ORDER BY 1; value_1 ---------- +--------------------------------------------------------------------- 1 2 3 @@ -714,7 +714,7 @@ WHERE ORDER BY 1; value_1 ---------- +--------------------------------------------------------------------- 3 (1 row) @@ -728,7 +728,7 @@ WHERE ORDER BY 1; value_1 ---------- +--------------------------------------------------------------------- 3 (1 row) @@ -741,7 +741,7 @@ FROM ORDER BY 1; value_1 ---------- +--------------------------------------------------------------------- 3 (1 row) @@ -754,7 +754,7 @@ FROM ORDER BY 1; value_1 ---------- +--------------------------------------------------------------------- 1 2 3 @@ -770,7 +770,7 @@ FROM ORDER BY 1; value_1 ---------- 
+--------------------------------------------------------------------- 3 (2 rows) @@ -779,7 +779,7 @@ ORDER BY CREATE TABLE reference_table_test_fourth (value_1 int, value_2 float PRIMARY KEY, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_fourth'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -796,14 +796,14 @@ ERROR: null value in column "value_2" violates not-null constraint -- lets run some upserts INSERT INTO reference_table_test_fourth VALUES (1, 1.0, '1', '2016-12-01') ON CONFLICT DO NOTHING RETURNING *; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+--------- +--------------------------------------------------------------------- (0 rows) INSERT INTO reference_table_test_fourth VALUES (1, 1.0, '10', '2016-12-01') ON CONFLICT (value_2) DO UPDATE SET value_3 = EXCLUDED.value_3, value_2 = EXCLUDED.value_2 RETURNING *; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 10 | Thu Dec 01 00:00:00 2016 (1 row) @@ -812,7 +812,7 @@ INSERT INTO reference_table_test_fourth VALUES (1, 1.0, '10', '2016-12-01') ON C UPDATE SET value_3 = EXCLUDED.value_3 || '+10', value_2 = EXCLUDED.value_2 + 10, value_1 = EXCLUDED.value_1 + 10, value_4 = '2016-12-10' RETURNING *; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 11 | 11 | 10+10 | Sat Dec 10 00:00:00 2016 (1 row) @@ -826,7 +826,7 @@ WHERE GROUP BY shardid ORDER BY shardid; shardid | all_placements_healthy | replicated_to_all ----------+------------------------+------------------- +--------------------------------------------------------------------- 1250003 | t | t (1 row) @@ -837,7 +837,7 @@ WHERE value_1 = 1 RETURNING *; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (2 rows) @@ -848,7 +848,7 @@ WHERE value_4 = '2016-12-05' RETURNING *; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 5 | 5 | 5 | Mon Dec 05 00:00:00 2016 (1 row) @@ -860,7 +860,7 @@ WHERE value_2 = 2 RETURNING *; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 2 | 15 | 2 | Fri Dec 02 00:00:00 2016 2 | 15 | 2 | Fri Dec 02 00:00:00 2016 (2 rows) @@ -872,7 +872,7 @@ SET value_2 = 15, value_1 = 45 RETURNING *; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 45 | 15 | 2 | Fri Dec 02 00:00:00 2016 45 | 15 | 2 | Fri Dec 02 00:00:00 2016 45 | 15 | 3 | Sat Dec 03 00:00:00 2016 @@ -884,7 +884,7 @@ DELETE FROM reference_table_test RETURNING *; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 45 | 15 | 2 | Fri Dec 02 00:00:00 2016 45 | 15 | 2 | Fri Dec 02 00:00:00 2016 45 | 15 | 3 | Sat Dec 03 00:00:00 2016 @@ -896,7 +896,7 @@ RETURNING *; CREATE TABLE 
reference_table_test_fifth (value_1 serial PRIMARY KEY, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_fifth'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -906,7 +906,7 @@ INSERT INTO reference_table_test_fifth (value_2) VALUES (2) RETURNING value_1, value_2; value_1 | value_2 ----------+--------- +--------------------------------------------------------------------- 1 | 2 (1 row) @@ -914,7 +914,7 @@ INSERT INTO reference_table_test_fifth (value_2) VALUES (2) RETURNING value_1, value_2; value_1 | value_2 ----------+--------- +--------------------------------------------------------------------- 2 | 2 (1 row) @@ -922,7 +922,7 @@ INSERT INTO reference_table_test_fifth (value_2, value_3) VALUES (nextval('example_ref_value_seq'), nextval('example_ref_value_seq')::text) RETURNING value_1, value_2, value_3; value_1 | value_2 | value_3 ----------+---------+--------- +--------------------------------------------------------------------- 3 | 1 | 2 (1 row) @@ -932,7 +932,7 @@ WHERE value_1 = 1 RETURNING value_1, value_2, value_4 > '2000-01-01'; value_1 | value_2 | ?column? ----------+---------+---------- +--------------------------------------------------------------------- 1 | 2 | t (1 row) @@ -960,7 +960,7 @@ INSERT INTO reference_table_test RETURNING *; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Fri Jan 01 00:00:00 2016 | 2 | 2 | Sat Jan 02 00:00:00 2016 | | 3 | @@ -975,7 +975,7 @@ INSERT INTO reference_table_test JOIN reference_table_test_second USING (value_1) RETURNING *; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+--------- +--------------------------------------------------------------------- | 1 | | (1 row) @@ -984,14 +984,14 @@ SET citus.shard_replication_factor TO 2; CREATE TABLE colocated_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_distributed_table('colocated_table_test', 'value_1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE colocated_table_test_2 (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_distributed_table('colocated_table_test_2', 'value_1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1012,7 +1012,7 @@ WHERE colocated_table_test.value_1 = reference_table_test.value_1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ] value_1 ---------- +--------------------------------------------------------------------- 1 2 (2 rows) @@ -1025,7 +1025,7 @@ WHERE colocated_table_test.value_2 = reference_table_test.value_2; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ] value_2 ---------- +--------------------------------------------------------------------- 1 2 (2 rows) @@ -1038,7 +1038,7 @@ WHERE reference_table_test.value_1 = colocated_table_test.value_1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ] value_2 ---------- +--------------------------------------------------------------------- 1 2 (2 rows) @@ -1053,7 +1053,7 @@ WHERE ORDER BY colocated_table_test.value_2; LOG: join order: [ "colocated_table_test_2" ][ cartesian 
product reference join "reference_table_test" ][ dual partition join "colocated_table_test" ] value_2 ---------- +--------------------------------------------------------------------- 1 1 2 @@ -1069,7 +1069,7 @@ WHERE colocated_table_test.value_1 = colocated_table_test_2.value_1 AND colocated_table_test.value_2 = reference_table_test.value_2; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ local partition join "colocated_table_test_2" ] value_2 ---------- +--------------------------------------------------------------------- 1 2 (2 rows) @@ -1083,7 +1083,7 @@ WHERE colocated_table_test.value_2 = colocated_table_test_2.value_2 AND colocated_table_test.value_2 = reference_table_test.value_2; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ dual partition join "colocated_table_test_2" ] value_2 ---------- +--------------------------------------------------------------------- 1 2 (2 rows) @@ -1096,7 +1096,7 @@ WHERE colocated_table_test.value_1 = reference_table_test.value_1 AND colocated_table_test_2.value_1 = reference_table_test.value_1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ dual partition join "colocated_table_test_2" ] value_2 ---------- +--------------------------------------------------------------------- 1 2 (2 rows) @@ -1139,7 +1139,7 @@ WHERE colocated_table_test_2.value_4 = reference_table_test.value_4 RETURNING value_1, value_2; value_1 | value_2 ----------+--------- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -1155,7 +1155,7 @@ WHERE colocated_table_test_2.value_1 > reference_table_test.value_2 RETURNING value_1, value_2; value_1 | value_2 ----------+--------- +--------------------------------------------------------------------- 2 | 1 (1 row) @@ -1191,7 +1191,7 @@ DETAIL: Replication models don't match for colocated_table_test_2 and reference -- should work sliently SELECT mark_tables_colocated('reference_table_test', ARRAY['reference_table_test_fifth']); mark_tables_colocated ------------------------ +--------------------------------------------------------------------- (1 row) @@ -1202,7 +1202,7 @@ CREATE SCHEMA reference_schema; CREATE TABLE reference_schema.reference_table_test_sixth (value_1 serial PRIMARY KEY, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_schema.reference_table_test_sixth'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -1211,7 +1211,7 @@ SET search_path TO 'reference_schema'; CREATE TABLE reference_table_test_seventh (value_1 serial PRIMARY KEY, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_seventh'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -1228,7 +1228,7 @@ SELECT FROM reference_schema.reference_table_test_sixth; value_1 ---------- +--------------------------------------------------------------------- 1 2 (2 rows) @@ -1241,7 +1241,7 @@ FROM WHERE reference_table_test_sixth.value_4 = reference_table_test_seventh.value_4; value_1 ---------- +--------------------------------------------------------------------- 1 2 (2 rows) @@ -1255,7 +1255,7 @@ FROM WHERE colocated_table_test_2.value_4 = reftable.value_4; value_2 | value_1 ----------+--------- +--------------------------------------------------------------------- 1 
| 1 2 | 2 (2 rows) @@ -1273,7 +1273,7 @@ SELECT FROM reference_table_test; count -------- +--------------------------------------------------------------------- 5 (1 row) @@ -1284,7 +1284,7 @@ SELECT FROM reference_table_test; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -1292,32 +1292,32 @@ FROM -- and check the metadata SELECT logicalrelid FROM pg_dist_partition WHERE logicalrelid::regclass::text LIKE '%reference_table_test_fifth%'; logicalrelid ----------------------------- +--------------------------------------------------------------------- reference_table_test_fifth (1 row) SELECT logicalrelid FROM pg_dist_shard WHERE logicalrelid::regclass::text LIKE '%reference_table_test_fifth%'; logicalrelid ----------------------------- +--------------------------------------------------------------------- reference_table_test_fifth (1 row) DROP TABLE reference_table_test_fifth; SELECT logicalrelid FROM pg_dist_partition WHERE logicalrelid::regclass::text LIKE '%reference_table_test_fifth%'; logicalrelid --------------- +--------------------------------------------------------------------- (0 rows) SELECT logicalrelid FROM pg_dist_shard WHERE logicalrelid::regclass::text LIKE '%reference_table_test_fifth%'; logicalrelid --------------- +--------------------------------------------------------------------- (0 rows) -- now test DDL changes CREATE TABLE reference_schema.reference_table_ddl (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_schema.reference_table_ddl'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -1335,7 +1335,7 @@ ALTER TABLE reference_schema.reference_table_ddl ALTER COLUMN value_3 SET NOT NU -- see that Citus applied all DDLs to the table SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='reference_schema.reference_table_ddl'::regclass; Column | Type | Modifiers ----------+-----------------------------+-------------- +--------------------------------------------------------------------- value_2 | double precision | default 25.0 value_3 | text | not null value_4 | timestamp without time zone | @@ -1345,7 +1345,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='reference_sche SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'reference_schema.reference_index_2'::regclass; Column | Type | Definition ----------+------------------+------------ +--------------------------------------------------------------------- value_2 | double precision | value_2 value_3 | text | value_3 (2 rows) @@ -1354,7 +1354,7 @@ SELECT "Column", "Type", "Definition" FROM index_attrs WHERE \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='reference_schema.reference_table_ddl_1250019'::regclass; Column | Type | Modifiers ----------+-----------------------------+-------------- +--------------------------------------------------------------------- value_2 | double precision | default 25.0 value_3 | text | not null value_4 | timestamp without time zone | @@ -1364,7 +1364,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='reference_sche SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'reference_schema.reference_index_2_1250019'::regclass; Column | Type | Definition ----------+------------------+------------ +--------------------------------------------------------------------- value_2 | 
double precision | value_2 value_3 | text | value_3 (2 rows) @@ -1374,7 +1374,7 @@ DROP INDEX reference_schema.reference_index_2; \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='reference_schema.reference_table_ddl_1250019'::regclass; Column | Type | Modifiers ----------+-----------------------------+-------------- +--------------------------------------------------------------------- value_2 | double precision | default 25.0 value_3 | text | not null value_4 | timestamp without time zone | @@ -1384,7 +1384,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='reference_sche \di reference_schema.reference_index_2* List of relations Schema | Name | Type | Owner | Table ---------+------+------+-------+------- +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -1405,14 +1405,14 @@ SELECT part_storage_type, part_key, part_replica_count, part_max_size, part_placement_policy FROM master_get_table_metadata('reference_schema.reference_table_ddl'); part_storage_type | part_key | part_replica_count | part_max_size | part_placement_policy --------------------+----------+--------------------+---------------+----------------------- +--------------------------------------------------------------------- t | | 2 | 1536000 | 2 (1 row) SELECT shardid AS a_shard_id FROM pg_dist_shard WHERE logicalrelid = 'reference_schema.reference_table_ddl'::regclass \gset SELECT master_update_shard_statistics(:a_shard_id); master_update_shard_statistics --------------------------------- +--------------------------------------------------------------------- 8192 (1 row) @@ -1422,7 +1422,7 @@ ERROR: cannot append to shardId 1250019 DETAIL: We currently don't support appending to shards in hash-partitioned or reference tables SELECT master_get_table_ddl_events('reference_schema.reference_table_ddl'); master_get_table_ddl_events ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- CREATE TABLE reference_schema.reference_table_ddl (value_2 double precision DEFAULT 25.0, value_3 text NOT NULL, value_4 timestamp without time zone, value_5 double precision) ALTER TABLE reference_schema.reference_table_ddl OWNER TO postgres (2 rows) @@ -1434,13 +1434,13 @@ SELECT placementid AS b_placement_id FROM pg_dist_shard_placement WHERE shardid UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE placementid = :a_placement_id; SELECT master_copy_shard_placement(:a_shard_id, 'localhost', :worker_2_port, 'localhost', :worker_1_port); master_copy_shard_placement ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT shardid, shardstate FROM pg_dist_shard_placement WHERE placementid = :a_placement_id; shardid | shardstate ----------+------------ +--------------------------------------------------------------------- 1250019 | 1 (1 row) @@ -1458,49 +1458,49 @@ RETURNS void AS ' TRUNCATE reference_table_test; SELECT select_count_all(); select_count_all ------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT insert_into_ref_table(1, 1.0, '1', '2016-12-01'); insert_into_ref_table ------------------------ +--------------------------------------------------------------------- (1 row) SELECT insert_into_ref_table(2, 2.0, '2', 
'2016-12-02'); insert_into_ref_table ------------------------ +--------------------------------------------------------------------- (1 row) SELECT insert_into_ref_table(3, 3.0, '3', '2016-12-03'); insert_into_ref_table ------------------------ +--------------------------------------------------------------------- (1 row) SELECT insert_into_ref_table(4, 4.0, '4', '2016-12-04'); insert_into_ref_table ------------------------ +--------------------------------------------------------------------- (1 row) SELECT insert_into_ref_table(5, 5.0, '5', '2016-12-05'); insert_into_ref_table ------------------------ +--------------------------------------------------------------------- (1 row) SELECT insert_into_ref_table(6, 6.0, '6', '2016-12-06'); insert_into_ref_table ------------------------ +--------------------------------------------------------------------- (1 row) SELECT select_count_all(); select_count_all ------------------- +--------------------------------------------------------------------- 6 (1 row) @@ -1518,7 +1518,7 @@ EXECUTE insert_into_ref_table_pr(6, 6.0, '6', '2016-12-06'); -- see the count, then truncate the table SELECT select_count_all(); select_count_all ------------------- +--------------------------------------------------------------------- 6 (1 row) @@ -1531,7 +1531,7 @@ CREATE TYPE reference_comp_key as (key text, value text); CREATE TABLE reference_table_composite (id int PRIMARY KEY, data reference_comp_key); SELECT create_reference_table('reference_table_composite'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -1540,14 +1540,14 @@ INSERT INTO reference_table_composite (id, data) VALUES (1, ('key_1', 'value_1') INSERT INTO reference_table_composite (id, data) VALUES (2, ('key_2', 'value_2')::reference_comp_key); SELECT * FROM reference_table_composite; id | data -----+----------------- +--------------------------------------------------------------------- 1 | (key_1,value_1) 2 | (key_2,value_2) (2 rows) SELECT (data).key FROM reference_table_composite; key -------- +--------------------------------------------------------------------- key_1 key_2 (2 rows) @@ -1558,14 +1558,14 @@ BEGIN; INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); SELECT * FROM reference_table_test; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) ROLLBACK; SELECT * FROM reference_table_test; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+--------- +--------------------------------------------------------------------- (0 rows) -- now insert a row and commit @@ -1574,7 +1574,7 @@ INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); COMMIT; SELECT * FROM reference_table_test; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (1 row) @@ -1584,7 +1584,7 @@ UPDATE reference_table_test SET value_1 = 10 WHERE value_1 = 2; COMMIT; SELECT * FROM reference_table_test; value_1 | value_2 | value_3 | value_4 ----------+---------+---------+-------------------------- +--------------------------------------------------------------------- 10 | 2 | 2 | Fri Dec 02 00:00:00 2016 (1 row) diff --git a/src/test/regress/expected/multi_remove_node_reference_table.out 
b/src/test/regress/expected/multi_remove_node_reference_table.out index cb72249f4..28f04bb92 100644 --- a/src/test/regress/expected/multi_remove_node_reference_table.out +++ b/src/test/regress/expected/multi_remove_node_reference_table.out @@ -12,7 +12,7 @@ DELETE FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; -- make worker 1 receive metadata changes SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -23,20 +23,20 @@ ERROR: node at "localhost:xxxxx" does not exist -- verify node exist before removal SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) -- verify node is removed SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -46,7 +46,7 @@ SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeid=:worker_2_nodeid -- add a secondary to check we don't attempt to replicate the table to it SELECT 1 FROM master_add_node('localhost', 9000, groupid=>:worker_2_group, noderole=>'secondary'); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -54,72 +54,72 @@ SELECT 1 FROM master_add_node('localhost', 9000, groupid=>:worker_2_group, noder CREATE TABLE remove_node_reference_table(column1 int); SELECT create_reference_table('remove_node_reference_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) -- make sure when we add a secondary we don't attempt to add placements to it SELECT 1 FROM master_add_node('localhost', 9001, groupid=>:worker_2_group, noderole=>'secondary'); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; count -------- +--------------------------------------------------------------------- 1 (1 row) -- make sure when we disable a secondary we don't remove any placements SELECT master_disable_node('localhost', 9001); master_disable_node ---------------------- +--------------------------------------------------------------------- (1 row) SELECT isactive FROM pg_dist_node WHERE nodeport = 9001; isactive ----------- +--------------------------------------------------------------------- f (1 row) SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; count -------- +--------------------------------------------------------------------- 1 (1 row) -- make sure when we activate a secondary we don't add any placements SELECT 1 FROM master_activate_node('localhost', 9001); ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; count -------- +--------------------------------------------------------------------- 1 (1 row) -- make sure when we remove a secondary we don't remove any placements SELECT master_remove_node('localhost', 9001); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; count -------- +--------------------------------------------------------------------- 1 (1 row) -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -130,7 +130,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -141,14 +141,14 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -159,21 +159,21 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) \c - - - :master_port SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -184,7 +184,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) SELECT * @@ -194,14 +194,14 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -212,7 +212,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- 
+--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -223,20 +223,20 @@ ERROR: node at "localhost:xxxxx" does not exist SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) -- try to disable the node before removing it (this used to crash) SELECT master_disable_node('localhost', :worker_2_port); master_disable_node ---------------------- +--------------------------------------------------------------------- (1 row) SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) @@ -244,7 +244,7 @@ SELECT master_remove_node('localhost', :worker_2_port); SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -252,7 +252,7 @@ NOTICE: Replicating reference table "remove_node_reference_table" to the node l -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -263,7 +263,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -274,14 +274,14 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -292,7 +292,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -300,7 +300,7 @@ WHERE BEGIN; SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) @@ -308,7 +308,7 @@ ROLLBACK; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -319,7 +319,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -330,14 +330,14 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); 
colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -348,7 +348,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -357,7 +357,7 @@ WHERE -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -368,7 +368,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -379,14 +379,14 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -397,7 +397,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -405,7 +405,7 @@ WHERE BEGIN; SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) @@ -413,7 +413,7 @@ COMMIT; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -424,7 +424,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) SELECT * @@ -434,14 +434,14 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -452,7 
+452,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -460,7 +460,7 @@ WHERE SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -468,7 +468,7 @@ NOTICE: Replicating reference table "remove_node_reference_table" to the node l -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -479,7 +479,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -490,14 +490,14 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -508,7 +508,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -517,7 +517,7 @@ BEGIN; INSERT INTO remove_node_reference_table VALUES(1); SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) @@ -525,7 +525,7 @@ COMMIT; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -536,7 +536,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) SELECT * @@ -546,21 +546,21 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) --verify the data is inserted SELECT * FROM remove_node_reference_table; column1 ---------- +--------------------------------------------------------------------- 1 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- 
+--------------------------------------------------------------------- 0 (1 row) @@ -571,12 +571,12 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM remove_node_reference_table; column1 ---------- +--------------------------------------------------------------------- 1 (1 row) @@ -585,7 +585,7 @@ SELECT * FROM remove_node_reference_table; SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -593,7 +593,7 @@ NOTICE: Replicating reference table "remove_node_reference_table" to the node l -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -604,7 +604,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -615,14 +615,14 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -633,7 +633,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -642,7 +642,7 @@ BEGIN; ALTER TABLE remove_node_reference_table ADD column2 int; SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) @@ -650,7 +650,7 @@ COMMIT; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -661,7 +661,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) SELECT * @@ -671,14 +671,14 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node 
WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -689,7 +689,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -697,7 +697,7 @@ SET citus.next_shard_id TO 1380001; -- verify table structure is changed SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.remove_node_reference_table'::regclass; Column | Type | Modifiers ----------+---------+----------- +--------------------------------------------------------------------- column1 | integer | column2 | integer | (2 rows) @@ -706,7 +706,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.remove_ SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -714,7 +714,7 @@ NOTICE: Replicating reference table "remove_node_reference_table" to the node l -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -725,7 +725,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -736,14 +736,14 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) BEGIN; SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) @@ -752,7 +752,7 @@ COMMIT; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -763,18 +763,18 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM pg_dist_colocation WHERE colocationid = 1380000; colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- (0 rows) -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) @@ -782,7 +782,7 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port); CREATE TABLE remove_node_reference_table(column1 int); SELECT create_reference_table('remove_node_reference_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -791,14 +791,14 @@ CREATE SCHEMA remove_node_reference_table_schema; CREATE TABLE remove_node_reference_table_schema.table1(column1 int); SELECT create_reference_table('remove_node_reference_table_schema.table1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -811,7 +811,7 @@ WHERE ORDER BY shardid; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1380001 | 1 | 0 | localhost | 57638 1380002 | 1 | 0 | localhost | 57638 (2 rows) @@ -823,14 +823,14 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table_schema.table1'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -843,7 +843,7 @@ WHERE ORDER BY shardid; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1380001 | 1 | 0 | localhost | 57638 1380002 | 1 | 0 | localhost | 57638 (2 rows) @@ -851,14 +851,14 @@ ORDER BY \c - - - :master_port SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -869,7 +869,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) SELECT * @@ -879,14 +879,14 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table_schema.table1'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 0 (1 
row) @@ -897,7 +897,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -906,7 +906,7 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx NOTICE: Replicating reference table "table1" to the node localhost:xxxxx ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -914,7 +914,7 @@ NOTICE: Replicating reference table "table1" to the node localhost:xxxxx -- status before master_disable_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -927,7 +927,7 @@ WHERE ORDER BY shardid; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1380001 | 1 | 0 | localhost | 57638 1380002 | 1 | 0 | localhost | 57638 (2 rows) @@ -939,14 +939,14 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -958,7 +958,7 @@ WHERE nodeport = :worker_2_port ORDER BY shardid ASC; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1380001 | 1 | 0 | localhost | 57638 1380002 | 1 | 0 | localhost | 57638 (2 rows) @@ -966,14 +966,14 @@ ORDER BY shardid ASC; \c - - - :master_port SELECT master_disable_node('localhost', :worker_2_port); master_disable_node ---------------------- +--------------------------------------------------------------------- (1 row) -- status after master_disable_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -984,7 +984,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) SELECT * @@ -994,14 +994,14 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -1012,7 
+1012,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -1021,7 +1021,7 @@ SELECT 1 FROM master_activate_node('localhost', :worker_2_port); NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx NOTICE: Replicating reference table "table1" to the node localhost:xxxxx ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -1031,7 +1031,7 @@ DROP TABLE remove_node_reference_table_schema.table1; DROP SCHEMA remove_node_reference_table_schema CASCADE; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/multi_repair_shards.out b/src/test/regress/expected/multi_repair_shards.out index 17a9c45eb..93926ec0c 100644 --- a/src/test/regress/expected/multi_repair_shards.out +++ b/src/test/regress/expected/multi_repair_shards.out @@ -16,7 +16,7 @@ SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('customer_engagements', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -51,7 +51,7 @@ ROLLBACK; BEGIN; SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); master_copy_shard_placement ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -60,7 +60,7 @@ ROLLBACK; BEGIN; SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); master_copy_shard_placement ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -77,7 +77,7 @@ ERROR: source placement must be in finalized state -- "copy" this shard from the first placement to the second one SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); master_copy_shard_placement ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -86,7 +86,7 @@ UPDATE pg_dist_placement SET shardstate = 3 WHERE shardid = :newshardid AND grou -- get the data from the second placement SELECT * FROM customer_engagements; id | created_at | event_data -----+------------+-------------- +--------------------------------------------------------------------- 1 | 01-01-2015 | first event 2 | 02-01-2015 | second event 1 | 03-01-2015 | third event @@ -105,7 +105,7 @@ SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('remote_engagements', 'id', 'hash'); NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/multi_repartition_join_planning.out b/src/test/regress/expected/multi_repartition_join_planning.out index f6f49cdf2..8e403d670 100644 --- a/src/test/regress/expected/multi_repartition_join_planning.out +++ b/src/test/regress/expected/multi_repartition_join_planning.out @@ -29,13 +29,13 @@ CREATE TABLE stock ( ); SELECT 
create_distributed_table('order_line','ol_w_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('stock','s_w_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -113,7 +113,7 @@ DEBUG: completed cleanup query for job 2 DEBUG: completed cleanup query for job 1 DEBUG: completed cleanup query for job 1 l_partkey | o_orderkey | count ------------+------------+------- +--------------------------------------------------------------------- 18 | 12005 | 1 79 | 5121 | 1 91 | 2883 | 1 @@ -216,7 +216,7 @@ DEBUG: completed cleanup query for job 4 DEBUG: completed cleanup query for job 5 DEBUG: completed cleanup query for job 5 l_partkey | o_orderkey | count ------------+------------+------- +--------------------------------------------------------------------- (0 rows) -- Check that grouping by primary key allows o_shippriority to be in the target list @@ -290,7 +290,7 @@ DEBUG: completed cleanup query for job 7 DEBUG: completed cleanup query for job 8 DEBUG: completed cleanup query for job 8 o_orderkey | o_shippriority | count -------------+----------------+------- +--------------------------------------------------------------------- (0 rows) -- Check that grouping by primary key allows o_shippriority to be in the target @@ -366,7 +366,7 @@ DEBUG: completed cleanup query for job 10 DEBUG: completed cleanup query for job 11 DEBUG: completed cleanup query for job 11 o_orderkey | o_shippriority | count -------------+----------------+------- +--------------------------------------------------------------------- (0 rows) -- Check that calling any_value manually works as well @@ -440,7 +440,7 @@ DEBUG: completed cleanup query for job 13 DEBUG: completed cleanup query for job 14 DEBUG: completed cleanup query for job 14 o_orderkey | any_value -------------+----------- +--------------------------------------------------------------------- (0 rows) -- Check that grouping by primary key allows s_quantity to be in the having @@ -524,7 +524,7 @@ DEBUG: completed cleanup query for job 16 DEBUG: completed cleanup query for job 17 DEBUG: completed cleanup query for job 17 s_i_id --------- +--------------------------------------------------------------------- (0 rows) -- Reset client logging level to its previous value diff --git a/src/test/regress/expected/multi_repartition_join_pruning.out b/src/test/regress/expected/multi_repartition_join_pruning.out index aab563930..427754fa4 100644 --- a/src/test/regress/expected/multi_repartition_join_pruning.out +++ b/src/test/regress/expected/multi_repartition_join_pruning.out @@ -29,7 +29,7 @@ DETAIL: Creating dependency on merge taskId 6 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 9 QUERY PLAN -------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 3 @@ -59,7 +59,7 @@ DETAIL: Creating dependency on merge taskId 6 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 9 count -------- +--------------------------------------------------------------------- 2985 (1 row) @@ -75,7 +75,7 @@ WHERE o_orderkey < 0; DEBUG: Router planner does not support append-partitioned tables. 
QUERY PLAN -------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 0 @@ -94,7 +94,7 @@ WHERE o_orderkey < 0; DEBUG: Router planner does not support append-partitioned tables. count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -110,7 +110,7 @@ WHERE c_custkey < 0; DEBUG: Router planner does not support append-partitioned tables. QUERY PLAN -------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 0 @@ -129,7 +129,7 @@ WHERE c_custkey < 0; DEBUG: Router planner does not support append-partitioned tables. count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -173,7 +173,7 @@ DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 16 QUERY PLAN -------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 4 @@ -222,7 +222,7 @@ DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 16 count -------- +--------------------------------------------------------------------- 125 (1 row) @@ -238,7 +238,7 @@ WHERE l_orderkey < 0; DEBUG: Router planner does not support append-partitioned tables. QUERY PLAN -------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 0 @@ -260,7 +260,7 @@ WHERE l_orderkey < 0; DEBUG: Router planner does not support append-partitioned tables. count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -274,7 +274,7 @@ WHERE false; DEBUG: Router planner does not support append-partitioned tables. QUERY PLAN -------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) Task Count: 0 Tasks Shown: None, not supported for re-partition queries @@ -292,7 +292,7 @@ WHERE false; DEBUG: Router planner does not support append-partitioned tables. o_orderkey ------------- +--------------------------------------------------------------------- (0 rows) EXPLAIN (COSTS OFF) @@ -304,7 +304,7 @@ WHERE 1=0 AND c_custkey < 0; DEBUG: Router planner does not support append-partitioned tables. QUERY PLAN -------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) Task Count: 0 Tasks Shown: None, not supported for re-partition queries @@ -320,7 +320,7 @@ FROM orders INNER JOIN customer_append ON (o_custkey = c_custkey AND false); DEBUG: Router planner does not support append-partitioned tables. QUERY PLAN ----------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) Task Count: 0 Tasks Shown: All @@ -335,7 +335,7 @@ WHERE o_custkey = c_custkey AND false; DEBUG: Router planner does not support append-partitioned tables. 
QUERY PLAN ----------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) Task Count: 0 Tasks Shown: All diff --git a/src/test/regress/expected/multi_repartition_join_ref.out b/src/test/regress/expected/multi_repartition_join_ref.out index b0fc05bda..85e101dc9 100644 --- a/src/test/regress/expected/multi_repartition_join_ref.out +++ b/src/test/regress/expected/multi_repartition_join_ref.out @@ -16,7 +16,7 @@ LIMIT 10; LOG: join order: [ "lineitem" ][ reference join "supplier" ][ single range partition join "part_append" ] DEBUG: push down of limit count: 10 l_partkey | l_suppkey | count ------------+-----------+------- +--------------------------------------------------------------------- 195 | 196 | 804 245 | 246 | 754 278 | 279 | 721 @@ -44,7 +44,7 @@ LIMIT 10; LOG: join order: [ "lineitem" ][ reference join "supplier" ][ single range partition join "part_append" ] DEBUG: push down of limit count: 10 l_partkey | l_suppkey | count ------------+-----------+------- +--------------------------------------------------------------------- 195 | 196 | 1 245 | 246 | 1 278 | 279 | 1 @@ -72,7 +72,7 @@ LIMIT 10; LOG: join order: [ "lineitem" ][ reference join "supplier" ][ single range partition join "part_append" ] DEBUG: push down of limit count: 10 l_partkey | l_suppkey | count ------------+-----------+------- +--------------------------------------------------------------------- 195 | 196 | 1 245 | 246 | 1 278 | 279 | 1 @@ -99,7 +99,7 @@ LIMIT 10; LOG: join order: [ "lineitem" ][ single range partition join "part_append" ][ cartesian product reference join "supplier" ] DEBUG: push down of limit count: 10 l_partkey | l_suppkey | count ------------+-----------+------- +--------------------------------------------------------------------- 18 | 7519 | 1000 79 | 7580 | 1000 91 | 2592 | 1000 @@ -127,7 +127,7 @@ LIMIT 10; LOG: join order: [ "lineitem" ][ reference join "supplier" ][ single range partition join "part_append" ] DEBUG: push down of limit count: 10 l_partkey | l_suppkey | count ------------+-----------+------- +--------------------------------------------------------------------- 195 | 196 | 1 245 | 246 | 1 278 | 279 | 1 @@ -155,7 +155,7 @@ LIMIT 10; LOG: join order: [ "lineitem" ][ reference join "supplier" ][ single range partition join "part_append" ] DEBUG: push down of limit count: 10 l_partkey | l_suppkey | count ------------+-----------+------- +--------------------------------------------------------------------- 195 | 196 | 1 245 | 246 | 1 278 | 279 | 1 @@ -183,7 +183,7 @@ LIMIT 10; LOG: join order: [ "lineitem" ][ reference join "supplier" ][ single range partition join "part_append" ] DEBUG: push down of limit count: 10 l_partkey | l_suppkey | count ------------+-----------+------- +--------------------------------------------------------------------- 18 | 7519 | 1 79 | 7580 | 1 91 | 2592 | 1 @@ -211,7 +211,7 @@ LIMIT 10; LOG: join order: [ "lineitem" ][ single range partition join "part_append" ][ reference join "supplier" ] DEBUG: push down of limit count: 10 l_partkey | l_suppkey | count ------------+-----------+------- +--------------------------------------------------------------------- 18 | 7519 | 1 79 | 7580 | 1 91 | 2592 | 1 diff --git a/src/test/regress/expected/multi_repartition_join_task_assignment.out b/src/test/regress/expected/multi_repartition_join_task_assignment.out index f904d738b..eeca76778 100644 --- a/src/test/regress/expected/multi_repartition_join_task_assignment.out +++ 
b/src/test/regress/expected/multi_repartition_join_task_assignment.out @@ -36,7 +36,7 @@ DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx count -------- +--------------------------------------------------------------------- 2985 (1 row) @@ -64,7 +64,7 @@ DETAIL: Creating dependency on merge taskId 8 DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx count -------- +--------------------------------------------------------------------- 12000 (1 row) @@ -115,7 +115,7 @@ DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx count -------- +--------------------------------------------------------------------- 125 (1 row) diff --git a/src/test/regress/expected/multi_repartition_udt.out b/src/test/regress/expected/multi_repartition_udt.out index 5e6842292..96ce66c26 100644 --- a/src/test/regress/expected/multi_repartition_udt.out +++ b/src/test/regress/expected/multi_repartition_udt.out @@ -127,14 +127,14 @@ SET citus.shard_count TO 3; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('repartition_udt', 'pk', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SET citus.shard_count TO 5; SELECT create_distributed_table('repartition_udt_other', 'pk', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -158,7 +158,7 @@ SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.pk = repartition_udt_other.pk WHERE repartition_udt.pk = 1; pk | udtcol | txtcol | pk | udtcol | txtcol -----+--------+--------+----+--------+-------- +--------------------------------------------------------------------- (0 rows) -- Query that should result in a repartition join on UDT column. 
@@ -169,7 +169,7 @@ EXPLAIN SELECT * FROM repartition_udt JOIN repartition_udt_other WHERE repartition_udt.pk > 1; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] QUERY PLAN --------------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) Task Count: 4 Tasks Shown: None, not supported for re-partition queries @@ -187,7 +187,7 @@ SELECT * FROM repartition_udt JOIN repartition_udt_other ORDER BY repartition_udt.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] pk | udtcol | txtcol | pk | udtcol | txtcol -----+--------+--------+----+--------+-------- +--------------------------------------------------------------------- 2 | (1,2) | foo | 8 | (1,2) | foo 3 | (1,3) | foo | 9 | (1,3) | foo 4 | (2,1) | foo | 10 | (2,1) | foo diff --git a/src/test/regress/expected/multi_repartitioned_subquery_udf.out b/src/test/regress/expected/multi_repartitioned_subquery_udf.out index a65177441..493915991 100644 --- a/src/test/regress/expected/multi_repartitioned_subquery_udf.out +++ b/src/test/regress/expected/multi_repartitioned_subquery_udf.out @@ -40,6 +40,6 @@ SELECT * FROM (SELECT median(ARRAY[1,2,sum(l_suppkey)]) as median, count(*) FROM lineitem GROUP BY l_partkey) AS a WHERE median > 2; median | count ---------+------- +--------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_replicate_reference_table.out b/src/test/regress/expected/multi_replicate_reference_table.out index 306d84c07..17ea5c3fa 100644 --- a/src/test/regress/expected/multi_replicate_reference_table.out +++ b/src/test/regress/expected/multi_replicate_reference_table.out @@ -11,7 +11,7 @@ CREATE TABLE tmp_shard_placement AS SELECT * FROM pg_dist_shard_placement WHERE DELETE FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) @@ -19,20 +19,20 @@ SELECT master_remove_node('localhost', :worker_2_port); -- verify there is no node with nodeport = :worker_2_port before adding the node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) -- verify node is added SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -44,27 +44,27 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) -- test adding new node with a reference table which does not have any healthy placement SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) -- verify there is no node with nodeport = :worker_2_port before adding the node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 0 (1 row) CREATE TABLE replicate_reference_table_unhealthy(column1 int); SELECT create_reference_table('replicate_reference_table_unhealthy'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -74,7 +74,7 @@ ERROR: could not find any healthy placement for shard xxxxx -- verify node is not added SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -86,7 +86,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) DROP TABLE replicate_reference_table_unhealthy; @@ -94,7 +94,7 @@ DROP TABLE replicate_reference_table_unhealthy; CREATE TABLE replicate_reference_table_valid(column1 int); SELECT create_reference_table('replicate_reference_table_valid'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -107,7 +107,7 @@ WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) SELECT * @@ -117,14 +117,14 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_valid'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "replicate_reference_table_valid" to the node localhost:xxxxx ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) @@ -137,7 +137,7 @@ WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1370001 | 1 | 0 | localhost | 57638 (1 row) @@ -148,7 +148,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_valid'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -162,7 +162,7 @@ WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1370001 | 1 | 0 | localhost | 57638 (1 row) @@ -173,13 +173,13 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_valid'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -192,7 +192,7 @@ WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1370001 | 1 | 0 | localhost | 57638 (1 row) @@ -203,7 +203,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_valid'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -211,14 +211,14 @@ DROP TABLE replicate_reference_table_valid; -- test replicating a reference table when a new node added in TRANSACTION + ROLLBACK SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE replicate_reference_table_rollback(column1 int); SELECT create_reference_table('replicate_reference_table_rollback'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -231,7 +231,7 @@ WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) SELECT * @@ -241,7 +241,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_rollback'::regclass); colocationid | shardcount | 
replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -249,7 +249,7 @@ BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "replicate_reference_table_rollback" to the node localhost:xxxxx ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -263,7 +263,7 @@ WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) SELECT * @@ -273,7 +273,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_rollback'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -282,7 +282,7 @@ DROP TABLE replicate_reference_table_rollback; CREATE TABLE replicate_reference_table_commit(column1 int); SELECT create_reference_table('replicate_reference_table_commit'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -295,7 +295,7 @@ WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) SELECT * @@ -305,7 +305,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_commit'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -313,7 +313,7 @@ BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "replicate_reference_table_commit" to the node localhost:xxxxx ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) @@ -327,7 +327,7 @@ WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1370003 | 1 | 0 | localhost | 57638 (1 row) @@ -338,7 +338,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_commit'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -346,14 +346,14 @@ DROP TABLE replicate_reference_table_commit; -- test adding new node + upgrading another hash distributed table to reference table + creating new reference table in TRANSACTION SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE replicate_reference_table_reference_one(column1 int); SELECT create_reference_table('replicate_reference_table_reference_one'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -363,7 +363,7 @@ SET citus.replication_model TO 'streaming'; CREATE TABLE replicate_reference_table_hash(column1 int); SELECT create_distributed_table('replicate_reference_table_hash', 'column1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -378,7 +378,7 @@ FROM WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) SELECT * @@ -388,7 +388,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_reference_one'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -401,7 +401,7 @@ WHERE logicalrelid IN ('replicate_reference_table_reference_one', 'replicate_reference_table_hash', 'replicate_reference_table_reference_two') ORDER BY logicalrelid; logicalrelid | partmethod | ?column? | repmodel ------------------------------------------+------------+----------+---------- +--------------------------------------------------------------------- replicate_reference_table_reference_one | n | t | t replicate_reference_table_hash | h | f | c (2 rows) @@ -410,19 +410,19 @@ BEGIN; SET LOCAL client_min_messages TO ERROR; SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) SELECT upgrade_to_reference_table('replicate_reference_table_hash'); upgrade_to_reference_table ----------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('replicate_reference_table_reference_two'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -436,7 +436,7 @@ WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1370004 | 1 | 0 | localhost | 57638 1370005 | 1 | 0 | localhost | 57638 1370006 | 1 | 0 | localhost | 57638 @@ -449,7 +449,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_reference_one'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -462,7 +462,7 @@ WHERE ORDER BY logicalrelid; logicalrelid | partmethod | ?column? | repmodel ------------------------------------------+------------+----------+---------- +--------------------------------------------------------------------- replicate_reference_table_reference_one | n | t | t replicate_reference_table_hash | n | t | t replicate_reference_table_reference_two | n | t | t @@ -474,14 +474,14 @@ DROP TABLE replicate_reference_table_reference_two; -- test inserting a value then adding a new node in a transaction SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE replicate_reference_table_insert(column1 int); SELECT create_reference_table('replicate_reference_table_insert'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -495,7 +495,7 @@ DROP TABLE replicate_reference_table_insert; CREATE TABLE replicate_reference_table_copy(column1 int); SELECT create_reference_table('replicate_reference_table_copy'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -509,7 +509,7 @@ DROP TABLE replicate_reference_table_copy; CREATE TABLE replicate_reference_table_ddl(column1 int); SELECT create_reference_table('replicate_reference_table_ddl'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -523,7 +523,7 @@ DROP TABLE replicate_reference_table_ddl; CREATE TABLE replicate_reference_table_drop(column1 int); SELECT create_reference_table('replicate_reference_table_drop'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -536,7 +536,7 @@ WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) SELECT * @@ -546,7 +546,7 @@ WHERE colocationid 
IN FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_drop'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -554,7 +554,7 @@ BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "replicate_reference_table_drop" to the node localhost:xxxxx ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -569,18 +569,18 @@ WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM pg_dist_colocation WHERE colocationid = 1370009; colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- (0 rows) -- test adding a node while there is a reference table at another schema SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) @@ -588,7 +588,7 @@ CREATE SCHEMA replicate_reference_table_schema; CREATE TABLE replicate_reference_table_schema.table1(column1 int); SELECT create_reference_table('replicate_reference_table_schema.table1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -601,7 +601,7 @@ WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) SELECT * @@ -611,14 +611,14 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_schema.table1'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "table1" to the node localhost:xxxxx ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) @@ -631,7 +631,7 @@ WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1370011 | 1 | 0 | localhost | 57638 (1 row) @@ -642,7 +642,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_schema.table1'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -651,7 +651,7 @@ DROP SCHEMA replicate_reference_table_schema CASCADE; -- test adding a node when there are foreign keys between reference tables SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) @@ -662,7 +662,7 @@ SELECT create_reference_table('ref_table_1'), create_reference_table('ref_table_2'), create_reference_table('ref_table_3'); create_reference_table | create_reference_table | create_reference_table -------------------------+------------------------+------------------------ +--------------------------------------------------------------------- | | (1 row) @@ -675,7 +675,7 @@ WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+----------+---------- +--------------------------------------------------------------------- (0 rows) SELECT 1 FROM master_add_node('localhost', :worker_2_port); @@ -683,7 +683,7 @@ NOTICE: Replicating reference table "ref_table_1" to the node localhost:xxxxx NOTICE: Replicating reference table "ref_table_2" to the node localhost:xxxxx NOTICE: Replicating reference table "ref_table_3" to the node localhost:xxxxx ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) @@ -696,7 +696,7 @@ WHERE nodeport = :worker_2_port ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1370012 | 1 | 0 | localhost | 57638 1370013 | 1 | 0 | localhost | 57638 1370014 | 1 | 0 | localhost | 57638 @@ -705,7 +705,7 @@ ORDER BY shardid, nodeport; -- verify constraints have been created on the new node SELECT run_command_on_workers('select count(*) from pg_constraint where contype=''f'' AND conname like ''ref_table%'';'); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,2) (localhost,57638,t,2) (2 rows) @@ -714,20 +714,20 @@ DROP TABLE ref_table_1, ref_table_2, ref_table_3; -- do some tests with inactive node SELECT master_remove_node('localhost', :worker_2_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE initially_not_replicated_reference_table (key int); SELECT create_reference_table('initially_not_replicated_reference_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -746,7 +746,7 @@ WHERE AND nodeport != :master_port ORDER BY 1,4,5; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1370015 | 1 | 0 | localhost | 57637 (1 row) @@ -754,7 +754,7 @@ ORDER BY 1,4,5; SELECT 1 FROM master_activate_node('localhost', :worker_2_port); NOTICE: Replicating reference table "initially_not_replicated_reference_table" to the node localhost:xxxxx ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -772,7 +772,7 @@ WHERE AND nodeport != :master_port ORDER BY 1,4,5; shardid | shardstate | shardlength | nodename | nodeport ----------+------------+-------------+-----------+---------- +--------------------------------------------------------------------- 1370015 | 1 | 0 | localhost | 57637 1370015 | 1 | 0 | localhost | 57638 (2 rows) @@ -780,7 +780,7 @@ ORDER BY 1,4,5; -- this should have no effect SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) diff --git a/src/test/regress/expected/multi_router_planner.out b/src/test/regress/expected/multi_router_planner.out index f98cda2c9..5e7c46584 100644 --- a/src/test/regress/expected/multi_router_planner.out +++ b/src/test/regress/expected/multi_router_planner.out @@ -34,38 +34,38 @@ CREATE TABLE authors_reference ( name varchar(20), id bigint ); CREATE TABLE articles_single_shard_hash (LIKE articles_hash); SELECT master_create_distributed_table('articles_hash', 'author_id', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_distributed_table('articles_single_shard_hash', 'author_id', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) -- test when a table is distributed but no shards created yet SELECT count(*) from articles_hash; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT master_create_worker_shards('articles_hash', 2, 1); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('articles_single_shard_hash', 1, 1); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('authors_reference'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -134,7 +134,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 id | author_id | title | word_count -----+-----------+-----------+------------ +--------------------------------------------------------------------- 50 | 10 | anjanette | 19519 (1 row) @@ -144,7 +144,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 title ------------- +--------------------------------------------------------------------- aggrandize absentness andelee @@ -160,7 +160,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 title | word_count -------------+------------ +--------------------------------------------------------------------- anjanette | 19519 aggrandize | 17277 attemper | 14976 @@ -177,7 +177,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 title | id ----------+---- +--------------------------------------------------------------------- aruru | 5 adversa | 15 (2 rows) @@ -190,7 +190,7 @@ SELECT title, author_id FROM articles_hash DEBUG: Creating router plan DEBUG: Plan is router executable title | author_id --------------+----------- +--------------------------------------------------------------------- aseptic | 7 auriga | 7 arsenous | 7 @@ -209,7 +209,7 @@ SELECT title, author_id FROM articles_hash DEBUG: Creating router plan DEBUG: Plan is router executable title | author_id --------------+----------- +--------------------------------------------------------------------- aseptic | 7 agatized | 8 auriga | 7 @@ -232,7 +232,7 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash DEBUG: Creating router plan DEBUG: Plan is router executable author_id | corpus_size 
------------+------------- +--------------------------------------------------------------------- 10 | 59955 8 | 55410 7 | 36756 @@ -249,7 +249,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 author_id | corpus_size ------------+------------- +--------------------------------------------------------------------- 1 | 35894 (1 row) @@ -258,7 +258,7 @@ DETAIL: distribution column value: 1 SELECT * FROM articles_hash WHERE author_id <= 1 ORDER BY id; DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -270,7 +270,7 @@ SELECT * FROM articles_hash WHERE author_id IN (1, 3) ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 3 | 3 | asternal | 10480 11 | 1 | alamo | 1347 @@ -288,7 +288,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -303,7 +303,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id ----- +--------------------------------------------------------------------- 1 11 21 @@ -318,7 +318,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 title --------------- +--------------------------------------------------------------------- arsenous alamo arcading @@ -334,7 +334,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | id | title -----+-----------+----+-------------- +--------------------------------------------------------------------- 1 | 1 | 1 | arsenous 11 | 1 | 11 | alamo 21 | 1 | 21 | arcading @@ -348,7 +348,7 @@ SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | id | title -----+-----------+----+------- +--------------------------------------------------------------------- (0 rows) -- CTE joins are supported because they are both planned recursively @@ -368,20 +368,20 @@ DEBUG: Plan 68 query after replacing subqueries and CTEs: SELECT id_author.id, DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | id | title -----+-----------+----+------- +--------------------------------------------------------------------- (0 rows) -- recursive CTEs are supported when filtered on partition column CREATE TABLE company_employees (company_id int, employee_id int, manager_id int); SELECT master_create_distributed_table('company_employees', 'company_id', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('company_employees', 4, 1); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -433,7 +433,7 @@ DEBUG: Creating router plan 
DEBUG: Plan is router executable DETAIL: distribution column value: 1 company_id | employee_id | manager_id | level -------------+-------------+------------+------- +--------------------------------------------------------------------- 1 | 1 | 0 | 1 1 | 2 | 1 | 2 1 | 3 | 1 | 2 @@ -482,7 +482,7 @@ DEBUG: Plan 82 query after replacing subqueries and CTEs: SELECT id, author_id, DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9 (1 row) @@ -498,7 +498,7 @@ DEBUG: Plan 84 query after replacing subqueries and CTEs: SELECT id, author_id, DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 10 (1 row) @@ -514,7 +514,7 @@ DEBUG: Plan 86 query after replacing subqueries and CTEs: SELECT id, author_id, DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 10 (1 row) @@ -549,7 +549,7 @@ SELECT DEBUG: Creating router plan DEBUG: Plan is router executable id | subtitle | count -----+----------+------- +--------------------------------------------------------------------- 1 | | 1 3 | | 1 11 | | 1 @@ -586,7 +586,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count | position -----+-----------+--------------+------------+---------- +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 | 3 11 | 1 | alamo | 1347 | 3 21 | 1 | arcading | 5890 | 3 @@ -598,7 +598,7 @@ SELECT * FROM articles_hash, position('om' in 'Thomas') WHERE author_id = 1 or a DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count | position -----+-----------+--------------+------------+---------- +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 | 3 3 | 3 | asternal | 10480 | 3 11 | 1 | alamo | 1347 | 3 @@ -616,7 +616,7 @@ SELECT * FROM articles_hash, position('om' in 'Thomas') WHERE author_id = 1 or a DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 5 id | author_id | title | word_count | position -----+-----------+------------+------------+---------- +--------------------------------------------------------------------- 12 | 2 | archiblast | 18185 | 3 42 | 2 | ausable | 15885 | 3 2 | 2 | abducing | 13642 | 3 @@ -632,7 +632,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 id | author_id | title | word_count -----+-----------+------------+------------ +--------------------------------------------------------------------- 2 | 2 | abducing | 13642 12 | 2 | archiblast | 18185 22 | 2 | antipope | 2728 @@ -651,7 +651,7 @@ DEBUG: Plan 94 query after replacing subqueries and CTEs: SELECT articles_hash. 
DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 5 id | word_count -----+------------ +--------------------------------------------------------------------- 50 | 19519 14 | 19094 48 | 18610 @@ -671,7 +671,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | word_count -----+------------ +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -694,7 +694,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -709,7 +709,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -725,7 +725,7 @@ SELECT * ORDER BY 4 DESC, 3 DESC, 2 DESC, 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 41 | 1 | aznavour | 11814 1 | 1 | arsenous | 9572 31 | 1 | athwartships | 7271 @@ -741,7 +741,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 article_id | random_value -------------+-------------- +--------------------------------------------------------------------- 1 | 9572 11 | 14817 21 | 123690 @@ -758,7 +758,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 first_author | second_word_count ---------------+------------------- +--------------------------------------------------------------------- 10 | 17277 10 | 1820 10 | 6363 @@ -774,7 +774,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 first_author | second_word_count ---------------+------------------- +--------------------------------------------------------------------- 10 | 19519 10 | 19519 10 | 19519 @@ -798,7 +798,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 first_author | second_word_count ---------------+------------------- +--------------------------------------------------------------------- (0 rows) -- single shard select with limit is router plannable @@ -810,7 +810,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -826,7 +826,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 (2 rows) @@ -842,7 +842,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ 
+--------------------------------------------------------------------- 31 | 1 | athwartships | 7271 21 | 1 | arcading | 5890 (2 rows) @@ -857,7 +857,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id ----- +--------------------------------------------------------------------- 1 11 21 @@ -874,7 +874,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id ----- +--------------------------------------------------------------------- 1 11 21 @@ -890,7 +890,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 avg --------------------- +--------------------------------------------------------------------- 12356.400000000000 (1 row) @@ -903,7 +903,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 max | min | sum | cnt --------+------+-------+----- +--------------------------------------------------------------------- 18185 | 2728 | 61782 | 5 (1 row) @@ -916,7 +916,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 max -------- +--------------------------------------------------------------------- 11814 (1 row) @@ -930,7 +930,7 @@ ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 3 | 3 | asternal | 10480 11 | 1 | alamo | 1347 @@ -949,7 +949,7 @@ UNION DEBUG: Creating router plan DEBUG: Plan is router executable left ------- +--------------------------------------------------------------------- a (1 row) @@ -959,7 +959,7 @@ INTERSECT DEBUG: Creating router plan DEBUG: Plan is router executable left ------- +--------------------------------------------------------------------- a (1 row) @@ -972,7 +972,7 @@ ORDER BY 1; DEBUG: Creating router plan DEBUG: Plan is router executable left ------- +--------------------------------------------------------------------- al ar at @@ -990,7 +990,7 @@ UNION (SELECT * FROM articles_hash WHERE author_id = 2) ORDER BY 1,2,3; id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 2 | 2 | abducing | 13642 3 | 3 | asternal | 10480 @@ -1016,7 +1016,7 @@ SELECT * FROM ( ORDER BY 1, 2 LIMIT 5; id | author_id | title | word_count -----+-----------+------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 2 | 2 | abducing | 13642 11 | 1 | alamo | 1347 @@ -1037,7 +1037,7 @@ SELECT * WHERE a.id = b.id AND a.author_id = 1 ORDER BY 1 DESC; id | author_id | title | word_count | id | author_id | title | word_count -----+-----------+--------------+------------+----+-----------+--------------+------------ +--------------------------------------------------------------------- 41 | 1 | aznavour | 11814 | 41 | 1 | aznavour | 11814 31 | 1 | athwartships | 7271 | 31 | 1 | athwartships | 7271 21 | 1 | arcading | 5890 | 21 | 1 | arcading | 5890 @@ -1053,7 +1053,7 @@ SELECT * WHERE author_id >= 1 AND author_id <= 3 ORDER BY 1,2,3,4; id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 2 | 2 | abducing | 13642 3 | 3 | asternal | 10480 
@@ -1083,7 +1083,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1097,7 +1097,7 @@ SELECT * WHERE author_id = 1 or id = 1; DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1113,7 +1113,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 41 | 1 | aznavour | 11814 (2 rows) @@ -1126,7 +1126,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) -- not router plannable due to function call on the right side @@ -1135,7 +1135,7 @@ SELECT * WHERE author_id = (random()::int * 0 + 1); DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1149,7 +1149,7 @@ SELECT * WHERE author_id = 1 or id = 1; DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1165,7 +1165,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1179,7 +1179,7 @@ SELECT * WHERE 1 = abs(author_id); DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1193,7 +1193,7 @@ SELECT * WHERE author_id = abs(author_id - 2); DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1209,7 +1209,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 (1 row) @@ -1219,7 +1219,7 @@ SELECT * WHERE (author_id = 1) is true; DEBUG: Router planner cannot handle multi-shard select queries id | author_id 
| title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1235,7 +1235,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1251,7 +1251,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 (2 rows) @@ -1264,7 +1264,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 31 | 1 | athwartships | 7271 (2 rows) @@ -1277,7 +1277,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 31 | 1 | athwartships | 7271 (2 rows) @@ -1290,7 +1290,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1306,7 +1306,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 41 | 1 | aznavour | 11814 @@ -1320,7 +1320,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 prev | title | word_count -----------+----------+------------ +--------------------------------------------------------------------- | afrasia | 864 afrasia | adversa | 3164 adversa | antehall | 7707 @@ -1336,7 +1336,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 prev | title | word_count -----------+----------+------------ +--------------------------------------------------------------------- aminate | aruru | 11389 antehall | aminate | 9089 adversa | antehall | 7707 @@ -1351,7 +1351,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | min -----+----- +--------------------------------------------------------------------- 11 | 11 21 | 11 31 | 11 @@ -1366,7 +1366,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | word_count | avg -----+------------+----------------------- +--------------------------------------------------------------------- 11 | 1347 | 1347.0000000000000000 21 | 5890 | 3618.5000000000000000 31 | 7271 | 4836.0000000000000000 @@ -1381,7 +1381,7 @@ DEBUG: Creating router plan DEBUG: Plan is router 
executable DETAIL: distribution column value: 1 word_count | rank -------------+------ +--------------------------------------------------------------------- 1347 | 1 5890 | 2 7271 | 3 @@ -1409,7 +1409,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) SELECT * @@ -1418,7 +1418,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) SELECT * @@ -1427,7 +1427,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) SELECT a.author_id as first_author, b.word_count as second_word_count @@ -1436,7 +1436,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count DEBUG: Creating router plan DEBUG: Plan is router executable first_author | second_word_count ---------------+------------------- +--------------------------------------------------------------------- (0 rows) SELECT * @@ -1445,7 +1445,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) -- where false with immutable function returning false @@ -1455,7 +1455,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) SELECT * @@ -1464,7 +1464,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) SELECT a.author_id as first_author, b.word_count as second_word_count @@ -1474,7 +1474,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 first_author | second_word_count ---------------+------------------- +--------------------------------------------------------------------- 10 | 19519 10 | 19519 10 | 19519 @@ -1488,7 +1488,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count DEBUG: Creating router plan DEBUG: Plan is router executable first_author | second_word_count ---------------+------------------- +--------------------------------------------------------------------- (0 rows) -- partition_column is null clause does not prune out any shards, @@ -1498,7 +1498,7 @@ SELECT * WHERE a.author_id is null; DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) -- partition_column equals to null clause prunes out all shards @@ -1509,7 +1509,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) -- stable function returning bool @@ -1519,7 +1519,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan 
is router executable id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) SELECT a.author_id as first_author, b.word_count as second_word_count @@ -1529,7 +1529,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count DEBUG: Creating router plan DEBUG: Plan is router executable first_author | second_word_count ---------------+------------------- +--------------------------------------------------------------------- (0 rows) -- union/difference /intersection with where false @@ -1545,7 +1545,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1563,7 +1563,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1578,7 +1578,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) -- CTEs with where false @@ -1590,7 +1590,7 @@ SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | id | title -----+-----------+----+------- +--------------------------------------------------------------------- (0 rows) WITH id_author AS ( SELECT id, author_id FROM articles_hash WHERE author_id = 1), @@ -1599,7 +1599,7 @@ SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id and 1=0; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | id | title -----+-----------+----+------- +--------------------------------------------------------------------- (0 rows) \set VERBOSITY DEFAULT @@ -1618,7 +1618,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 company_id | employee_id | manager_id | level -------------+-------------+------------+------- +--------------------------------------------------------------------- (0 rows) WITH RECURSIVE hierarchy as ( @@ -1636,7 +1636,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 company_id | employee_id | manager_id | level -------------+-------------+------------+------- +--------------------------------------------------------------------- 1 | 1 | 0 | 1 (1 row) @@ -1655,7 +1655,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 company_id | employee_id | manager_id | level -------------+-------------+------------+------- +--------------------------------------------------------------------- (0 rows) -- window functions with where false @@ -1665,7 +1665,7 @@ SELECT word_count, rank() OVER (PARTITION BY author_id ORDER BY word_count) DEBUG: Creating router plan DEBUG: Plan is router executable word_count | rank -------------+------ +--------------------------------------------------------------------- (0 rows) -- function calls in WHERE 
clause with non-relational arguments @@ -1678,7 +1678,7 @@ SELECT author_id FROM articles_hash DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 1 author_id ------------ +--------------------------------------------------------------------- 1 (1 row) @@ -1692,7 +1692,7 @@ SELECT author_id FROM articles_hash DEBUG: Creating router plan DEBUG: Plan is router executable author_id ------------ +--------------------------------------------------------------------- (0 rows) -- verify range partitioned tables can be used in router plannable queries @@ -1701,13 +1701,13 @@ DEBUG: Plan is router executable SET citus.shard_replication_factor TO 1; SELECT master_create_distributed_table('authors_range', 'id', 'range'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_distributed_table('articles_range', 'author_id', 'range'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) @@ -1732,14 +1732,14 @@ SELECT * FROM articles_range where author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM articles_range where author_id = 1 or author_id = 5; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) -- zero shard select query is router plannable @@ -1747,7 +1747,7 @@ SELECT * FROM articles_range where author_id = 1 and author_id = 2; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) -- single shard joins on range partitioned table are router plannable @@ -1756,7 +1756,7 @@ SELECT * FROM articles_range ar join authors_range au on (ar.author_id = au.id) DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count | name | id -----+-----------+-------+------------+------+---- +--------------------------------------------------------------------- (0 rows) -- zero shard join is router plannable @@ -1765,7 +1765,7 @@ SELECT * FROM articles_range ar join authors_range au on (ar.author_id = au.id) DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count | name | id -----+-----------+-------+------------+------+---- +--------------------------------------------------------------------- (0 rows) -- This query was intended to test "multi-shard join is not router plannable" @@ -1803,7 +1803,7 @@ DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 20 id | author_id | title | word_count | name | id -----+-----------+-------+------------+------+---- +--------------------------------------------------------------------- (0 rows) -- This query was intended to test "this is a bug, it is a single shard join @@ -1841,7 +1841,7 @@ DETAIL: Creating dependency on merge taskId 20 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 20 id | author_id | title | word_count | name | id 
-----+-----------+-------+------------+------+---- +--------------------------------------------------------------------- (0 rows) RESET citus.task_executor_type; @@ -1851,7 +1851,7 @@ SELECT * FROM articles_range ar join authors_range au on (ar.id = au.id) DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count | name | id -----+-----------+-------+------------+------+---- +--------------------------------------------------------------------- (0 rows) -- join between hash and range partition tables are router plannable @@ -1864,7 +1864,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 id | author_id | title | word_count | name | id -----+-----------+-------+------------+------+---- +--------------------------------------------------------------------- (0 rows) -- not router plannable @@ -1904,7 +1904,7 @@ SELECT * FROM articles_range ar join authors_reference au on (ar.author_id = au. DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count | name | id -----+-----------+-------+------------+------+---- +--------------------------------------------------------------------- (0 rows) -- still hits a single shard and router plannable @@ -1913,7 +1913,7 @@ SELECT * FROM articles_range ar join authors_reference au on (ar.author_id = au. DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count | name | id -----+-----------+-------+------------+------+---- +--------------------------------------------------------------------- (0 rows) -- it is not router plannable if hit multiple shards @@ -1921,7 +1921,7 @@ SELECT * FROM articles_range ar join authors_reference au on (ar.author_id = au. 
WHERE ar.author_id = 1 or ar.author_id = 15; DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count | name | id -----+-----------+-------+------------+------+---- +--------------------------------------------------------------------- (0 rows) -- following is a bug, function should have been @@ -1929,7 +1929,7 @@ DEBUG: Router planner cannot handle multi-shard select queries -- need to use a append distributed table here SELECT master_create_distributed_table('articles_append', 'author_id', 'append'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) @@ -2008,7 +2008,7 @@ SELECT * FROM articles_hash author_id, id LIMIT 5; id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -2049,7 +2049,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 c ---- +--------------------------------------------------------------------- 5 (1 row) @@ -2070,7 +2070,7 @@ SELECT ORDER BY c; DEBUG: Router planner cannot handle multi-shard select queries c ---- +--------------------------------------------------------------------- 4 5 5 @@ -2093,7 +2093,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -2113,7 +2113,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -2134,13 +2134,13 @@ DEBUG: Plan is router executable DETAIL: distribution column value: 1 FETCH test_cursor; id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 (1 row) FETCH ALL test_cursor; id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 @@ -2149,12 +2149,12 @@ FETCH ALL test_cursor; FETCH test_cursor; -- fetch one row after the last id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) FETCH BACKWARD test_cursor; id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 41 | 1 | aznavour | 11814 (1 row) @@ -2190,7 +2190,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 count | count --------+------- +--------------------------------------------------------------------- 5 | 1 (1 row) @@ -2200,7 +2200,7 @@ SELECT count(*), count(*) FILTER (WHERE id < 3) WHERE author_id = 1 or author_id = 2; DEBUG: Router planner cannot handle multi-shard select queries count | count --------+------- 
+--------------------------------------------------------------------- 10 | 2 (1 row) @@ -2214,7 +2214,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -2232,7 +2232,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -2262,7 +2262,7 @@ CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash ah WHERE author_id = 1" PL/pgSQL function author_articles_max_id() line 5 at SQL statement author_articles_max_id ------------------------- +--------------------------------------------------------------------- 41 (1 row) @@ -2290,7 +2290,7 @@ CONTEXT: SQL statement "SELECT ah.id, ah.word_count WHERE author_id = 1" PL/pgSQL function author_articles_id_word_count() line 4 at RETURN QUERY id | word_count -----+------------ +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -2306,7 +2306,7 @@ DEBUG: Plan is router executable DETAIL: distribution column value: 1 SELECT * FROM mv_articles_hash_empty; id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -2319,7 +2319,7 @@ CREATE MATERIALIZED VIEW mv_articles_hash_data AS DEBUG: Router planner cannot handle multi-shard select queries SELECT * FROM mv_articles_hash_data ORDER BY 1, 2, 3, 4; id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 2 | 2 | abducing | 13642 11 | 1 | alamo | 1347 @@ -2341,7 +2341,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id ----- +--------------------------------------------------------------------- 1 11 21 @@ -2361,7 +2361,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id ----- +--------------------------------------------------------------------- 1 11 21 @@ -2377,13 +2377,13 @@ SET citus.shard_replication_factor TO 2; CREATE TABLE failure_test (a int, b int); SELECT master_create_distributed_table('failure_test', 'a', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('failure_test', 2); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -2411,7 +2411,7 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement ) ORDER BY placementid; shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- +--------------------------------------------------------------------- 840017 | 1 | localhost | 57637 840017 | 3 | localhost | 57638 840018 | 1 | localhost | 57638 @@ -2429,7 +2429,7 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement 
) ORDER BY placementid; shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- +--------------------------------------------------------------------- 840017 | 1 | localhost | 57637 840017 | 1 | localhost | 57638 840018 | 3 | localhost | 57638 diff --git a/src/test/regress/expected/multi_router_planner_fast_path.out b/src/test/regress/expected/multi_router_planner_fast_path.out index ce9090fc0..9e70f9776 100644 --- a/src/test/regress/expected/multi_router_planner_fast_path.out +++ b/src/test/regress/expected/multi_router_planner_fast_path.out @@ -37,14 +37,14 @@ SET citus.shard_replication_factor TO 1; SET citus.shard_count TO 2; SELECT create_distributed_table('articles_hash', 'author_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE authors_reference ( name varchar(20), id bigint ); SELECT create_reference_table('authors_reference'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -71,7 +71,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 id | author_id | title | word_count -----+-----------+-----------+------------ +--------------------------------------------------------------------- 50 | 10 | anjanette | 19519 (1 row) @@ -82,7 +82,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 title ------------- +--------------------------------------------------------------------- aggrandize absentness andelee @@ -99,7 +99,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 title | word_count -------------+------------ +--------------------------------------------------------------------- anjanette | 19519 aggrandize | 17277 attemper | 14976 @@ -117,7 +117,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 title | id ----------+---- +--------------------------------------------------------------------- aruru | 5 adversa | 15 (2 rows) @@ -131,7 +131,7 @@ SELECT title, author_id FROM articles_hash DEBUG: Creating router plan DEBUG: Plan is router executable title | author_id --------------+----------- +--------------------------------------------------------------------- aseptic | 7 auriga | 7 arsenous | 7 @@ -156,7 +156,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 author_id | corpus_size ------------+------------- +--------------------------------------------------------------------- 1 | 35894 (1 row) @@ -164,7 +164,7 @@ DETAIL: distribution column value: 1 SELECT * FROM articles_hash WHERE author_id <= 1; DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -176,7 +176,7 @@ SELECT * FROM articles_hash WHERE author_id IN (1, 3); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 3 | 3 | asternal | 10480 11 | 1 | alamo | 1347 @@ -196,7 +196,7 @@ DEBUG: Creating router plan DEBUG: Plan is router 
executable DETAIL: distribution column value: 1 id ----- +--------------------------------------------------------------------- 1 11 21 @@ -212,7 +212,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | id | title -----+-----------+----+-------------- +--------------------------------------------------------------------- 1 | 1 | 1 | arsenous 11 | 1 | 11 | alamo 21 | 1 | 21 | arcading @@ -240,13 +240,13 @@ DEBUG: Plan 12 query after replacing subqueries and CTEs: SELECT id_author.id, DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | id | title -----+-----------+----+------- +--------------------------------------------------------------------- (0 rows) CREATE TABLE company_employees (company_id int, employee_id int, manager_id int); SELECT master_create_distributed_table('company_employees', 'company_id', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) @@ -254,7 +254,7 @@ SELECT master_create_distributed_table('company_employees', 'company_id', 'hash' SET client_min_messages TO DEFAULT; SELECT master_create_worker_shards('company_employees', 4, 1); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -308,7 +308,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 company_id | employee_id | manager_id | level -------------+-------------+------------+------- +--------------------------------------------------------------------- 1 | 1 | 0 | 1 1 | 2 | 1 | 2 1 | 3 | 1 | 2 @@ -326,7 +326,7 @@ DEBUG: Plan 24 query after replacing subqueries and CTEs: SELECT id, author_id, DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) WITH delete_article AS ( @@ -341,7 +341,7 @@ DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT id, author_id, DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) -- grouping sets are supported via fast-path @@ -356,7 +356,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | subtitle | count -----+----------+------- +--------------------------------------------------------------------- 1 | | 1 11 | | 1 21 | | 1 @@ -384,7 +384,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count | position -----+-----------+--------------+------------+---------- +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 | 3 11 | 1 | alamo | 1347 | 3 21 | 1 | arcading | 5890 | 3 @@ -400,7 +400,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 id | author_id | title | word_count -----+-----------+------------+------------ +--------------------------------------------------------------------- 2 | 2 | abducing | 13642 12 | 2 | archiblast | 18185 22 | 2 | antipope | 2728 @@ -419,7 +419,7 @@ DEBUG: Plan 32 query after replacing subqueries and CTEs: SELECT articles_hash. 
DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 5 id | word_count -----+------------ +--------------------------------------------------------------------- 50 | 19519 14 | 19094 48 | 18610 @@ -439,7 +439,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | word_count -----+------------ +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -463,7 +463,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -479,7 +479,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -496,7 +496,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 article_id | random_value -------------+-------------- +--------------------------------------------------------------------- 1 | 9572 11 | 14817 21 | 123690 @@ -513,7 +513,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 first_author | second_word_count ---------------+------------------- +--------------------------------------------------------------------- 10 | 17277 10 | 1820 10 | 6363 @@ -529,7 +529,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -546,7 +546,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 (2 rows) @@ -563,7 +563,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 31 | 1 | athwartships | 7271 21 | 1 | arcading | 5890 (2 rows) @@ -579,7 +579,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id ----- +--------------------------------------------------------------------- 1 11 21 @@ -597,7 +597,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id ----- +--------------------------------------------------------------------- 1 11 21 @@ -614,7 +614,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 avg --------------------- +--------------------------------------------------------------------- 12356.400000000000 (1 row) @@ -628,7 +628,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 max | min | sum | cnt --------+------+-------+----- 
+--------------------------------------------------------------------- 18185 | 2728 | 61782 | 5 (1 row) @@ -642,7 +642,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 max -------- +--------------------------------------------------------------------- 11814 (1 row) @@ -656,7 +656,7 @@ ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 3 | 3 | asternal | 10480 11 | 1 | alamo | 1347 @@ -687,7 +687,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -706,7 +706,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -729,7 +729,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 68719476736 id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) -- cannot go through fast-path due to @@ -741,7 +741,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -756,7 +756,7 @@ SELECT * WHERE author_id = 1 or id = 1; DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -775,7 +775,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 41 | 1 | aznavour | 11814 (2 rows) @@ -787,7 +787,7 @@ SELECT * WHERE author_id = 1 and id = 1 or id = 41; DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 41 | 1 | aznavour | 11814 (2 rows) @@ -803,7 +803,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) -- not router plannable due to function call on the right side @@ -812,7 +812,7 @@ SELECT * WHERE author_id = (random()::int * 0 + 1); DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ 
+--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -829,7 +829,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -844,7 +844,7 @@ SELECT * WHERE 1 = abs(author_id); DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -859,7 +859,7 @@ SELECT * WHERE author_id = abs(author_id - 2); DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -877,7 +877,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 (1 row) @@ -887,7 +887,7 @@ SELECT * WHERE (author_id = 1) is true; DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -903,7 +903,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -918,28 +918,28 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 15 count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM articles_hash WHERE (author_id = 15) OR (id = 1 AND word_count > 5); DEBUG: Router planner cannot handle multi-shard select queries count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM articles_hash WHERE (id = 15) OR (author_id = 1 AND word_count > 5); DEBUG: Router planner cannot handle multi-shard select queries count -------- +--------------------------------------------------------------------- 6 (1 row) SELECT count(*) FROM articles_hash WHERE (id = 15) AND (author_id = 1 OR word_count > 5); DEBUG: Router planner cannot handle multi-shard select queries count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -949,14 +949,14 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM articles_hash WHERE (id = 15) AND (title ilike 'a%' AND (word_count > 5 OR author_id = 2)); DEBUG: Router planner cannot handle multi-shard select queries count -------- 
+--------------------------------------------------------------------- 1 (1 row) @@ -966,7 +966,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -976,7 +976,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -989,7 +989,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 (2 rows) @@ -1003,7 +1003,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 31 | 1 | athwartships | 7271 (2 rows) @@ -1017,7 +1017,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 31 | 1 | athwartships | 7271 (2 rows) @@ -1031,7 +1031,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1048,7 +1048,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 41 | 1 | aznavour | 11814 @@ -1063,7 +1063,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 prev | title | word_count -----------+----------+------------ +--------------------------------------------------------------------- | afrasia | 864 afrasia | adversa | 3164 adversa | antehall | 7707 @@ -1080,7 +1080,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 prev | title | word_count -----------+----------+------------ +--------------------------------------------------------------------- aminate | aruru | 11389 antehall | aminate | 9089 adversa | antehall | 7707 @@ -1096,7 +1096,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | min -----+----- +--------------------------------------------------------------------- 11 | 11 21 | 11 31 | 11 @@ -1112,7 +1112,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | word_count | avg -----+------------+----------------------- +--------------------------------------------------------------------- 11 | 1347 | 1347.0000000000000000 21 | 5890 | 3618.5000000000000000 31 | 7271 | 4836.0000000000000000 @@ -1128,7 +1128,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 
word_count | rank -------------+------ +--------------------------------------------------------------------- 1347 | 1 5890 | 2 7271 | 3 @@ -1153,7 +1153,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 author_id | id | t1 | cnt_with_filter | cnt_with_filter_2 | case_cnt | coalesce ------------+----+------------------------------+-----------------+-------------------+------------------------+---------- +--------------------------------------------------------------------- 1 | 1 | 83.20028854345579490574 | 0 | 1 | | 0 1 | 11 | 629.20816629547141796586 | 1 | 1 | 44.0000000000000000 | 1 1 | 21 | 915.20501693381380745499 | 0 | 1 | 0.00000000000000000000 | 0 @@ -1168,7 +1168,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) -- fast-path with false @@ -1179,7 +1179,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) -- fast-path with false @@ -1191,7 +1191,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) SELECT * @@ -1201,7 +1201,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) -- we cannot qualify dist_key = X operator Y via @@ -1213,7 +1213,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 id | author_id | title | word_count -----+-----------+------------+------------ +--------------------------------------------------------------------- 2 | 2 | abducing | 13642 12 | 2 | archiblast | 18185 22 | 2 | antipope | 2728 @@ -1231,7 +1231,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) -- partition_column is null clause does not prune out any shards, @@ -1242,7 +1242,7 @@ SELECT * WHERE a.author_id is null; DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) -- partition_column equals to null clause prunes out all shards @@ -1254,7 +1254,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) -- union/difference /intersection with where false @@ -1270,7 +1270,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 
9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1288,7 +1288,7 @@ ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) -- window functions with where false @@ -1300,7 +1300,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 word_count | rank -------------+------ +--------------------------------------------------------------------- (0 rows) -- create a dummy function to be used in filtering @@ -1363,7 +1363,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 c ---- +--------------------------------------------------------------------- 5 (1 row) @@ -1378,7 +1378,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1399,7 +1399,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1422,13 +1422,13 @@ DEBUG: Plan is router executable DETAIL: distribution column value: 1 FETCH test_cursor; id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 (1 row) FETCH ALL test_cursor; id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 @@ -1437,12 +1437,12 @@ FETCH ALL test_cursor; FETCH test_cursor; -- fetch one row after the last id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) FETCH BACKWARD test_cursor; id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 41 | 1 | aznavour | 11814 (1 row) @@ -1481,7 +1481,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 count | count --------+------- +--------------------------------------------------------------------- 5 | 1 (1 row) @@ -1496,7 +1496,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1506,7 +1506,7 @@ DETAIL: distribution column value: 1 EXECUTE author_1_articles; id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1516,7 +1516,7 @@ EXECUTE author_1_articles; EXECUTE author_1_articles; id | author_id | title | word_count 
-----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1526,7 +1526,7 @@ EXECUTE author_1_articles; EXECUTE author_1_articles; id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1536,7 +1536,7 @@ EXECUTE author_1_articles; EXECUTE author_1_articles; id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1546,7 +1546,7 @@ EXECUTE author_1_articles; EXECUTE author_1_articles; id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1565,7 +1565,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1579,7 +1579,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1593,7 +1593,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1607,7 +1607,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1621,7 +1621,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1636,7 +1636,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -1664,37 +1664,37 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable author_articles_max_id ------------------------- +--------------------------------------------------------------------- 41 (1 row) SELECT author_articles_max_id(); author_articles_max_id ------------------------- 
+--------------------------------------------------------------------- 41 (1 row) SELECT author_articles_max_id(); author_articles_max_id ------------------------- +--------------------------------------------------------------------- 41 (1 row) SELECT author_articles_max_id(); author_articles_max_id ------------------------- +--------------------------------------------------------------------- 41 (1 row) SELECT author_articles_max_id(); author_articles_max_id ------------------------- +--------------------------------------------------------------------- 41 (1 row) SELECT author_articles_max_id(); author_articles_max_id ------------------------- +--------------------------------------------------------------------- 41 (1 row) @@ -1714,7 +1714,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable author_articles_max_id ------------------------- +--------------------------------------------------------------------- 41 (1 row) @@ -1723,7 +1723,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable author_articles_max_id ------------------------- +--------------------------------------------------------------------- 41 (1 row) @@ -1732,7 +1732,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable author_articles_max_id ------------------------- +--------------------------------------------------------------------- 41 (1 row) @@ -1741,7 +1741,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable author_articles_max_id ------------------------- +--------------------------------------------------------------------- 41 (1 row) @@ -1750,7 +1750,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable author_articles_max_id ------------------------- +--------------------------------------------------------------------- 41 (1 row) @@ -1760,7 +1760,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable author_articles_max_id ------------------------- +--------------------------------------------------------------------- 41 (1 row) @@ -1780,7 +1780,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable id | word_count -----+------------ +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1790,7 +1790,7 @@ DEBUG: Plan is router executable SELECT * FROM author_articles_id_word_count(); id | word_count -----+------------ +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1800,7 +1800,7 @@ SELECT * FROM author_articles_id_word_count(); SELECT * FROM author_articles_id_word_count(); id | word_count -----+------------ +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1810,7 +1810,7 @@ SELECT * FROM author_articles_id_word_count(); SELECT * FROM author_articles_id_word_count(); id | word_count -----+------------ +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1820,7 +1820,7 @@ SELECT * FROM author_articles_id_word_count(); SELECT * FROM author_articles_id_word_count(); id | word_count -----+------------ 
+--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1830,7 +1830,7 @@ SELECT * FROM author_articles_id_word_count(); SELECT * FROM author_articles_id_word_count(); id | word_count -----+------------ +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1854,7 +1854,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable id | word_count -----+------------ +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1867,7 +1867,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable id | word_count -----+------------ +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1880,7 +1880,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable id | word_count -----+------------ +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1893,7 +1893,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable id | word_count -----+------------ +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1906,7 +1906,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable id | word_count -----+------------ +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1920,7 +1920,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable id | word_count -----+------------ +--------------------------------------------------------------------- 1 | 9572 11 | 1347 21 | 5890 @@ -1990,7 +1990,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -2000,7 +2000,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -2010,7 +2010,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 3 count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -2020,7 +2020,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -2030,7 +2030,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -2041,7 +2041,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 6 count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -2053,7 +2053,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ 
+--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -2070,7 +2070,7 @@ DEBUG: Plan is router executable DETAIL: distribution column value: 1 SELECT * FROM mv_articles_hash_empty; id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -2088,7 +2088,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id ----- +--------------------------------------------------------------------- 1 11 21 @@ -2109,7 +2109,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id ----- +--------------------------------------------------------------------- 1 11 21 @@ -2137,7 +2137,7 @@ CREATE TABLE collections_list_2 SET citus.shard_count TO 2; SELECT create_distributed_table('collections_list', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -2149,7 +2149,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 count -------- +--------------------------------------------------------------------- 5 (1 row) @@ -2159,7 +2159,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 count -------- +--------------------------------------------------------------------- 5 (1 row) @@ -2169,7 +2169,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -2184,7 +2184,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 count -------- +--------------------------------------------------------------------- 5 (1 row) @@ -2194,7 +2194,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 count -------- +--------------------------------------------------------------------- 5 (1 row) @@ -2204,7 +2204,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 count -------- +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/multi_schema_support.out b/src/test/regress/expected/multi_schema_support.out index 492e5855c..5b9632ab1 100644 --- a/src/test/regress/expected/multi_schema_support.out +++ b/src/test/regress/expected/multi_schema_support.out @@ -21,27 +21,27 @@ CREATE TABLE test_schema_support.nation_append( ); SELECT master_create_distributed_table('test_schema_support.nation_append', 'n_nationkey', 'append'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_empty_shard('test_schema_support.nation_append'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 1190000 (1 row) -- append table to shard SELECT master_append_table_to_shard(1190000, 'public.nation_local', 'localhost', :master_port); master_append_table_to_shard ------------------------------- +--------------------------------------------------------------------- 0.00533333 (1 row) -- 
verify table actually appended to shard SELECT COUNT(*) FROM test_schema_support.nation_append; count -------- +--------------------------------------------------------------------- 6 (1 row) @@ -53,26 +53,26 @@ CREATE TABLE test_schema_support."nation._'append" ( n_comment varchar(152)); SELECT master_create_distributed_table('test_schema_support."nation._''append"', 'n_nationkey', 'append'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_empty_shard('test_schema_support."nation._''append"'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 1190001 (1 row) SELECT master_append_table_to_shard(1190001, 'nation_local', 'localhost', :master_port); master_append_table_to_shard ------------------------------- +--------------------------------------------------------------------- 0.00533333 (1 row) -- verify table actually appended to shard SELECT COUNT(*) FROM test_schema_support."nation._'append"; count -------- +--------------------------------------------------------------------- 6 (1 row) @@ -80,28 +80,28 @@ SELECT COUNT(*) FROM test_schema_support."nation._'append"; SET search_path TO test_schema_support; SELECT master_append_table_to_shard(1190000, 'public.nation_local', 'localhost', :master_port); master_append_table_to_shard ------------------------------- +--------------------------------------------------------------------- 0.00533333 (1 row) -- verify table actually appended to shard SELECT COUNT(*) FROM nation_append; count -------- +--------------------------------------------------------------------- 12 (1 row) -- test with search_path is set and shard name contains special characters SELECT master_append_table_to_shard(1190001, 'nation_local', 'localhost', :master_port); master_append_table_to_shard ------------------------------- +--------------------------------------------------------------------- 0.00533333 (1 row) -- verify table actually appended to shard SELECT COUNT(*) FROM "nation._'append"; count -------- +--------------------------------------------------------------------- 12 (1 row) @@ -117,7 +117,7 @@ CREATE TABLE nation_append_search_path( ); SELECT master_create_distributed_table('nation_append_search_path', 'n_nationkey', 'append'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) @@ -131,13 +131,13 @@ CREATE TABLE test_schema_support.nation_hash( ); SELECT master_create_distributed_table('test_schema_support.nation_hash', 'n_nationkey', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('test_schema_support.nation_hash', 4, 2); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -150,19 +150,19 @@ DECLARE test_cursor CURSOR FOR WHERE n_nationkey = 1; FETCH test_cursor; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. 
bold requests alon (1 row) FETCH test_cursor; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) FETCH BACKWARD test_cursor; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) @@ -176,19 +176,19 @@ DECLARE test_cursor CURSOR FOR WHERE n_nationkey = 1; FETCH test_cursor; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) FETCH test_cursor; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) FETCH BACKWARD test_cursor; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. 
bold requests alon (1 row) @@ -199,7 +199,7 @@ INSERT INTO test_schema_support.nation_hash(n_nationkey, n_name, n_regionkey) VA -- verify insertion SELECT * FROM test_schema_support.nation_hash WHERE n_nationkey = 6; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+----------- +--------------------------------------------------------------------- 6 | FRANCE | 3 | (1 row) @@ -209,7 +209,7 @@ INSERT INTO nation_hash(n_nationkey, n_name, n_regionkey) VALUES (7, 'GERMANY', -- verify insertion SELECT * FROM nation_hash WHERE n_nationkey = 7; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+----------- +--------------------------------------------------------------------- 7 | GERMANY | 3 | (1 row) @@ -255,7 +255,7 @@ LANGUAGE 'plpgsql' IMMUTABLE; -- UDF in public, table in a schema other than public, search_path is not set SELECT dummyFunction(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; dummyfunction ---------------- +--------------------------------------------------------------------- 1 10 11 @@ -270,7 +270,7 @@ SELECT dummyFunction(n_nationkey) FROM test_schema_support.nation_hash GROUP BY SET search_path TO test_schema_support; SELECT public.dummyFunction(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; dummyfunction ---------------- +--------------------------------------------------------------------- 1 10 11 @@ -324,7 +324,7 @@ LANGUAGE 'plpgsql' IMMUTABLE; SET search_path TO public; SELECT test_schema_support.dummyFunction2(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; dummyfunction2 ----------------- +--------------------------------------------------------------------- 1 10 11 @@ -339,7 +339,7 @@ SELECT test_schema_support.dummyFunction2(n_nationkey) FROM test_schema_support. SET search_path TO test_schema_support; SELECT dummyFunction2(n_nationkey) FROM nation_hash GROUP BY 1 ORDER BY 1; dummyfunction2 ----------------- +--------------------------------------------------------------------- 1 10 11 @@ -385,7 +385,7 @@ CREATE OPERATOR test_schema_support.=== ( -- test with search_path is not set SELECT * FROM test_schema_support.nation_hash WHERE n_nationkey OPERATOR(test_schema_support.===) 1; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) @@ -393,7 +393,7 @@ SELECT * FROM test_schema_support.nation_hash WHERE n_nationkey OPERATOR(test_s SET search_path TO test_schema_support; SELECT * FROM nation_hash WHERE n_nationkey OPERATOR(===) 1; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------ +--------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. 
bold requests alon (1 row) @@ -403,7 +403,7 @@ UPDATE test_schema_support.nation_hash SET n_regionkey = n_regionkey + 1; --verify modification SELECT * FROM test_schema_support.nation_hash ORDER BY 1,2,3,4; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- 0 | ALGERIA | 1 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 2 | al foxes promise slyly according to the regular accounts. bold requests alon 2 | BRAZIL | 2 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special @@ -420,7 +420,7 @@ UPDATE nation_hash SET n_regionkey = n_regionkey + 1; --verify modification SELECT * FROM nation_hash ORDER BY 1,2,3,4; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- 0 | ALGERIA | 2 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 3 | al foxes promise slyly according to the regular accounts. bold requests alon 2 | BRAZIL | 3 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special @@ -444,27 +444,27 @@ CREATE TABLE test_schema_support.nation_hash_collation( ); SELECT master_get_table_ddl_events('test_schema_support.nation_hash_collation') ORDER BY 1; master_get_table_ddl_events --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- ALTER TABLE test_schema_support.nation_hash_collation OWNER TO postgres CREATE TABLE test_schema_support.nation_hash_collation (n_nationkey integer NOT NULL, n_name character(25) NOT NULL COLLATE test_schema_support.english, n_regionkey integer NOT NULL, n_comment character varying(152)) (2 rows) SELECT master_create_distributed_table('test_schema_support.nation_hash_collation', 'n_nationkey', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('test_schema_support.nation_hash_collation', 4, 2); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) \copy test_schema_support.nation_hash_collation FROM STDIN with delimiter '|'; SELECT * FROM test_schema_support.nation_hash_collation ORDER BY 1,2,3,4; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. 
slyly special @@ -475,7 +475,7 @@ SELECT * FROM test_schema_support.nation_hash_collation ORDER BY 1,2,3,4; SELECT n_comment FROM test_schema_support.nation_hash_collation ORDER BY n_comment COLLATE test_schema_support.english; n_comment ------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- al foxes promise slyly according to the regular accounts. bold requests alon eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold haggle. carefully final deposits detect slyly agai @@ -494,20 +494,20 @@ CREATE TABLE nation_hash_collation_search_path( ); SELECT master_create_distributed_table('nation_hash_collation_search_path', 'n_nationkey', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('nation_hash_collation_search_path', 4, 2); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) \copy nation_hash_collation_search_path FROM STDIN with delimiter '|'; SELECT * FROM nation_hash_collation_search_path ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC; n_nationkey | n_name | n_regionkey | n_comment --------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- 5 | ETHIOPIA | 0 | ven packages wake quickly. regu 4 | EGYPT | 4 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold @@ -518,7 +518,7 @@ SELECT * FROM nation_hash_collation_search_path ORDER BY 1 DESC, 2 DESC, 3 DESC, SELECT n_comment FROM nation_hash_collation_search_path ORDER BY n_comment COLLATE english; n_comment ------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- al foxes promise slyly according to the regular accounts. bold requests alon eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold haggle. 
carefully final deposits detect slyly agai @@ -539,13 +539,13 @@ CREATE TABLE test_schema_support.nation_hash_composite_types( ); SELECT master_create_distributed_table('test_schema_support.nation_hash_composite_types', 'n_nationkey', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('test_schema_support.nation_hash_composite_types', 4, 2); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -553,7 +553,7 @@ SELECT master_create_worker_shards('test_schema_support.nation_hash_composite_ty \copy test_schema_support.nation_hash_composite_types FROM STDIN with delimiter '|'; SELECT * FROM test_schema_support.nation_hash_composite_types WHERE test_col = '(a,a)'::test_schema_support.new_composite_type; n_nationkey | n_name | n_regionkey | n_comment | test_col --------------+---------------------------+-------------+----------------------------------------------------+---------- +--------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai | (a,a) (1 row) @@ -561,7 +561,7 @@ SELECT * FROM test_schema_support.nation_hash_composite_types WHERE test_col = ' SET search_path TO test_schema_support; SELECT * FROM nation_hash_composite_types WHERE test_col = '(a,a)'::new_composite_type; n_nationkey | n_name | n_regionkey | n_comment | test_col --------------+---------------------------+-------------+----------------------------------------------------+---------- +--------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai | (a,a) (1 row) @@ -571,7 +571,7 @@ ALTER TABLE test_schema_support.nation_hash ADD COLUMN new_col INT; -- verify column is added SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash'::regclass; Column | Type | Modifiers --------------+------------------------+----------- +--------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null @@ -582,7 +582,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_su \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; Column | Type | Modifiers --------------+------------------------+----------- +--------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null @@ -597,7 +597,7 @@ ALTER TABLE test_schema_support.nation_hash DROP COLUMN IF EXISTS new_col; -- verify column is dropped SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash'::regclass; Column | Type | Modifiers --------------+------------------------+----------- +--------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null @@ -607,7 +607,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_su \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; Column | Type | Modifiers 
--------------+------------------------+----------- +--------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null @@ -621,7 +621,7 @@ ALTER TABLE nation_hash ADD COLUMN new_col INT; -- verify column is added SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash'::regclass; Column | Type | Modifiers --------------+------------------------+----------- +--------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null @@ -632,7 +632,7 @@ SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_sc \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; Column | Type | Modifiers --------------+------------------------+----------- +--------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null @@ -648,7 +648,7 @@ ALTER TABLE nation_hash DROP COLUMN IF EXISTS new_col; -- verify column is dropped SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash'::regclass; Column | Type | Modifiers --------------+------------------------+----------- +--------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null @@ -658,7 +658,7 @@ SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_sc \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; Column | Type | Modifiers --------------+------------------------+----------- +--------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null @@ -674,7 +674,7 @@ CREATE INDEX index1 ON test_schema_support.nation_hash(n_name); SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'test_schema_support.index1'::regclass; Column | Type | Definition ---------+---------------+------------ +--------------------------------------------------------------------- n_name | character(25) | n_name (1 row) @@ -682,7 +682,7 @@ SELECT "Column", "Type", "Definition" FROM index_attrs WHERE SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'test_schema_support.index1_1190003'::regclass; Column | Type | Definition ---------+---------------+------------ +--------------------------------------------------------------------- n_name | character(25) | n_name (1 row) @@ -702,7 +702,7 @@ CREATE INDEX index1 ON nation_hash(n_name); SELECT "Column", "Type", "Definition" FROM public.index_attrs WHERE relid = 'test_schema_support.index1'::regclass; Column | Type | Definition ---------+---------------+------------ +--------------------------------------------------------------------- n_name | character(25) | n_name (1 row) @@ -710,7 +710,7 @@ SELECT "Column", "Type", "Definition" FROM public.index_attrs WHERE SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'test_schema_support.index1_1190003'::regclass; Column | Type | Definition ---------+---------------+------------ 
+--------------------------------------------------------------------- n_name | character(25) | n_name (1 row) @@ -729,14 +729,14 @@ SET search_path TO public; UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1190000 and nodeport = :worker_1_port; SELECT master_copy_shard_placement(1190000, 'localhost', :worker_2_port, 'localhost', :worker_1_port); master_copy_shard_placement ------------------------------ +--------------------------------------------------------------------- (1 row) -- verify shardstate SELECT shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid = 1190000 ORDER BY nodeport; shardstate | nodename | nodeport -------------+-----------+---------- +--------------------------------------------------------------------- 1 | localhost | 57637 1 | localhost | 57638 (2 rows) @@ -747,14 +747,14 @@ SET search_path TO test_schema_support; UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1190000 and nodeport = :worker_1_port; SELECT master_copy_shard_placement(1190000, 'localhost', :worker_2_port, 'localhost', :worker_1_port); master_copy_shard_placement ------------------------------ +--------------------------------------------------------------------- (1 row) -- verify shardstate SELECT shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid = 1190000 ORDER BY nodeport; shardstate | nodename | nodeport -------------+-----------+---------- +--------------------------------------------------------------------- 1 | localhost | 57637 1 | localhost | 57638 (2 rows) @@ -763,7 +763,7 @@ SELECT shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid SET search_path TO public; SELECT master_apply_delete_command('DELETE FROM test_schema_support.nation_append') ; master_apply_delete_command ------------------------------ +--------------------------------------------------------------------- 1 (1 row) @@ -776,7 +776,7 @@ SET search_path TO test_schema_support; \copy nation_append FROM STDIN with delimiter '|'; SELECT master_apply_delete_command('DELETE FROM nation_append') ; master_apply_delete_command ------------------------------ +--------------------------------------------------------------------- 1 (1 row) @@ -809,21 +809,21 @@ CREATE TABLE test_schema_support_join_2.nation_hash ( n_comment varchar(152)); SELECT create_distributed_table('test_schema_support_join_1.nation_hash', 'n_nationkey'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) \copy test_schema_support_join_1.nation_hash FROM STDIN with delimiter '|'; SELECT create_distributed_table('test_schema_support_join_1.nation_hash_2', 'n_nationkey'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) \copy test_schema_support_join_1.nation_hash_2 FROM STDIN with delimiter '|'; SELECT create_distributed_table('test_schema_support_join_2.nation_hash', 'n_nationkey'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -839,7 +839,7 @@ FROM WHERE n1.n_nationkey = n2.n_nationkey; count -------- +--------------------------------------------------------------------- 6 (1 row) @@ -854,7 +854,7 @@ FROM WHERE n1.n_nationkey = n2.n_nationkey; count -------- +--------------------------------------------------------------------- 6 (1 row) @@ -869,7 +869,7 @@ FROM WHERE n1.n_nationkey = n2.n_nationkey; count 
-------- +--------------------------------------------------------------------- 6 (1 row) @@ -884,7 +884,7 @@ FROM WHERE n1.n_nationkey = n2.n_nationkey; count -------- +--------------------------------------------------------------------- 6 (1 row) @@ -901,7 +901,7 @@ FROM WHERE n1.n_nationkey = n2.n_regionkey; count -------- +--------------------------------------------------------------------- 6 (1 row) @@ -916,7 +916,7 @@ FROM WHERE n1.n_nationkey = n2.n_regionkey; count -------- +--------------------------------------------------------------------- 6 (1 row) @@ -931,7 +931,7 @@ FROM WHERE n1.n_nationkey = n2.n_regionkey; count -------- +--------------------------------------------------------------------- 6 (1 row) @@ -947,7 +947,7 @@ FROM WHERE n1.n_regionkey = n2.n_regionkey; count -------- +--------------------------------------------------------------------- 14 (1 row) @@ -962,7 +962,7 @@ FROM WHERE n1.n_regionkey = n2.n_regionkey; count -------- +--------------------------------------------------------------------- 14 (1 row) @@ -977,7 +977,7 @@ FROM WHERE n1.n_regionkey = n2.n_regionkey; count -------- +--------------------------------------------------------------------- 14 (1 row) @@ -996,13 +996,13 @@ HINT: Connect to worker nodes directly to manually create all necessary users a CONTEXT: SQL statement "CREATE USER "test-user"" PL/pgSQL function run_command_on_coordinator_and_workers(text) line 3 at EXECUTE run_command_on_coordinator_and_workers ----------------------------------------- +--------------------------------------------------------------------- (1 row) SELECT run_command_on_coordinator_and_workers('GRANT ALL ON DATABASE postgres to "test-user"'); run_command_on_coordinator_and_workers ----------------------------------------- +--------------------------------------------------------------------- (1 row) @@ -1010,7 +1010,7 @@ CREATE SCHEMA schema_with_user AUTHORIZATION "test-user"; CREATE TABLE schema_with_user.test_table(column1 int); SELECT create_reference_table('schema_with_user.test_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -1019,7 +1019,7 @@ SELECT create_reference_table('schema_with_user.test_table'); \dn schema_with_user List of schemas Name | Owner -------------------+----------- +--------------------------------------------------------------------- schema_with_user | test-user (1 row) @@ -1029,14 +1029,14 @@ DROP OWNED BY "test-user" CASCADE; NOTICE: drop cascades to table schema_with_user.test_table SELECT run_command_on_workers('DROP OWNED BY "test-user" CASCADE'); run_command_on_workers ----------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"DROP OWNED") (localhost,57638,t,"DROP OWNED") (2 rows) SELECT run_command_on_coordinator_and_workers('DROP USER "test-user"'); run_command_on_coordinator_and_workers ----------------------------------------- +--------------------------------------------------------------------- (1 row) @@ -1046,7 +1046,7 @@ CREATE SCHEMA run_test_schema; CREATE TABLE run_test_schema.test_table(id int); SELECT create_distributed_table('run_test_schema.test_table','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1057,13 +1057,13 @@ INSERT INTO run_test_schema.test_table VALUES(9); -- try UDFs which call shard_name as a subroutine SELECT sum(result::int) FROM 
run_command_on_placements('run_test_schema.test_table','SELECT pg_table_size(''%s'')'); sum -------- +--------------------------------------------------------------------- 49152 (1 row) SELECT sum(result::int) FROM run_command_on_shards('run_test_schema.test_table','SELECT pg_table_size(''%s'')'); sum -------- +--------------------------------------------------------------------- 24576 (1 row) @@ -1078,13 +1078,13 @@ CREATE TABLE "CiTUS.TEEN2"."CAPITAL_TABLE"(i int, j int); -- create distributed table with weird names SELECT create_distributed_table('"CiTuS.TeeN"."TeeNTabLE.1!?!"', 'TeNANt_Id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('"CiTUS.TEEN2"."CAPITAL_TABLE"', 'i'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1094,7 +1094,7 @@ INSERT INTO "CiTUS.TEEN2"."CAPITAL_TABLE" VALUES(0, 1); TRUNCATE "CiTuS.TeeN"."TeeNTabLE.1!?!", "CiTUS.TEEN2"."CAPITAL_TABLE"; SELECT count(*) FROM "CiTUS.TEEN2"."CAPITAL_TABLE"; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -1107,7 +1107,7 @@ FROM "CiTuS.TeeN"."TeeNTabLE.1!?!", "CiTUS.TEEN2"."CAPITAL_TABLE" WHERE "CiTUS.TEEN2"."CAPITAL_TABLE".i = "CiTuS.TeeN"."TeeNTabLE.1!?!"."TeNANt_Id" ORDER BY 1,2,3,4; id | TeNANt_Id | i | j -----+-----------+---+--- +--------------------------------------------------------------------- 0 | 1 | 1 | 0 1 | 0 | 0 | 1 1 | 1 | 1 | 0 @@ -1124,7 +1124,7 @@ WHERE "CiTUS.TEEN2"."CAPITAL_TABLE".i = "CiTuS.TeeN"."TeeNTabLE.1!?!"."TeNANt_Id GROUP BY "TeNANt_Id", id, i, j HAVING "TeNANt_Id" > 0 AND j >= id ORDER BY "TeNANt_Id"; id | TeNANt_Id | i | j -----+-----------+---+--- +--------------------------------------------------------------------- 0 | 1 | 1 | 0 2 | 3 | 3 | 2 4 | 4 | 4 | 4 @@ -1137,7 +1137,7 @@ GROUP BY "TeNANt_Id", id, i, j HAVING "TeNANt_Id" > 0 AND j >= id ORDER BY 1,2,3,4; id | TeNANt_Id | i | j -----+-----------+---+--- +--------------------------------------------------------------------- 0 | 1 | 1 | 0 2 | 3 | 3 | 2 4 | 4 | 4 | 4 @@ -1154,7 +1154,7 @@ GROUP BY "TeNANt_Id", id, i, j HAVING "TeNANt_Id" > 0 AND j >= id ORDER BY 1,2,3,4; id | TeNANt_Id | i | j -----+-----------+---+--- +--------------------------------------------------------------------- 0 | 1 | 1 | 0 2 | 3 | 3 | 2 4 | 4 | 4 | 4 @@ -1173,7 +1173,7 @@ GROUP BY "TeNANt_Id", id, i, j HAVING "TeNANt_Id" > 0 AND j >= id ORDER BY 1,2,3,4; id | TeNANt_Id | i | j -----+-----------+---+--- +--------------------------------------------------------------------- 0 | 1 | 1 | 0 2 | 3 | 3 | 2 4 | 4 | 4 | 4 diff --git a/src/test/regress/expected/multi_select_distinct.out b/src/test/regress/expected/multi_select_distinct.out index dd0206265..da645625c 100644 --- a/src/test/regress/expected/multi_select_distinct.out +++ b/src/test/regress/expected/multi_select_distinct.out @@ -7,12 +7,12 @@ ANALYZE lineitem_hash_part; -- function calls are supported SELECT DISTINCT l_orderkey, now() FROM lineitem_hash_part LIMIT 0; l_orderkey | now -------------+----- +--------------------------------------------------------------------- (0 rows) SELECT DISTINCT l_partkey, 1 + (random() * 0)::int FROM lineitem_hash_part ORDER BY 1 DESC LIMIT 3; l_partkey | ?column? 
------------+---------- +--------------------------------------------------------------------- 199973 | 1 199946 | 1 199943 | 1 @@ -21,7 +21,7 @@ SELECT DISTINCT l_partkey, 1 + (random() * 0)::int FROM lineitem_hash_part ORDER -- const expressions are supported SELECT DISTINCT l_orderkey, 1+1 FROM lineitem_hash_part ORDER BY 1 LIMIT 5; l_orderkey | ?column? -------------+---------- +--------------------------------------------------------------------- 1 | 2 2 | 2 3 | 2 @@ -32,7 +32,7 @@ SELECT DISTINCT l_orderkey, 1+1 FROM lineitem_hash_part ORDER BY 1 LIMIT 5; -- non const expressions are also supported SELECT DISTINCT l_orderkey, l_partkey + 1 FROM lineitem_hash_part ORDER BY 1, 2 LIMIT 5; l_orderkey | ?column? -------------+---------- +--------------------------------------------------------------------- 1 | 2133 1 | 15636 1 | 24028 @@ -43,7 +43,7 @@ SELECT DISTINCT l_orderkey, l_partkey + 1 FROM lineitem_hash_part ORDER BY 1, 2 -- column expressions are supported SELECT DISTINCT l_orderkey, l_shipinstruct || l_shipmode FROM lineitem_hash_part ORDER BY 2 , 1 LIMIT 5; l_orderkey | ?column? -------------+---------------- +--------------------------------------------------------------------- 32 | COLLECT CODAIR 39 | COLLECT CODAIR 66 | COLLECT CODAIR @@ -54,7 +54,7 @@ SELECT DISTINCT l_orderkey, l_shipinstruct || l_shipmode FROM lineitem_hash_part -- function calls with const input are supported SELECT DISTINCT l_orderkey, strpos('AIR', 'A') FROM lineitem_hash_part ORDER BY 1,2 LIMIT 5; l_orderkey | strpos -------------+-------- +--------------------------------------------------------------------- 1 | 1 2 | 1 3 | 1 @@ -69,7 +69,7 @@ SELECT DISTINCT l_orderkey, strpos(l_shipmode, 'I') ORDER BY 2, 1 LIMIT 5; l_orderkey | strpos -------------+-------- +--------------------------------------------------------------------- 1 | 2 3 | 2 5 | 2 @@ -80,7 +80,7 @@ SELECT DISTINCT l_orderkey, strpos(l_shipmode, 'I') -- row types are supported SELECT DISTINCT (l_orderkey, l_partkey) AS pair FROM lineitem_hash_part ORDER BY 1 LIMIT 5; pair ------------ +--------------------------------------------------------------------- (1,2132) (1,15635) (1,24027) @@ -93,19 +93,19 @@ SELECT DISTINCT (l_orderkey, l_partkey) AS pair FROM lineitem_hash_part ORDER BY CREATE TEMP TABLE temp_orderkeys AS SELECT DISTINCT l_orderkey FROM lineitem_hash_part; SELECT COUNT(*) FROM temp_orderkeys; count -------- +--------------------------------------------------------------------- 2985 (1 row) SELECT COUNT(DISTINCT l_orderkey) FROM lineitem_hash_part; count -------- +--------------------------------------------------------------------- 2985 (1 row) SELECT DISTINCT l_orderkey FROM lineitem_hash_part WHERE l_orderkey < 500 and l_partkey < 5000 order by 1; l_orderkey ------------- +--------------------------------------------------------------------- 1 3 32 @@ -129,7 +129,7 @@ SELECT DISTINCT l_orderkey FROM lineitem_hash_part WHERE l_orderkey < 500 and l_ -- distinct on non-partition column SELECT DISTINCT l_partkey FROM lineitem_hash_part WHERE l_orderkey > 5 and l_orderkey < 20 order by 1; l_partkey ------------ +--------------------------------------------------------------------- 79251 94780 139636 @@ -142,7 +142,7 @@ SELECT DISTINCT l_partkey FROM lineitem_hash_part WHERE l_orderkey > 5 and l_ord SELECT DISTINCT l_shipmode FROM lineitem_hash_part ORDER BY 1 DESC; l_shipmode ------------- +--------------------------------------------------------------------- TRUCK SHIP REG AIR @@ -158,7 +158,7 @@ SELECT DISTINCT l_orderkey, 
o_orderdate WHERE l_orderkey < 10 ORDER BY l_orderkey; l_orderkey | o_orderdate -------------+------------- +--------------------------------------------------------------------- 1 | 01-02-1996 2 | 12-01-1996 3 | 10-14-1993 @@ -177,7 +177,7 @@ SELECT DISTINCT l_orderkey, count(*) HAVING count(*) > 5 ORDER BY 2 DESC, 1; l_orderkey | count -------------+------- +--------------------------------------------------------------------- 7 | 7 68 | 7 129 | 7 @@ -208,7 +208,7 @@ EXPLAIN (COSTS FALSE) HAVING count(*) > 5 ORDER BY 2 DESC, 1; QUERY PLAN ----------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Sort Sort Key: remote_scan.count DESC, remote_scan.l_orderkey -> HashAggregate @@ -235,7 +235,7 @@ EXPLAIN (COSTS FALSE) HAVING count(*) > 5 ORDER BY 2 DESC, 1; QUERY PLAN ----------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Sort Sort Key: remote_scan.count DESC, remote_scan.l_orderkey -> Unique @@ -261,7 +261,7 @@ SELECT DISTINCT count(*) GROUP BY l_suppkey, l_linenumber ORDER BY 1; count -------- +--------------------------------------------------------------------- 1 2 3 @@ -277,7 +277,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY 1; QUERY PLAN ----------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Sort Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint)) -> HashAggregate @@ -303,7 +303,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY 1; QUERY PLAN ----------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Sort Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint)) -> Unique @@ -332,7 +332,7 @@ SELECT DISTINCT l_suppkey, count(*) ORDER BY 1 LIMIT 10; l_suppkey | count ------------+------- +--------------------------------------------------------------------- 1 | 1 2 | 1 3 | 1 @@ -353,7 +353,7 @@ EXPLAIN (COSTS FALSE) ORDER BY 1 LIMIT 10; QUERY PLAN --------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.l_suppkey @@ -381,7 +381,7 @@ EXPLAIN (COSTS FALSE) ORDER BY 1 LIMIT 10; QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.l_suppkey @@ -411,7 +411,7 @@ SELECT DISTINCT l_suppkey, avg(l_partkey) ORDER BY 1,2 LIMIT 10; l_suppkey | avg ------------+------------------------ +--------------------------------------------------------------------- 1 | 190000.000000000000 2 | 172450.000000000000 3 | 112469.000000000000 @@ -433,7 +433,7 @@ EXPLAIN (COSTS FALSE) ORDER BY 1,2 LIMIT 10; QUERY PLAN ------------------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.l_suppkey, ((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1))) @@ -461,7 +461,7 
@@ EXPLAIN (COSTS FALSE) ORDER BY 1,2 LIMIT 10; QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.l_suppkey, ((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1))) @@ -490,7 +490,7 @@ SELECT DISTINCT ON (l_suppkey) avg(l_partkey) ORDER BY l_suppkey,1 LIMIT 10; avg ------------------------- +--------------------------------------------------------------------- 190000.000000000000 172450.000000000000 112469.000000000000 @@ -512,7 +512,7 @@ EXPLAIN (COSTS FALSE) ORDER BY l_suppkey,1 LIMIT 10; QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Unique -> Sort @@ -539,7 +539,7 @@ EXPLAIN (COSTS FALSE) ORDER BY l_suppkey,1 LIMIT 10; QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Unique -> Sort @@ -566,7 +566,7 @@ SELECT DISTINCT avg(ceil(l_partkey / 2)) ORDER BY 1 LIMIT 10; avg ------ +--------------------------------------------------------------------- 9 39 74 @@ -587,7 +587,7 @@ EXPLAIN (COSTS FALSE) ORDER BY 1 LIMIT 10; QUERY PLAN ---------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: ((sum(remote_scan.avg) / (pg_catalog.sum(remote_scan.avg_1))::double precision)) @@ -615,7 +615,7 @@ EXPLAIN (COSTS FALSE) ORDER BY 1 LIMIT 10; QUERY PLAN ----------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: ((sum(remote_scan.avg) / (pg_catalog.sum(remote_scan.avg_1))::double precision)) @@ -644,7 +644,7 @@ SELECT DISTINCT sum(l_suppkey) + count(l_partkey) AS dis ORDER BY 1 LIMIT 10; dis ------ +--------------------------------------------------------------------- 2 3 4 @@ -665,7 +665,7 @@ EXPLAIN (COSTS FALSE) ORDER BY 1 LIMIT 10; QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: (((pg_catalog.sum(remote_scan.dis))::bigint + COALESCE((pg_catalog.sum(remote_scan.dis_1))::bigint, '0'::bigint))) @@ -693,7 +693,7 @@ EXPLAIN (COSTS FALSE) ORDER BY 1 LIMIT 10; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: (((pg_catalog.sum(remote_scan.dis))::bigint + COALESCE((pg_catalog.sum(remote_scan.dis_1))::bigint, '0'::bigint))) @@ -723,7 +723,7 @@ SELECT DISTINCT * ORDER BY 1,2 LIMIT 10; l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment 
-------------+-----------+-----------+--------------+------------+-----------------+------------+-------+--------------+--------------+------------+--------------+---------------+---------------------------+------------+-------------------------------------------- +--------------------------------------------------------------------- 1 | 2132 | 4633 | 4 | 28.00 | 28955.64 | 0.09 | 0.06 | N | O | 04-21-1996 | 03-30-1996 | 05-16-1996 | NONE | AIR | lites. fluffily even de 1 | 15635 | 638 | 6 | 32.00 | 49620.16 | 0.07 | 0.02 | N | O | 01-30-1996 | 02-07-1996 | 02-03-1996 | DELIVER IN PERSON | MAIL | arefully slyly ex 1 | 24027 | 1534 | 5 | 24.00 | 22824.48 | 0.10 | 0.04 | N | O | 03-30-1996 | 03-14-1996 | 04-01-1996 | NONE | FOB | pending foxes. slyly re @@ -745,7 +745,7 @@ EXPLAIN (COSTS FALSE) ORDER BY 1,2 LIMIT 10; QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.l_orderkey, remote_scan.l_partkey @@ -775,7 +775,7 @@ EXPLAIN (COSTS FALSE) ORDER BY 1,2 LIMIT 10; QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.l_orderkey, remote_scan.l_partkey @@ -803,7 +803,7 @@ SELECT DISTINCT count(DISTINCT l_partkey), count(DISTINCT l_shipmode) GROUP BY l_orderkey ORDER BY 1,2; count | count --------+------- +--------------------------------------------------------------------- 1 | 1 2 | 1 2 | 2 @@ -839,7 +839,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_orderkey ORDER BY 1,2; QUERY PLAN ----------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Sort Sort Key: remote_scan.count, remote_scan.count_1 -> HashAggregate @@ -865,7 +865,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_orderkey ORDER BY 1,2; QUERY PLAN ----------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Sort Sort Key: remote_scan.count, remote_scan.count_1 -> Unique @@ -890,7 +890,7 @@ SELECT DISTINCT ceil(count(case when l_partkey > 100000 THEN 1 ELSE 0 END) / 2) GROUP BY l_suppkey ORDER BY 1; count -------- +--------------------------------------------------------------------- 0 1 2 @@ -905,7 +905,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey ORDER BY 1; QUERY PLAN ------------------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- Sort Sort Key: (ceil(((COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint) / 2))::double precision)) 
-> HashAggregate @@ -930,7 +930,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey ORDER BY 1; QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Sort Sort Key: (ceil(((COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint) / 2))::double precision)) -> Unique @@ -959,7 +959,7 @@ EXPLAIN (COSTS FALSE) ORDER BY 2 LIMIT 15; QUERY PLAN ----------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.array_length @@ -986,7 +986,7 @@ EXPLAIN (COSTS FALSE) ORDER BY 2 LIMIT 15; QUERY PLAN ----------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.array_length @@ -1014,7 +1014,7 @@ SELECT DISTINCT l_partkey, count(*) HAVING count(*) > 2 ORDER BY 1; l_partkey | count ------------+------- +--------------------------------------------------------------------- 1051 | 3 1927 | 3 6983 | 3 @@ -1036,7 +1036,7 @@ EXPLAIN (COSTS FALSE) HAVING count(*) > 2 ORDER BY 1; QUERY PLAN ----------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Sort Sort Key: remote_scan.l_partkey -> HashAggregate @@ -1060,7 +1060,7 @@ SELECT DISTINCT l_partkey, avg(l_linenumber) HAVING avg(l_linenumber) > 2 ORDER BY 1; l_partkey | avg ------------+-------------------- +--------------------------------------------------------------------- 18 | 7.0000000000000000 79 | 6.0000000000000000 149 | 4.5000000000000000 @@ -1084,7 +1084,7 @@ SELECT DISTINCT l_partkey, l_suppkey WHERE l_shipmode = 'AIR' AND l_orderkey < 100 ORDER BY 1, 2; l_partkey | l_suppkey ------------+----------- +--------------------------------------------------------------------- 2132 | 4633 4297 | 1798 37531 | 35 @@ -1108,7 +1108,7 @@ EXPLAIN (COSTS FALSE) WHERE l_shipmode = 'AIR' AND l_orderkey < 100 ORDER BY 1, 2; QUERY PLAN ------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- Sort Sort Key: remote_scan.l_partkey, remote_scan.l_suppkey -> HashAggregate @@ -1131,7 +1131,7 @@ SELECT DISTINCT ON (l_orderkey) l_orderkey, l_partkey, l_suppkey WHERE l_orderkey < 35 ORDER BY 1; l_orderkey | l_partkey | l_suppkey -------------+-----------+----------- +--------------------------------------------------------------------- 1 | 155190 | 7706 2 | 106170 | 1191 3 | 4297 | 1798 @@ -1150,7 +1150,7 @@ EXPLAIN (COSTS FALSE) WHERE l_orderkey < 35 ORDER BY 1; QUERY PLAN ----------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Unique -> Sort Sort Key: remote_scan.l_orderkey @@ -1175,7 +1175,7 @@ SELECT DISTINCT ON (l_partkey) l_partkey, l_orderkey ORDER BY 1,2 LIMIT 20; l_partkey | l_orderkey ------------+------------ +--------------------------------------------------------------------- 18 | 12005 79 | 5121 91 | 2883 @@ -1204,7 +1204,7 @@ EXPLAIN (COSTS FALSE) ORDER BY 1,2 LIMIT 20; QUERY PLAN 
----------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Unique -> Sort @@ -1228,7 +1228,7 @@ SELECT DISTINCT ON (o_custkey) o_custkey, l_orderkey WHERE o_custkey < 15 ORDER BY 1,2; o_custkey | l_orderkey ------------+------------ +--------------------------------------------------------------------- 1 | 9154 2 | 10563 4 | 320 @@ -1249,7 +1249,7 @@ EXPLAIN (COSTS FALSE) ORDER BY 1,2; $Q$); coordinator_plan ------------------------------------------------------------------ +--------------------------------------------------------------------- Unique -> Sort Sort Key: remote_scan.o_custkey, remote_scan.l_orderkey @@ -1266,7 +1266,7 @@ EXPLAIN (COSTS FALSE) WHERE o_custkey < 15; $Q$); coordinator_plan ------------------------------------------- +--------------------------------------------------------------------- Unique -> Sort Sort Key: remote_scan.o_custkey @@ -1280,7 +1280,7 @@ SELECT DISTINCT ON (o_custkey, l_orderkey) o_custkey, l_orderkey, l_linenumber, WHERE o_custkey < 20 ORDER BY 1,2,3; o_custkey | l_orderkey | l_linenumber | l_partkey ------------+------------+--------------+----------- +--------------------------------------------------------------------- 1 | 9154 | 1 | 86513 1 | 14656 | 1 | 59539 2 | 10563 | 1 | 147459 @@ -1327,7 +1327,7 @@ EXPLAIN (COSTS FALSE) WHERE o_custkey < 20; $Q$); coordinator_plan ------------------------------------------------------------------ +--------------------------------------------------------------------- Unique -> Sort Sort Key: remote_scan.o_custkey, remote_scan.l_orderkey @@ -1341,7 +1341,7 @@ SELECT DISTINCT ON (o_custkey, l_orderkey) o_custkey, l_orderkey, l_linenumber, WHERE o_custkey < 15 ORDER BY 1,2,3 DESC; o_custkey | l_orderkey | l_linenumber | l_partkey ------------+------------+--------------+----------- +--------------------------------------------------------------------- 1 | 9154 | 7 | 173448 1 | 14656 | 1 | 59539 2 | 10563 | 4 | 110741 @@ -1381,7 +1381,7 @@ SELECT DISTINCT l_orderkey, l_partkey ORDER BY 1,2 LIMIT 10; l_orderkey | l_partkey -------------+----------- +--------------------------------------------------------------------- 1 | 2132 1 | 15635 1 | 24027 @@ -1403,7 +1403,7 @@ EXPLAIN (COSTS FALSE) ORDER BY 1,2 LIMIT 10; QUERY PLAN ----------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.l_orderkey, remote_scan.l_partkey @@ -1431,7 +1431,7 @@ SELECT DISTINCT l_orderkey, cnt ORDER BY 1,2 LIMIT 10; l_orderkey | cnt -------------+----- +--------------------------------------------------------------------- 1 | 6 2 | 1 3 | 6 @@ -1454,7 +1454,7 @@ EXPLAIN (COSTS FALSE) ORDER BY 1,2 LIMIT 10; QUERY PLAN ----------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.l_orderkey, remote_scan.cnt @@ -1486,7 +1486,7 @@ SELECT DISTINCT ON (l_orderkey) l_orderkey, l_partkey ORDER BY 1,2 LIMIT 10; l_orderkey | l_partkey -------------+----------- +--------------------------------------------------------------------- 1 | 2132 2 | 106170 3 | 4297 @@ -1509,7 +1509,7 @@ EXPLAIN (COSTS FALSE) ORDER BY 1,2 LIMIT 10; QUERY PLAN 
----------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Unique -> Sort @@ -1538,7 +1538,7 @@ SELECT DISTINCT ON (l_partkey) l_orderkey, l_partkey ORDER BY 2,1 LIMIT 10; l_orderkey | l_partkey -------------+----------- +--------------------------------------------------------------------- 12005 | 18 5121 | 79 2883 | 91 @@ -1561,7 +1561,7 @@ EXPLAIN (COSTS FALSE) ORDER BY 2,1 LIMIT 10; QUERY PLAN ----------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Unique -> Sort diff --git a/src/test/regress/expected/multi_select_for_update.out b/src/test/regress/expected/multi_select_for_update.out index f357a27b6..64dfeee83 100644 --- a/src/test/regress/expected/multi_select_for_update.out +++ b/src/test/regress/expected/multi_select_for_update.out @@ -9,7 +9,7 @@ SET citus.shard_replication_factor to 1; CREATE TABLE test_table_1_rf1(id int, val_1 int); SELECT create_distributed_table('test_table_1_rf1','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -17,7 +17,7 @@ INSERT INTO test_table_1_rf1 values(1,2),(2,3),(3,4),(15,16); CREATE TABLE test_table_2_rf1(id int, val_1 int); SELECT create_distributed_table('test_table_2_rf1','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -25,7 +25,7 @@ INSERT INTO test_table_2_rf1 values(1,2),(2,3),(3,4); CREATE TABLE ref_table(id int, val_1 int); SELECT create_reference_table('ref_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -33,7 +33,7 @@ INSERT INTO ref_table values(1,2),(3,4),(5,6); CREATE TABLE ref_table_2(id int, val_1 int); SELECT create_reference_table('ref_table_2'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -42,7 +42,7 @@ SET citus.shard_replication_factor to 2; CREATE TABLE test_table_3_rf2(id int, val_1 int); SELECT create_distributed_table('test_table_3_rf2','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -50,7 +50,7 @@ INSERT INTO test_table_3_rf2 values(1,2),(2,3),(3,4); CREATE TABLE test_table_4_rf2(id int, val_1 int); SELECT create_distributed_table('test_table_4_rf2','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -62,7 +62,7 @@ SELECT * FROM ORDER BY 1 FOR UPDATE; id | val_1 | id | val_1 -----+-------+----+------- +--------------------------------------------------------------------- 1 | 2 | 1 | 2 (1 row) @@ -72,7 +72,7 @@ SELECT * FROM ORDER BY 1 FOR UPDATE; id | val_1 -----+------- +--------------------------------------------------------------------- 1 | 2 15 | 16 (2 rows) @@ -110,7 +110,7 @@ SELECT * FROM ORDER BY 1 FOR UPDATE; id | val_1 | id | val_1 -----+-------+----+------- +--------------------------------------------------------------------- 1 | 2 | 1 | 2 (1 row) @@ -121,7 +121,7 @@ SELECT * FROM ORDER BY 1 FOR SHARE; id | val_1 | id | val_1 -----+-------+----+------- 
+--------------------------------------------------------------------- 1 | 2 | 1 | 2 (1 row) @@ -132,7 +132,7 @@ SELECT * FROM FOR UPDATE OF rt1; id | val_1 | id | val_1 -----+-------+----+------- +--------------------------------------------------------------------- 3 | 4 | 3 | 4 5 | 6 | 5 | 6 (2 rows) @@ -145,7 +145,7 @@ SELECT * FROM OF rt1 NOWAIT; id | val_1 | id | val_1 -----+-------+----+------- +--------------------------------------------------------------------- 3 | 4 | 3 | 4 5 | 6 | 5 | 6 (2 rows) @@ -155,7 +155,7 @@ WITH first_value AS ( SELECT val_1 FROM test_table_1_rf1 WHERE id = 1 FOR UPDATE) SELECT * FROM first_value; val_1 -------- +--------------------------------------------------------------------- 2 (1 row) @@ -165,14 +165,14 @@ WITH update_table AS ( ) SELECT * FROM update_table FOR UPDATE; id | val_1 -----+------- +--------------------------------------------------------------------- 1 | 10 (1 row) -- Subqueries also supported SELECT * FROM (SELECT * FROM test_table_1_rf1 FOR UPDATE) foo WHERE id = 1; id | val_1 -----+------- +--------------------------------------------------------------------- 1 | 10 (1 row) diff --git a/src/test/regress/expected/multi_shard_modify.out b/src/test/regress/expected/multi_shard_modify.out index 792fb80ac..1eed1c72e 100644 --- a/src/test/regress/expected/multi_shard_modify.out +++ b/src/test/regress/expected/multi_shard_modify.out @@ -9,7 +9,7 @@ CREATE TABLE multi_shard_modify_test ( t_value integer not null); SELECT create_distributed_table('multi_shard_modify_test', 't_key', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -21,7 +21,7 @@ SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -29,14 +29,14 @@ SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) ROLLBACK; SELECT count(*) FROM multi_shard_modify_test; count -------- +--------------------------------------------------------------------- 27 (1 row) @@ -54,7 +54,7 @@ SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -63,7 +63,7 @@ SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. 
HINT: Run the command directly master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -76,7 +76,7 @@ ERROR: relation temp_nations is not distributed -- commands with a USING clause are unsupported SELECT create_distributed_table('temp_nations', 'name', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -89,7 +89,7 @@ SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -102,7 +102,7 @@ ERROR: cannot perform an INSERT without a partition column value SET citus.multi_shard_commit_protocol TO '1pc'; SELECT count(*) FROM multi_shard_modify_test; count -------- +--------------------------------------------------------------------- 25 (1 row) @@ -110,13 +110,13 @@ SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM multi_shard_modify_test; count -------- +--------------------------------------------------------------------- 23 (1 row) @@ -126,13 +126,13 @@ SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM multi_shard_modify_test; count -------- +--------------------------------------------------------------------- 21 (1 row) @@ -146,7 +146,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 15 master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -156,7 +156,7 @@ SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -165,13 +165,13 @@ SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name= WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT t_name FROM multi_shard_modify_test WHERE t_key=17; t_name --------- +--------------------------------------------------------------------- warsaw (1 row) @@ -180,13 +180,13 @@ SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name= WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. 
HINT: Run the command directly master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT t_name FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35; t_name --------- +--------------------------------------------------------------------- ??? ??? ??? @@ -198,13 +198,13 @@ SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT t_value FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35; t_value ---------- +--------------------------------------------------------------------- 296 296 296 @@ -216,13 +216,13 @@ SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name= WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT t_name, t_value FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35; t_name | t_value -----------+--------- +--------------------------------------------------------------------- somename | 333 somename | 333 somename | 333 @@ -234,13 +234,13 @@ SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name= WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT t_name FROM multi_shard_modify_test WHERE t_value < 0; t_name ------------ +--------------------------------------------------------------------- nice city nice city (2 rows) @@ -255,7 +255,7 @@ SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -264,7 +264,7 @@ SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name= WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -278,13 +278,13 @@ SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT t_value FROM multi_shard_modify_test WHERE t_key=10; t_value ---------- +--------------------------------------------------------------------- 10 (1 row) @@ -293,13 +293,13 @@ SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. 
HINT: Run the command directly master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT t_value FROM multi_shard_modify_test WHERE t_key=10; t_value ---------- +--------------------------------------------------------------------- 47 (1 row) @@ -309,7 +309,7 @@ SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -318,13 +318,13 @@ SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT t_value FROM multi_shard_modify_test WHERE t_key=10; t_value ---------- +--------------------------------------------------------------------- 78 (1 row) @@ -333,7 +333,7 @@ SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -347,7 +347,7 @@ SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. 
HINT: Run the command directly master_modify_multiple_shards -------------------------------- +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/multi_shard_update_delete.out b/src/test/regress/expected/multi_shard_update_delete.out index 96a21660f..0ff08ba56 100644 --- a/src/test/regress/expected/multi_shard_update_delete.out +++ b/src/test/regress/expected/multi_shard_update_delete.out @@ -8,7 +8,7 @@ SET citus.multi_shard_modify_mode to 'parallel'; CREATE TABLE users_test_table(user_id int, value_1 int, value_2 int, value_3 int); SELECT create_distributed_table('users_test_table', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -16,7 +16,7 @@ SELECT create_distributed_table('users_test_table', 'user_id'); CREATE TABLE events_test_table (user_id int, value_1 int, value_2 int, value_3 int); SELECT create_distributed_table('events_test_table', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -24,7 +24,7 @@ SELECT create_distributed_table('events_test_table', 'user_id'); CREATE TABLE events_reference_copy_table (like events_test_table); SELECT create_reference_table('events_reference_copy_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -32,7 +32,7 @@ INSERT INTO events_reference_copy_table SELECT * FROM events_test_table; CREATE TABLE users_reference_copy_table (like users_test_table); SELECT create_reference_table('users_reference_copy_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -41,40 +41,40 @@ INSERT INTO users_reference_copy_table SELECT * FROM users_test_table; UPDATE users_test_table SET value_1 = 1; SELECT COUNT(*), SUM(value_1) FROM users_test_table; count | sum --------+----- +--------------------------------------------------------------------- 15 | 15 (1 row) SELECT COUNT(*), SUM(value_2) FROM users_test_table WHERE user_id = 1 or user_id = 3; count | sum --------+----- +--------------------------------------------------------------------- 4 | 52 (1 row) UPDATE users_test_table SET value_2 = value_2 + 1 WHERE user_id = 1 or user_id = 3; SELECT COUNT(*), SUM(value_2) FROM users_test_table WHERE user_id = 1 or user_id = 3; count | sum --------+----- +--------------------------------------------------------------------- 4 | 56 (1 row) UPDATE users_test_table SET value_3 = 0 WHERE user_id <> 5; SELECT SUM(value_3) FROM users_test_table WHERE user_id <> 5; sum ------ +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) FROM users_test_table WHERE user_id = 3 or user_id = 5; count -------- +--------------------------------------------------------------------- 4 (1 row) DELETE FROM users_test_table WHERE user_id = 3 or user_id = 5; SELECT COUNT(*) FROM users_test_table WHERE user_id = 3 or user_id = 5; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -84,7 +84,7 @@ UPDATE users_test_table SET value_3 = 0; END; SELECT SUM(value_3) FROM users_test_table; sum ------ +--------------------------------------------------------------------- 0 (1 row) @@ -94,7 +94,7 @@ UPDATE users_test_table SET value_3 = 1; ROLLBACK; SELECT SUM(value_3) FROM 
users_test_table; sum ------ +--------------------------------------------------------------------- 0 (1 row) @@ -107,7 +107,7 @@ UPDATE users_test_table SET value_3 = 1; END; SELECT SUM(value_3) FROM users_test_table; sum ------ +--------------------------------------------------------------------- 16 (1 row) @@ -130,7 +130,7 @@ UPDATE users_test_table SET value_3 = 0; END; SELECT SUM(value_3) FROM users_test_table; sum ------ +--------------------------------------------------------------------- 0 (1 row) @@ -138,14 +138,14 @@ SELECT SUM(value_3) FROM users_test_table; UPDATE users_test_table SET value_3 = 1 WHERE user_id = 3 or true; SELECT COUNT(*), SUM(value_3) FROM users_test_table; count | sum --------+----- +--------------------------------------------------------------------- 16 | 16 (1 row) UPDATE users_test_table SET value_3 = 0 WHERE user_id = 20 and false; SELECT COUNT(*), SUM(value_3) FROM users_test_table; count | sum --------+----- +--------------------------------------------------------------------- 16 | 16 (1 row) @@ -159,7 +159,7 @@ EXECUTE foo_plan(9,45); EXECUTE foo_plan(0,0); SELECT SUM(value_1), SUM(value_3) FROM users_test_table; sum | sum ------+----- +--------------------------------------------------------------------- 0 | 0 (1 row) @@ -177,38 +177,38 @@ INSERT INTO append_stage_table_2 VALUES(10,4); CREATE TABLE test_append_table(id int, col_2 int); SELECT create_distributed_table('test_append_table','id','append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_empty_shard('test_append_table'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 1440010 (1 row) SELECT * FROM master_append_table_to_shard(1440010, 'append_stage_table', 'localhost', :master_port); master_append_table_to_shard ------------------------------- +--------------------------------------------------------------------- 0.00533333 (1 row) SELECT master_create_empty_shard('test_append_table') AS new_shard_id; new_shard_id --------------- +--------------------------------------------------------------------- 1440011 (1 row) SELECT * FROM master_append_table_to_shard(1440011, 'append_stage_table_2', 'localhost', :master_port); master_append_table_to_shard ------------------------------- +--------------------------------------------------------------------- 0.00533333 (1 row) UPDATE test_append_table SET col_2 = 5; SELECT * FROM test_append_table ORDER BY 1 DESC, 2 DESC; id | col_2 -----+------- +--------------------------------------------------------------------- 10 | 5 9 | 5 8 | 5 @@ -230,7 +230,7 @@ INSERT INTO tt1 VALUES (1,11), (3,15), (5,17), (6,19), (8,17), (2,12); SELECT create_distributed_table('tt1','id'); NOTICE: Copying data from local table... 
create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -238,7 +238,7 @@ UPDATE tt1 SET col_2 = 13; DELETE FROM tt1 WHERE id = 1 or id = 3 or id = 5; SELECT * FROM tt1 ORDER BY 1 DESC, 2 DESC; id | col_2 -----+------- +--------------------------------------------------------------------- 8 | 13 6 | 13 2 | 13 @@ -256,7 +256,7 @@ UPDATE tt1 SET col_2 = 7 WHERE col_2 < 10 and col_2 > 5; COMMIT; SELECT * FROM tt1 ORDER BY id; id | col_2 -----+------- +--------------------------------------------------------------------- 2 | 12 4 | 7 6 | 12 @@ -274,7 +274,7 @@ DELETE FROM tt1_1120; COMMIT; SELECT * FROM tt1 ORDER BY id; id | col_2 -----+------- +--------------------------------------------------------------------- (0 rows) DROP TABLE tt1; @@ -282,7 +282,7 @@ DROP TABLE tt1; CREATE TABLE tt2(id int, col_2 int); SELECT create_distributed_table('tt2','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -292,7 +292,7 @@ UPDATE tt2 SET col_2 = 1; COMMIT; SELECT * FROM tt2 ORDER BY id; id | col_2 -----+------- +--------------------------------------------------------------------- 1 | 1 2 | 1 3 | 1 @@ -303,7 +303,7 @@ SELECT * FROM tt2 ORDER BY id; -- Test returning with both type of executors UPDATE tt2 SET col_2 = 5 RETURNING id, col_2; id | col_2 -----+------- +--------------------------------------------------------------------- 1 | 5 2 | 5 3 | 5 @@ -314,7 +314,7 @@ UPDATE tt2 SET col_2 = 5 RETURNING id, col_2; SET citus.multi_shard_modify_mode to sequential; UPDATE tt2 SET col_2 = 3 RETURNING id, col_2; id | col_2 -----+------- +--------------------------------------------------------------------- 1 | 3 2 | 3 3 | 3 @@ -330,7 +330,7 @@ SET citus.shard_count to 6; CREATE TABLE events_test_table_2 (user_id int, value_1 int, value_2 int, value_3 int); SELECT create_distributed_table('events_test_table_2', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -344,7 +344,7 @@ INSERT INTO test_table_1 VALUES(3, '2111-01-12 08:35:19', 9); SELECT create_distributed_table('test_table_1', 'id'); NOTICE: Copying data from local table... 
create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -370,7 +370,7 @@ WHERE now() > (SELECT max(date_col) GROUP BY id) RETURNING *; user_id | value_1 | value_2 | value_3 ----------+---------+---------+--------- +--------------------------------------------------------------------- 1 | 5 | 7 | 7 1 | 20 | 12 | 25 1 | 60 | 17 | 17 @@ -396,7 +396,7 @@ WHERE user_id IN (SELECT user_id SELECT user_id FROM events_test_table) returning value_3; value_3 ---------- +--------------------------------------------------------------------- 0 0 0 @@ -418,7 +418,7 @@ WHERE user_id IN (SELECT user_id SELECT user_id FROM events_test_table) returning value_3; value_3 ---------- +--------------------------------------------------------------------- 0 0 0 @@ -494,7 +494,7 @@ SELECT * FROM events_test_table WHERE events_test_table.user_id = 1 OR events_test_table.user_id = 5; SELECT SUM(value_2) FROM users_test_table; sum ------ +--------------------------------------------------------------------- 169 (1 row) @@ -504,7 +504,7 @@ FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id; SELECT SUM(value_2) FROM users_test_table; sum ------ +--------------------------------------------------------------------- 97 (1 row) @@ -514,7 +514,7 @@ CREATE SCHEMA sec_schema; CREATE TABLE sec_schema.tt1(id int, value_1 int); SELECT create_distributed_table('sec_schema.tt1','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -526,7 +526,7 @@ WHERE id < (SELECT max(value_2) FROM events_test_table_2 GROUP BY user_id) RETURNING *; id | value_1 -----+--------- +--------------------------------------------------------------------- 7 | 11 9 | 11 (2 rows) @@ -564,7 +564,7 @@ WHERE date_col IN (SELECT now()); -- Test with prepared statements SELECT COUNT(*) FROM users_test_table WHERE value_1 = 0; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -580,7 +580,7 @@ EXECUTE foo_plan_2(9,45); EXECUTE foo_plan_2(0,0); SELECT COUNT(*) FROM users_test_table WHERE value_1 = 0; count -------- +--------------------------------------------------------------------- 6 (1 row) @@ -755,7 +755,7 @@ BEGIN; DECLARE test_cursor CURSOR FOR SELECT * FROM users_test_table ORDER BY user_id; FETCH test_cursor; user_id | value_1 | value_2 | value_3 ----------+---------+---------+--------- +--------------------------------------------------------------------- 1 | 2 | 5 | 0 (1 row) @@ -765,7 +765,7 @@ ROLLBACK; -- Stable functions are supported SELECT * FROM test_table_1 ORDER BY 1 DESC, 2 DESC, 3 DESC; id | date_col | col_3 -----+------------------------------+------- +--------------------------------------------------------------------- 3 | Mon Jan 12 08:35:19 2111 PST | 9 2 | Sun Feb 01 08:31:16 2015 PST | 7 1 | Sat Apr 05 08:32:12 2014 PDT | 5 @@ -774,7 +774,7 @@ SELECT * FROM test_table_1 ORDER BY 1 DESC, 2 DESC, 3 DESC; UPDATE test_table_1 SET col_3 = 3 WHERE date_col < now(); SELECT * FROM test_table_1 ORDER BY 1 DESC, 2 DESC, 3 DESC; id | date_col | col_3 -----+------------------------------+------- +--------------------------------------------------------------------- 3 | Mon Jan 12 08:35:19 2111 PST | 9 2 | Sun Feb 01 08:31:16 2015 PST | 3 1 | Sat Apr 05 08:32:12 2014 PDT | 3 @@ -783,7 +783,7 @@ SELECT * FROM test_table_1 ORDER BY 1 DESC, 2 DESC, 3 DESC; DELETE FROM test_table_1 WHERE date_col < 
current_timestamp; SELECT * FROM test_table_1 ORDER BY 1 DESC, 2 DESC, 3 DESC; id | date_col | col_3 -----+------------------------------+------- +--------------------------------------------------------------------- 3 | Mon Jan 12 08:35:19 2111 PST | 9 (1 row) @@ -796,7 +796,7 @@ INSERT INTO test_table_2 VALUES(3, random()); SELECT create_distributed_table('test_table_2', 'id'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -806,41 +806,41 @@ DROP TABLE test_table_2; -- Run multi shard updates and deletes without transaction on reference tables SELECT COUNT(*) FROM users_reference_copy_table; count -------- +--------------------------------------------------------------------- 15 (1 row) UPDATE users_reference_copy_table SET value_1 = 1; SELECT SUM(value_1) FROM users_reference_copy_table; sum ------ +--------------------------------------------------------------------- 15 (1 row) SELECT COUNT(*), SUM(value_2) FROM users_reference_copy_table WHERE user_id = 3 or user_id = 5; count | sum --------+----- +--------------------------------------------------------------------- 4 | 52 (1 row) UPDATE users_reference_copy_table SET value_2 = value_2 + 1 WHERE user_id = 3 or user_id = 5; SELECT COUNT(*), SUM(value_2) FROM users_reference_copy_table WHERE user_id = 3 or user_id = 5; count | sum --------+----- +--------------------------------------------------------------------- 4 | 56 (1 row) UPDATE users_reference_copy_table SET value_3 = 0 WHERE user_id <> 3; SELECT SUM(value_3) FROM users_reference_copy_table WHERE user_id <> 3; sum ------ +--------------------------------------------------------------------- 0 (1 row) DELETE FROM users_reference_copy_table WHERE user_id = 3 or user_id = 5; SELECT COUNT(*) FROM users_reference_copy_table WHERE user_id = 3 or user_id = 5; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -850,7 +850,7 @@ SET citus.shard_replication_factor to 2; CREATE TABLE users_test_table(user_id int, value_1 int, value_2 int, value_3 int); SELECT create_distributed_table('users_test_table', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -859,40 +859,40 @@ SELECT create_distributed_table('users_test_table', 'user_id'); UPDATE users_test_table SET value_1 = 1; SELECT COUNT(*), SUM(value_1) FROM users_test_table; count | sum --------+----- +--------------------------------------------------------------------- 15 | 15 (1 row) SELECT COUNT(*), SUM(value_2) FROM users_test_table WHERE user_id = 1 or user_id = 3; count | sum --------+----- +--------------------------------------------------------------------- 4 | 52 (1 row) UPDATE users_test_table SET value_2 = value_2 + 1 WHERE user_id = 1 or user_id = 3; SELECT COUNT(*), SUM(value_2) FROM users_test_table WHERE user_id = 1 or user_id = 3; count | sum --------+----- +--------------------------------------------------------------------- 4 | 56 (1 row) UPDATE users_test_table SET value_3 = 0 WHERE user_id <> 5; SELECT SUM(value_3) FROM users_test_table WHERE user_id <> 5; sum ------ +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) FROM users_test_table WHERE user_id = 3 or user_id = 5; count -------- +--------------------------------------------------------------------- 4 (1 row) DELETE FROM users_test_table WHERE 
user_id = 3 or user_id = 5; SELECT COUNT(*) FROM users_test_table WHERE user_id = 3 or user_id = 5; count -------- +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/multi_simple_queries.out b/src/test/regress/expected/multi_simple_queries.out index 52536160e..c85b9839b 100644 --- a/src/test/regress/expected/multi_simple_queries.out +++ b/src/test/regress/expected/multi_simple_queries.out @@ -18,25 +18,25 @@ CREATE TABLE authors ( name text, id bigint ); CREATE TABLE articles_single_shard (LIKE articles); SELECT master_create_distributed_table('articles', 'author_id', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_distributed_table('articles_single_shard', 'author_id', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('articles', 2, 1); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('articles_single_shard', 1, 1); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -101,14 +101,14 @@ DELETE FROM articles WHERE author_id = 1 AND author_id = 2; -- test simple select for a single row SELECT * FROM articles WHERE author_id = 10 AND id = 50; id | author_id | title | word_count -----+-----------+-----------+------------ +--------------------------------------------------------------------- 50 | 10 | anjanette | 19519 (1 row) -- get all titles by a single author SELECT title FROM articles WHERE author_id = 10; title ------------- +--------------------------------------------------------------------- aggrandize absentness andelee @@ -121,7 +121,7 @@ SELECT title, word_count FROM articles WHERE author_id = 10 ORDER BY word_count DESC NULLS LAST; title | word_count -------------+------------ +--------------------------------------------------------------------- anjanette | 19519 aggrandize | 17277 attemper | 14976 @@ -135,7 +135,7 @@ SELECT title, id FROM articles ORDER BY id LIMIT 2; title | id ----------+---- +--------------------------------------------------------------------- aruru | 5 adversa | 15 (2 rows) @@ -145,7 +145,7 @@ SELECT title, author_id FROM articles WHERE author_id = 7 OR author_id = 8 ORDER BY author_id ASC, id; title | author_id --------------+----------- +--------------------------------------------------------------------- aseptic | 7 auriga | 7 arsenous | 7 @@ -165,7 +165,7 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles HAVING sum(word_count) > 40000 ORDER BY sum(word_count) DESC; author_id | corpus_size ------------+------------- +--------------------------------------------------------------------- 2 | 61782 10 | 59955 8 | 55410 @@ -176,7 +176,7 @@ SELECT * FROM articles WHERE author_id = 10 UNION SELECT * FROM articles WHERE author_id = 2 ORDER BY 1,2,3; id | author_id | title | word_count -----+-----------+------------+------------ +--------------------------------------------------------------------- 2 | 2 | abducing | 13642 10 | 10 | aggrandize | 17277 12 | 2 | archiblast | 18185 @@ -193,7 +193,7 @@ ORDER BY 1,2,3; WITH long_names AS ( SELECT id FROM authors WHERE char_length(name) > 15 ) SELECT title FROM 
articles ORDER BY 1 LIMIT 5; title ------------ +--------------------------------------------------------------------- abducing abeyance abhorring @@ -204,7 +204,7 @@ SELECT title FROM articles ORDER BY 1 LIMIT 5; -- queries which involve functions in FROM clause are recursively planned SELECT * FROM articles, position('om' in 'Thomas') ORDER BY 2 DESC, 1 DESC, 3 DESC LIMIT 5; id | author_id | title | word_count | position -----+-----------+------------+------------+---------- +--------------------------------------------------------------------- 50 | 10 | anjanette | 19519 | 3 40 | 10 | attemper | 14976 | 3 30 | 10 | andelee | 6363 | 3 @@ -215,7 +215,7 @@ SELECT * FROM articles, position('om' in 'Thomas') ORDER BY 2 DESC, 1 DESC, 3 DE -- subqueries are supported in WHERE clause in Citus even if the relations are not distributed SELECT * FROM articles WHERE author_id IN (SELECT id FROM authors WHERE name LIKE '%a'); id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) -- subqueries are supported in FROM clause @@ -223,7 +223,7 @@ SELECT articles.id,test.word_count FROM articles, (SELECT id, word_count FROM articles) AS test WHERE test.id = articles.id ORDER BY articles.id; id | word_count -----+------------ +--------------------------------------------------------------------- 1 | 9572 2 | 13642 3 | 10480 @@ -306,7 +306,7 @@ CONTEXT: PL/pgSQL function inline_code_block line 3 at SQL statement -- test cross-shard queries SELECT COUNT(*) FROM articles; count -------- +--------------------------------------------------------------------- 50 (1 row) @@ -330,7 +330,7 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles ORDER BY sum(word_count) DESC LIMIT 5; author_id | corpus_size ------------+------------- +--------------------------------------------------------------------- 4 | 66325 2 | 61782 10 | 59955 @@ -343,7 +343,7 @@ SELECT author_id FROM articles HAVING sum(word_count) > 50000 ORDER BY author_id; author_id ------------ +--------------------------------------------------------------------- 2 4 6 @@ -356,7 +356,7 @@ SELECT author_id FROM articles HAVING sum(word_count) > 50000 AND author_id < 5 ORDER BY author_id; author_id ------------ +--------------------------------------------------------------------- 2 4 (2 rows) @@ -366,7 +366,7 @@ SELECT author_id FROM articles HAVING sum(word_count) > 50000 OR author_id < 5 ORDER BY author_id; author_id ------------ +--------------------------------------------------------------------- 1 2 3 @@ -381,7 +381,7 @@ SELECT author_id FROM articles HAVING author_id <= 2 OR author_id = 8 ORDER BY author_id; author_id ------------ +--------------------------------------------------------------------- 1 2 8 @@ -392,7 +392,7 @@ SELECT o_orderstatus, count(*), avg(o_totalprice) FROM orders HAVING count(*) > 1450 OR avg(o_totalprice) > 150000 ORDER BY o_orderstatus; o_orderstatus | count | avg ----------------+-------+--------------------- +--------------------------------------------------------------------- O | 1461 | 143326.447029431896 P | 75 | 164847.914533333333 (2 rows) @@ -403,7 +403,7 @@ SELECT o_orderstatus, sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders HAVING sum(l_linenumber) > 1000 ORDER BY o_orderstatus; o_orderstatus | sum | avg ----------------+------+-------------------- +--------------------------------------------------------------------- F | 8559 | 3.0126715945089757 O | 8904 | 3.0040485829959514 (2 rows) @@ 
-420,7 +420,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -435,7 +435,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -449,7 +449,7 @@ SELECT * WHERE author_id = 1 OR author_id = 18; DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -465,7 +465,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 article_id | random_value -------------+-------------- +--------------------------------------------------------------------- 1 | 9572 11 | 14817 21 | 123690 @@ -483,7 +483,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 first_author | second_word_count ---------------+------------------- +--------------------------------------------------------------------- 10 | 17277 10 | 1820 10 | 6363 @@ -499,7 +499,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 first_author | second_word_count ---------------+------------------- +--------------------------------------------------------------------- 10 | 19519 10 | 19519 10 | 19519 @@ -514,7 +514,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+----------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 (2 rows) @@ -531,7 +531,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id ----- +--------------------------------------------------------------------- 1 11 21 @@ -551,7 +551,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 avg --------------------- +--------------------------------------------------------------------- 12356.400000000000 (1 row) @@ -575,7 +575,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 max | min | sum | cnt --------+------+-------+----- +--------------------------------------------------------------------- 18185 | 2728 | 61782 | 5 (1 row) @@ -627,7 +627,7 @@ SELECT count(*) FROM ( ) x; DEBUG: Router planner cannot handle multi-shard select queries count -------- +--------------------------------------------------------------------- 50 (1 row) @@ -637,7 +637,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM articles TABLESAMPLE BERNOULLI (0) WHERE author_id = 1; @@ -645,7 +645,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: 
distribution column value: 1 id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM articles TABLESAMPLE SYSTEM (100) WHERE author_id = 1 ORDER BY id; @@ -653,7 +653,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -666,7 +666,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -682,7 +682,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM articles TABLESAMPLE BERNOULLI (0) WHERE author_id = 1; @@ -691,7 +691,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM articles TABLESAMPLE SYSTEM (100) WHERE author_id = 1 ORDER BY id; @@ -700,7 +700,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -714,7 +714,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 diff --git a/src/test/regress/expected/multi_simple_queries_0.out b/src/test/regress/expected/multi_simple_queries_0.out index 6ad020ef4..c69518eea 100644 --- a/src/test/regress/expected/multi_simple_queries_0.out +++ b/src/test/regress/expected/multi_simple_queries_0.out @@ -18,25 +18,25 @@ CREATE TABLE authors ( name text, id bigint ); CREATE TABLE articles_single_shard (LIKE articles); SELECT master_create_distributed_table('articles', 'author_id', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_distributed_table('articles_single_shard', 'author_id', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('articles', 2, 1); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('articles_single_shard', 1, 1); master_create_worker_shards ------------------------------ 
+--------------------------------------------------------------------- (1 row) @@ -101,14 +101,14 @@ DELETE FROM articles WHERE author_id = 1 AND author_id = 2; -- test simple select for a single row SELECT * FROM articles WHERE author_id = 10 AND id = 50; id | author_id | title | word_count -----+-----------+-----------+------------ +--------------------------------------------------------------------- 50 | 10 | anjanette | 19519 (1 row) -- get all titles by a single author SELECT title FROM articles WHERE author_id = 10; title ------------- +--------------------------------------------------------------------- aggrandize absentness andelee @@ -121,7 +121,7 @@ SELECT title, word_count FROM articles WHERE author_id = 10 ORDER BY word_count DESC NULLS LAST; title | word_count -------------+------------ +--------------------------------------------------------------------- anjanette | 19519 aggrandize | 17277 attemper | 14976 @@ -135,7 +135,7 @@ SELECT title, id FROM articles ORDER BY id LIMIT 2; title | id ----------+---- +--------------------------------------------------------------------- aruru | 5 adversa | 15 (2 rows) @@ -145,7 +145,7 @@ SELECT title, author_id FROM articles WHERE author_id = 7 OR author_id = 8 ORDER BY author_id ASC, id; title | author_id --------------+----------- +--------------------------------------------------------------------- aseptic | 7 auriga | 7 arsenous | 7 @@ -165,7 +165,7 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles HAVING sum(word_count) > 40000 ORDER BY sum(word_count) DESC; author_id | corpus_size ------------+------------- +--------------------------------------------------------------------- 2 | 61782 10 | 59955 8 | 55410 @@ -176,7 +176,7 @@ SELECT * FROM articles WHERE author_id = 10 UNION SELECT * FROM articles WHERE author_id = 2 ORDER BY 1,2,3; id | author_id | title | word_count -----+-----------+------------+------------ +--------------------------------------------------------------------- 2 | 2 | abducing | 13642 10 | 10 | aggrandize | 17277 12 | 2 | archiblast | 18185 @@ -193,7 +193,7 @@ ORDER BY 1,2,3; WITH long_names AS ( SELECT id FROM authors WHERE char_length(name) > 15 ) SELECT title FROM articles ORDER BY 1 LIMIT 5; title ------------ +--------------------------------------------------------------------- abducing abeyance abhorring @@ -204,7 +204,7 @@ SELECT title FROM articles ORDER BY 1 LIMIT 5; -- queries which involve functions in FROM clause are recursively planned SELECT * FROM articles, position('om' in 'Thomas') ORDER BY 2 DESC, 1 DESC, 3 DESC LIMIT 5; id | author_id | title | word_count | position -----+-----------+------------+------------+---------- +--------------------------------------------------------------------- 50 | 10 | anjanette | 19519 | 3 40 | 10 | attemper | 14976 | 3 30 | 10 | andelee | 6363 | 3 @@ -250,7 +250,7 @@ CONTEXT: PL/pgSQL function inline_code_block line 3 at SQL statement -- test cross-shard queries SELECT COUNT(*) FROM articles; count -------- +--------------------------------------------------------------------- 50 (1 row) @@ -274,7 +274,7 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles ORDER BY sum(word_count) DESC LIMIT 5; author_id | corpus_size ------------+------------- +--------------------------------------------------------------------- 4 | 66325 2 | 61782 10 | 59955 @@ -287,7 +287,7 @@ SELECT author_id FROM articles HAVING sum(word_count) > 50000 ORDER BY author_id; author_id ------------ 
+--------------------------------------------------------------------- 2 4 6 @@ -300,7 +300,7 @@ SELECT author_id FROM articles HAVING sum(word_count) > 50000 AND author_id < 5 ORDER BY author_id; author_id ------------ +--------------------------------------------------------------------- 2 4 (2 rows) @@ -310,7 +310,7 @@ SELECT author_id FROM articles HAVING sum(word_count) > 50000 OR author_id < 5 ORDER BY author_id; author_id ------------ +--------------------------------------------------------------------- 1 2 3 @@ -325,7 +325,7 @@ SELECT author_id FROM articles HAVING author_id <= 2 OR author_id = 8 ORDER BY author_id; author_id ------------ +--------------------------------------------------------------------- 1 2 8 @@ -336,7 +336,7 @@ SELECT o_orderstatus, count(*), avg(o_totalprice) FROM orders HAVING count(*) > 1450 OR avg(o_totalprice) > 150000 ORDER BY o_orderstatus; o_orderstatus | count | avg ----------------+-------+--------------------- +--------------------------------------------------------------------- O | 1461 | 143326.447029431896 P | 75 | 164847.914533333333 (2 rows) @@ -347,7 +347,7 @@ SELECT o_orderstatus, sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders HAVING sum(l_linenumber) > 1000 ORDER BY o_orderstatus; o_orderstatus | sum | avg ----------------+------+-------------------- +--------------------------------------------------------------------- F | 8559 | 3.0126715945089757 O | 8904 | 3.0040485829959514 (2 rows) @@ -364,7 +364,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -379,7 +379,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -393,7 +393,7 @@ SELECT * WHERE author_id = 1 OR author_id = 18; DEBUG: Router planner cannot handle multi-shard select queries id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -409,7 +409,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 article_id | random_value -------------+-------------- +--------------------------------------------------------------------- 1 | 9572 11 | 14817 21 | 123690 @@ -427,7 +427,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 first_author | second_word_count ---------------+------------------- +--------------------------------------------------------------------- 10 | 17277 10 | 1820 10 | 6363 @@ -443,7 +443,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 first_author | second_word_count ---------------+------------------- +--------------------------------------------------------------------- 10 | 19519 10 | 19519 10 | 19519 @@ -458,7 +458,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+----------+------------ 
+--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 (2 rows) @@ -475,7 +475,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id ----- +--------------------------------------------------------------------- 1 11 21 @@ -495,7 +495,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 avg --------------------- +--------------------------------------------------------------------- 12356.400000000000 (1 row) @@ -519,7 +519,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 max | min | sum | cnt --------+------+-------+----- +--------------------------------------------------------------------- 18185 | 2728 | 61782 | 5 (1 row) @@ -571,7 +571,7 @@ SELECT count(*) FROM ( ) x; DEBUG: Router planner cannot handle multi-shard select queries count -------- +--------------------------------------------------------------------- 50 (1 row) @@ -581,7 +581,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM articles TABLESAMPLE BERNOULLI (0) WHERE author_id = 1; @@ -589,7 +589,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM articles TABLESAMPLE SYSTEM (100) WHERE author_id = 1 ORDER BY id; @@ -597,7 +597,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -610,7 +610,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -626,7 +626,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM articles TABLESAMPLE BERNOULLI (0) WHERE author_id = 1; @@ -635,7 +635,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+-------+------------ +--------------------------------------------------------------------- (0 rows) SELECT * FROM articles TABLESAMPLE SYSTEM (100) WHERE author_id = 1 ORDER BY id; @@ -644,7 +644,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -658,7 +658,7 @@ DEBUG: Creating router plan DEBUG: Plan is router 
executable DETAIL: distribution column value: 1 id | author_id | title | word_count -----+-----------+--------------+------------ +--------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 diff --git a/src/test/regress/expected/multi_single_relation_subquery.out b/src/test/regress/expected/multi_single_relation_subquery.out index 6dec98669..ecc692c56 100644 --- a/src/test/regress/expected/multi_single_relation_subquery.out +++ b/src/test/regress/expected/multi_single_relation_subquery.out @@ -28,7 +28,7 @@ order by number_sum desc limit 10; number_sum | total | avg_count -------------+-------+-------------------- +--------------------------------------------------------------------- 10 | 136 | 2.3970588235294118 11 | 97 | 2.6082474226804124 12 | 56 | 2.8392857142857143 @@ -66,7 +66,7 @@ order by number_sum desc limit 10; number_sum | total | avg_count -------------+-------+-------------------- +--------------------------------------------------------------------- 10 | 136 | 2.3970588235294118 11 | 97 | 2.6082474226804124 12 | 56 | 2.8392857142857143 @@ -98,7 +98,7 @@ order by avg_count desc, suppkey_bin DESC limit 20; suppkey_bin | avg_count --------------+-------------------- +--------------------------------------------------------------------- 95 | 1.4851485148514851 90 | 1.4761904761904762 52 | 1.4680851063829787 @@ -151,7 +151,7 @@ group by order by total; total | total_avg_count --------+-------------------- +--------------------------------------------------------------------- 1 | 4.8000000000000000 6 | 3.0000000000000000 10 | 3.5000000000000000 @@ -175,7 +175,7 @@ from (l_orderkey/4)::int, l_suppkey ) as distributed_table; avg ------------------------- +--------------------------------------------------------------------- 1.00083402835696413678 (1 row) @@ -225,7 +225,7 @@ from group by l_partkey) as distributed_table; avg ------------------------- +--------------------------------------------------------------------- 1.02907126318497555956 (1 row) @@ -241,7 +241,7 @@ from having count(distinct l_shipdate) >= 2) as distributed_table; avg --------------------- +--------------------------------------------------------------------- 2.0335365853658537 (1 row) @@ -262,7 +262,7 @@ SELECT max(l_suppkey) FROM l_suppkey) z ) y; max ------- +--------------------------------------------------------------------- 9999 (1 row) diff --git a/src/test/regress/expected/multi_size_queries.out b/src/test/regress/expected/multi_size_queries.out index e94b29af0..d2be0c2fb 100644 --- a/src/test/regress/expected/multi_size_queries.out +++ b/src/test/regress/expected/multi_size_queries.out @@ -32,19 +32,19 @@ VACUUM (FULL) customer_copy_hash; -- Tests on distributed tables with streaming replication. 
SELECT citus_table_size('customer_copy_hash'); citus_table_size ------------------- +--------------------------------------------------------------------- 548864 (1 row) SELECT citus_relation_size('customer_copy_hash'); citus_relation_size ---------------------- +--------------------------------------------------------------------- 548864 (1 row) SELECT citus_total_relation_size('customer_copy_hash'); citus_total_relation_size ---------------------------- +--------------------------------------------------------------------- 1597440 (1 row) @@ -53,7 +53,7 @@ SELECT citus_table_size('customer_copy_hash'), citus_table_size('customer_copy_hash'), citus_table_size('supplier'); citus_table_size | citus_table_size | citus_table_size -------------------+------------------+------------------ +--------------------------------------------------------------------- 548864 | 548864 | 401408 (1 row) @@ -62,19 +62,19 @@ VACUUM (FULL) customer_copy_hash; -- Tests on distributed table with index. SELECT citus_table_size('customer_copy_hash'); citus_table_size ------------------- +--------------------------------------------------------------------- 548864 (1 row) SELECT citus_relation_size('customer_copy_hash'); citus_relation_size ---------------------- +--------------------------------------------------------------------- 548864 (1 row) SELECT citus_total_relation_size('customer_copy_hash'); citus_total_relation_size ---------------------------- +--------------------------------------------------------------------- 2646016 (1 row) @@ -82,19 +82,19 @@ SELECT citus_total_relation_size('customer_copy_hash'); VACUUM (FULL) supplier; SELECT citus_table_size('supplier'); citus_table_size ------------------- +--------------------------------------------------------------------- 376832 (1 row) SELECT citus_relation_size('supplier'); citus_relation_size ---------------------- +--------------------------------------------------------------------- 376832 (1 row) SELECT citus_total_relation_size('supplier'); citus_total_relation_size ---------------------------- +--------------------------------------------------------------------- 376832 (1 row) @@ -102,19 +102,19 @@ CREATE INDEX index_2 on supplier(s_suppkey); VACUUM (FULL) supplier; SELECT citus_table_size('supplier'); citus_table_size ------------------- +--------------------------------------------------------------------- 376832 (1 row) SELECT citus_relation_size('supplier'); citus_relation_size ---------------------- +--------------------------------------------------------------------- 376832 (1 row) SELECT citus_total_relation_size('supplier'); citus_total_relation_size ---------------------------- +--------------------------------------------------------------------- 458752 (1 row) @@ -126,14 +126,14 @@ ERROR: citus size functions cannot be called in transaction blocks which contai END; show citus.node_conninfo; citus.node_conninfo ---------------------- +--------------------------------------------------------------------- sslmode=require (1 row) ALTER SYSTEM SET citus.node_conninfo = 'sslmode=require'; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) @@ -141,19 +141,19 @@ SELECT pg_reload_conf(); -- wouldn't prevent future commands to fail SELECT citus_total_relation_size('customer_copy_hash'); citus_total_relation_size ---------------------------- +--------------------------------------------------------------------- 2646016 (1 row) SELECT pg_reload_conf(); 
pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) SELECT citus_total_relation_size('customer_copy_hash'); citus_total_relation_size ---------------------------- +--------------------------------------------------------------------- 2646016 (1 row) @@ -161,7 +161,7 @@ SELECT citus_total_relation_size('customer_copy_hash'); ALTER SYSTEM RESET citus.node_conninfo; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/multi_sql_function.out b/src/test/regress/expected/multi_sql_function.out index 47966e7b0..d7ae68408 100644 --- a/src/test/regress/expected/multi_sql_function.out +++ b/src/test/regress/expected/multi_sql_function.out @@ -38,25 +38,25 @@ SET client_min_messages TO INFO; -- now, run plain SQL functions SELECT sql_test_no_1(); sql_test_no_1 ---------------- +--------------------------------------------------------------------- 2985 (1 row) SELECT sql_test_no_2(); sql_test_no_2 ---------------- +--------------------------------------------------------------------- 12000 (1 row) SELECT sql_test_no_3(); sql_test_no_3 ---------------- +--------------------------------------------------------------------- 1956 (1 row) SELECT sql_test_no_4(); sql_test_no_4 ---------------- +--------------------------------------------------------------------- 7806 (1 row) @@ -66,13 +66,13 @@ RESET citus.task_executor_type; -- now, run plain SQL functions SELECT sql_test_no_1(); sql_test_no_1 ---------------- +--------------------------------------------------------------------- 2985 (1 row) SELECT sql_test_no_2(); sql_test_no_2 ---------------- +--------------------------------------------------------------------- 12000 (1 row) @@ -84,7 +84,7 @@ CREATE TABLE temp_table ( SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('temp_table','key','hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -94,37 +94,37 @@ $$ LANGUAGE SQL; -- execute 6 times SELECT no_parameter_insert_sql(); no_parameter_insert_sql -------------------------- +--------------------------------------------------------------------- (1 row) SELECT no_parameter_insert_sql(); no_parameter_insert_sql -------------------------- +--------------------------------------------------------------------- (1 row) SELECT no_parameter_insert_sql(); no_parameter_insert_sql -------------------------- +--------------------------------------------------------------------- (1 row) SELECT no_parameter_insert_sql(); no_parameter_insert_sql -------------------------- +--------------------------------------------------------------------- (1 row) SELECT no_parameter_insert_sql(); no_parameter_insert_sql -------------------------- +--------------------------------------------------------------------- (1 row) SELECT no_parameter_insert_sql(); no_parameter_insert_sql -------------------------- +--------------------------------------------------------------------- (1 row) @@ -134,44 +134,44 @@ $$ LANGUAGE SQL; -- execute 6 times SELECT non_partition_parameter_insert_sql(10); non_partition_parameter_insert_sql ------------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_insert_sql(20); non_partition_parameter_insert_sql ------------------------------------- 
+--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_insert_sql(30); non_partition_parameter_insert_sql ------------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_insert_sql(40); non_partition_parameter_insert_sql ------------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_insert_sql(50); non_partition_parameter_insert_sql ------------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_insert_sql(60); non_partition_parameter_insert_sql ------------------------------------- +--------------------------------------------------------------------- (1 row) -- check inserted values SELECT * FROM temp_table ORDER BY key, value; key | value ------+------- +--------------------------------------------------------------------- 0 | 10 0 | 20 0 | 30 @@ -193,44 +193,44 @@ $$ LANGUAGE SQL; -- execute 6 times SELECT non_partition_parameter_update_sql(10, 12); non_partition_parameter_update_sql ------------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_update_sql(20, 22); non_partition_parameter_update_sql ------------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_update_sql(30, 32); non_partition_parameter_update_sql ------------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_update_sql(40, 42); non_partition_parameter_update_sql ------------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_update_sql(50, 52); non_partition_parameter_update_sql ------------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_update_sql(60, 62); non_partition_parameter_update_sql ------------------------------------- +--------------------------------------------------------------------- (1 row) -- check after updates SELECT * FROM temp_table ORDER BY key, value; key | value ------+------- +--------------------------------------------------------------------- 0 | 12 0 | 22 0 | 32 @@ -252,44 +252,44 @@ $$ LANGUAGE SQL; -- execute 6 times to trigger prepared statement usage SELECT non_partition_parameter_delete_sql(12); non_partition_parameter_delete_sql ------------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_delete_sql(22); non_partition_parameter_delete_sql ------------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_delete_sql(32); non_partition_parameter_delete_sql ------------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_delete_sql(42); non_partition_parameter_delete_sql ------------------------------------- +--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_delete_sql(52); non_partition_parameter_delete_sql ------------------------------------- 
+--------------------------------------------------------------------- (1 row) SELECT non_partition_parameter_delete_sql(62); non_partition_parameter_delete_sql ------------------------------------- +--------------------------------------------------------------------- (1 row) -- check after deletes SELECT * FROM temp_table ORDER BY key, value; key | value ------+------- +--------------------------------------------------------------------- 0 | 0 | 0 | @@ -302,7 +302,7 @@ SELECT * FROM temp_table ORDER BY key, value; CREATE TABLE test_parameterized_sql(id integer, org_id integer); select create_distributed_table('test_parameterized_sql','org_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -338,7 +338,7 @@ CONTEXT: SQL function "test_parameterized_sql_function_in_subquery_where" state CREATE TABLE table_with_unique_constraint (a int UNIQUE); SELECT create_distributed_table('table_with_unique_constraint', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -355,7 +355,7 @@ CONTEXT: while executing command on localhost:xxxxx SQL function "insert_twice" statement 2 SELECT * FROM table_with_unique_constraint ORDER BY a; a ---- +--------------------------------------------------------------------- 1 2 3 diff --git a/src/test/regress/expected/multi_subquery.out b/src/test/regress/expected/multi_subquery.out index 08c35700d..39afbf5ec 100644 --- a/src/test/regress/expected/multi_subquery.out +++ b/src/test/regress/expected/multi_subquery.out @@ -18,7 +18,7 @@ FROM GROUP BY l_orderkey) AS unit_prices; avg -------------------------- +--------------------------------------------------------------------- 142158.8766934673366834 (1 row) @@ -45,7 +45,7 @@ FROM DEBUG: generating subplan 2_1 for subquery SELECT l_suppkey, count(*) AS order_count FROM public.lineitem_subquery GROUP BY l_suppkey DEBUG: Plan 2 query after replacing subqueries and CTEs: SELECT avg(order_count) AS avg FROM (SELECT intermediate_result.l_suppkey, intermediate_result.order_count FROM read_intermediate_result('2_1'::text, 'binary'::citus_copy_format) intermediate_result(l_suppkey integer, order_count bigint)) order_counts avg --------------------- +--------------------------------------------------------------------- 1.7199369356456930 (1 row) @@ -69,7 +69,7 @@ SELECT count(*) FROM ( SELECT l_orderkey FROM lineitem_subquery JOIN (SELECT random()::int r) sub ON (l_orderkey = r) WHERE r > 10 ) b; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -90,7 +90,7 @@ DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT count(*) AS cou DEBUG: Creating router plan DEBUG: Plan is router executable count -------- +--------------------------------------------------------------------- 12001 (1 row) @@ -112,7 +112,7 @@ DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT count(*) AS co DEBUG: Creating router plan DEBUG: Plan is router executable count -------- +--------------------------------------------------------------------- 14496 (1 row) @@ -124,7 +124,7 @@ SELECT count(*) FROM ) b; DEBUG: Router planner cannot handle multi-shard select queries count -------- +--------------------------------------------------------------------- 2985 (1 row) @@ -151,7 +151,7 @@ FROM WHERE lineitem_quantities.l_orderkey = o_orderkey) orders_price ON true; avg ------------------------- 
+--------------------------------------------------------------------- 17470.0940725222668915 (1 row) @@ -193,7 +193,7 @@ WHERE ORDER BY l_orderkey DESC LIMIT 10; l_orderkey ------------- +--------------------------------------------------------------------- 39 39 39 @@ -219,7 +219,7 @@ WHERE ORDER BY l_orderkey DESC LIMIT 10; l_orderkey ------------- +--------------------------------------------------------------------- 39 39 39 @@ -244,7 +244,7 @@ WHERE ORDER BY l_orderkey DESC LIMIT 10; l_orderkey ------------- +--------------------------------------------------------------------- 39 39 39 @@ -279,7 +279,7 @@ ON (l_orderkey::int8 = o_orderkey::int8) ORDER BY l_orderkey DESC LIMIT 10; l_orderkey ------------- +--------------------------------------------------------------------- 14947 14947 14946 @@ -315,7 +315,7 @@ WHERE ORDER BY l_orderkey DESC LIMIT 10; l_orderkey ------------- +--------------------------------------------------------------------- 39 39 39 @@ -340,7 +340,7 @@ WHERE ORDER BY l_orderkey DESC LIMIT 10; l_orderkey ------------- +--------------------------------------------------------------------- 39 39 39 @@ -397,7 +397,7 @@ LEFT JOIN users_reference_table t2 ON t1.user_id = trunc(t2.user_id) ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC LIMIT 5; user_id | value_1 | value_2 | value_3 ----------+---------+---------+--------- +--------------------------------------------------------------------- 6 | 5 | 2 | 0 5 | 5 | 5 | 1 4 | 5 | 4 | 1 @@ -412,7 +412,7 @@ LEFT JOIN users_reference_table t2 ON t1.user_id > t2.user_id ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC LIMIT 5; user_id | value_1 | value_2 | value_3 ----------+---------+---------+--------- +--------------------------------------------------------------------- 6 | 5 | 5 | 3 5 | 5 | 5 | 3 4 | 5 | 5 | 3 @@ -434,7 +434,7 @@ LEFT JOIN users_reference_table t2 ON t1.user_id = (CASE WHEN t2.user_id > 3 THE ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC LIMIT 5; user_id | value_1 | value_2 | value_3 ----------+---------+---------+--------- +--------------------------------------------------------------------- 6 | | | 5 | | | 4 | | | @@ -451,7 +451,7 @@ SELECT DISTINCT ON (t1.user_id) t1.user_id, t2.value_1, t2.value_2, t2.value_3 ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC LIMIT 5; user_id | value_1 | value_2 | value_3 ----------+---------+---------+--------- +--------------------------------------------------------------------- 6 | 5 | 2 | 0 5 | 5 | 5 | 1 4 | 5 | 4 | 1 @@ -479,7 +479,7 @@ FROM ( ORDER BY 1, 2, 3 LIMIT 5; user_id | value_1 | event_type ----------+---------+------------ +--------------------------------------------------------------------- 1 | 1 | 0 1 | 1 | 0 1 | 1 | 1 @@ -494,7 +494,7 @@ JOIN users_reference_table t2 ON t1.user_id = trunc(t2.user_id) ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC LIMIT 5; user_id | value_1 | value_2 | value_3 ----------+---------+---------+--------- +--------------------------------------------------------------------- 6 | 5 | 2 | 0 5 | 5 | 5 | 1 4 | 5 | 4 | 1 @@ -514,7 +514,7 @@ WHERE ORDER BY l_orderkey DESC LIMIT 10; l_orderkey ------------- +--------------------------------------------------------------------- 39 38 37 @@ -537,7 +537,7 @@ ON (l_orderkey = o_orderkey) WHERE (o_orderkey < l_quantity); count -------- +--------------------------------------------------------------------- 13 (1 row) @@ -553,7 +553,7 @@ WHERE ORDER BY l_quantity DESC LIMIT 10; l_quantity ------------- +--------------------------------------------------------------------- 50.00 49.00 46.00 @@ -578,7 +578,7 @@ WHERE ORDER 
BY l_quantity DESC LIMIT 10; l_quantity ------------- +--------------------------------------------------------------------- 50.00 49.00 46.00 @@ -601,7 +601,7 @@ ON (l_orderkey = o_orderkey) WHERE (o_orderkey < l_quantity); count -------- +--------------------------------------------------------------------- 25 (1 row) @@ -617,7 +617,7 @@ FROM ( l_orderkey ) z; count -------- +--------------------------------------------------------------------- 7 (1 row) @@ -669,7 +669,7 @@ ORDER BY o_custkey ASC LIMIT 10; o_custkey | total_order_count ------------+------------------- +--------------------------------------------------------------------- 1462 | 9 619 | 8 643 | 8 @@ -699,7 +699,7 @@ WHERE unit_price > 1000 AND unit_price < 10000; avg ------------------------ +--------------------------------------------------------------------- 4968.4946466804019323 (1 row) @@ -718,7 +718,7 @@ SELECT count(*) FROM WHERE l_orderkey = 1 ) b; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -731,7 +731,7 @@ SELECT count(*) FROM WHERE l_orderkey = 1 ) b; count -------- +--------------------------------------------------------------------- 6 (1 row) @@ -749,7 +749,7 @@ SELECT max(l_orderkey) FROM ) z ) y; max -------- +--------------------------------------------------------------------- 14947 (1 row) @@ -770,7 +770,7 @@ FROM GROUP BY user_id) AS bar WHERE foo.user_id = bar.user_id ) AS baz; user_id | counter | user_id | counter ----------+---------+---------+--------- +--------------------------------------------------------------------- (0 rows) -- Subqueries filter by different users, one of which overlaps @@ -793,7 +793,7 @@ FROM ORDER BY 1,2 LIMIT 5; user_id | counter | user_id | counter ----------+---------+---------+--------- +--------------------------------------------------------------------- 2 | 57 | 2 | 57 (1 row) @@ -827,7 +827,7 @@ CREATE TABLE subquery_pruning_varchar_test_table SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('subquery_pruning_varchar_test_table', 'a', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -839,7 +839,7 @@ SELECT * FROM AS foo; DEBUG: Router planner not enabled. count -------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM @@ -847,7 +847,7 @@ SELECT * FROM AS foo; DEBUG: Router planner not enabled. 
count -------- +--------------------------------------------------------------------- (0 rows) SET client_min_messages TO NOTICE; @@ -880,7 +880,7 @@ SELECT * FROM a_inner) AS foo; a ---- +--------------------------------------------------------------------- (0 rows) DROP TABLE subquery_pruning_varchar_test_table; @@ -911,7 +911,7 @@ FROM tenant_id, user_id) AS subquery; event_average --------------------- +--------------------------------------------------------------------- 3.6666666666666667 (1 row) @@ -980,7 +980,7 @@ GROUP BY ORDER BY event_average DESC; event_average | hasdone ---------------------+--------------------- +--------------------------------------------------------------------- 4.0000000000000000 | Has not done paying 2.5000000000000000 | Has done paying (2 rows) @@ -1056,7 +1056,7 @@ GROUP BY ORDER BY count_pay; event_average | count_pay ---------------------+----------- +--------------------------------------------------------------------- 3.0000000000000000 | 0 (1 row) @@ -1113,7 +1113,7 @@ ORDER BY LIMIT 10; tenant_id | user_id | user_lastseen | event_array ------------+---------+---------------+---------------------------- +--------------------------------------------------------------------- 1 | 1003 | 1472807315 | {click,click,click,submit} 1 | 1002 | 1472807215 | {click,click,submit,pay} 1 | 1001 | 1472807115 | {click,submit,pay} diff --git a/src/test/regress/expected/multi_subquery_behavioral_analytics.out b/src/test/regress/expected/multi_subquery_behavioral_analytics.out index 4b9fdf483..d10e41eef 100644 --- a/src/test/regress/expected/multi_subquery_behavioral_analytics.out +++ b/src/test/regress/expected/multi_subquery_behavioral_analytics.out @@ -9,9 +9,9 @@ -- by non-router code-paths. Thus, this flag should NOT be used in production. Otherwise, the actual -- router queries would fail. 
SET citus.enable_router_execution TO FALSE; ------------------------------------- +--------------------------------------------------------------------- -- Vanilla funnel query ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table @@ -28,16 +28,16 @@ FROM ( ) q ORDER BY 2 DESC, 1; user_id | array_length ----------+-------------- +--------------------------------------------------------------------- 3 | 187 2 | 180 1 | 28 (3 rows) ------------------------------------- +--------------------------------------------------------------------- -- Funnel grouped by whether or not a user has done an event -- This has multiple subqueries joinin at the top level ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event), hasdone_event FROM ( SELECT @@ -76,7 +76,7 @@ FROM ( ) t GROUP BY user_id, hasdone_event ORDER BY user_id; user_id | sum | length | hasdone_event ----------+-----+--------+---------------- +--------------------------------------------------------------------- 1 | 12 | 14 | Has done event 2 | 20 | 14 | Has done event 3 | 20 | 14 | Has done event @@ -123,7 +123,7 @@ FROM ( ) t GROUP BY user_id, hasdone_event ORDER BY user_id) u; count -------- +--------------------------------------------------------------------- 3 (1 row) @@ -158,7 +158,7 @@ FROM ( ) t GROUP BY user_id, hasdone_event ORDER BY user_id; user_id | sum | length | hasdone_event ----------+-----+--------+---------------- +--------------------------------------------------------------------- 1 | 12 | 14 | Has done event 2 | 20 | 14 | Has done event 3 | 20 | 14 | Has done event @@ -197,13 +197,13 @@ FROM ( ) t GROUP BY user_id, hasdone_event ORDER BY user_id) u; count -------- +--------------------------------------------------------------------- 3 (1 row) ------------------------------------- +--------------------------------------------------------------------- -- Funnel, grouped by the number of times a user has done an event ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, avg(array_length(events_table, 1)) AS event_average, @@ -269,7 +269,7 @@ GROUP BY ORDER BY event_average DESC, count_pay DESC, user_id DESC; user_id | event_average | count_pay ----------+---------------------+----------- +--------------------------------------------------------------------- 3 | 19.0000000000000000 | 7 2 | 12.0000000000000000 | 9 1 | 7.0000000000000000 | 5 @@ -342,7 +342,7 @@ HAVING ORDER BY event_average DESC, count_pay DESC, user_id DESC; user_id | event_average | count_pay ----------+---------------------+----------- +--------------------------------------------------------------------- 3 | 19.0000000000000000 | 3 2 | 12.0000000000000000 | 4 1 | 7.0000000000000000 | 3 @@ -407,7 +407,7 @@ GROUP BY ORDER BY event_average DESC, count_pay DESC, user_id DESC; user_id | event_average | count_pay ----------+---------------------+----------- +--------------------------------------------------------------------- 3 | 12.0000000000000000 | 4 2 | 9.0000000000000000 | 5 1 | 5.0000000000000000 | 2 @@ -468,17 +468,17 @@ HAVING ORDER BY event_average DESC, count_pay DESC, user_id DESC; user_id | event_average | count_pay ----------+---------------------+----------- 
+--------------------------------------------------------------------- 3 | 12.0000000000000000 | 4 2 | 9.0000000000000000 | 5 1 | 5.0000000000000000 | 2 (3 rows) ------------------------------------- +--------------------------------------------------------------------- -- Most recently seen users_table events_table ------------------------------------- +--------------------------------------------------------------------- -- Note that we don't use ORDER BY/LIMIT yet ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, user_lastseen, @@ -505,14 +505,14 @@ FROM ( ) AS shard_union ORDER BY user_lastseen DESC, user_id; user_id | user_lastseen | array_length ----------+---------------------------------+-------------- +--------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) ------------------------------------- +--------------------------------------------------------------------- -- Count the number of distinct users_table who are in segment X and Y and Z ------------------------------------- +--------------------------------------------------------------------- SELECT user_id FROM users_table WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 1 AND value_1 <= 2) @@ -524,7 +524,7 @@ ORDER BY user_id DESC LIMIT 5; user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -532,9 +532,9 @@ ORDER BY 1 (5 rows) ------------------------------------- +--------------------------------------------------------------------- -- Find customers who have done X, and satisfy other customer specific criteria ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, value_2 FROM users_table WHERE value_1 > 1 AND value_1 < 3 AND value_2 >= 1 @@ -542,7 +542,7 @@ SELECT user_id, value_2 FROM users_table WHERE ORDER BY 2 DESC, 1 DESC LIMIT 5; user_id | value_2 ----------+--------- +--------------------------------------------------------------------- 6 | 4 6 | 4 2 | 4 @@ -550,9 +550,9 @@ LIMIT 5; 4 | 3 (5 rows) ------------------------------------- +--------------------------------------------------------------------- -- Customers who haven’t done X, and satisfy other customer specific criteria ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, value_2 FROM users_table WHERE value_1 = 2 AND value_2 >= 1 @@ -560,15 +560,15 @@ SELECT user_id, value_2 FROM users_table WHERE ORDER BY 1 DESC, 2 DESC LIMIT 3; user_id | value_2 ----------+--------- +--------------------------------------------------------------------- 5 | 5 5 | 5 5 | 2 (3 rows) ------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X and Y, and satisfy other customer specific criteria ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, sum(value_2) as cnt FROM users_table WHERE value_1 > 1 AND value_2 >= 1 @@ -579,7 +579,7 @@ GROUP BY ORDER BY cnt DESC, user_id DESC LIMIT 5; user_id | cnt ----------+----- +--------------------------------------------------------------------- 4 | 43 2 | 37 3 | 34 @@ -587,9 +587,9 @@ LIMIT 5; 6 | 15 (5 rows) ------------------------------------- 
+--------------------------------------------------------------------- -- Customers who have done X and haven’t done Y, and satisfy other customer specific criteria ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, value_2 FROM users_table WHERE value_2 >= 1 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 1 AND event_type <= 3 AND value_3 > 1 AND user_id = users_table.user_id) @@ -597,16 +597,16 @@ SELECT user_id, value_2 FROM users_table WHERE ORDER BY 2 DESC, 1 DESC LIMIT 4; user_id | value_2 ----------+--------- +--------------------------------------------------------------------- 5 | 5 5 | 5 5 | 5 5 | 4 (4 rows) ------------------------------------- +--------------------------------------------------------------------- -- Customers who have done X more than 2 times, and satisfy other customer specific criteria ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, avg(value_2) FROM users_table @@ -627,14 +627,14 @@ ORDER BY 1 DESC, 2 DESC LIMIT 5; user_id | avg ----------+-------------------- +--------------------------------------------------------------------- 4 | 2.0000000000000000 3 | 2.0000000000000000 (2 rows) ------------------------------------- +--------------------------------------------------------------------- -- Find me all users_table who logged in more than once ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, value_1 from ( SELECT @@ -649,7 +649,7 @@ SELECT user_id, value_1 from ORDER BY user_id ASC, value_1 ASC; user_id | value_1 ----------+--------- +--------------------------------------------------------------------- 2 | 0 2 | 2 2 | 3 @@ -670,7 +670,7 @@ SELECT user_id, value_1 from ORDER BY user_id ASC, value_1 ASC; user_id | value_1 ----------+--------- +--------------------------------------------------------------------- 2 | 0 2 | 2 2 | 3 @@ -681,9 +681,9 @@ ORDER BY 3 | 4 (8 rows) ------------------------------------- +--------------------------------------------------------------------- -- Find me all users_table who has done some event and has filters ------------------------------------- +--------------------------------------------------------------------- SELECT user_id FROM events_table WHERE @@ -698,7 +698,7 @@ WHERE ) ORDER BY 1; user_id ---------- +--------------------------------------------------------------------- 1 2 2 @@ -706,9 +706,9 @@ ORDER BY 1; 5 (5 rows) ------------------------------------- +--------------------------------------------------------------------- -- Which events_table did people who has done some specific events_table ------------------------------------- +--------------------------------------------------------------------- SELECT user_id, event_type FROM events_table WHERE @@ -718,15 +718,15 @@ GROUP BY ORDER BY 2 DESC, 1 LIMIT 3; user_id | event_type ----------+------------ +--------------------------------------------------------------------- 1 | 6 2 | 5 3 | 5 (3 rows) ------------------------------------- +--------------------------------------------------------------------- -- Find me all the users_table who has done some event more than three times ------------------------------------- +--------------------------------------------------------------------- SELECT user_id FROM ( SELECT @@ -743,7 +743,7 @@ SELECT user_id FROM ORDER BY user_id; user_id ---------- 
+--------------------------------------------------------------------- 1 2 3 @@ -751,9 +751,9 @@ ORDER BY 6 (5 rows) ------------------------------------- +--------------------------------------------------------------------- -- Find my assets that have the highest probability and fetch their metadata ------------------------------------- +--------------------------------------------------------------------- CREATE TEMP TABLE assets AS SELECT users_table.user_id, users_table.value_1, prob @@ -773,7 +773,7 @@ FROM -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM assets; count | count | avg --------+-------+-------------------- +--------------------------------------------------------------------- 732 | 6 | 3.3934426229508197 (1 row) @@ -798,7 +798,7 @@ SELECT count(*) FROM DEBUG: generating subplan 23_1 for subquery SELECT user_id FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.=) 4) DEBUG: Plan 23 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.user_id FROM public.users_table WHERE (((users_table.value_1 OPERATOR(pg_catalog.=) 1) OR (users_table.value_1 OPERATOR(pg_catalog.=) 3)) AND (NOT (users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('23_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) GROUP BY users_table.user_id HAVING (count(DISTINCT users_table.value_1) OPERATOR(pg_catalog.=) 2)) foo count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -832,7 +832,7 @@ SELECT subquery_count FROM a.user_id ) AS inner_subquery; subquery_count ----------------- +--------------------------------------------------------------------- 1 (1 row) @@ -865,7 +865,7 @@ WHERE GROUP BY a.user_id; subquery_count ----------------- +--------------------------------------------------------------------- 1 (1 row) @@ -941,7 +941,7 @@ GROUP BY e1.user_id LIMIT 1; user_id | viewed_homepage | use_demo | entered_credit_card | submit_card_info | see_bought_screen ----------+-----------------+----------+---------------------+------------------+------------------- +--------------------------------------------------------------------- 1 | 1 | 1 | 1 | 1 | 1 (1 row) @@ -1006,7 +1006,7 @@ GROUP BY e1.user_id ORDER BY 6 DESC NULLS LAST, 5 DESC NULLS LAST, 4 DESC NULLS LAST, 3 DESC NULLS LAST, 2 DESC NULLS LAST, 1 LIMIT 15; user_id | viewed_homepage | use_demo | entered_credit_card | submit_card_info | see_bought_screen ----------+-----------------+----------+---------------------+------------------+------------------- +--------------------------------------------------------------------- 2 | 1080 | 1080 | 1080 | 1080 | 1080 3 | 540 | 540 | 540 | 540 | 540 4 | 252 | 252 | 252 | 252 | 252 @@ -1077,7 +1077,7 @@ HAVING sum(submit_card_info) > 0 ORDER BY 6 DESC NULLS LAST, 5 DESC NULLS LAST, 4 DESC NULLS LAST, 3 DESC NULLS LAST, 2 DESC NULLS LAST, 1 LIMIT 15; user_id | viewed_homepage | use_demo | entered_credit_card | submit_card_info | see_bought_screen ----------+-----------------+----------+---------------------+------------------+------------------- +--------------------------------------------------------------------- 2 | 1080 | 1080 | 1080 | 1080 | 1080 3 | 540 | 540 | 540 | 540 | 540 4 | 252 | 252 | 252 | 252 | 252 @@ -1117,7 +1117,7 @@ ORDER BY avg(b.value_3), 2, 1 LIMIT 5; user_id | subquery_avg ----------+-------------------- 
+--------------------------------------------------------------------- 3 | 3.6000000000000000 5 | 2.1666666666666667 4 | 2.6666666666666667 @@ -1154,7 +1154,7 @@ ORDER BY avg(b.value_3), 2, 1 LIMIT 5; user_id | subquery_avg ----------+-------------------- +--------------------------------------------------------------------- 3 | 3.6000000000000000 5 | 2.1666666666666667 4 | 2.6666666666666667 @@ -1193,7 +1193,7 @@ ORDER BY avg(b.value_3) DESC, 2, 1 LIMIT 5; user_id | subquery_avg | avg ----------+--------------------+------------------ +--------------------------------------------------------------------- 1 | 2.3333333333333333 | 3.33333333333333 4 | 2.6666666666666667 | 2.55555555555556 5 | 2.1666666666666667 | 2.16666666666667 @@ -1227,7 +1227,7 @@ ORDER BY 4 DESC, 1 DESC, 2 ASC, 3 ASC LIMIT 10; user_id | value_2 | value_3 | counts ----------+---------+---------+-------- +--------------------------------------------------------------------- 5 | 3 | 4 | 160 2 | 3 | 5 | 156 3 | 2 | 5 | 108 @@ -1254,7 +1254,7 @@ ORDER BY users_count desc, avg_type DESC LIMIT 5; avg_type | users_count ---------------------+------------- +--------------------------------------------------------------------- 2.3750000000000000 | 24 2.5714285714285714 | 21 2.5294117647058824 | 17 @@ -1283,7 +1283,7 @@ ORDER BY users_count.ct desc, event_type DESC LIMIT 5; event_type | ct -------------+---- +--------------------------------------------------------------------- 5 | 26 4 | 26 3 | 26 @@ -1315,7 +1315,7 @@ ORDER BY total_count DESC, count_1 DESC, 1 DESC LIMIT 10; user_id | count_1 | total_count ----------+---------+------------- +--------------------------------------------------------------------- 2 | 18 | 7 3 | 17 | 7 2 | 18 | 6 @@ -1357,7 +1357,7 @@ GROUP BY a.user_id ORDER BY avg(b.value_3), 2, 1 LIMIT 5; user_id | subquery_avg ----------+------------------------ +--------------------------------------------------------------------- 5 | 0.00000000000000000000 3 | 2.0000000000000000 4 | 1.00000000000000000000 @@ -1425,7 +1425,7 @@ ORDER BY avg(b.value_3), 2, 1 LIMIT 5; user_id | subquery_avg ----------+-------------------- +--------------------------------------------------------------------- 3 | 3.3333333333333333 5 | 2.2000000000000000 4 | 3.2500000000000000 @@ -1463,7 +1463,7 @@ ORDER BY prob DESC, value_2 DESC, user_id DESC, event_type DESC LIMIT 10; user_id | event_type ----------+------------ +--------------------------------------------------------------------- 3 | 5 3 | 4 3 | 4 @@ -1501,7 +1501,7 @@ ORDER BY prob DESC, event_type DESC, user_id DESC LIMIT 10; user_id | event_type ----------+------------ +--------------------------------------------------------------------- 3 | 5 2 | 5 2 | 5 @@ -1547,7 +1547,7 @@ ORDER BY prob DESC, event_type DESC, user_id DESC LIMIT 10; user_id | event_type ----------+------------ +--------------------------------------------------------------------- 3 | 5 2 | 5 2 | 5 @@ -1572,7 +1572,7 @@ SELECT * FROM run_command_on_workers('CREATE OR REPLACE FUNCTION array_index(AN $$ LANGUAGE sql') ORDER BY 1,2; nodename | nodeport | success | result ------------+----------+---------+----------------- +--------------------------------------------------------------------- localhost | 57637 | t | CREATE FUNCTION localhost | 57638 | t | CREATE FUNCTION (2 rows) @@ -1646,7 +1646,7 @@ ORDER BY value_3 ASC, user_id DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC, event_type_e DESC LIMIT 10; user_id_e | event_type_e | value_2 | value_3 | user_id 
------------+--------------+---------+---------+--------- +--------------------------------------------------------------------- 5 | 5 | 2 | 0 | 5 5 | 5 | 2 | 0 | 5 5 | 5 | 2 | 0 | 5 @@ -1729,7 +1729,7 @@ ORDER BY value_3 ASC, user_id DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC, event_type_e DESC LIMIT 10; user_id_e | event_type_e | value_2 | value_3 | user_id ------------+--------------+---------+---------+--------- +--------------------------------------------------------------------- 5 | 5 | 2 | 0 | 5 5 | 5 | 2 | 0 | 5 5 | 5 | 2 | 0 | 5 @@ -1746,7 +1746,7 @@ LIMIT 10; SELECT * FROM run_command_on_workers('DROP FUNCTION array_index(ANYARRAY, ANYELEMENT)') ORDER BY 1,2; nodename | nodeport | success | result ------------+----------+---------+--------------- +--------------------------------------------------------------------- localhost | 57637 | t | DROP FUNCTION localhost | 57638 | t | DROP FUNCTION (2 rows) @@ -1772,7 +1772,7 @@ FROM ( WHERE b.user_id IS NULL GROUP BY a.user_id; subquery_count ----------------- +--------------------------------------------------------------------- 1 1 1 @@ -1844,7 +1844,7 @@ ORDER BY 2 DESC, 1 LIMIT 1+1 OFFSET 1::smallint; DEBUG: push down of limit count: 3 user_id | array_length ----------+-------------- +--------------------------------------------------------------------- 4 | 184 2 | 180 (2 rows) @@ -1868,7 +1868,7 @@ ORDER BY 2 DESC, 1 LIMIT '3' OFFSET 2+1; DEBUG: push down of limit count: 6 user_id | array_length ----------+-------------- +--------------------------------------------------------------------- 5 | 156 6 | 40 1 | 28 @@ -1898,7 +1898,7 @@ ORDER BY 2 DESC, 1 LIMIT volatile_func_test() + (ROW(1,2,NULL) < ROW(1,3,0))::int OFFSET volatile_func_test() + volatile_func_test(); DEBUG: push down of limit count: 4 user_id | array_length ----------+-------------- +--------------------------------------------------------------------- 3 | 340 5 | 312 (2 rows) @@ -1927,7 +1927,7 @@ LIMIT (5 > 4)::int OFFSET END; DEBUG: push down of limit count: 3 user_id | array_length ----------+-------------- +--------------------------------------------------------------------- 2 | 180 (1 row) @@ -1950,7 +1950,7 @@ FROM ( EXECUTE parametrized_limit(1,1); DEBUG: push down of limit count: 2 user_id | array_length ----------+-------------- +--------------------------------------------------------------------- 4 | 184 (1 row) @@ -1972,7 +1972,7 @@ FROM ( EXECUTE parametrized_offset(1); DEBUG: push down of limit count: 2 user_id | array_length ----------+-------------- +--------------------------------------------------------------------- 4 | 184 (1 row) @@ -1993,7 +1993,7 @@ CREATE FUNCTION test_join_function_2(integer, integer) RETURNS bool $f$); run_command_on_workers ---------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"CREATE FUNCTION") (localhost,57638,t,"CREATE FUNCTION") (2 rows) @@ -2031,7 +2031,7 @@ FROM WHERE users_table.value_1 < 3 AND test_join_function_2(users_table.user_id, temp.user_id); user_id | value_1 | prob ----------+---------+------ +--------------------------------------------------------------------- (0 rows) -- we do support the following since there is already an equality on the partition @@ -2055,7 +2055,7 @@ FROM ORDER BY 2 DESC, 1 DESC LIMIT 10; user_id | value_1 | prob ----------+---------+------------------------ +--------------------------------------------------------------------- 6 | 2 | 0.50000000000000000000 6 | 2 | 0.50000000000000000000 6 | 2 
| 0.50000000000000000000 @@ -2083,7 +2083,7 @@ FROM events_table.value_2 IN (0, 4) ) as foo; count -------- +--------------------------------------------------------------------- 180 (1 row) @@ -2124,7 +2124,7 @@ FROM WHERE foo.event_type > bar.event_type AND foo.user_id = bar.user_id; count -------- +--------------------------------------------------------------------- 11971 (1 row) @@ -2169,7 +2169,7 @@ FROM ORDER BY 1 LIMIT 5; user_id ---------- +--------------------------------------------------------------------- 1 2 3 @@ -2195,7 +2195,7 @@ FROM ORDER BY 1, 2 LIMIT 5; user_id | value_1 | prob ----------+---------+------------------------ +--------------------------------------------------------------------- 1 | 1 | 0.50000000000000000000 2 | 0 | 0.50000000000000000000 3 | 0 | 0.50000000000000000000 @@ -2220,7 +2220,7 @@ FROM ORDER BY 1,2 LIMIT 5; user_id | value_1 | prob ----------+---------+------------------------ +--------------------------------------------------------------------- 1 | 1 | 0.50000000000000000000 2 | 0 | 0.50000000000000000000 3 | 0 | 0.50000000000000000000 @@ -2245,7 +2245,7 @@ FROM 1 ) AS temp; count | avg --------+----- +--------------------------------------------------------------------- 6 | (1 row) @@ -2276,7 +2276,7 @@ LATERAL ( ORDER BY user_id, value_2, cnt LIMIT 1; user_id | value_2 | cnt ----------+---------+----- +--------------------------------------------------------------------- 2 | 0 | 1 (1 row) @@ -2287,7 +2287,7 @@ SELECT run_command_on_workers($f$ $f$); run_command_on_workers -------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"DROP FUNCTION") (localhost,57638,t,"DROP FUNCTION") (2 rows) diff --git a/src/test/regress/expected/multi_subquery_complex_queries.out b/src/test/regress/expected/multi_subquery_complex_queries.out index ce61f1df1..4570d533a 100644 --- a/src/test/regress/expected/multi_subquery_complex_queries.out +++ b/src/test/regress/expected/multi_subquery_complex_queries.out @@ -69,7 +69,7 @@ GROUP BY ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 449 1 | 433 2 | 75 @@ -134,7 +134,7 @@ GROUP BY ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 449 1 | 433 2 | 75 @@ -200,7 +200,7 @@ GROUP BY ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 449 1 | 234 2 | 75 @@ -265,7 +265,7 @@ GROUP BY ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 449 1 | 369 2 | 75 @@ -344,7 +344,7 @@ GROUP BY ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 449 2 | 433 3 | 75 @@ -430,7 +430,7 @@ DEBUG: generating subplan 16_4 for subquery SELECT "time", event, user_id FROM DEBUG: generating subplan 16_5 for subquery SELECT intermediate_result."time", intermediate_result.event, intermediate_result.user_id FROM read_intermediate_result('16_2'::text, 'binary'::citus_copy_format) intermediate_result("time" timestamp without time zone, event integer, user_id integer) UNION SELECT events_subquery_2.max, events_subquery_2.event, events_subquery_2.user_id FROM (SELECT events_subquery_5.max, events_subquery_5.event, events_subquery_5.user_id FROM 
(SELECT intermediate_result.max, intermediate_result.event, intermediate_result.user_id FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(max timestamp without time zone, event integer, user_id integer)) events_subquery_5) events_subquery_2 UNION SELECT intermediate_result."time", intermediate_result.event, intermediate_result.user_id FROM read_intermediate_result('16_3'::text, 'binary'::citus_copy_format) intermediate_result("time" timestamp without time zone, event integer, user_id integer) UNION SELECT intermediate_result."time", intermediate_result.event, intermediate_result.user_id FROM read_intermediate_result('16_4'::text, 'binary'::citus_copy_format) intermediate_result("time" timestamp without time zone, event integer, user_id integer) DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT event_types AS types, count(*) AS sumofeventtype FROM (SELECT q.user_id, q."time", q.event_types, t.user_id, random() AS random FROM ((SELECT t_1.user_id, t_1."time", unnest(t_1.collected_events) AS event_types FROM (SELECT t1.user_id, min(t1."time") AS "time", array_agg(t1.event ORDER BY t1."time", t1.event DESC) AS collected_events FROM (SELECT intermediate_result."time", intermediate_result.event, intermediate_result.user_id FROM read_intermediate_result('16_5'::text, 'binary'::citus_copy_format) intermediate_result("time" timestamp without time zone, event integer, user_id integer)) t1 GROUP BY t1.user_id) t_1) q JOIN (SELECT users.user_id FROM public.users_table users WHERE ((users.value_1 OPERATOR(pg_catalog.>) 0) AND (users.value_1 OPERATOR(pg_catalog.<) 4))) t ON ((t.user_id OPERATOR(pg_catalog.=) q.user_id)))) final_query(user_id, "time", event_types, user_id_1, random) GROUP BY event_types ORDER BY event_types types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 449 2 | 433 3 | 75 @@ -503,7 +503,7 @@ ORDER BY DEBUG: generating subplan 22_1 for subquery SELECT user_id, "time", unnest(collected_events) AS event_types FROM (SELECT t1.user_id, min(t1."time") AS "time", array_agg(t1.event ORDER BY t1."time", t1.event DESC) AS collected_events FROM (SELECT events_subquery_1.user_id, events_subquery_1."time", events_subquery_1.event FROM (SELECT events.user_id, events."time", 0 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2]))) events_subquery_1 UNION SELECT events_subquery_2.user_id, events_subquery_2."time", events_subquery_2.event FROM (SELECT events.user_id, events."time", 1 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[3, 4]))) events_subquery_2 UNION SELECT events_subquery_3.user_id, events_subquery_3."time", events_subquery_3.event FROM (SELECT events.user_id, events."time", 2 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) events_subquery_3 UNION SELECT events_subquery_4.user_id, events_subquery_4."time", events_subquery_4.event FROM (SELECT events.user_id, events."time", 3 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[4, 5]))) events_subquery_4) t1 GROUP BY t1.user_id) t DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT event_types AS types, count(*) AS sumofeventtype FROM (SELECT q.user_id, q."time", q.event_types, t.user_id, random() AS random FROM ((SELECT intermediate_result.user_id, intermediate_result."time", 
intermediate_result.event_types FROM read_intermediate_result('22_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_types integer)) q JOIN (SELECT users.user_id FROM public.users_table users WHERE ((users.value_1 OPERATOR(pg_catalog.>) 0) AND (users.value_1 OPERATOR(pg_catalog.<) 4))) t ON ((t.user_id OPERATOR(pg_catalog.<>) q.user_id)))) final_query(user_id, "time", event_types, user_id_1, random) GROUP BY event_types ORDER BY event_types types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 2088 1 | 2163 2 | 397 @@ -653,7 +653,7 @@ INNER JOIN GROUP BY types ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 449 1 | 434 2 | 433 @@ -717,7 +717,7 @@ INNER JOIN GROUP BY types ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 449 1 | 433 2 | 75 @@ -780,7 +780,7 @@ ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 449 1 | 433 2 | 75 @@ -839,7 +839,7 @@ INNER JOIN GROUP BY types ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 449 1 | 433 2 | 62 @@ -903,7 +903,7 @@ ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 449 1 | 433 2 | 75 @@ -983,7 +983,7 @@ GROUP BY user_id ORDER BY cnt DESC, user_id DESC LIMIT 10; user_id | cnt ----------+----- +--------------------------------------------------------------------- 3 | 275 6 | 72 (2 rows) @@ -1065,7 +1065,7 @@ LIMIT 10; DEBUG: generating subplan 42_1 for subquery SELECT DISTINCT user_id FROM public.events_table events WHERE (event_type OPERATOR(pg_catalog.=) ANY (ARRAY[0, 6])) GROUP BY user_id DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT user_id, count(*) AS cnt FROM (SELECT first_query.user_id, random() AS random FROM ((SELECT t.user_id, t."time", unnest(t.collected_events) AS event_types FROM (SELECT t1.user_id, min(t1."time") AS "time", array_agg(t1.event ORDER BY t1."time", t1.event DESC) AS collected_events FROM (SELECT events_subquery_1.user_id, events_subquery_1."time", events_subquery_1.event FROM (SELECT events.user_id, events."time", 0 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2]))) events_subquery_1 UNION ALL SELECT events_subquery_2.user_id, events_subquery_2."time", events_subquery_2.event FROM (SELECT events.user_id, events."time", 1 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[3, 4]))) events_subquery_2 UNION ALL SELECT events_subquery_3.user_id, events_subquery_3."time", events_subquery_3.event FROM (SELECT events.user_id, events."time", 2 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) events_subquery_3 UNION ALL SELECT events_subquery_4.user_id, events_subquery_4."time", events_subquery_4.event FROM (SELECT events.user_id, events."time", 3 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6]))) 
events_subquery_4) t1 GROUP BY t1.user_id) t) first_query JOIN (SELECT t.user_id FROM ((SELECT users.user_id FROM public.users_table users WHERE ((users.value_1 OPERATOR(pg_catalog.>) 0) AND (users.value_1 OPERATOR(pg_catalog.<) 4))) t LEFT JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) t2 ON ((t2.user_id OPERATOR(pg_catalog.>) t.user_id))) WHERE (t2.user_id IS NULL)) second_query ON ((first_query.user_id OPERATOR(pg_catalog.=) second_query.user_id)))) final_query GROUP BY user_id ORDER BY (count(*)) DESC, user_id DESC LIMIT 10 user_id | cnt ----------+----- +--------------------------------------------------------------------- 5 | 324 6 | 72 (2 rows) @@ -1146,7 +1146,7 @@ GROUP BY user_id ORDER BY cnt DESC, user_id DESC LIMIT 10; user_id | cnt ----------+----- +--------------------------------------------------------------------- 3 | 275 6 | 72 (2 rows) @@ -1192,7 +1192,7 @@ order BY user_id LIMIT 50; user_id | lastseen ----------+--------------------------------- +--------------------------------------------------------------------- 2 | Thu Nov 23 17:26:14.563216 2017 3 | Thu Nov 23 18:08:26.550729 2017 (2 rows) @@ -1231,7 +1231,7 @@ ORDER BY user_id limit 50; user_id | lastseen ----------+--------------------------------- +--------------------------------------------------------------------- 2 | Thu Nov 23 17:26:14.563216 2017 3 | Thu Nov 23 18:08:26.550729 2017 (2 rows) @@ -1372,7 +1372,7 @@ ORDER BY user_id DESC, lastseen DESC LIMIT 10; user_id | lastseen ----------+--------------------------------- +--------------------------------------------------------------------- 2 | Thu Nov 23 17:26:14.563216 2017 2 | Thu Nov 23 17:26:14.563216 2017 2 | Thu Nov 23 17:26:14.563216 2017 @@ -1436,7 +1436,7 @@ GROUP BY 1 ORDER BY 2, 1 DESC LIMIT 10; user_id | max | count ----------+---------------------------------+------- +--------------------------------------------------------------------- 2 | Thu Nov 23 17:26:14.563216 2017 | 10 (1 row) @@ -1717,7 +1717,7 @@ GROUP BY ORDER BY generated_group_field DESC, value DESC; value | generated_group_field --------+----------------------- +--------------------------------------------------------------------- 1 | 5 2 | 2 2 | 1 @@ -1771,7 +1771,7 @@ ORDER BY DEBUG: generating subplan 64_1 for subquery SELECT user_id, value_2 FROM public.users_table users WHERE ((user_id OPERATOR(pg_catalog.>) 1) AND (user_id OPERATOR(pg_catalog.<) 4) AND (value_3 OPERATOR(pg_catalog.>) (3)::double precision)) DEBUG: Plan 64 query after replacing subqueries and CTEs: SELECT count(*) AS value, generated_group_field FROM (SELECT DISTINCT "pushedDownQuery_1".real_user_id, "pushedDownQuery_1".generated_group_field FROM (SELECT "eventQuery".real_user_id, "eventQuery"."time", random() AS random, "eventQuery".value_2 AS generated_group_field FROM (SELECT temp_data_queries."time", temp_data_queries.user_id, temp_data_queries.value_2, user_filters_1.real_user_id FROM ((SELECT events."time", events.user_id, events.value_2 FROM public.events_table events WHERE ((events.user_id OPERATOR(pg_catalog.>) 1) AND (events.user_id OPERATOR(pg_catalog.<) 4) AND (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[4, 5])))) temp_data_queries JOIN (SELECT user_where_1_1.real_user_id FROM ((SELECT users.user_id AS real_user_id FROM public.users_table users WHERE ((users.user_id OPERATOR(pg_catalog.>) 1) AND (users.user_id OPERATOR(pg_catalog.<) 4) AND (users.value_2 OPERATOR(pg_catalog.>) 3))) 
user_where_1_1 JOIN (SELECT intermediate_result.user_id, intermediate_result.value_2 FROM read_intermediate_result('64_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) user_where_1_join_1 ON ((user_where_1_1.real_user_id OPERATOR(pg_catalog.=) user_where_1_join_1.value_2)))) user_filters_1 ON ((temp_data_queries.user_id OPERATOR(pg_catalog.=) user_filters_1.real_user_id)))) "eventQuery") "pushedDownQuery_1") "pushedDownQuery" GROUP BY generated_group_field ORDER BY generated_group_field DESC, (count(*)) DESC value | generated_group_field --------+----------------------- +--------------------------------------------------------------------- 1 | 5 2 | 2 2 | 1 @@ -1823,7 +1823,7 @@ ORDER BY DEBUG: generating subplan 66_1 for subquery SELECT user_id, value_2 FROM public.users_table users WHERE ((user_id OPERATOR(pg_catalog.>) 1) AND (user_id OPERATOR(pg_catalog.<) 4) AND (value_3 OPERATOR(pg_catalog.>) (3)::double precision)) DEBUG: Plan 66 query after replacing subqueries and CTEs: SELECT count(*) AS value, generated_group_field FROM (SELECT DISTINCT "pushedDownQuery_1".real_user_id, "pushedDownQuery_1".generated_group_field FROM (SELECT "eventQuery".real_user_id, "eventQuery"."time", random() AS random, "eventQuery".value_2 AS generated_group_field FROM (SELECT temp_data_queries."time", temp_data_queries.user_id, temp_data_queries.value_2, user_filters_1.real_user_id FROM ((SELECT events."time", events.user_id, events.value_2 FROM public.events_table events WHERE ((events.user_id OPERATOR(pg_catalog.>) 1) AND (events.user_id OPERATOR(pg_catalog.<) 4) AND (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[4, 5])))) temp_data_queries JOIN (SELECT user_where_1_1.real_user_id FROM ((SELECT users.user_id AS real_user_id FROM public.users_table users WHERE ((users.user_id OPERATOR(pg_catalog.>) 1) AND (users.user_id OPERATOR(pg_catalog.<) 4) AND (users.value_2 OPERATOR(pg_catalog.>) 3))) user_where_1_1 JOIN (SELECT intermediate_result.user_id, intermediate_result.value_2 FROM read_intermediate_result('66_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) user_where_1_join_1 ON ((user_where_1_1.real_user_id OPERATOR(pg_catalog.>=) user_where_1_join_1.user_id)))) user_filters_1 ON ((temp_data_queries.user_id OPERATOR(pg_catalog.=) user_filters_1.real_user_id)))) "eventQuery") "pushedDownQuery_1") "pushedDownQuery" GROUP BY generated_group_field ORDER BY generated_group_field DESC, (count(*)) DESC value | generated_group_field --------+----------------------- +--------------------------------------------------------------------- 1 | 5 2 | 2 2 | 1 @@ -1870,7 +1870,7 @@ FROM GROUP BY "value_3" ORDER BY cnt, value_3 DESC LIMIT 10; value_3 | cnt ----------+----- +--------------------------------------------------------------------- 0 | 7 10 | 21 4 | 21 @@ -1923,7 +1923,7 @@ ORDER BY cnt, value_3 DESC LIMIT 10; DEBUG: generating subplan 69_1 for subquery SELECT user_id FROM public.users_table users WHERE ((user_id OPERATOR(pg_catalog.>) 1) AND (user_id OPERATOR(pg_catalog.<) 4) AND (value_2 OPERATOR(pg_catalog.>) 3)) DEBUG: Plan 69 query after replacing subqueries and CTEs: SELECT value_3, count(*) AS cnt FROM (SELECT segmentalias_1.value_3, segmentalias_1.user_id, random() AS random FROM (SELECT users_in_segment_1.user_id, users_in_segment_1.value_3 FROM ((SELECT all_buckets_1.user_id, (all_buckets_1.value_3 OPERATOR(pg_catalog.*) (2)::double precision) AS value_3 FROM (SELECT simple_user_where_1.user_id, 
simple_user_where_1.value_3 FROM (SELECT users.user_id, users.value_3 FROM public.users_table users WHERE ((users.user_id OPERATOR(pg_catalog.>) 1) AND (users.user_id OPERATOR(pg_catalog.<) 4) AND (users.value_2 OPERATOR(pg_catalog.>) 2))) simple_user_where_1) all_buckets_1) users_in_segment_1 JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('69_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) some_users_data ON (true))) segmentalias_1) "tempQuery" GROUP BY value_3 ORDER BY (count(*)), value_3 DESC LIMIT 10 value_3 | cnt ----------+----- +--------------------------------------------------------------------- 0 | 14 10 | 42 4 | 42 @@ -1978,7 +1978,7 @@ ORDER BY value_3 DESC, user_id ASC LIMIT 10; user_id | value_3 ----------+--------- +--------------------------------------------------------------------- 2 | 5 2 | 5 2 | 5 @@ -2032,7 +2032,7 @@ ORDER BY value_3 DESC, user_id ASC LIMIT 10; user_id | value_3 ----------+--------- +--------------------------------------------------------------------- 2 | 5 2 | 5 2 | 5 @@ -2087,7 +2087,7 @@ ORDER BY value_3 DESC, user_id DESC LIMIT 10; user_id | value_3 ----------+--------- +--------------------------------------------------------------------- 3 | 5 3 | 5 3 | 5 @@ -2140,7 +2140,7 @@ FROM ORDER BY value_3 DESC, user_id DESC LIMIT 10; user_id | value_3 ----------+--------- +--------------------------------------------------------------------- 3 | 5 3 | 5 3 | 5 @@ -2191,7 +2191,7 @@ count(*) AS cnt, "generated_group_field" cnt DESC, generated_group_field ASC LIMIT 10; cnt | generated_group_field ------+----------------------- +--------------------------------------------------------------------- 336 | 2 210 | 1 210 | 3 @@ -2219,7 +2219,7 @@ ORDER BY cnt DESC, user_id DESC LIMIT 10; cnt | user_id ------+--------- +--------------------------------------------------------------------- 11 | 3 10 | 2 8 | 4 @@ -2268,7 +2268,7 @@ ORDER BY value_2 DESC, user_id DESC LIMIT 10; user_id | value_2 ----------+--------- +--------------------------------------------------------------------- 2 | 5 (1 row) @@ -2378,7 +2378,7 @@ GROUP BY ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 449 1 | 433 (2 rows) @@ -2445,7 +2445,7 @@ GROUP BY ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 449 1 | 425 2 | 75 @@ -2511,7 +2511,7 @@ GROUP BY ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 449 1 | 433 2 | 75 @@ -2572,7 +2572,7 @@ GROUP BY ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 4 1 | 8 2 | 1 @@ -2609,7 +2609,7 @@ ORDER BY 1,2,3,4 LIMIT 5; uid | event_type | value_2 | value_3 ------+------------+---------+--------- +--------------------------------------------------------------------- 1 | 1 | 0 | 2 1 | 1 | 0 | 2 1 | 1 | 0 | 2 @@ -2646,7 +2646,7 @@ FROM GROUP BY user_id ORDER BY 1, 2; user_id | subquery_avg ----------+------------------------ +--------------------------------------------------------------------- 1 | 2.3333333333333333 3 | 5.0000000000000000 4 | 1.00000000000000000000 @@ -2681,7 +2681,7 @@ FROM GROUP BY a.user_id ORDER BY 1, 2; user_id | subquery_avg ----------+------------------------ 
+--------------------------------------------------------------------- 1 | 2.3333333333333333 3 | 5.0000000000000000 4 | 1.00000000000000000000 @@ -2698,7 +2698,7 @@ FROM ( ORDER BY k1 LIMIT 5; k1 ----- +--------------------------------------------------------------------- 1 1 1 @@ -2713,7 +2713,7 @@ FROM ( ORDER BY k1 LIMIT 5; k1 ----- +--------------------------------------------------------------------- 1 2 3 @@ -2726,7 +2726,7 @@ FROM (users_table u FULL JOIN events_table e ON (u.user_id = e.user_id)) k(x1, x ORDER BY 1, 2, 3 LIMIT 5; x1 | x3 | value_2 -----+----+--------- +--------------------------------------------------------------------- 1 | 1 | 1 1 | 1 | 1 1 | 1 | 1 @@ -2739,7 +2739,7 @@ FROM (users_table u FULL JOIN events_table e USING (user_id)) k(x1, x2, x3, x4, ORDER BY 1, 2, 3 LIMIT 5; x1 | x3 | value_2 -----+----+--------- +--------------------------------------------------------------------- 1 | 1 | 1 1 | 1 | 1 1 | 1 | 1 @@ -2753,7 +2753,7 @@ FROM (users_table LEFT OUTER JOIN events_table ON (users_table.user_id = events ORDER BY 1 DESC LIMIT 10; c_custkey ------------ +--------------------------------------------------------------------- 6 6 6 @@ -2773,7 +2773,7 @@ GROUP BY 1 ORDER BY 2, 1 LIMIT 10; c_custkey | date_trunc ------------+-------------------------- +--------------------------------------------------------------------- 2 | Thu Nov 23 13:52:00 2017 6 | Thu Nov 23 14:43:00 2017 4 | Thu Nov 23 15:32:00 2017 @@ -2790,7 +2790,7 @@ HAVING extract(minute from max(c_nationkey)) >= 45 ORDER BY 2, 1 LIMIT 10; c_custkey | date_trunc ------------+-------------------------- +--------------------------------------------------------------------- 2 | Thu Nov 23 13:52:00 2017 5 | Thu Nov 23 16:48:00 2017 (2 rows) @@ -2801,7 +2801,7 @@ FROM (users_table JOIN events_table USING (user_id)) AS test(user_id, c_nationke ORDER BY 1 DESC LIMIT 10; user_id ---------- +--------------------------------------------------------------------- 6 6 6 @@ -2825,7 +2825,7 @@ GROUP BY 1,2 ORDER BY 2 DESC, 1 DESC LIMIT 10; bar | value_3 ------+--------- +--------------------------------------------------------------------- 3 | 5 2 | 5 1 | 5 @@ -2853,7 +2853,7 @@ GROUP BY 1, 2 ORDER BY 2 DESC, 1 DESC LIMIT 10; bar | value_3 ------+--------- +--------------------------------------------------------------------- 3 | 5 2 | 5 1 | 5 @@ -2882,7 +2882,7 @@ SELECT bar, foo.value_3, c_custkey, test_2.time_2 FROM ORDER BY 2 DESC, 1 DESC, 3 DESC, 4 DESC LIMIT 10; bar | value_3 | c_custkey | time_2 ------+---------+-----------+--------------------------------- +--------------------------------------------------------------------- 3 | 5 | 3 | Thu Nov 23 17:18:51.048758 2017 3 | 5 | 3 | Thu Nov 23 17:18:51.048758 2017 3 | 5 | 3 | Thu Nov 23 17:18:51.048758 2017 diff --git a/src/test/regress/expected/multi_subquery_complex_reference_clause.out b/src/test/regress/expected/multi_subquery_complex_reference_clause.out index e1977446d..2e867de71 100644 --- a/src/test/regress/expected/multi_subquery_complex_reference_clause.out +++ b/src/test/regress/expected/multi_subquery_complex_reference_clause.out @@ -8,7 +8,7 @@ CREATE TABLE user_buy_test_table(user_id int, item_id int, buy_count int); SELECT create_distributed_table('user_buy_test_table', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -19,7 +19,7 @@ INSERT INTO user_buy_test_table VALUES(7,5,2); CREATE TABLE users_return_test_table(user_id int, item_id int, 
buy_count int); SELECT create_distributed_table('users_return_test_table', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -31,7 +31,7 @@ SELECT count(*) FROM (SELECT random() FROM user_buy_test_table JOIN users_ref_test_table ON user_buy_test_table.user_id = users_ref_test_table.id) subquery_1; count -------- +--------------------------------------------------------------------- 3 (1 row) @@ -40,7 +40,7 @@ SELECT count(*) FROM (SELECT random(), k_no FROM user_buy_test_table LEFT JOIN users_ref_test_table ON user_buy_test_table.user_id = users_ref_test_table.id) subquery_1 WHERE k_no = 47; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -50,7 +50,7 @@ SELECT subquery_1.item_id FROM ON user_buy_test_table.item_id = users_ref_test_table.id) subquery_1 ORDER BY 1; item_id ---------- +--------------------------------------------------------------------- 2 3 4 @@ -63,7 +63,7 @@ SELECT subquery_1.user_id FROM ON user_buy_test_table.user_id > users_ref_test_table.id) subquery_1 ORDER BY 1; user_id ---------- +--------------------------------------------------------------------- 1 2 3 @@ -87,7 +87,7 @@ SELECT count(*) FROM (SELECT random() FROM users_ref_test_table RIGHT JOIN user_buy_test_table ON user_buy_test_table.user_id = users_ref_test_table.id) subquery_1; count -------- +--------------------------------------------------------------------- 4 (1 row) @@ -102,7 +102,7 @@ SELECT count(*) FROM (SELECT random() FROM user_buy_test_table JOIN users_ref_test_table ON user_buy_test_table.item_id = users_ref_test_table.id) subquery_1; count -------- +--------------------------------------------------------------------- 4 (1 row) @@ -111,7 +111,7 @@ SELECT count(*) FROM (SELECT random() FROM user_buy_test_table JOIN users_ref_test_table ON user_buy_test_table.item_id > users_ref_test_table.id) subquery_1; count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -120,7 +120,7 @@ SELECT count(*) FROM (SELECT random() FROM user_buy_test_table LEFT JOIN users_ref_test_table ON user_buy_test_table.item_id > users_ref_test_table.id) subquery_1; count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -132,7 +132,7 @@ SELECT count(*) FROM (SELECT tt1.user_id, random() FROM user_buy_test_table as tt1 LEFT JOIN users_ref_test_table as ref ON tt1.user_id = ref.id) subquery_2 ON subquery_1.user_id = subquery_2.user_id; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -145,7 +145,7 @@ SELECT count(*) FROM ON user_buy_test_table.user_id > users_ref_test_table.id AND users_ref_test_table.k_no > 44 AND user_buy_test_table.user_id > 44) subquery_2 WHERE subquery_1.user_id = subquery_2.user_id ; count -------- +--------------------------------------------------------------------- 4 (1 row) @@ -166,7 +166,7 @@ SELECT subquery_2.id ON subquery_1.user_id = subquery_2.user_id ORDER BY 1 DESC LIMIT 5; id ----- +--------------------------------------------------------------------- 3 2 1 @@ -204,7 +204,7 @@ FROM ) as foo GROUP BY user_id ORDER BY 2 DESC LIMIT 10; user_id | sum ----------+------- +--------------------------------------------------------------------- 2 | 31248 3 | 15120 4 | 14994 @@ -224,7 +224,7 @@ FROM ) as foo GROUP BY user_id ORDER BY 2 DESC LIMIT 10; user_id | sum ----------+------- 
+--------------------------------------------------------------------- 2 | 31248 3 | 15120 4 | 14994 @@ -237,7 +237,7 @@ SELECT count(*) FROM (SELECT random() FROM user_buy_test_table JOIN generate_series(1,10) AS users_ref_test_table(id) ON user_buy_test_table.item_id > users_ref_test_table.id) subquery_1; count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -245,7 +245,7 @@ SELECT count(*) FROM SELECT count(*) FROM user_buy_test_table JOIN generate_series(1,10) AS users_ref_test_table(id) ON user_buy_test_table.item_id = users_ref_test_table.id; count -------- +--------------------------------------------------------------------- 4 (1 row) @@ -254,14 +254,14 @@ SELECT count(*) FROM (SELECT random() FROM user_buy_test_table LEFT JOIN generate_series(1,10) AS users_ref_test_table(id) ON user_buy_test_table.item_id > users_ref_test_table.id) subquery_1; count -------- +--------------------------------------------------------------------- 10 (1 row) SELECT count(*) FROM user_buy_test_table LEFT JOIN (SELECT * FROM generate_series(1,10) id) users_ref_test_table ON user_buy_test_table.item_id = users_ref_test_table.id; count -------- +--------------------------------------------------------------------- 4 (1 row) @@ -285,7 +285,7 @@ DEBUG: generating subplan 30_1 for subquery SELECT id FROM random() users_ref_t DEBUG: Plan 30 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT random() AS random FROM (public.user_buy_test_table JOIN (SELECT intermediate_result.id FROM read_intermediate_result('30_1'::text, 'binary'::citus_copy_format) intermediate_result(id double precision)) users_ref_test_table(id) ON (((user_buy_test_table.item_id)::double precision OPERATOR(pg_catalog.>) users_ref_test_table.id)))) subquery_1 DEBUG: Router planner cannot handle multi-shard select queries count -------- +--------------------------------------------------------------------- 4 (1 row) @@ -299,7 +299,7 @@ DEBUG: generating subplan 31_1 for subquery SELECT id FROM generate_series((ran DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT user_buy_test_table.item_id FROM (public.user_buy_test_table JOIN (SELECT intermediate_result.id FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) users_ref_test_table(id) ON ((user_buy_test_table.item_id OPERATOR(pg_catalog.>) users_ref_test_table.id)))) subquery_1 WHERE (item_id OPERATOR(pg_catalog.=) 6) DEBUG: Router planner cannot handle multi-shard select queries count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -318,7 +318,7 @@ DEBUG: Plan 32 query after replacing subqueries and CTEs: SELECT count(*) AS co DEBUG: Creating router plan DEBUG: Plan is router executable count -------- +--------------------------------------------------------------------- 14 (1 row) @@ -328,7 +328,7 @@ SELECT count(*) FROM (SELECT random() FROM user_buy_test_table JOIN (SELECT 4 AS id) users_ref_test_table ON user_buy_test_table.item_id > users_ref_test_table.id) subquery_1; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -336,7 +336,7 @@ SELECT count(*) FROM SELECT count(*) FROM user_buy_test_table JOIN (SELECT 5 AS id) users_ref_test_table ON user_buy_test_table.item_id = users_ref_test_table.id; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -344,7 +344,7 @@ ON 
user_buy_test_table.item_id = users_ref_test_table.id; SELECT count(*) FROM user_buy_test_table LEFT JOIN (SELECT 5 AS id) users_ref_test_table ON user_buy_test_table.item_id = users_ref_test_table.id; count -------- +--------------------------------------------------------------------- 4 (1 row) @@ -370,7 +370,7 @@ DEBUG: Plan 39 query after replacing subqueries and CTEs: SELECT count(*) AS co DEBUG: Creating router plan DEBUG: Plan is router executable count -------- +--------------------------------------------------------------------- 5 (1 row) @@ -391,7 +391,7 @@ DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT id FROM (SELEC DEBUG: Creating router plan DEBUG: Plan is router executable id ----- +--------------------------------------------------------------------- 7 6 5 @@ -417,7 +417,7 @@ DEBUG: Plan 45 query after replacing subqueries and CTEs: SELECT id, "?column?" DEBUG: Creating router plan DEBUG: Plan is router executable id | ?column? -----+---------- +--------------------------------------------------------------------- 7 | 0 6 | 0 5 | 0 @@ -437,7 +437,7 @@ SELECT * FROM ORDER BY 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries user_id ---------- +--------------------------------------------------------------------- 7 3 2 @@ -453,7 +453,7 @@ SELECT * FROM ORDER BY 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries user_id | ?column? ----------+---------- +--------------------------------------------------------------------- 7 | 0 3 | 0 2 | 0 @@ -477,7 +477,7 @@ SELECT * FROM ORDER BY 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries user_id ---------- +--------------------------------------------------------------------- 3 2 1 @@ -494,7 +494,7 @@ SELECT * FROM ORDER BY 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries user_id ---------- +--------------------------------------------------------------------- 7 3 2 @@ -527,7 +527,7 @@ FROM ) as foo GROUP BY user_id ORDER BY 2 DESC LIMIT 10; user_id | sum ----------+------- +--------------------------------------------------------------------- 2 | 31248 3 | 15120 4 | 14994 @@ -595,7 +595,7 @@ SELECT * FROM (SELECT user_id as user_user_id FROM users_table) as fooo ON (user_id = user_user_id)) as bar ORDER BY 1; user_id ---------- +--------------------------------------------------------------------- 3 4 5 @@ -632,7 +632,7 @@ FROM INNER JOIN users_table ON (users_table.user_id = events_reference_table.user_id) GROUP BY users_table.user_id) AS events_all LEFT JOIN events_table ON (events_all.usr_id = events_table.user_id) GROUP BY 2 ORDER BY 1 DESC, 2 DESC LIMIT 5; max | usr_id ------+-------- +--------------------------------------------------------------------- 432 | 2 391 | 4 364 | 5 @@ -716,7 +716,7 @@ ORDER BY user_id DESC LIMIT 10; user_id | lastseen ----------+--------------------------------- +--------------------------------------------------------------------- 1 | Thu Nov 23 21:54:46.924477 2017 1 | Thu Nov 23 21:54:46.924477 2017 1 | Thu Nov 23 21:54:46.924477 2017 @@ -773,7 +773,7 @@ GROUP BY ORDER BY generated_group_field DESC, value DESC; value | generated_group_field --------+----------------------- +--------------------------------------------------------------------- 2 | 5 1 | 3 3 | 2 @@ -819,7 +819,7 @@ FROM GROUP BY "value_3" ORDER BY cnt, value_3 DESC LIMIT 10; value_3 | cnt ----------+----- +--------------------------------------------------------------------- 0 | 7 10 | 21 4 | 21 @@ -872,7 +872,7 @@ ORDER BY 
value_3 DESC LIMIT 10; user_id | value_3 ----------+--------- +--------------------------------------------------------------------- 3 | 5 3 | 5 3 | 5 @@ -921,7 +921,7 @@ count(*) AS cnt, "generated_group_field" cnt DESC, generated_group_field ASC LIMIT 10; cnt | generated_group_field ------+----------------------- +--------------------------------------------------------------------- 336 | 2 210 | 1 210 | 3 @@ -1000,7 +1000,7 @@ FROM ( ) t GROUP BY user_id, hasdone_event ORDER BY user_id; user_id | sum | length | hasdone_event ----------+-----+--------+---------------- +--------------------------------------------------------------------- 2 | 72 | 14 | Has done event 3 | 238 | 14 | Has done event | 1 | 14 | Has done event @@ -1035,7 +1035,7 @@ FROM ( ) t GROUP BY user_id, hasdone_event ORDER BY user_id; user_id | sum | length | hasdone_event ----------+-----+--------+---------------- +--------------------------------------------------------------------- 1 | 55 | 14 | Has done event 2 | 88 | 14 | Has done event 3 | 83 | 14 | Has done event @@ -1081,7 +1081,7 @@ count(*) AS cnt, "generated_group_field" cnt DESC, generated_group_field ASC LIMIT 10; cnt | generated_group_field ------+----------------------- +--------------------------------------------------------------------- 737 | 5 679 | 1 591 | 2 @@ -1185,7 +1185,7 @@ ORDER BY types LIMIT 5; types -------- +--------------------------------------------------------------------- 0 0 0 @@ -1265,7 +1265,7 @@ GROUP BY ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 217 2 | 191 3 | 31 @@ -1325,7 +1325,7 @@ INNER JOIN GROUP BY types ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 191 1 | 191 2 | 31 @@ -1350,7 +1350,7 @@ WHERE subquery_1.user_id != subquery_2.user_id ; DEBUG: generating subplan 84_1 for subquery SELECT user_buy_test_table.user_id, random() AS random FROM (public.user_buy_test_table LEFT JOIN public.users_ref_test_table ON ((user_buy_test_table.user_id OPERATOR(pg_catalog.>) users_ref_test_table.id))) DEBUG: Plan 84 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT user_buy_test_table.user_id, random() AS random FROM (public.user_buy_test_table LEFT JOIN public.users_ref_test_table ON ((user_buy_test_table.item_id OPERATOR(pg_catalog.>) users_ref_test_table.id)))) subquery_1, (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('84_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) subquery_2 WHERE (subquery_1.user_id OPERATOR(pg_catalog.<>) subquery_2.user_id) count -------- +--------------------------------------------------------------------- 67 (1 row) @@ -1427,7 +1427,7 @@ SELECT foo.user_id FROM WHERE event_type > 100 ) as foo; user_id ---------- +--------------------------------------------------------------------- (0 rows) -- not pushdownable since group by is on the reference table column @@ -1439,7 +1439,7 @@ SELECT foo.user_id FROM ) as foo ORDER BY 1 DESC; user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -1457,7 +1457,7 @@ SELECT foo.user_id FROM ) as foo ORDER BY 1 LIMIT 3; user_id ---------- +--------------------------------------------------------------------- 1 2 3 @@ -1472,7 +1472,7 @@ SELECT foo.user_id FROM ORDER BY 1 DESC LIMIT 5; user_id ---------- 
+--------------------------------------------------------------------- 6 6 6 @@ -1487,7 +1487,7 @@ SELECT foo.user_id FROM SELECT DISTINCT ON(r.user_id) r.user_id, random() FROM users_table m JOIN events_reference_table r ON int4eq(m.user_id, r.user_id) ) as foo; user_id ---------- +--------------------------------------------------------------------- 1 2 3 @@ -1503,7 +1503,7 @@ SELECT foo.user_id FROM ) as foo ORDER BY 1 LIMIT 3; user_id ---------- +--------------------------------------------------------------------- 1 2 3 @@ -1520,7 +1520,7 @@ ORDER BY time DESC LIMIT 5 OFFSET 0; distinct_users | event_type | time -----------------+------------+--------------------------------- +--------------------------------------------------------------------- 1 | 6 | Thu Nov 23 21:54:46.924477 2017 4 | 1 | Thu Nov 23 18:10:21.338399 2017 3 | 2 | Thu Nov 23 18:08:26.550729 2017 @@ -1540,7 +1540,7 @@ ORDER BY time DESC LIMIT 5 OFFSET 0; distinct_users | event_type | time -----------------+------------+--------------------------------- +--------------------------------------------------------------------- 1 | 6 | Thu Nov 23 21:54:46.924477 2017 4 | 1 | Thu Nov 23 18:10:21.338399 2017 3 | 2 | Thu Nov 23 18:08:26.550729 2017 @@ -1558,7 +1558,7 @@ ORDER BY time DESC LIMIT 5 OFFSET 0; distinct_users | event_type | time -----------------+------------+--------------------------------- +--------------------------------------------------------------------- 1 | 6 | Thu Nov 23 21:54:46.924477 2017 4 | 1 | Thu Nov 23 18:10:21.338399 2017 3 | 2 | Thu Nov 23 18:08:26.550729 2017 @@ -1574,7 +1574,7 @@ SELECT * FROM SELECT DISTINCT users_reference_table.user_id FROM users_reference_table, events_table WHERE users_reference_table.user_id = events_table.value_4 ) as foo; user_id ---------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM @@ -1583,7 +1583,7 @@ SELECT * FROM GROUP BY 1 ) as foo; user_id ---------- +--------------------------------------------------------------------- (0 rows) -- similiar to the above examples, this time there is a subquery @@ -1594,7 +1594,7 @@ SELECT * FROM ) as foo ORDER BY 1; user_id ---------- +--------------------------------------------------------------------- 1 2 3 @@ -1611,7 +1611,7 @@ SELECT * FROM ORDER BY 1 DESC LIMIT 4; user_id | user_id ----------+--------- +--------------------------------------------------------------------- 6 | 6 5 | 5 4 | 4 @@ -1634,7 +1634,7 @@ SELECT * FROM ORDER BY 1 DESC LIMIT 4; user_id | value_4 ----------+--------- +--------------------------------------------------------------------- 6 | 5 | 4 | @@ -1645,7 +1645,7 @@ LIMIT 4; BEGIN; SELECT broadcast_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,200) s'); broadcast_intermediate_result -------------------------------- +--------------------------------------------------------------------- 200 (1 row) @@ -1664,7 +1664,7 @@ GROUP BY res.val_square) squares ORDER BY 1 LIMIT 5; user_id ---------- +--------------------------------------------------------------------- 1 2 3 @@ -1681,7 +1681,7 @@ JOIN ORDER BY 1 LIMIT 5; user_id ---------- +--------------------------------------------------------------------- 1 2 3 @@ -1705,7 +1705,7 @@ squares ORDER BY 1 LIMIT 5; user_id ---------- +--------------------------------------------------------------------- 1 2 3 @@ -1730,7 +1730,7 @@ GROUP BY res2.val_square) squares ORDER BY 1 LIMIT 5; user_id ---------- +--------------------------------------------------------------------- 1 4 (2 rows) @@ 
-1754,7 +1754,7 @@ FROM ORDER BY 1 LIMIT 5; user_id ---------- +--------------------------------------------------------------------- 1 2 3 @@ -1779,7 +1779,7 @@ JOIN ORDER BY 1 LIMIT 5; user_id ---------- +--------------------------------------------------------------------- 1 4 (2 rows) @@ -1799,7 +1799,7 @@ FROM ORDER BY 1 LIMIT 5; user_id ---------- +--------------------------------------------------------------------- 5 6 (2 rows) @@ -1823,7 +1823,7 @@ JOIN ORDER BY 1 LIMIT 5; user_id ---------- +--------------------------------------------------------------------- 6 (1 row) diff --git a/src/test/regress/expected/multi_subquery_in_where_clause.out b/src/test/regress/expected/multi_subquery_in_where_clause.out index f72b7adf0..413538b36 100644 --- a/src/test/regress/expected/multi_subquery_in_where_clause.out +++ b/src/test/regress/expected/multi_subquery_in_where_clause.out @@ -25,7 +25,7 @@ HAVING count(*) > 2 ORDER BY user_id LIMIT 5; user_id ---------- +--------------------------------------------------------------------- 1 5 6 @@ -53,7 +53,7 @@ HAVING count(*) > 1 ORDER BY user_id LIMIT 5; user_id ---------- +--------------------------------------------------------------------- 1 5 6 @@ -95,7 +95,7 @@ ORDER BY 1 DESC LIMIT 3; user_id ---------- +--------------------------------------------------------------------- 4 3 2 @@ -118,7 +118,7 @@ WHERE ) ORDER BY 1; user_id ---------- +--------------------------------------------------------------------- 2 2 2 @@ -157,7 +157,7 @@ WHERE HAVING count(*) > 2 ORDER BY 1; user_id ---------- +--------------------------------------------------------------------- 1 2 3 @@ -174,7 +174,7 @@ FROM WHERE user_id =ANY(SELECT user_id FROM users_table WHERE value_1 >= 1 AND value_1 <= 2) GROUP BY 1 ORDER BY 2 DESC LIMIT 5; user_id | count ----------+------- +--------------------------------------------------------------------- 5 | 26 4 | 23 2 | 18 @@ -201,7 +201,7 @@ GROUP BY ORDER BY user_id; user_id ---------- +--------------------------------------------------------------------- 1 2 3 @@ -274,7 +274,7 @@ SELECT user_id, value_2 FROM users_table WHERE ) ORDER BY 1, 2; user_id | value_2 ----------+--------- +--------------------------------------------------------------------- 2 | 2 2 | 2 2 | 4 @@ -364,7 +364,7 @@ GROUP BY user_id HAVING count(*) > 1 AND sum(value_2) > 29 ORDER BY 1; user_id ---------- +--------------------------------------------------------------------- 2 3 (2 rows) @@ -397,7 +397,7 @@ FROM ( ) q ORDER BY 2 DESC, 1; user_id | array_length ----------+-------------- +--------------------------------------------------------------------- 5 | 364 (1 row) @@ -573,7 +573,7 @@ WHERE ORDER BY 1 ASC LIMIT 2; user_id ---------- +--------------------------------------------------------------------- 1 1 (2 rows) @@ -590,7 +590,7 @@ WHERE ORDER BY 1 ASC LIMIT 2; user_id ---------- +--------------------------------------------------------------------- (0 rows) -- OFFSET is not supported in the subquey @@ -641,7 +641,7 @@ WHERE user_id WHERE f_inner.user_id = f_outer.user_id ) ORDER BY 1 LIMIT 3; user_id ---------- +--------------------------------------------------------------------- 1 (1 row) @@ -656,7 +656,7 @@ DEBUG: generating subplan 26_1 for subquery SELECT user_id FROM public.users_ta DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT user_id FROM public.users_table WHERE ((user_id OPERATOR(pg_catalog.=) ANY (SELECT users_table_1.user_id FROM public.users_table users_table_1 WHERE ((users_table_1.value_1 OPERATOR(pg_catalog.>=) 
1) AND (users_table_1.value_1 OPERATOR(pg_catalog.<=) 2)))) AND (user_id OPERATOR(pg_catalog.=) ANY (SELECT users_table_1.user_id FROM public.users_table users_table_1 WHERE ((users_table_1.value_1 OPERATOR(pg_catalog.>=) 3) AND (users_table_1.value_1 OPERATOR(pg_catalog.<=) 4)))) AND (value_2 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) ORDER BY user_id DESC LIMIT 3 DEBUG: push down of limit count: 3 user_id ---------- +--------------------------------------------------------------------- 6 6 6 diff --git a/src/test/regress/expected/multi_subquery_in_where_reference_clause.out b/src/test/regress/expected/multi_subquery_in_where_reference_clause.out index 377d651dc..d246aaff0 100644 --- a/src/test/regress/expected/multi_subquery_in_where_reference_clause.out +++ b/src/test/regress/expected/multi_subquery_in_where_reference_clause.out @@ -18,7 +18,7 @@ GROUP BY user_id ORDER BY user_id LIMIT 3; user_id ---------- +--------------------------------------------------------------------- 1 2 3 @@ -43,7 +43,7 @@ GROUP BY user_id ORDER BY user_id LIMIT 3; user_id ---------- +--------------------------------------------------------------------- (0 rows) -- subqueries in WHERE with NOT EXISTS operator, should not work since @@ -120,7 +120,7 @@ WHERE ORDER BY user_id LIMIT 3; user_id ---------- +--------------------------------------------------------------------- 2 3 4 @@ -143,7 +143,7 @@ WHERE ORDER BY user_id LIMIT 3; user_id ---------- +--------------------------------------------------------------------- 1 2 3 @@ -185,7 +185,7 @@ GROUP BY users_table.user_id ORDER BY 2 DESC, 1 DESC LIMIT 3; user_id | count ----------+------- +--------------------------------------------------------------------- 5 | 26 4 | 23 3 | 17 @@ -209,7 +209,7 @@ GROUP BY users_table.user_id ORDER BY 2 DESC, 1 DESC LIMIT 3; user_id | count ----------+------- +--------------------------------------------------------------------- 5 | 26 4 | 23 2 | 18 @@ -233,7 +233,7 @@ GROUP BY users_table.user_id ORDER BY 2 DESC, 1 DESC LIMIT 3; user_id | count ----------+------- +--------------------------------------------------------------------- 6 | 10 (1 row) @@ -299,7 +299,7 @@ SELECT user_id, value_2 FROM users_table WHERE ) ORDER BY 1, 2; user_id | value_2 ----------+--------- +--------------------------------------------------------------------- 5 | 5 5 | 5 (2 rows) @@ -376,7 +376,7 @@ WHERE value_3 =ANY(SELECT value_2 FROM users_reference_table WHERE value_1 >= 1 AND value_1 <= 2) GROUP BY 1 ORDER BY 2 DESC, 1 DESC LIMIT 5; user_id | count ----------+------- +--------------------------------------------------------------------- 5 | 26 4 | 23 2 | 18 @@ -404,7 +404,7 @@ GROUP BY 1 ORDER BY 2 DESC, 1 DESC LIMIT 5; user_id | count ----------+------- +--------------------------------------------------------------------- 2 | 7 5 | 6 4 | 5 @@ -476,7 +476,7 @@ SELECT user_id, value_2 FROM users_table WHERE ) ORDER BY 1, 2; user_id | value_2 ----------+--------- +--------------------------------------------------------------------- 5 | 5 5 | 5 (2 rows) @@ -494,7 +494,7 @@ WHERE user_id DEBUG: generating subplan 18_1 for subquery SELECT users_table.value_2 FROM (public.users_table JOIN public.users_reference_table u2 ON ((users_table.value_2 OPERATOR(pg_catalog.=) u2.value_2))) DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_reference_table WHERE (NOT (user_id 
OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('18_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))) count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -510,7 +510,7 @@ FROM DEBUG: generating subplan 20_1 for subquery SELECT users_table.value_2 FROM (public.users_table JOIN public.users_reference_table u2 ON ((users_table.value_2 OPERATOR(pg_catalog.=) u2.value_2))) DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_reference_table.user_id, random() AS random FROM public.users_reference_table) vals WHERE (NOT (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))) count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -544,7 +544,7 @@ ORDER BY 1,2,3 LIMIT 5; DEBUG: push down of limit count: 5 user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | diff --git a/src/test/regress/expected/multi_subquery_misc.out b/src/test/regress/expected/multi_subquery_misc.out index bae9595dd..bbaa8f047 100644 --- a/src/test/regress/expected/multi_subquery_misc.out +++ b/src/test/regress/expected/multi_subquery_misc.out @@ -33,7 +33,7 @@ FROM ( ORDER BY user_lastseen DESC, user_id; EXECUTE prepared_subquery_1; user_id | user_lastseen | array_length ----------+---------------------------------+-------------- +--------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) @@ -67,49 +67,49 @@ ORDER BY user_lastseen DESC, user_id; -- should be fine with more than five executions EXECUTE prepared_subquery_2(1, 3); user_id | user_lastseen | array_length ----------+---------------------------------+-------------- +--------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) EXECUTE prepared_subquery_2(1, 3); user_id | user_lastseen | array_length ----------+---------------------------------+-------------- +--------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) EXECUTE prepared_subquery_2(1, 3); user_id | user_lastseen | array_length ----------+---------------------------------+-------------- +--------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) EXECUTE prepared_subquery_2(1, 3); user_id | user_lastseen | array_length ----------+---------------------------------+-------------- +--------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) EXECUTE prepared_subquery_2(1, 3); user_id | user_lastseen | array_length ----------+---------------------------------+-------------- +--------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 
2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) EXECUTE prepared_subquery_2(1, 3); user_id | user_lastseen | array_length ----------+---------------------------------+-------------- +--------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) EXECUTE prepared_subquery_2(1, 3); user_id | user_lastseen | array_length ----------+---------------------------------+-------------- +--------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) @@ -129,7 +129,7 @@ ORDER BY -- enough times (6+) to actually use prepared statements EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -139,7 +139,7 @@ EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -149,7 +149,7 @@ EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -159,7 +159,7 @@ EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -169,7 +169,7 @@ EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -179,7 +179,7 @@ EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -212,44 +212,44 @@ $$ LANGUAGE plpgsql; -- enough times (6+) to actually use prepared statements SELECT plpgsql_subquery_test(1, 2); plpgsql_subquery_test ------------------------ +--------------------------------------------------------------------- 539 (1 row) SELECT plpgsql_subquery_test(1, 2); plpgsql_subquery_test ------------------------ +--------------------------------------------------------------------- 539 (1 row) SELECT plpgsql_subquery_test(1, 2); plpgsql_subquery_test ------------------------ +--------------------------------------------------------------------- 539 (1 row) SELECT plpgsql_subquery_test(1, 2); plpgsql_subquery_test ------------------------ +--------------------------------------------------------------------- 539 (1 row) SELECT plpgsql_subquery_test(1, 2); plpgsql_subquery_test ------------------------ +--------------------------------------------------------------------- 539 (1 row) SELECT plpgsql_subquery_test(1, 2); plpgsql_subquery_test ------------------------ +--------------------------------------------------------------------- 539 (1 row) -- this should also work, but should return 0 given that int = NULL is always returns false SELECT plpgsql_subquery_test(1, NULL); plpgsql_subquery_test ------------------------ +--------------------------------------------------------------------- 0 (1 row) @@ -303,7 +303,7 @@ INNER JOIN ( GROUP BY 1 ORDER BY 2 DESC; user_id | count ----------+------- +--------------------------------------------------------------------- 5 | 676 4 | 529 2 | 324 @@ -340,7 +340,7 @@ INNER JOIN ( GROUP BY 1 ORDER BY 2 DESC; user_id | count 
----------+------- +--------------------------------------------------------------------- 5 | 676 4 | 529 2 | 324 @@ -375,7 +375,7 @@ INNER JOIN ( ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC, 5 DESC, 6 DESC, 7 DESC, 8 DESC LIMIT 5; user_id | time | value_1 | value_2 | value_3 | value_4 | user_id | user_id ----------+---------------------------------+---------+---------+---------+---------+---------+--------- +--------------------------------------------------------------------- 6 | Thu Nov 23 14:43:18.024104 2017 | 3 | 2 | 5 | | 6 | 6 6 | Thu Nov 23 14:43:18.024104 2017 | 3 | 2 | 5 | | 6 | 6 6 | Thu Nov 23 14:43:18.024104 2017 | 3 | 2 | 5 | | 6 | 6 diff --git a/src/test/regress/expected/multi_subquery_union.out b/src/test/regress/expected/multi_subquery_union.out index 78f69cd2b..73a7d44b0 100644 --- a/src/test/regress/expected/multi_subquery_union.out +++ b/src/test/regress/expected/multi_subquery_union.out @@ -14,7 +14,7 @@ FROM ( ORDER BY 2 DESC,1 LIMIT 5; user_id | counter ----------+--------- +--------------------------------------------------------------------- 2 | 5 3 | 5 4 | 5 @@ -32,7 +32,7 @@ FROM ( ORDER BY 2 DESC,1 LIMIT 5; user_id | max ----------+----- +--------------------------------------------------------------------- 5 | 5 1 | 4 (2 rows) @@ -47,7 +47,7 @@ FROM ( ORDER BY 2 DESC,1 LIMIT 5; user_id | counter ----------+--------- +--------------------------------------------------------------------- 2 | 5 3 | 5 4 | 5 @@ -65,7 +65,7 @@ FROM ( ORDER BY 2 DESC,1 LIMIT 5; user_id | counter ----------+--------- +--------------------------------------------------------------------- 2 | 5 2 | 5 3 | 5 @@ -83,7 +83,7 @@ FROM ( ORDER BY 2 DESC,1 LIMIT 5; user_id | counter ----------+--------- +--------------------------------------------------------------------- 2 | 5 2 | 5 3 | 5 @@ -102,7 +102,7 @@ GROUP BY 1 ORDER BY 2 DESC,1 LIMIT 5; user_id | sum ----------+----- +--------------------------------------------------------------------- 2 | 15 3 | 15 4 | 15 @@ -121,7 +121,7 @@ GROUP BY 1 ORDER BY 2 DESC,1 LIMIT 5; user_id | sum ----------+----- +--------------------------------------------------------------------- 2 | 32 3 | 32 4 | 23 @@ -140,7 +140,7 @@ GROUP BY 1 ORDER BY 2 DESC,1 LIMIT 5; user_id | sum ----------+----- +--------------------------------------------------------------------- 2 | 15 3 | 15 4 | 15 @@ -160,7 +160,7 @@ GROUP BY --HAVING sum(counter) > 900 ORDER BY 1,2 DESC LIMIT 5; user_id | sum ----------+----- +--------------------------------------------------------------------- 1 | 7 2 | 15 3 | 15 @@ -181,7 +181,7 @@ GROUP BY --HAVING sum(counter) > 900 ORDER BY 1,2 DESC LIMIT 5; user_id | sum ----------+----- +--------------------------------------------------------------------- 1 | 7 2 | 15 3 | 15 @@ -204,7 +204,7 @@ FROM ( ) user_id GROUP BY user_id ORDER BY 1 DESC LIMIT 5; sum ------ +--------------------------------------------------------------------- 141 94 87 @@ -226,7 +226,7 @@ FROM ( ) user_id GROUP BY user_id ORDER BY 1 DESC LIMIT 5; sum ------ +--------------------------------------------------------------------- 135 87 85 @@ -248,7 +248,7 @@ FROM ( ) user_id GROUP BY user_id ORDER BY 1 DESC LIMIT 5; sum ------ +--------------------------------------------------------------------- 135 87 85 @@ -298,7 +298,7 @@ FROM ( ORDER BY 2 DESC, 1 DESC LIMIT 5; user_id | sum ----------+----- +--------------------------------------------------------------------- 2 | 107 3 | 101 5 | 94 @@ -349,7 +349,7 @@ FROM ( ORDER BY 2 DESC, 1 DESC LIMIT 5; user_id | sum 
----------+----- +--------------------------------------------------------------------- 2 | 107 3 | 101 5 | 94 @@ -406,7 +406,7 @@ FROM GROUP BY types ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 43 1 | 42 2 | 28 @@ -457,7 +457,7 @@ FROM GROUP BY types ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 43 1 | 42 2 | 28 @@ -502,7 +502,7 @@ FROM GROUP BY types ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 43 1 | 42 2 | 28 @@ -547,7 +547,7 @@ FROM ORDER BY 1 LIMIT 5; user_id ---------- +--------------------------------------------------------------------- 1 1 1 @@ -593,7 +593,7 @@ FROM GROUP BY types ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 43 1 | 42 2 | 28 @@ -610,7 +610,7 @@ FROM (SELECT user_id FROM events_table) ) b; count -------- +--------------------------------------------------------------------- 202 (1 row) @@ -624,7 +624,7 @@ FROM (SELECT user_id FROM events_reference_table) ) b; count -------- +--------------------------------------------------------------------- 202 (1 row) @@ -640,7 +640,7 @@ FROM ORDER BY 1 DESC LIMIT 5; user_id ---------- +--------------------------------------------------------------------- 6 6 6 @@ -660,7 +660,7 @@ FROM ORDER BY 1 DESC, 2 DESC LIMIT 5; user_id | value_3 ----------+--------- +--------------------------------------------------------------------- 6 | 5 6 | 5 6 | 5 @@ -680,7 +680,7 @@ FROM ORDER BY 2 DESC, 1 DESC LIMIT 5; user_id | value_3_sum ----------+------------- +--------------------------------------------------------------------- 4 | 65 4 | 65 5 | 64 @@ -701,7 +701,7 @@ GROUP BY 1 ORDER BY 2 DESC, 1 DESC LIMIT 5; user_id | sum ----------+----- +--------------------------------------------------------------------- 2 | 119 4 | 111 3 | 100 @@ -729,7 +729,7 @@ FROM ORDER BY 1 DESC, 2 DESC LIMIT 5; user_id | value_3 ----------+--------- +--------------------------------------------------------------------- 6 | 5 6 | 5 6 | 3 @@ -758,7 +758,7 @@ GROUP BY user_id ORDER BY 1 DESC LIMIT 5; max ------ +--------------------------------------------------------------------- 5 5 5 @@ -777,7 +777,7 @@ FROM ( GROUP BY user_id ORDER BY 1,2; user_id | sum ----------+----- +--------------------------------------------------------------------- 0 | 31 1 | 76 2 | 99 @@ -802,7 +802,7 @@ FROM ( ) user_id GROUP BY user_id ORDER BY 1 DESC LIMIT 5; sum ------ +--------------------------------------------------------------------- 80 76 55 @@ -834,7 +834,7 @@ UNION ) as ftop ORDER BY 1,2; user_id | sum ----------+----- +--------------------------------------------------------------------- 1 | 20 1 | 62 2 | 50 @@ -892,7 +892,7 @@ UNION ORDER BY 2, 1 LIMIT 10; user_id | sum ----------+----- +--------------------------------------------------------------------- 6 | 43 1 | 62 4 | 91 @@ -924,7 +924,7 @@ UNION ORDER BY 2, 1 LIMIT 10; sum | user_id -------+--------- +--------------------------------------------------------------------- 300 | 1 1200 | 2 1155 | 3 @@ -949,7 +949,7 @@ UNION ORDER BY 2, 1 LIMIT 10; value_2 | user_id ----------+--------- +--------------------------------------------------------------------- 0 | 1 2 | 1 3 | 1 @@ -981,7 +981,7 @@ UNION ALL ORDER BY 2, 1 LIMIT 10; sum | user_id 
-------+--------- +--------------------------------------------------------------------- 300 | 1 300 | 1 1200 | 2 @@ -1004,7 +1004,7 @@ FROM ( GROUP BY user_id ORDER BY 1,2; user_id | sum ----------+----- +--------------------------------------------------------------------- 3 | 101 4 | 91 5 | 94 @@ -1053,7 +1053,7 @@ FROM ( user_id)) AS ftop ORDER BY 1,2; user_id | sum ----------+----- +--------------------------------------------------------------------- 1 | 20 1 | 62 2 | 50 @@ -1084,7 +1084,7 @@ FROM (SELECT 2 * user_id FROM events_table) ) b; count -------- +--------------------------------------------------------------------- 202 (1 row) @@ -1108,7 +1108,7 @@ FROM ORDER BY 1 DESC, 2 DESC LIMIT 5; user_id | value_3 ----------+--------- +--------------------------------------------------------------------- 6 | 5 6 | 5 6 | 3 @@ -1126,7 +1126,7 @@ FROM (SELECT users_table.user_id FROM events_table, users_table WHERE events_table.user_id = users_table.user_id) ) b; count -------- +--------------------------------------------------------------------- 1850 (1 row) @@ -1140,7 +1140,7 @@ FROM (SELECT 1) ) b; count -------- +--------------------------------------------------------------------- 102 (1 row) @@ -1154,7 +1154,7 @@ FROM (SELECT (random() * 100)::int) ) b; count -------- +--------------------------------------------------------------------- 102 (1 row) @@ -1178,7 +1178,7 @@ FROM ORDER BY 1 DESC, 2 DESC LIMIT 5; user_id | value_3 ----------+--------- +--------------------------------------------------------------------- 6 | 5 6 | 5 6 | 3 @@ -1230,7 +1230,7 @@ FROM GROUP BY types ORDER BY types; types | sumofeventtype --------+---------------- +--------------------------------------------------------------------- 0 | 43 1 | 42 2 | 28 diff --git a/src/test/regress/expected/multi_subquery_window_functions.out b/src/test/regress/expected/multi_subquery_window_functions.out index 92632c87d..100c92133 100644 --- a/src/test/regress/expected/multi_subquery_window_functions.out +++ b/src/test/regress/expected/multi_subquery_window_functions.out @@ -24,7 +24,7 @@ ORDER BY LIMIT 10; user_id | time | rnk ----------+---------------------------------+----- +--------------------------------------------------------------------- 2 | Wed Nov 22 20:16:16.614779 2017 | 24 2 | Wed Nov 22 22:06:12.107108 2017 | 23 2 | Wed Nov 22 22:23:25.40611 2017 | 22 @@ -52,7 +52,7 @@ ORDER BY LIMIT 10; user_id | time | rnk ----------+---------------------------------+----- +--------------------------------------------------------------------- 2 | Wed Nov 22 20:16:16.614779 2017 | 24 2 | Wed Nov 22 22:06:12.107108 2017 | 23 2 | Wed Nov 22 22:23:25.40611 2017 | 22 @@ -80,7 +80,7 @@ ORDER BY LIMIT 10; user_id | time | lag_event_type | row_no ----------+---------------------------------+----------------+-------- +--------------------------------------------------------------------- 2 | Wed Nov 22 20:16:16.614779 2017 | 0 | 24 2 | Wed Nov 22 22:06:12.107108 2017 | 3 | 23 2 | Wed Nov 22 22:23:25.40611 2017 | 4 | 22 @@ -111,7 +111,7 @@ ORDER BY LIMIT 10; user_id | rnk | avg_val_2 ----------+-----+-------------------- +--------------------------------------------------------------------- 6 | 2 | 2.0000000000000000 5 | 2 | 2.0909090909090909 4 | 2 | 2.4000000000000000 @@ -141,7 +141,7 @@ ORDER BY LIMIT 10; min | min | lag_event_type | count ------+---------------------------------+----------------+------- +--------------------------------------------------------------------- 1 | Thu Nov 23 11:09:38.074595 2017 | 6 | 1 2 | Wed 
Nov 22 19:00:10.396739 2017 | 5 | 7 1 | Wed Nov 22 18:49:42.327403 2017 | 4 | 21 @@ -167,7 +167,7 @@ SELECT * FROM ORDER BY 3 DESC, 1 DESC, 2 DESC NULLS LAST LIMIT 10; user_id | lag | rank ----------+-----+------ +--------------------------------------------------------------------- 2 | 2 | 109 5 | 5 | 105 3 | 3 | 103 @@ -196,7 +196,7 @@ SELECT * FROM ORDER BY 3 DESC, 1 DESC, 2 DESC NULLS LAST LIMIT 10; user_id | lag | rank ----------+-----+------ +--------------------------------------------------------------------- 2 | 2 | 73 4 | 4 | 70 3 | 3 | 69 @@ -240,7 +240,7 @@ JOIN ORDER BY 3 DESC, 4 DESC, 1 DESC, 2 DESC NULLS LAST LIMIT 10; user_id | max | max | max ----------+-----+-----+----- +--------------------------------------------------------------------- 2 | 2 | 73 | 73 4 | 4 | 70 | 70 3 | 3 | 69 | 69 @@ -271,7 +271,7 @@ ORDER BY LIMIT 10; avg | max | my_rank ---------------------+--------------------------+--------- +--------------------------------------------------------------------- 3.5000000000000000 | Wed Nov 22 00:00:00 2017 | 2 (1 row) @@ -297,7 +297,7 @@ ORDER BY LIMIT 10; avg | max | my_rank ---------------------+--------------------------+--------- +--------------------------------------------------------------------- 3.7500000000000000 | Wed Nov 22 00:00:00 2017 | 2 3.3750000000000000 | Thu Nov 23 00:00:00 2017 | 1 (2 rows) @@ -323,7 +323,7 @@ ORDER BY LIMIT 10; avg | my_rank ---------------------+--------- +--------------------------------------------------------------------- 3.5000000000000000 | 1 (1 row) @@ -347,7 +347,7 @@ ORDER BY LIMIT 10; user_id | time | sum ----------+--------------------------+----- +--------------------------------------------------------------------- 1 | Wed Nov 22 00:00:00 2017 | 1 1 | Thu Nov 23 00:00:00 2017 | 7 1 | Thu Nov 23 00:00:00 2017 | 6 @@ -375,7 +375,7 @@ ORDER BY LIMIT 20; user_id | it_name | count ----------+---------+------- +--------------------------------------------------------------------- 2 | User_1 | 2 3 | User_1 | 6 4 | User_1 | 2 @@ -396,7 +396,7 @@ ORDER BY LIMIT 10; user_id | sum ----------+----- +--------------------------------------------------------------------- 3 | 44 5 | 43 4 | 41 @@ -418,7 +418,7 @@ GROUP BY user_id ORDER BY LIMIT 10; user_id | max ----------+----- +--------------------------------------------------------------------- 3 | 15 4 | 13 2 | 10 @@ -441,7 +441,7 @@ ORDER BY LIMIT 10; user_id | rank ----------+------ +--------------------------------------------------------------------- 5 | 6 2 | 5 4 | 5 @@ -463,7 +463,7 @@ ORDER BY LIMIT 10; user_id | rank ----------+------ +--------------------------------------------------------------------- 5 | 6 2 | 5 4 | 5 @@ -497,7 +497,7 @@ GROUP BY user_id ORDER BY 1 DESC LIMIT 5; max ------- +--------------------------------------------------------------------- 5 3.5 3.25 @@ -539,7 +539,7 @@ FROM ( ORDER BY 2 DESC, 1 DESC LIMIT 5; user_id | sum ----------+----- +--------------------------------------------------------------------- 2 | 107 3 | 101 5 | 94 @@ -569,7 +569,7 @@ ORDER BY LIMIT 3; user_id ---------- +--------------------------------------------------------------------- 4 3 2 @@ -593,7 +593,7 @@ ORDER BY difference DESC, rank DESC, user_id LIMIT 20; user_id | rank | difference | distinct_users ----------+------+------------+---------------- +--------------------------------------------------------------------- 4 | 12 | 306 | 9 5 | 12 | 136 | 8 3 | 1 | 84 | 6 @@ -642,7 +642,7 @@ ORDER BY abs DESC, user_id LIMIT 10; user_id | abs ----------+----- 
+--------------------------------------------------------------------- 6 | 2 1 | 1 2 | 0 @@ -667,7 +667,7 @@ ORDER BY LIMIT 5; user_id | count ----------+------- +--------------------------------------------------------------------- 6 | 1 5 | 1 4 | 1 @@ -710,7 +710,7 @@ EXPLAIN (COSTS FALSE, VERBOSE TRUE) ORDER BY 2 DESC, 1 DESC LIMIT 5; QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit Output: remote_scan.user_id, remote_scan.sum -> Sort diff --git a/src/test/regress/expected/multi_subtransactions.out b/src/test/regress/expected/multi_subtransactions.out index d71d54f70..efdc43f0e 100644 --- a/src/test/regress/expected/multi_subtransactions.out +++ b/src/test/regress/expected/multi_subtransactions.out @@ -4,7 +4,7 @@ CREATE TABLE artists ( ); SELECT create_distributed_table('artists', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -22,7 +22,7 @@ RELEASE SAVEPOINT s1; COMMIT; SELECT * FROM artists WHERE id=5; id | name -----+------ +--------------------------------------------------------------------- (0 rows) -- ROLLBACK TO SAVEPOINT @@ -34,7 +34,7 @@ ROLLBACK TO SAVEPOINT s1; COMMIT; SELECT * FROM artists WHERE id=5; id | name -----+----------- +--------------------------------------------------------------------- 5 | Asher Lev (1 row) @@ -49,7 +49,7 @@ RELEASE SAVEPOINT s2; COMMIT; SELECT * FROM artists WHERE id=5; id | name -----+------------ +--------------------------------------------------------------------- 5 | Jacob Kahn (1 row) @@ -64,7 +64,7 @@ ROLLBACK TO SAVEPOINT s2; COMMIT; SELECT * FROM artists WHERE id=5; id | name -----+------------ +--------------------------------------------------------------------- 5 | Jacob Kahn (1 row) @@ -82,7 +82,7 @@ INSERT INTO artists VALUES (6, 'Emily Carr'); COMMIT; SELECT * FROM artists WHERE id=6; id | name -----+------------ +--------------------------------------------------------------------- 6 | Emily Carr (1 row) @@ -98,7 +98,7 @@ RELEASE SAVEPOINT s2; COMMIT; SELECT * FROM artists WHERE id=7; id | name -----+------ +--------------------------------------------------------------------- (0 rows) -- Recover from errors @@ -132,7 +132,7 @@ INSERT INTO artists VALUES (9, 'Mohsen Namjoo'); COMMIT; SELECT * FROM artists WHERE id IN (7, 8, 9) ORDER BY id; id | name -----+--------------- +--------------------------------------------------------------------- 8 | Sogand 9 | Mohsen Namjoo (2 rows) @@ -149,7 +149,7 @@ INSERT INTO artists VALUES (10, 'Mahmoud Farshchian'); COMMIT; SELECT * FROM artists WHERE id IN (9, 10) ORDER BY id; id | name -----+-------------------- +--------------------------------------------------------------------- 10 | Mahmoud Farshchian (1 row) @@ -165,7 +165,7 @@ INSERT INTO artists VALUES (11, 'Egon Schiele'); COMMIT; SELECT * FROM artists WHERE id IN (10, 11) ORDER BY id; id | name -----+-------------- +--------------------------------------------------------------------- 11 | Egon Schiele (1 row) @@ -181,7 +181,7 @@ INSERT INTO artists VALUES (12, 'Marc Chagall'); COMMIT; SELECT * FROM artists WHERE id IN (11, 12) ORDER BY id; id | name -----+-------------- +--------------------------------------------------------------------- 12 | Marc Chagall (1 row) @@ -192,7 +192,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1190000; select 
create_distributed_table('t1', 'a'), create_distributed_table('t2', 'a'); create_distributed_table | create_distributed_table ---------------------------+-------------------------- +--------------------------------------------------------------------- | (1 row) @@ -237,7 +237,7 @@ with r AS ( commit; select * from t2 order by a, b; a | b ----+--- +--------------------------------------------------------------------- 1 | 4 2 | 5 3 | 6 @@ -245,7 +245,7 @@ select * from t2 order by a, b; select * from t1 order by a, b; a | b ----+--- +--------------------------------------------------------------------- 1 | 3 1 | 4 2 | 4 @@ -266,7 +266,7 @@ CREATE TABLE researchers ( SET citus.shard_count TO 2; SELECT create_distributed_table('researchers', 'lab_id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -280,7 +280,7 @@ RELEASE SAVEPOINT s1; COMMIT; SELECT * FROM researchers WHERE id in (7, 8); id | lab_id | name -----+--------+----------- +--------------------------------------------------------------------- 7 | 4 | Jan Plaza (1 row) @@ -295,7 +295,7 @@ INSERT INTO researchers VALUES (12, 10, 'Stephen Kleene'); COMMIT; SELECT * FROM researchers WHERE lab_id=10; id | lab_id | name -----+--------+---------------- +--------------------------------------------------------------------- 12 | 10 | Stephen Kleene (1 row) @@ -311,7 +311,7 @@ ERROR: current transaction is aborted, commands ignored until end of transactio ROLLBACK; SELECT * FROM researchers WHERE lab_id=10; id | lab_id | name -----+--------+---------------- +--------------------------------------------------------------------- 12 | 10 | Stephen Kleene (1 row) @@ -327,7 +327,7 @@ ERROR: current transaction is aborted, commands ignored until end of transactio COMMIT; SELECT * FROM researchers WHERE lab_id=10; id | lab_id | name -----+--------+---------------- +--------------------------------------------------------------------- 12 | 10 | Stephen Kleene (1 row) @@ -345,7 +345,7 @@ NOTICE: caught not_null_violation COMMIT; SELECT * FROM researchers WHERE lab_id=10; id | lab_id | name -----+--------+---------------- +--------------------------------------------------------------------- 12 | 10 | Stephen Kleene (1 row) @@ -362,7 +362,7 @@ NOTICE: caught manual plpgsql_error COMMIT; SELECT * FROM researchers WHERE lab_id=10; id | lab_id | name -----+--------+---------------- +--------------------------------------------------------------------- 12 | 10 | Stephen Kleene (1 row) @@ -379,7 +379,7 @@ ERROR: not_null_violation COMMIT; SELECT * FROM researchers WHERE lab_id=10; id | lab_id | name -----+--------+---------------- +--------------------------------------------------------------------- 12 | 10 | Stephen Kleene (1 row) @@ -396,7 +396,7 @@ END $$; COMMIT; SELECT * FROM researchers WHERE lab_id=10; id | lab_id | name -----+--------+------------------ +--------------------------------------------------------------------- 12 | 10 | Stephen Kleene 32 | 10 | Raymond Smullyan (2 rows) diff --git a/src/test/regress/expected/multi_table_ddl.out b/src/test/regress/expected/multi_table_ddl.out index 7fe9af2e9..ec7f9487a 100644 --- a/src/test/regress/expected/multi_table_ddl.out +++ b/src/test/regress/expected/multi_table_ddl.out @@ -6,7 +6,7 @@ SET citus.next_shard_id TO 870000; CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT create_distributed_table('testtableddl', 'distributecol', 'append'); create_distributed_table 
--------------------------- +--------------------------------------------------------------------- (1 row) @@ -31,7 +31,7 @@ COMMIT; CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT create_distributed_table('testtableddl', 'distributecol', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -43,13 +43,13 @@ CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('testtableddl', 'distributecol', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT 1 FROM master_create_empty_shard('testtableddl'); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -59,17 +59,17 @@ RESET citus.shard_replication_factor; -- ensure no metadata of distributed tables are remaining SELECT * FROM pg_dist_partition; logicalrelid | partmethod | partkey | colocationid | repmodel ---------------+------------+---------+--------------+---------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM pg_dist_shard; logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue ---------------+---------+--------------+---------------+--------------- +--------------------------------------------------------------------- (0 rows) SELECT * FROM pg_dist_shard_placement; shardid | shardstate | shardlength | nodename | nodeport | placementid ----------+------------+-------------+----------+----------+------------- +--------------------------------------------------------------------- (0 rows) -- check that the extension now can be dropped (and recreated) @@ -78,13 +78,13 @@ CREATE EXTENSION citus; -- re-add the nodes to the cluster SELECT 1 FROM master_add_node('localhost', :worker_1_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) @@ -94,7 +94,7 @@ SET citus.shard_count TO 2; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('testserialtable', 'group_id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -123,6 +123,6 @@ DROP TABLE testserialtable; \ds List of relations Schema | Name | Type | Owner ---------+------+------+------- +--------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_task_assignment_policy.out b/src/test/regress/expected/multi_task_assignment_policy.out index 273814f0f..7c6f27218 100644 --- a/src/test/regress/expected/multi_task_assignment_policy.out +++ b/src/test/regress/expected/multi_task_assignment_policy.out @@ -30,7 +30,7 @@ SET citus.explain_distributed_queries TO off; CREATE TABLE task_assignment_test_table (test_id integer); SELECT create_distributed_table('task_assignment_test_table', 'test_id', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -74,7 +74,7 @@ DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx QUERY PLAN ----------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -86,7 +86,7 @@ DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx QUERY PLAN ----------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -100,7 +100,7 @@ DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx QUERY PLAN ----------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -112,7 +112,7 @@ DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx QUERY PLAN ----------------------------------------------------------------------- +--------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled @@ -122,7 +122,7 @@ COMMIT; CREATE TABLE task_assignment_reference_table (test_id integer); SELECT create_reference_table('task_assignment_reference_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -136,7 +136,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable 
QUERY PLAN --------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled (2 rows) @@ -146,7 +146,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable QUERY PLAN --------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled (2 rows) @@ -157,7 +157,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable QUERY PLAN --------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled (2 rows) @@ -167,7 +167,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable QUERY PLAN --------------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled (2 rows) @@ -192,7 +192,7 @@ INSERT INTO explain_outputs -- given that we're in the same transaction, the count should be 1 SELECT count(DISTINCT value) FROM explain_outputs; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -211,7 +211,7 @@ INSERT INTO explain_outputs -- since there are two different worker nodes SELECT count(DISTINCT value) FROM explain_outputs; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -224,7 +224,7 @@ SET citus.shard_replication_factor TO 2; CREATE TABLE task_assignment_replicated_hash (test_id integer); SELECT create_distributed_table('task_assignment_replicated_hash', 'test_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -238,7 +238,7 @@ INSERT INTO explain_outputs -- given that we're in the same transaction, the count should be 1 SELECT count(DISTINCT value) FROM explain_outputs; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -257,7 +257,7 @@ INSERT INTO explain_outputs -- since there are two different worker nodes SELECT count(DISTINCT value) FROM explain_outputs; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -268,7 +268,7 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE task_assignment_nonreplicated_hash (test_id integer, ref_id integer); SELECT create_distributed_table('task_assignment_nonreplicated_hash', 'test_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -290,7 +290,7 @@ $cmd$, 'task_assignment_nonreplicated_hash'); -- The count should be 1 since the shard exists in only one worker node SELECT count(DISTINCT value) FROM explain_outputs; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -300,7 +300,7 @@ TRUNCATE explain_outputs; CREATE TABLE task_assignment_test_table_2 (test_id integer); SELECT create_distributed_table('task_assignment_test_table_2', 'test_id'); 
create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -319,7 +319,7 @@ $cmd$, 'task_assignment_test_table_2'); -- different workers SELECT count(DISTINCT value) FROM explain_outputs; count -------- +--------------------------------------------------------------------- 2 (1 row) diff --git a/src/test/regress/expected/multi_task_string_size.out b/src/test/regress/expected/multi_task_string_size.out index 1eec2b95b..97eea8e57 100644 --- a/src/test/regress/expected/multi_task_string_size.out +++ b/src/test/regress/expected/multi_task_string_size.out @@ -207,14 +207,14 @@ CREATE TABLE wide_table ); SELECT create_distributed_table('wide_table', 'long_column_001'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SET citus.task_executor_type TO 'task-tracker'; SHOW citus.max_task_string_size; citus.max_task_string_size ----------------------------- +--------------------------------------------------------------------- 12288 (1 row) @@ -233,7 +233,7 @@ CONTEXT: PL/pgSQL function raise_failed_execution(text) line 6 at RAISE -- following will succeed since it fetches few columns SELECT u.long_column_001, u.long_column_002, u.long_column_003 FROM wide_table u JOIN wide_table v ON (u.long_column_002 = v.long_column_003); long_column_001 | long_column_002 | long_column_003 ------------------+-----------------+----------------- +--------------------------------------------------------------------- (0 rows) RESET client_min_messages; diff --git a/src/test/regress/expected/multi_test_catalog_views.out b/src/test/regress/expected/multi_test_catalog_views.out index 80dab1b3b..dbf97c81f 100644 --- a/src/test/regress/expected/multi_test_catalog_views.out +++ b/src/test/regress/expected/multi_test_catalog_views.out @@ -98,7 +98,7 @@ ORDER BY a.attrelid, a.attnum; $desc_views$ ); run_command_on_master_and_workers ------------------------------------ +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/multi_test_helpers.out b/src/test/regress/expected/multi_test_helpers.out index 117a467e4..8924f58fe 100644 --- a/src/test/regress/expected/multi_test_helpers.out +++ b/src/test/regress/expected/multi_test_helpers.out @@ -89,7 +89,7 @@ ALTER SYSTEM SET citus.metadata_sync_interval TO 3000; ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/multi_tpch_query1.out b/src/test/regress/expected/multi_tpch_query1.out index 80e086b81..1debbf26d 100644 --- a/src/test/regress/expected/multi_tpch_query1.out +++ b/src/test/regress/expected/multi_tpch_query1.out @@ -24,7 +24,7 @@ ORDER BY l_returnflag, l_linestatus; l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order ---------------+--------------+-----------+----------------+----------------+------------------+---------------------+--------------------+------------------------+------------- +--------------------------------------------------------------------- A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 
40823.045394736842 | 0.05263157894736842105 | 76 N | O | 149778.00 | 224706948.16 | 213634857.6854 | 222134071.929801 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 5883 diff --git a/src/test/regress/expected/multi_tpch_query10.out b/src/test/regress/expected/multi_tpch_query10.out index 0a71a2914..102a54036 100644 --- a/src/test/regress/expected/multi_tpch_query10.out +++ b/src/test/regress/expected/multi_tpch_query10.out @@ -35,7 +35,7 @@ ORDER BY revenue DESC LIMIT 20; c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment ------------+--------------------+-------------+-----------+---------------------------+---------------------------------------+-----------------+--------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- 436 | Customer#000000436 | 255187.7382 | 5896.87 | ROMANIA | 4DCNzAT842cVYTcaUS94kR0QXHSRM5oco0D6Z | 29-927-687-6390 | olites engage carefully. slyly ironic asymptotes about the ironi 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole diff --git a/src/test/regress/expected/multi_tpch_query12.out b/src/test/regress/expected/multi_tpch_query12.out index 031d0bf29..608901a60 100644 --- a/src/test/regress/expected/multi_tpch_query12.out +++ b/src/test/regress/expected/multi_tpch_query12.out @@ -31,7 +31,7 @@ GROUP BY ORDER BY l_shipmode; l_shipmode | high_line_count | low_line_count -------------+-----------------+---------------- +--------------------------------------------------------------------- MAIL | 11 | 15 SHIP | 11 | 19 (2 rows) diff --git a/src/test/regress/expected/multi_tpch_query14.out b/src/test/regress/expected/multi_tpch_query14.out index 51e4aa702..f3dba6d63 100644 --- a/src/test/regress/expected/multi_tpch_query14.out +++ b/src/test/regress/expected/multi_tpch_query14.out @@ -16,7 +16,7 @@ WHERE AND l_shipdate >= date '1995-09-01' AND l_shipdate < date '1995-09-01' + interval '1' year; promo_revenue ---------------------- +--------------------------------------------------------------------- 32.1126387112005225 (1 row) diff --git a/src/test/regress/expected/multi_tpch_query19.out b/src/test/regress/expected/multi_tpch_query19.out index 1a4903de9..94a26cb17 100644 --- a/src/test/regress/expected/multi_tpch_query19.out +++ b/src/test/regress/expected/multi_tpch_query19.out @@ -33,7 +33,7 @@ WHERE AND l_shipinstruct = 'DELIVER IN PERSON' ); revenue -------------- +--------------------------------------------------------------------- 144747.0857 (1 row) diff --git a/src/test/regress/expected/multi_tpch_query3.out b/src/test/regress/expected/multi_tpch_query3.out index 775385002..d09962a32 100644 --- a/src/test/regress/expected/multi_tpch_query3.out +++ b/src/test/regress/expected/multi_tpch_query3.out @@ -25,7 +25,7 @@ ORDER BY revenue DESC, o_orderdate; l_orderkey | revenue | o_orderdate | o_shippriority -------------+-------------+-------------+---------------- +--------------------------------------------------------------------- 1637 | 268170.6408 | 02-08-1995 | 0 9696 | 252014.5497 | 02-20-1995 | 0 10916 | 242749.1996 | 03-11-1995 | 0 diff --git a/src/test/regress/expected/multi_tpch_query6.out 
b/src/test/regress/expected/multi_tpch_query6.out index c6a54ec5d..40d2b2a4b 100644 --- a/src/test/regress/expected/multi_tpch_query6.out +++ b/src/test/regress/expected/multi_tpch_query6.out @@ -12,7 +12,7 @@ WHERE and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; revenue -------------- +--------------------------------------------------------------------- 243277.7858 (1 row) diff --git a/src/test/regress/expected/multi_tpch_query7.out b/src/test/regress/expected/multi_tpch_query7.out index 24057f946..816202e81 100644 --- a/src/test/regress/expected/multi_tpch_query7.out +++ b/src/test/regress/expected/multi_tpch_query7.out @@ -42,7 +42,7 @@ ORDER BY cust_nation, l_year; supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+----------- +--------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) diff --git a/src/test/regress/expected/multi_tpch_query7_nested.out b/src/test/regress/expected/multi_tpch_query7_nested.out index 86479e5a5..4184d2aa3 100644 --- a/src/test/regress/expected/multi_tpch_query7_nested.out +++ b/src/test/regress/expected/multi_tpch_query7_nested.out @@ -51,7 +51,7 @@ ORDER BY cust_nation, l_year; supp_nation | cust_nation | l_year | revenue ----------------------------+---------------------------+--------+----------- +--------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) diff --git a/src/test/regress/expected/multi_transaction_recovery.out b/src/test/regress/expected/multi_transaction_recovery.out index 197a464a9..8d2ed2d9f 100644 --- a/src/test/regress/expected/multi_transaction_recovery.out +++ b/src/test/regress/expected/multi_transaction_recovery.out @@ -6,7 +6,7 @@ SET citus.next_shard_id TO 1220000; SET client_min_messages TO ERROR; SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0); ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) @@ -18,14 +18,14 @@ SET citus.force_max_query_parallelization TO ON; ALTER SYSTEM SET citus.recover_2pc_interval TO -1; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) -- Ensure pg_dist_transaction is empty SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -58,25 +58,25 @@ INSERT INTO pg_dist_transaction VALUES (1, 'citus_0_should_be_forgotten'), (0, 'citus_0_should_be_forgotten'); SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 6 (1 row) SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'should_abort'; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit'; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -84,13 +84,13 @@ SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit'; \c - - - :worker_1_port SELECT count(*) FROM pg_tables WHERE tablename = 'should_abort'; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit'; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -106,13 +106,13 @@ SET citus.multi_shard_commit_protocol TO '2pc'; CREATE TABLE test_recovery (x text); SELECT create_distributed_table('test_recovery', 'x'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT count(*) >= 2 FROM pg_dist_transaction; ?column? ----------- +--------------------------------------------------------------------- t (1 row) @@ -120,19 +120,19 @@ SELECT count(*) >= 2 FROM pg_dist_transaction; CREATE TABLE test_recovery_ref (x text); SELECT create_reference_table('test_recovery_ref'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT count(*) >= 4 FROM pg_dist_transaction; ?column? 
----------- +--------------------------------------------------------------------- t (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -140,7 +140,7 @@ SELECT recover_prepared_transactions(); INSERT INTO test_recovery VALUES ('hello'); SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -150,7 +150,7 @@ ALTER TABLE test_recovery ADD COLUMN y text; ROLLBACK; SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -158,19 +158,19 @@ SELECT count(*) FROM pg_dist_transaction; ALTER TABLE test_recovery ADD COLUMN y text; SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 4 (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -180,7 +180,7 @@ INSERT INTO test_recovery SELECT x, 'earth' FROM test_recovery; ROLLBACK; SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -188,13 +188,13 @@ SELECT count(*) FROM pg_dist_transaction; INSERT INTO test_recovery SELECT x, 'earth' FROM test_recovery; SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 4 (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -202,13 +202,13 @@ SELECT recover_prepared_transactions(); INSERT INTO test_recovery (x) SELECT 'hello-'||s FROM generate_series(1,100) s; SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 4 (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -216,13 +216,13 @@ SELECT recover_prepared_transactions(); COPY test_recovery (x) FROM STDIN CSV; SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 4 (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -233,7 +233,7 @@ CREATE TABLE test_recovery_single (LIKE test_recovery); -- one connection/transaction per node SELECT create_distributed_table('test_recovery_single', 'x'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -246,13 +246,13 @@ INSERT INTO test_recovery_single VALUES ('hello-2'); COMMIT; SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 4 (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- 
+--------------------------------------------------------------------- 0 (1 row) @@ -265,13 +265,13 @@ INSERT INTO test_recovery_single VALUES ('hello-2'); COMMIT; SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 2 (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -281,13 +281,13 @@ SET citus.force_max_query_parallelization TO OFF; BEGIN; SELECT count(*) FROM test_recovery_single WHERE x = 'hello-0'; count -------- +--------------------------------------------------------------------- 2 (1 row) SELECT count(*) FROM test_recovery_single WHERE x = 'hello-2'; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -296,13 +296,13 @@ INSERT INTO test_recovery_single VALUES ('hello-2'); COMMIT; SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 2 (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -312,13 +312,13 @@ SET citus.force_max_query_parallelization TO ON; BEGIN; SELECT count(*) FROM test_recovery_single WHERE x = 'hello-0'; count -------- +--------------------------------------------------------------------- 3 (1 row) SELECT count(*) FROM test_recovery_single WHERE x = 'hello-2'; count -------- +--------------------------------------------------------------------- 3 (1 row) @@ -327,7 +327,7 @@ INSERT INTO test_recovery_single VALUES ('hello-2'); COMMIT; SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -335,27 +335,27 @@ SELECT count(*) FROM pg_dist_transaction; ALTER SYSTEM SET citus.recover_2pc_interval TO 10; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) -- Sleep 1 second to give Valgrind enough time to clear transactions SELECT pg_sleep(1); pg_sleep ----------- +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_dist_transaction; count -------- +--------------------------------------------------------------------- 0 (1 row) ALTER SYSTEM RESET citus.recover_2pc_interval; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) @@ -364,7 +364,7 @@ DROP TABLE test_recovery; DROP TABLE test_recovery_single; SELECT 1 FROM master_remove_node('localhost', :master_port); ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) diff --git a/src/test/regress/expected/multi_transactional_drop_shards.out b/src/test/regress/expected/multi_transactional_drop_shards.out index 8016d3633..6b6f8e62c 100644 --- a/src/test/regress/expected/multi_transactional_drop_shards.out +++ b/src/test/regress/expected/multi_transactional_drop_shards.out @@ -8,7 +8,7 @@ SET citus.shard_count TO 4; CREATE TABLE transactional_drop_shards(column1 int); SELECT create_distributed_table('transactional_drop_shards', 'column1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -18,7 +18,7 @@ ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; shardid ---------- +--------------------------------------------------------------------- 1410000 1410001 1410002 @@ -34,7 +34,7 @@ WHERE ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- +--------------------------------------------------------------------- 1410000 | 1 | localhost | 57637 1410000 | 1 | localhost | 57638 1410001 | 1 | localhost | 57637 @@ -49,7 +49,7 @@ ORDER BY \dt transactional_drop_shards List of relations Schema | Name | Type | Owner ---------+---------------------------+-------+---------- +--------------------------------------------------------------------- public | transactional_drop_shards | table | postgres (1 row) @@ -58,7 +58,7 @@ ORDER BY \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner ---------+-----------------------------------+-------+---------- +--------------------------------------------------------------------- public | transactional_drop_shards_1410000 | table | postgres public | transactional_drop_shards_1410001 | table | postgres public | transactional_drop_shards_1410002 | table | postgres @@ -73,7 +73,7 @@ COMMIT; -- verify metadata is deleted SELECT shardid FROM pg_dist_shard WHERE shardid IN (1410000, 1410001, 1410002, 1410003) ORDER BY shardid; shardid ---------- +--------------------------------------------------------------------- (0 rows) SELECT @@ -85,14 +85,14 @@ WHERE ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ----------+------------+----------+---------- +--------------------------------------------------------------------- (0 rows) -- verify table is dropped \dt transactional_drop_shards List of relations Schema | Name | Type | Owner ---------+------+------+------- +--------------------------------------------------------------------- (0 rows) -- verify shards are dropped @@ -100,7 +100,7 @@ ORDER BY \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner ---------+------+------+------- +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -108,20 +108,20 @@ ORDER BY CREATE TABLE transactional_drop_shards(column1 int); SELECT create_distributed_table('transactional_drop_shards', 'column1', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_empty_shard('transactional_drop_shards'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 1410004 (1 row) BEGIN; SELECT 
master_apply_delete_command('DELETE FROM transactional_drop_shards'); master_apply_delete_command ------------------------------ +--------------------------------------------------------------------- 1 (1 row) @@ -129,7 +129,7 @@ ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; shardid ---------- +--------------------------------------------------------------------- 1410004 (1 row) @@ -142,7 +142,7 @@ WHERE ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- +--------------------------------------------------------------------- 1410004 | 1 | localhost | 57637 1410004 | 1 | localhost | 57638 (2 rows) @@ -152,7 +152,7 @@ ORDER BY \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner ---------+-----------------------------------+-------+---------- +--------------------------------------------------------------------- public | transactional_drop_shards_1410004 | table | postgres (1 row) @@ -161,7 +161,7 @@ ORDER BY BEGIN; SELECT master_apply_delete_command('DELETE FROM transactional_drop_shards'); master_apply_delete_command ------------------------------ +--------------------------------------------------------------------- 1 (1 row) @@ -169,7 +169,7 @@ COMMIT; -- verify metadata is deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; shardid ---------- +--------------------------------------------------------------------- (0 rows) SELECT @@ -181,7 +181,7 @@ WHERE ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ----------+------------+----------+---------- +--------------------------------------------------------------------- (0 rows) -- verify shards are dropped @@ -189,14 +189,14 @@ ORDER BY \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner ---------+------+------+------- +--------------------------------------------------------------------- (0 rows) \c - - - :master_port -- test DROP table in a transaction after insertion SELECT master_create_empty_shard('transactional_drop_shards'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 1410005 (1 row) @@ -207,7 +207,7 @@ ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; shardid ---------- +--------------------------------------------------------------------- 1410005 (1 row) @@ -220,7 +220,7 @@ WHERE ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- +--------------------------------------------------------------------- 1410005 | 1 | localhost | 57637 1410005 | 1 | localhost | 57638 (2 rows) @@ -229,7 +229,7 @@ ORDER BY \dt transactional_drop_shards List of relations Schema | Name | Type | Owner ---------+---------------------------+-------+---------- +--------------------------------------------------------------------- public | transactional_drop_shards | table | postgres (1 row) @@ -238,7 +238,7 @@ ORDER BY \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner ---------+-----------------------------------+-------+---------- +--------------------------------------------------------------------- public | 
transactional_drop_shards_1410005 | table | postgres (1 row) @@ -248,7 +248,7 @@ BEGIN; INSERT INTO transactional_drop_shards VALUES (1); SELECT master_apply_delete_command('DELETE FROM transactional_drop_shards'); master_apply_delete_command ------------------------------ +--------------------------------------------------------------------- 1 (1 row) @@ -256,7 +256,7 @@ ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; shardid ---------- +--------------------------------------------------------------------- 1410005 (1 row) @@ -269,7 +269,7 @@ WHERE ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- +--------------------------------------------------------------------- 1410005 | 1 | localhost | 57637 1410005 | 1 | localhost | 57638 (2 rows) @@ -279,7 +279,7 @@ ORDER BY \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner ---------+-----------------------------------+-------+---------- +--------------------------------------------------------------------- public | transactional_drop_shards_1410005 | table | postgres (1 row) @@ -298,7 +298,7 @@ ERROR: illegal value -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; shardid ---------- +--------------------------------------------------------------------- 1410005 (1 row) @@ -311,7 +311,7 @@ WHERE ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- +--------------------------------------------------------------------- 1410005 | 1 | localhost | 57637 1410005 | 1 | localhost | 57638 (2 rows) @@ -320,7 +320,7 @@ ORDER BY \dt transactional_drop_shards List of relations Schema | Name | Type | Owner ---------+---------------------------+-------+---------- +--------------------------------------------------------------------- public | transactional_drop_shards | table | postgres (1 row) @@ -329,7 +329,7 @@ ORDER BY \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner ---------+-----------------------------------+-------+---------- +--------------------------------------------------------------------- public | transactional_drop_shards_1410005 | table | postgres (1 row) @@ -338,7 +338,7 @@ ORDER BY CREATE TABLE transactional_drop_reference(column1 int); SELECT create_reference_table('transactional_drop_reference'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -349,7 +349,7 @@ ERROR: illegal value -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_reference'::regclass ORDER BY shardid; shardid ---------- +--------------------------------------------------------------------- 1410006 (1 row) @@ -362,7 +362,7 @@ WHERE ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- +--------------------------------------------------------------------- 1410006 | 1 | localhost | 57637 1410006 | 1 | localhost | 57638 (2 rows) @@ -371,7 +371,7 @@ ORDER BY \dt transactional_drop_reference List of relations Schema | Name | Type | Owner ---------+------------------------------+-------+---------- 
+--------------------------------------------------------------------- public | transactional_drop_reference | table | postgres (1 row) @@ -380,7 +380,7 @@ ORDER BY \dt transactional_drop_reference* List of relations Schema | Name | Type | Owner ---------+--------------------------------------+-------+---------- +--------------------------------------------------------------------- public | transactional_drop_reference_1410006 | table | postgres (1 row) @@ -393,7 +393,7 @@ ERROR: illegal value -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; shardid ---------- +--------------------------------------------------------------------- 1410005 (1 row) @@ -406,7 +406,7 @@ WHERE ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- +--------------------------------------------------------------------- 1410005 | 1 | localhost | 57637 1410005 | 1 | localhost | 57638 (2 rows) @@ -416,7 +416,7 @@ ORDER BY \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner ---------+-----------------------------------+-------+---------- +--------------------------------------------------------------------- public | transactional_drop_shards_1410005 | table | postgres (1 row) @@ -427,7 +427,7 @@ SET citus.shard_count TO 8; CREATE TABLE transactional_drop_serial(column1 int, column2 SERIAL); SELECT create_distributed_table('transactional_drop_serial', 'column1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -438,7 +438,7 @@ ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_serial'::regclass ORDER BY shardid; shardid ---------- +--------------------------------------------------------------------- 1410007 1410008 1410009 @@ -458,7 +458,7 @@ WHERE ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- +--------------------------------------------------------------------- 1410007 | 1 | localhost | 57637 1410007 | 1 | localhost | 57638 1410008 | 1 | localhost | 57637 @@ -481,7 +481,7 @@ ORDER BY \dt transactional_drop_serial List of relations Schema | Name | Type | Owner ---------+---------------------------+-------+---------- +--------------------------------------------------------------------- public | transactional_drop_serial | table | postgres (1 row) @@ -490,7 +490,7 @@ ORDER BY \dt transactional_drop_serial_* List of relations Schema | Name | Type | Owner ---------+-----------------------------------+-------+---------- +--------------------------------------------------------------------- public | transactional_drop_serial_1410007 | table | postgres public | transactional_drop_serial_1410008 | table | postgres public | transactional_drop_serial_1410009 | table | postgres @@ -504,7 +504,7 @@ ORDER BY \ds transactional_drop_serial_column2_seq List of relations Schema | Name | Type | Owner ---------+------+------+------- +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -515,7 +515,7 @@ COMMIT; -- verify metadata is deleted SELECT shardid FROM pg_dist_shard WHERE shardid IN (1410007, 1410008, 1410009, 1410010, 1410011, 1410012, 1410013, 1410014) ORDER BY shardid; shardid ---------- 
+--------------------------------------------------------------------- (0 rows) SELECT @@ -527,14 +527,14 @@ WHERE ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ----------+------------+----------+---------- +--------------------------------------------------------------------- (0 rows) -- verify table is dropped \dt transactional_drop_serial List of relations Schema | Name | Type | Owner ---------+------+------+------- +--------------------------------------------------------------------- (0 rows) -- verify shards and sequence are dropped @@ -542,13 +542,13 @@ ORDER BY \dt transactional_drop_serial_* List of relations Schema | Name | Type | Owner ---------+------+------+------- +--------------------------------------------------------------------- (0 rows) \ds transactional_drop_serial_column2_seq List of relations Schema | Name | Type | Owner ---------+------+------+------- +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -558,7 +558,7 @@ SET citus.shard_count TO 4; CREATE TABLE transactional_drop_mx(column1 int); SELECT create_distributed_table('transactional_drop_mx', 'column1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -566,7 +566,7 @@ UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='transactional_drop -- make worker 1 receive metadata changes SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -574,7 +574,7 @@ SELECT start_metadata_sync_to_node('localhost', :worker_1_port); \c - - - :worker_1_port SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_mx'::regclass ORDER BY shardid; shardid ---------- +--------------------------------------------------------------------- 1410015 1410016 1410017 @@ -590,7 +590,7 @@ WHERE ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- +--------------------------------------------------------------------- 1410015 | 1 | localhost | 57637 1410016 | 1 | localhost | 57638 1410017 | 1 | localhost | 57637 @@ -605,7 +605,7 @@ ROLLBACK; \c - - - :worker_1_port SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_mx'::regclass ORDER BY shardid; shardid ---------- +--------------------------------------------------------------------- 1410015 1410016 1410017 @@ -621,7 +621,7 @@ WHERE ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ----------+------------+-----------+---------- +--------------------------------------------------------------------- 1410015 | 1 | localhost | 57637 1410016 | 1 | localhost | 57638 1410017 | 1 | localhost | 57637 @@ -637,7 +637,7 @@ COMMIT; \c - - - :worker_1_port SELECT shardid FROM pg_dist_shard WHERE shardid IN (1410015, 1410016, 1410017, 1410018) ORDER BY shardid; shardid ---------- +--------------------------------------------------------------------- (0 rows) SELECT @@ -649,7 +649,7 @@ WHERE ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ----------+------------+----------+---------- +--------------------------------------------------------------------- (0 rows) \c - - - :master_port @@ -657,14 +657,14 @@ ORDER BY SELECT 1 FROM master_add_node('localhost', :master_port); NOTICE: Replicating reference 
table "transactional_drop_reference" to the node localhost:xxxxx ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) CREATE TABLE citus_local (id serial, k int); SELECT create_distributed_table('citus_local', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -672,7 +672,7 @@ INSERT INTO citus_local (k) VALUES (2); DROP TABLE citus_local; SELECT master_remove_node('localhost', :master_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) @@ -680,7 +680,7 @@ SELECT master_remove_node('localhost', :master_port); DROP TABLE transactional_drop_shards, transactional_drop_reference; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) @@ -691,14 +691,14 @@ HINT: Connect to worker nodes directly to manually create all necessary users a GRANT ALL ON SCHEMA public TO try_drop_table; SELECT run_command_on_workers('CREATE USER try_drop_table WITH LOGIN'); run_command_on_workers ------------------------------------ +--------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) SELECT run_command_on_workers('GRANT ALL ON SCHEMA public TO try_drop_table'); run_command_on_workers ---------------------------- +--------------------------------------------------------------------- (localhost,57637,t,GRANT) (localhost,57638,t,GRANT) (2 rows) @@ -708,7 +708,7 @@ BEGIN; CREATE TABLE temp_dist_table (x int, y int); SELECT create_distributed_table('temp_dist_table','x'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/multi_truncate.out b/src/test/regress/expected/multi_truncate.out index a13b309a7..06fd2dfbb 100644 --- a/src/test/regress/expected/multi_truncate.out +++ b/src/test/regress/expected/multi_truncate.out @@ -11,7 +11,7 @@ SET search_path TO multi_truncate; CREATE TABLE test_truncate_append(a int); SELECT create_distributed_table('test_truncate_append', 'a', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -22,34 +22,34 @@ UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 500 WHERE shardid = :new_shard_id; SELECT count(*) FROM test_truncate_append; count -------- +--------------------------------------------------------------------- 0 (1 row) INSERT INTO test_truncate_append values (1); SELECT count(*) FROM test_truncate_append; count -------- +--------------------------------------------------------------------- 1 (1 row) -- create some more shards SELECT master_create_empty_shard('test_truncate_append'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 1210001 (1 row) SELECT master_create_empty_shard('test_truncate_append'); master_create_empty_shard ---------------------------- +--------------------------------------------------------------------- 1210002 (1 row) -- verify 3 shards are presents SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_append'::regclass ORDER BY shardid; shardid ---------- 
+--------------------------------------------------------------------- 1210000 1210001 1210002 @@ -59,14 +59,14 @@ TRUNCATE TABLE test_truncate_append; -- verify data is truncated from the table SELECT count(*) FROM test_truncate_append; count -------- +--------------------------------------------------------------------- 0 (1 row) -- verify no shard exists anymore SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_append'::regclass; shardid ---------- +--------------------------------------------------------------------- (0 rows) -- command can run inside transaction @@ -79,7 +79,7 @@ DROP TABLE test_truncate_append; CREATE TABLE test_truncate_range(a int); SELECT create_distributed_table('test_truncate_range', 'a', 'range'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -96,7 +96,7 @@ UPDATE pg_dist_shard SET shardminvalue = 1501, shardmaxvalue = 2500 WHERE shardid = :new_shard_id; SELECT count(*) FROM test_truncate_range; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -106,14 +106,14 @@ INSERT INTO test_truncate_range values (2000); INSERT INTO test_truncate_range values (100); SELECT count(*) FROM test_truncate_range; count -------- +--------------------------------------------------------------------- 4 (1 row) -- verify 3 shards are presents SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_range'::regclass ORDER BY shardid; shardid ---------- +--------------------------------------------------------------------- 1210003 1210004 1210005 @@ -123,14 +123,14 @@ TRUNCATE TABLE test_truncate_range; -- verify data is truncated from the table SELECT count(*) FROM test_truncate_range; count -------- +--------------------------------------------------------------------- 0 (1 row) -- verify 3 shards are still present SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_range'::regclass ORDER BY shardid; shardid ---------- +--------------------------------------------------------------------- 1210003 1210004 1210005 @@ -141,7 +141,7 @@ INSERT INTO test_truncate_range VALUES (1); BEGIN; TRUNCATE TABLE test_truncate_range; ROLLBACK; SELECT count(*) FROM test_truncate_range; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -153,7 +153,7 @@ DROP TABLE test_truncate_range; CREATE TABLE test_truncate_hash(a int); SELECT master_create_distributed_table('test_truncate_hash', 'a', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) @@ -161,7 +161,7 @@ SELECT master_create_distributed_table('test_truncate_hash', 'a', 'hash'); TRUNCATE TABLE test_truncate_hash; SELECT count(*) FROM test_truncate_hash; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -183,20 +183,20 @@ DETAIL: No shards exist for distributed table "test_truncate_hash". HINT: Run master_create_worker_shards to create shards and try again. 
SELECT count(*) FROM test_truncate_hash; count -------- +--------------------------------------------------------------------- 0 (1 row) -- verify 4 shards are present SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_hash'::regclass ORDER BY shardid; shardid ---------- +--------------------------------------------------------------------- (0 rows) TRUNCATE TABLE test_truncate_hash; SELECT master_create_worker_shards('test_truncate_hash', 4, 1); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -206,7 +206,7 @@ INSERT INTO test_truncate_hash values (2000); INSERT INTO test_truncate_hash values (100); SELECT count(*) FROM test_truncate_hash; count -------- +--------------------------------------------------------------------- 4 (1 row) @@ -214,14 +214,14 @@ TRUNCATE TABLE test_truncate_hash; -- verify data is truncated from the table SELECT count(*) FROM test_truncate_hash; count -------- +--------------------------------------------------------------------- 0 (1 row) -- verify 4 shards are still presents SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_hash'::regclass ORDER BY shardid; shardid ---------- +--------------------------------------------------------------------- 1210006 1210007 1210008 @@ -233,7 +233,7 @@ INSERT INTO test_truncate_hash VALUES (1); BEGIN; TRUNCATE TABLE test_truncate_hash; ROLLBACK; SELECT count(*) FROM test_truncate_hash; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -243,21 +243,21 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE "a b hash" (a int, b int); SELECT create_distributed_table('"a b hash"', 'a', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) INSERT INTO "a b hash" values (1, 0); SELECT * from "a b hash"; a | b ----+--- +--------------------------------------------------------------------- 1 | 0 (1 row) TRUNCATE TABLE "a b hash"; SELECT * from "a b hash"; a | b ----+--- +--------------------------------------------------------------------- (0 rows) DROP TABLE "a b hash"; @@ -265,7 +265,7 @@ DROP TABLE "a b hash"; CREATE TABLE "a b append" (a int, b int); SELECT create_distributed_table('"a b append"', 'a', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -279,7 +279,7 @@ INSERT INTO "a b append" values (1, 1); INSERT INTO "a b append" values (600, 600); SELECT * FROM "a b append" ORDER BY a; a | b ------+----- +--------------------------------------------------------------------- 1 | 1 600 | 600 (2 rows) @@ -288,7 +288,7 @@ TRUNCATE TABLE "a b append"; -- verify all shards are dropped SELECT shardid FROM pg_dist_shard where logicalrelid = '"a b append"'::regclass; shardid ---------- +--------------------------------------------------------------------- (0 rows) DROP TABLE "a b append"; @@ -298,7 +298,7 @@ INSERT INTO test_local_truncate VALUES (1,2); SELECT create_distributed_table('test_local_truncate', 'x', colocate_with => 'none'); NOTICE: Copying data from local table... 
create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -309,14 +309,14 @@ COMMIT; -- Ensure distributed data is not truncated SELECT * FROM test_local_truncate; x | y ----+--- +--------------------------------------------------------------------- 1 | 2 (1 row) -- Undistribute table SELECT master_drop_all_shards('test_local_truncate', 'pubic', 'test_local_truncate'); master_drop_all_shards ------------------------- +--------------------------------------------------------------------- 4 (1 row) @@ -324,7 +324,7 @@ DELETE FROM pg_dist_partition WHERE logicalrelid = 'test_local_truncate'::regcla -- Ensure local data is truncated SELECT * FROM test_local_truncate; x | y ----+--- +--------------------------------------------------------------------- (0 rows) DROP TABLE test_local_truncate; @@ -334,7 +334,7 @@ INSERT INTO test_local_truncate VALUES (1,2); SELECT create_distributed_table('test_local_truncate', 'x', colocate_with => 'none'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -345,14 +345,14 @@ ROLLBACK; -- Ensure distributed data is not truncated SELECT * FROM test_local_truncate; x | y ----+--- +--------------------------------------------------------------------- 1 | 2 (1 row) -- Undistribute table SELECT master_drop_all_shards('test_local_truncate', 'pubic', 'test_local_truncate'); master_drop_all_shards ------------------------- +--------------------------------------------------------------------- 4 (1 row) @@ -360,7 +360,7 @@ DELETE FROM pg_dist_partition WHERE logicalrelid = 'test_local_truncate'::regcla -- Ensure local data is not truncated SELECT * FROM test_local_truncate; x | y ----+--- +--------------------------------------------------------------------- 1 | 2 (1 row) diff --git a/src/test/regress/expected/multi_unsupported_worker_operations.out b/src/test/regress/expected/multi_unsupported_worker_operations.out index 67ee3ad47..a224117e2 100644 --- a/src/test/regress/expected/multi_unsupported_worker_operations.out +++ b/src/test/regress/expected/multi_unsupported_worker_operations.out @@ -17,21 +17,21 @@ SET citus.shard_count TO 5; CREATE TABLE mx_table (col_1 int, col_2 text, col_3 BIGSERIAL); SELECT create_distributed_table('mx_table', 'col_1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE mx_table_2 (col_1 int, col_2 text, col_3 BIGSERIAL); SELECT create_distributed_table('mx_table_2', 'col_1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE mx_ref_table (col_1 int, col_2 text); SELECT create_reference_table('mx_ref_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -41,14 +41,14 @@ FROM pg_dist_partition WHERE logicalrelid IN ('mx_table'::regclass, 'mx_table_2'::regclass) ORDER BY logicalrelid; logicalrelid | repmodel | colocationid ---------------+----------+-------------- +--------------------------------------------------------------------- mx_table | s | 150000 mx_table_2 | s | 150000 (2 rows) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ 
+--------------------------------------------------------------------- (1 row) @@ -58,7 +58,7 @@ INSERT INTO mx_ref_table VALUES (-78, 'sapien'); INSERT INTO mx_ref_table VALUES (-34, 'augue'); SELECT * FROM mx_table ORDER BY col_1; col_1 | col_2 | col_3 --------+----------+------- +--------------------------------------------------------------------- -37 | 'lorem' | 1 80 | 'dolor' | 3 7344 | 'sit' | 4 @@ -83,7 +83,7 @@ ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(*) FROM pg_dist_partition WHERE logicalrelid='mx_table_worker'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -97,14 +97,14 @@ ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='mx_table'::regclass; count -------- +--------------------------------------------------------------------- 0 (1 row) INSERT INTO pg_dist_shard SELECT * FROM pg_dist_shard_temp; SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='mx_table'::regclass; count -------- +--------------------------------------------------------------------- 5 (1 row) @@ -122,7 +122,7 @@ HINT: Connect to the coordinator and run it again. -- DDL commands SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass; Column | Type | Modifiers ---------+---------+---------------------------------------------------------- +--------------------------------------------------------------------- col_1 | integer | col_2 | text | col_3 | bigint | not null default nextval('mx_table_col_3_seq'::regclass) @@ -139,7 +139,7 @@ ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass; Column | Type | Modifiers ---------+---------+---------------------------------------------------------- +--------------------------------------------------------------------- col_1 | integer | col_2 | text | col_3 | bigint | not null default nextval('mx_table_col_3_seq'::regclass) @@ -152,7 +152,7 @@ ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_table'::regclass; count -------- +--------------------------------------------------------------------- 5 (1 row) @@ -162,7 +162,7 @@ ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(*) FROM mx_table; count -------- +--------------------------------------------------------------------- 5 (1 row) @@ -172,7 +172,7 @@ ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(1) FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -181,7 +181,7 @@ SELECT count(1) FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432; DROP INDEX mx_test_uniq_index; SELECT 1 FROM master_add_inactive_node('localhost', 5432); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -191,14 +191,14 @@ ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. 
SELECT count(1) FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432; count -------- +--------------------------------------------------------------------- 1 (1 row) \c - - - :master_port SELECT master_remove_node('localhost', 5432); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) @@ -210,7 +210,7 @@ ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT colocationid FROM pg_dist_partition WHERE logicalrelid='mx_table_2'::regclass; colocationid --------------- +--------------------------------------------------------------------- 0 (1 row) @@ -226,7 +226,7 @@ ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; hasmetadata -------------- +--------------------------------------------------------------------- f (1 row) @@ -234,7 +234,7 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; \c - - - :master_port SELECT start_metadata_sync_to_node('localhost', :worker_2_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -245,26 +245,26 @@ HINT: Connect to the coordinator and run it again. \c - - - :master_port SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; hasmetadata -------------- +--------------------------------------------------------------------- t (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; hasmetadata -------------- +--------------------------------------------------------------------- f (1 row) \c - - - :worker_2_port SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition; worker_drop_distributed_table -------------------------------- +--------------------------------------------------------------------- (2 rows) @@ -279,7 +279,7 @@ ERROR: operation is not allowed on this node \set VERBOSITY default SELECT count(*) FROM mx_table; count -------- +--------------------------------------------------------------------- 5 (1 row) @@ -292,7 +292,7 @@ ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. 
SELECT count(*) FROM mx_table; count -------- +--------------------------------------------------------------------- 5 (1 row) @@ -313,7 +313,7 @@ FROM pg_dist_shard_placement WHERE shardid = :testshardid ORDER BY nodeport; shardid | nodename | nodeport | shardstate ----------+-----------+----------+------------ +--------------------------------------------------------------------- 1270000 | localhost | 57637 | 1 1270000 | localhost | 57638 | 3 (2 rows) @@ -332,7 +332,7 @@ DROP SEQUENCE some_sequence; BEGIN; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass; Column | Type | Modifiers ---------+---------+---------------------------------------------------------- +--------------------------------------------------------------------- col_1 | integer | col_2 | text | col_3 | bigint | not null default nextval('mx_table_col_3_seq'::regclass) @@ -344,7 +344,7 @@ DROP SEQUENCE mx_table_col_3_seq CASCADE; RESET client_min_messages; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass; Column | Type | Modifiers ---------+---------+----------- +--------------------------------------------------------------------- col_1 | integer | col_2 | text | col_3 | bigint | not null @@ -357,7 +357,7 @@ DROP TABLE mx_table; DROP TABLE mx_table_2; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) @@ -365,7 +365,7 @@ SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); DELETE FROM pg_dist_node; SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition; worker_drop_distributed_table -------------------------------- +--------------------------------------------------------------------- (0 rows) \c - - - :master_port diff --git a/src/test/regress/expected/multi_upgrade_reference_table.out b/src/test/regress/expected/multi_upgrade_reference_table.out index ecc6340fd..3414b70c8 100644 --- a/src/test/regress/expected/multi_upgrade_reference_table.out +++ b/src/test/regress/expected/multi_upgrade_reference_table.out @@ -23,7 +23,7 @@ SET citus.shard_count TO 4; CREATE TABLE upgrade_reference_table_multiple_shard(column1 int); SELECT create_distributed_table('upgrade_reference_table_multiple_shard', 'column1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -35,7 +35,7 @@ DROP TABLE upgrade_reference_table_multiple_shard; CREATE TABLE upgrade_reference_table_no_shard(column1 int); SELECT create_distributed_table('upgrade_reference_table_no_shard', 'column1', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -49,14 +49,14 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE upgrade_reference_table_referenced(column1 int PRIMARY KEY); SELECT create_distributed_table('upgrade_reference_table_referenced', 'column1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE upgrade_reference_table_referencing(column1 int REFERENCES upgrade_reference_table_referenced(column1)); SELECT create_distributed_table('upgrade_reference_table_referencing', 'column1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 
row) @@ -75,7 +75,7 @@ DROP TABLE upgrade_reference_table_referenced; CREATE TABLE upgrade_reference_table_unhealthy(column1 int); SELECT create_distributed_table('upgrade_reference_table_unhealthy', 'column1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -92,14 +92,14 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE upgrade_reference_table_composite(column1 int, column2 upgrade_test_composite_type); SELECT create_distributed_table('upgrade_reference_table_composite', 'column1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_composite'::regclass; SELECT upgrade_to_reference_table('upgrade_reference_table_composite'); upgrade_to_reference_table ----------------------------- +--------------------------------------------------------------------- (1 row) @@ -109,7 +109,7 @@ DROP TYPE upgrade_test_composite_type; CREATE TABLE upgrade_reference_table_reference(column1 int); SELECT create_reference_table('upgrade_reference_table_reference'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -121,7 +121,7 @@ DROP TABLE upgrade_reference_table_reference; CREATE TABLE upgrade_reference_table_append(column1 int); SELECT create_distributed_table('upgrade_reference_table_append', 'column1', 'append'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -134,7 +134,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_append'::regclass; partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- +--------------------------------------------------------------------- a | f | 0 | c (1 row) @@ -145,7 +145,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_append'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- +--------------------------------------------------------------------- 1360009 | f | f (1 row) @@ -156,7 +156,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_append'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- (0 rows) SELECT count(*) active_primaries FROM pg_dist_node WHERE isactive AND noderole='primary' \gset @@ -170,13 +170,13 @@ WHERE shardid IN GROUP BY shardid ORDER BY shardid; shardid | ?column? 
----------+---------- +--------------------------------------------------------------------- 1360009 | f (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_append'); upgrade_to_reference_table ----------------------------- +--------------------------------------------------------------------- (1 row) @@ -188,7 +188,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_append'::regclass; partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- +--------------------------------------------------------------------- n | t | 10004 | t (1 row) @@ -199,7 +199,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_append'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- +--------------------------------------------------------------------- 1360009 | t | t (1 row) @@ -210,7 +210,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_append'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -224,7 +224,7 @@ WHERE shardid IN GROUP BY shardid ORDER BY shardid; shardid | ?column? ----------+---------- +--------------------------------------------------------------------- 1360009 | t (1 row) @@ -233,7 +233,7 @@ DROP TABLE upgrade_reference_table_append; CREATE TABLE upgrade_reference_table_one_worker(column1 int); SELECT create_distributed_table('upgrade_reference_table_one_worker', 'column1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -246,7 +246,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass; partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- +--------------------------------------------------------------------- h | f | 1360001 | c (1 row) @@ -257,7 +257,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- +--------------------------------------------------------------------- 1360010 | f | f (1 row) @@ -268,7 +268,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 1360001 | 1 | 1 | 23 | 0 (1 row) @@ -282,13 +282,13 @@ WHERE shardid IN GROUP BY shardid ORDER BY shardid; shardid | ?column? 
----------+---------- +--------------------------------------------------------------------- 1360010 | f (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_one_worker'); upgrade_to_reference_table ----------------------------- +--------------------------------------------------------------------- (1 row) @@ -300,7 +300,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass; partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- +--------------------------------------------------------------------- n | t | 10004 | t (1 row) @@ -311,7 +311,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- +--------------------------------------------------------------------- 1360010 | t | t (1 row) @@ -322,7 +322,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -336,7 +336,7 @@ WHERE shardid IN GROUP BY shardid ORDER BY shardid; shardid | ?column? ----------+---------- +--------------------------------------------------------------------- 1360010 | t (1 row) @@ -346,7 +346,7 @@ SET citus.shard_replication_factor TO 2; CREATE TABLE upgrade_reference_table_one_unhealthy(column1 int); SELECT create_distributed_table('upgrade_reference_table_one_unhealthy', 'column1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -360,7 +360,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass; partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- +--------------------------------------------------------------------- h | f | 1360002 | c (1 row) @@ -371,7 +371,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- +--------------------------------------------------------------------- 1360011 | f | f (1 row) @@ -382,7 +382,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 1360002 | 1 | 2 | 23 | 0 (1 row) @@ -397,13 +397,13 @@ WHERE shardid IN GROUP BY shardid ORDER BY shardid; shardid | ?column? 
----------+---------- +--------------------------------------------------------------------- 1360011 | f (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_one_unhealthy'); upgrade_to_reference_table ----------------------------- +--------------------------------------------------------------------- (1 row) @@ -415,7 +415,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass; partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- +--------------------------------------------------------------------- n | t | 10004 | t (1 row) @@ -426,7 +426,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- +--------------------------------------------------------------------- 1360011 | t | t (1 row) @@ -437,7 +437,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -452,7 +452,7 @@ WHERE shardid IN GROUP BY shardid ORDER BY shardid; shardid | ?column? ----------+---------- +--------------------------------------------------------------------- 1360011 | t (1 row) @@ -461,7 +461,7 @@ DROP TABLE upgrade_reference_table_one_unhealthy; CREATE TABLE upgrade_reference_table_both_healthy(column1 int); SELECT create_distributed_table('upgrade_reference_table_both_healthy', 'column1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -473,7 +473,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass; partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- +--------------------------------------------------------------------- h | f | 1360003 | c (1 row) @@ -484,7 +484,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- +--------------------------------------------------------------------- 1360012 | f | f (1 row) @@ -495,7 +495,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 1360003 | 1 | 2 | 23 | 0 (1 row) @@ -509,13 +509,13 @@ WHERE shardid IN GROUP BY shardid ORDER BY shardid; shardid ---------- +--------------------------------------------------------------------- 1360012 (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_both_healthy'); upgrade_to_reference_table ----------------------------- +--------------------------------------------------------------------- (1 row) @@ -527,7 +527,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass; partmethod | partkeyisnull | colocationid | repmodel 
-------------+---------------+--------------+---------- +--------------------------------------------------------------------- n | t | 10004 | t (1 row) @@ -538,7 +538,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- +--------------------------------------------------------------------- 1360012 | t | t (1 row) @@ -549,7 +549,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -563,7 +563,7 @@ WHERE shardid IN GROUP BY shardid ORDER BY shardid; shardid | ?column? ----------+---------- +--------------------------------------------------------------------- 1360012 | t (1 row) @@ -573,7 +573,7 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE upgrade_reference_table_transaction_rollback(column1 int); SELECT create_distributed_table('upgrade_reference_table_transaction_rollback', 'column1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -586,7 +586,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- +--------------------------------------------------------------------- h | f | 1360004 | c (1 row) @@ -597,7 +597,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- +--------------------------------------------------------------------- 1360013 | f | f (1 row) @@ -608,7 +608,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 1360004 | 1 | 1 | 23 | 0 (1 row) @@ -622,14 +622,14 @@ WHERE shardid IN GROUP BY shardid ORDER BY shardid; shardid | ?column? 
----------+---------- +--------------------------------------------------------------------- 1360013 | f (1 row) BEGIN; SELECT upgrade_to_reference_table('upgrade_reference_table_transaction_rollback'); upgrade_to_reference_table ----------------------------- +--------------------------------------------------------------------- (1 row) @@ -642,7 +642,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- +--------------------------------------------------------------------- h | f | 1360004 | c (1 row) @@ -653,7 +653,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- +--------------------------------------------------------------------- 1360013 | f | f (1 row) @@ -664,7 +664,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 1360004 | 1 | 1 | 23 | 0 (1 row) @@ -678,7 +678,7 @@ WHERE shardid IN GROUP BY shardid ORDER BY shardid; shardid | ?column? ----------+---------- +--------------------------------------------------------------------- 1360013 | f (1 row) @@ -688,7 +688,7 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE upgrade_reference_table_transaction_commit(column1 int); SELECT create_distributed_table('upgrade_reference_table_transaction_commit', 'column1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -701,7 +701,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass; partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- +--------------------------------------------------------------------- h | f | 1360004 | c (1 row) @@ -712,7 +712,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- +--------------------------------------------------------------------- 1360014 | f | f (1 row) @@ -723,7 +723,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 1360004 | 1 | 1 | 23 | 0 (1 row) @@ -737,14 +737,14 @@ WHERE shardid IN GROUP BY shardid ORDER BY shardid; shardid | ?column? 
----------+---------- +--------------------------------------------------------------------- 1360014 | f (1 row) BEGIN; SELECT upgrade_to_reference_table('upgrade_reference_table_transaction_commit'); upgrade_to_reference_table ----------------------------- +--------------------------------------------------------------------- (1 row) @@ -757,7 +757,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass; partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- +--------------------------------------------------------------------- n | t | 10004 | t (1 row) @@ -768,7 +768,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- +--------------------------------------------------------------------- 1360014 | t | t (1 row) @@ -779,7 +779,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -793,7 +793,7 @@ WHERE shardid IN GROUP BY shardid ORDER BY shardid; shardid | ?column? ----------+---------- +--------------------------------------------------------------------- 1360014 | t (1 row) @@ -802,7 +802,7 @@ ORDER BY shardid; \dt upgrade_reference_table_transaction_commit_* List of relations Schema | Name | Type | Owner ---------+----------------------------------------------------+-------+---------- +--------------------------------------------------------------------- public | upgrade_reference_table_transaction_commit_1360014 | table | postgres (1 row) @@ -815,7 +815,7 @@ SET citus.replication_model TO 'streaming'; CREATE TABLE upgrade_reference_table_mx(column1 int); SELECT create_distributed_table('upgrade_reference_table_mx', 'column1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -827,7 +827,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- +--------------------------------------------------------------------- h | f | 1360005 | s (1 row) @@ -838,7 +838,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- +--------------------------------------------------------------------- 1360015 | f | f (1 row) @@ -849,7 +849,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 1360005 | 1 | 1 | 23 | 0 (1 row) @@ -863,7 +863,7 @@ WHERE shardid IN GROUP BY shardid ORDER BY shardid; shardid ---------- +--------------------------------------------------------------------- 1360015 (1 row) @@ -878,7 +878,7 @@ FROM WHERE logicalrelid = 
'upgrade_reference_table_mx'::regclass; partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- +--------------------------------------------------------------------- h | f | 1360005 | s (1 row) @@ -889,7 +889,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- +--------------------------------------------------------------------- 1360015 | f | f (1 row) @@ -900,7 +900,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 1360005 | 1 | 1 | 23 | 0 (1 row) @@ -914,7 +914,7 @@ WHERE shardid IN GROUP BY shardid ORDER BY shardid; shardid | ?column? ----------+---------- +--------------------------------------------------------------------- 1360015 | f (1 row) @@ -926,7 +926,7 @@ RESET citus.replication_model; CREATE TABLE upgrade_reference_table_mx(column1 int); SELECT create_distributed_table('upgrade_reference_table_mx', 'column1'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -935,7 +935,7 @@ WHERE nodeport = :worker_2_port AND shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='upgrade_reference_table_mx'::regclass); SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -947,7 +947,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- +--------------------------------------------------------------------- h | f | 1360006 | c (1 row) @@ -958,7 +958,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- +--------------------------------------------------------------------- 1360016 | f | f (1 row) @@ -969,7 +969,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 1360006 | 1 | 2 | 23 | 0 (1 row) @@ -983,14 +983,14 @@ WHERE shardid IN GROUP BY shardid ORDER BY shardid; shardid ---------- +--------------------------------------------------------------------- 1360016 (1 row) SET client_min_messages TO WARNING; SELECT upgrade_to_reference_table('upgrade_reference_table_mx'); upgrade_to_reference_table ----------------------------- +--------------------------------------------------------------------- (1 row) @@ -1002,7 +1002,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- +--------------------------------------------------------------------- 
n | t | 10004 | t (1 row) @@ -1013,7 +1013,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- +--------------------------------------------------------------------- 1360016 | t | t (1 row) @@ -1024,7 +1024,7 @@ WHERE colocationid IN FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------+------------+-------------------+------------------------+----------------------------- +--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -1038,7 +1038,7 @@ WHERE shardid IN GROUP BY shardid ORDER BY shardid; shardid | ?column? ----------+---------- +--------------------------------------------------------------------- 1360016 | t (1 row) @@ -1051,7 +1051,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; partmethod | partkeyisnull | colocationid | repmodel -------------+---------------+--------------+---------- +--------------------------------------------------------------------- n | t | 10004 | t (1 row) @@ -1062,7 +1062,7 @@ FROM WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ----------+---------------------+--------------------- +--------------------------------------------------------------------- 1360016 | t | t (1 row) @@ -1076,7 +1076,7 @@ WHERE shardid IN GROUP BY shardid ORDER BY shardid; shardid | ?column? ----------+---------- +--------------------------------------------------------------------- 1360016 | t (1 row) @@ -1084,7 +1084,7 @@ ORDER BY shardid; DROP TABLE upgrade_reference_table_mx; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node ----------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/multi_upsert.out b/src/test/regress/expected/multi_upsert.out index f05ecf242..3e52671e3 100644 --- a/src/test/regress/expected/multi_upsert.out +++ b/src/test/regress/expected/multi_upsert.out @@ -9,7 +9,7 @@ CREATE TABLE upsert_test -- distribute the table and create shards SELECT create_distributed_table('upsert_test', 'part_key', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -27,7 +27,7 @@ INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) -- see the results SELECT * FROM upsert_test ORDER BY part_key ASC; part_key | other_col | third_col -----------+-----------+----------- +--------------------------------------------------------------------- 1 | 2 | 4 2 | 2 | (2 rows) @@ -42,7 +42,7 @@ UPDATE SET other_col = EXCLUDED.other_col WHERE upsert_test.part_key != 1; -- see the results SELECT * FROM upsert_test ORDER BY part_key ASC; part_key | other_col | third_col -----------+-----------+----------- +--------------------------------------------------------------------- 1 | 2 | 4 2 | 20 | 3 | 30 | @@ -56,7 +56,7 @@ INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_ke -- see the results SELECT * FROM upsert_test; part_key | other_col | third_col -----------+-----------+----------- +--------------------------------------------------------------------- 1 | 2 | 4 (1 row) @@ -66,7 +66,7 @@ INSERT INTO upsert_test (part_key, other_col) 
VALUES (1, 1) ON CONFLICT (part_ke -- see the results SELECT * FROM upsert_test; part_key | other_col | third_col -----------+-----------+----------- +--------------------------------------------------------------------- 1 | 30 | 4 (1 row) @@ -79,7 +79,7 @@ INSERT INTO upsert_test (part_key, other_col, third_col) VALUES (1, 1, 100) ON C -- see the results SELECT * FROM upsert_test; part_key | other_col | third_col -----------+-----------+----------- +--------------------------------------------------------------------- 1 | 100 | 4 (1 row) @@ -89,7 +89,7 @@ INSERT INTO upsert_test as ups_test (part_key) VALUES (1) -- see the results SELECT * FROM upsert_test; part_key | other_col | third_col -----------+-----------+----------- +--------------------------------------------------------------------- 1 | 150 | 200 (1 row) @@ -100,7 +100,7 @@ INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_ke -- see the results SELECT * FROM upsert_test; part_key | other_col | third_col -----------+-----------+----------- +--------------------------------------------------------------------- 1 | 151 | 872 (1 row) @@ -111,7 +111,7 @@ INSERT INTO upsert_test as ups_test (part_key, other_col) VALUES (1, 1) ON CONFL -- see the results SELECT * FROM upsert_test; part_key | other_col | third_col -----------+-----------+----------- +--------------------------------------------------------------------- 1 | 5 | 872 (1 row) @@ -120,7 +120,7 @@ INSERT INTO upsert_test (part_key, other_col) VALUES (2, 2) ON CONFLICT (part_key) DO UPDATE SET other_col = 3 RETURNING *; part_key | other_col | third_col -----------+-----------+----------- +--------------------------------------------------------------------- 2 | 2 | (1 row) @@ -128,7 +128,7 @@ INSERT INTO upsert_test (part_key, other_col) VALUES (2, 2) ON CONFLICT (part_key) DO UPDATE SET other_col = 3 RETURNING *; part_key | other_col | third_col -----------+-----------+----------- +--------------------------------------------------------------------- 2 | 3 | (1 row) @@ -143,7 +143,7 @@ CREATE TABLE upsert_test_2 -- distribute the table and create shards SELECT create_distributed_table('upsert_test_2', 'part_key', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -164,7 +164,7 @@ CREATE INDEX idx_ups_test ON upsert_test_3(part_key); -- distribute the table and create shards SELECT create_distributed_table('upsert_test_3', 'part_key', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -180,7 +180,7 @@ CREATE TABLE upsert_test_4 -- distribute the table and create shards SELECT create_distributed_table('upsert_test_4', 'part_key', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -196,7 +196,7 @@ INSERT INTO upsert_test_4 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET coun -- now see the results SELECT * FROM upsert_test_4; part_key | count -----------+------- +--------------------------------------------------------------------- 1 | 6 (1 row) @@ -205,7 +205,7 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE dropcol_distributed(key int primary key, drop1 int, keep1 text, drop2 numeric, keep2 float); SELECT create_distributed_table('dropcol_distributed', 'key', 'hash'); create_distributed_table --------------------------- 
+--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/multi_utilities.out b/src/test/regress/expected/multi_utilities.out index ee2c00168..699f9079a 100644 --- a/src/test/regress/expected/multi_utilities.out +++ b/src/test/regress/expected/multi_utilities.out @@ -7,7 +7,7 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE sharded_table ( name text, id bigint ); SELECT create_distributed_table('sharded_table', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -36,27 +36,27 @@ PREPARE sharded_delete AS DELETE FROM sharded_table WHERE id = 1; PREPARE sharded_query AS SELECT name FROM sharded_table WHERE id = 1; EXECUTE sharded_query; name ------- +--------------------------------------------------------------------- (0 rows) EXECUTE sharded_insert; EXECUTE sharded_query; name ------- +--------------------------------------------------------------------- adam (1 row) EXECUTE sharded_update; EXECUTE sharded_query; name ------- +--------------------------------------------------------------------- bob (1 row) EXECUTE sharded_delete; EXECUTE sharded_query; name ------- +--------------------------------------------------------------------- (0 rows) -- try to drop shards with where clause @@ -73,13 +73,13 @@ HINT: Use the DELETE command instead. BEGIN; SELECT lock_shard_metadata(5, ARRAY[999001, 999002, 999002]); lock_shard_metadata ---------------------- +--------------------------------------------------------------------- (1 row) SELECT lock_shard_metadata(7, ARRAY[999001, 999003, 999004]); lock_shard_metadata ---------------------- +--------------------------------------------------------------------- (1 row) @@ -88,7 +88,7 @@ FROM pg_locks WHERE objid IN (999001, 999002, 999003, 999004) ORDER BY objid, mode; locktype | objid | mode | granted -----------+--------+---------------+--------- +--------------------------------------------------------------------- advisory | 999001 | ExclusiveLock | t advisory | 999001 | ShareLock | t advisory | 999002 | ShareLock | t @@ -103,7 +103,7 @@ ERROR: unsupported lockmode 0 -- lock shard metadata: invalid shard ID SELECT lock_shard_metadata(5, ARRAY[0]); lock_shard_metadata ---------------------- +--------------------------------------------------------------------- (1 row) @@ -114,13 +114,13 @@ ERROR: no locks specified BEGIN; SELECT lock_shard_resources(5, ARRAY[999001, 999002, 999002]); lock_shard_resources ----------------------- +--------------------------------------------------------------------- (1 row) SELECT lock_shard_resources(7, ARRAY[999001, 999003, 999004]); lock_shard_resources ----------------------- +--------------------------------------------------------------------- (1 row) @@ -129,7 +129,7 @@ FROM pg_locks WHERE objid IN (999001, 999002, 999003, 999004) ORDER BY objid, mode; locktype | objid | mode | granted -----------+--------+---------------+--------- +--------------------------------------------------------------------- advisory | 999001 | ExclusiveLock | t advisory | 999001 | ShareLock | t advisory | 999002 | ShareLock | t @@ -144,7 +144,7 @@ ERROR: unsupported lockmode 0 -- lock shard metadata: invalid shard ID SELECT lock_shard_resources(5, ARRAY[-1]); lock_shard_resources ----------------------- +--------------------------------------------------------------------- (1 row) @@ -160,7 +160,7 @@ SET citus.shard_replication_factor TO 2; CREATE TABLE dustbunnies (id 
integer, name text, age integer); SELECT create_distributed_table('dustbunnies', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -169,13 +169,13 @@ SELECT create_distributed_table('dustbunnies', 'id', 'hash'); CREATE TABLE second_dustbunnies(id integer, name text, age integer); SELECT master_create_distributed_table('second_dustbunnies', 'id', 'hash'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT master_create_worker_shards('second_dustbunnies', 1, 2); master_create_worker_shards ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -267,20 +267,20 @@ ANALYZE dustbunnies; \c - - - :worker_1_port SELECT wait_for_stats(); wait_for_stats ----------------- +--------------------------------------------------------------------- (1 row) REFRESH MATERIALIZED VIEW prevcounts; SELECT pg_stat_get_vacuum_count('dustbunnies_990002'::regclass); pg_stat_get_vacuum_count --------------------------- +--------------------------------------------------------------------- 1 (1 row) SELECT pg_stat_get_analyze_count('dustbunnies_990002'::regclass); pg_stat_get_analyze_count ---------------------------- +--------------------------------------------------------------------- 1 (1 row) @@ -296,26 +296,26 @@ VACUUM ANALYZE dustbunnies; SELECT relfilenode != :oldnode AS table_rewritten FROM pg_class WHERE oid='dustbunnies_990002'::regclass; table_rewritten ------------------ +--------------------------------------------------------------------- t (1 row) -- verify the VACUUM ANALYZE incremented both vacuum and analyze counts SELECT wait_for_stats(); wait_for_stats ----------------- +--------------------------------------------------------------------- (1 row) SELECT pg_stat_get_vacuum_count('dustbunnies_990002'::regclass); pg_stat_get_vacuum_count --------------------------- +--------------------------------------------------------------------- 2 (1 row) SELECT pg_stat_get_analyze_count('dustbunnies_990002'::regclass); pg_stat_get_analyze_count ---------------------------- +--------------------------------------------------------------------- 2 (1 row) @@ -332,7 +332,7 @@ VACUUM (FREEZE) dustbunnies; SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class WHERE oid='dustbunnies_990002'::regclass; frozen_performed ------------------- +--------------------------------------------------------------------- t (1 row) @@ -340,7 +340,7 @@ WHERE oid='dustbunnies_990002'::regclass; SELECT attname, null_frac FROM pg_stats WHERE tablename = 'dustbunnies_990002' ORDER BY attname; attname | null_frac ----------+----------- +--------------------------------------------------------------------- age | 1 id | 0 name | 0 @@ -355,7 +355,7 @@ ANALYZE dustbunnies (name); SELECT attname, null_frac FROM pg_stats WHERE tablename = 'dustbunnies_990002' ORDER BY attname; attname | null_frac ----------+----------- +--------------------------------------------------------------------- age | 1 id | 0 name | 0.166667 @@ -371,21 +371,21 @@ VACUUM dustbunnies, second_dustbunnies; -- check the current number of vacuum and analyze run on dustbunnies SELECT run_command_on_workers($$SELECT wait_for_stats()$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"") 
(localhost,57638,t,"") (2 rows) SELECT run_command_on_workers($$SELECT pg_stat_get_vacuum_count(tablename::regclass) from pg_tables where tablename LIKE 'dustbunnies_%' limit 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,4) (localhost,57638,t,4) (2 rows) SELECT run_command_on_workers($$SELECT pg_stat_get_analyze_count(tablename::regclass) from pg_tables where tablename LIKE 'dustbunnies_%' limit 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,3) (localhost,57638,t,3) (2 rows) @@ -402,21 +402,21 @@ SET citus.enable_ddl_propagation to DEFAULT; -- should not propagate the vacuum and analyze SELECT run_command_on_workers($$SELECT wait_for_stats()$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"") (localhost,57638,t,"") (2 rows) SELECT run_command_on_workers($$SELECT pg_stat_get_vacuum_count(tablename::regclass) from pg_tables where tablename LIKE 'dustbunnies_%' limit 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,4) (localhost,57638,t,4) (2 rows) SELECT run_command_on_workers($$SELECT pg_stat_get_analyze_count(tablename::regclass) from pg_tables where tablename LIKE 'dustbunnies_%' limit 1$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,3) (localhost,57638,t,3) (2 rows) @@ -424,13 +424,13 @@ SELECT run_command_on_workers($$SELECT pg_stat_get_analyze_count(tablename::regc -- test worker_hash SELECT worker_hash(123); worker_hash -------------- +--------------------------------------------------------------------- -205084363 (1 row) SELECT worker_hash('1997-08-08'::date); worker_hash -------------- +--------------------------------------------------------------------- -499701663 (1 row) @@ -440,7 +440,7 @@ ERROR: cannot find a hash function for the input type HINT: Cast input to a data type with a hash function. SELECT worker_hash('(1, 2)'::test_composite_type); worker_hash -------------- +--------------------------------------------------------------------- -1895345704 (1 row) @@ -449,7 +449,7 @@ ERROR: must be called as trigger -- confirm that citus_create_restore_point works SELECT 1 FROM citus_create_restore_point('regression-test'); ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) diff --git a/src/test/regress/expected/multi_utility_statements.out b/src/test/regress/expected/multi_utility_statements.out index 31888483a..2f5787648 100644 --- a/src/test/regress/expected/multi_utility_statements.out +++ b/src/test/regress/expected/multi_utility_statements.out @@ -31,7 +31,7 @@ CREATE TEMP TABLE lineitem_pricing_summary AS ); SELECT * FROM lineitem_pricing_summary ORDER BY l_returnflag, l_linestatus; l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order ---------------+--------------+-----------+----------------+----------------+------------------+---------------------+--------------------+------------------------+------------- +--------------------------------------------------------------------- A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 N | O | 149778.00 | 224706948.16 | 213634857.6854 | 222134071.929801 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 5883 @@ -66,7 +66,7 @@ CREATE TABLE shipping_priority AS ); SELECT * FROM shipping_priority; l_orderkey | revenue | o_orderdate | o_shippriority -------------+-------------+-------------+---------------- +--------------------------------------------------------------------- 1637 | 268170.6408 | 02-08-1995 | 0 9696 | 252014.5497 | 02-20-1995 | 0 10916 | 242749.1996 | 03-11-1995 | 0 @@ -195,7 +195,7 @@ CREATE TEMP TABLE customer_few (customer_key) ON COMMIT DROP AS SELECT customer_key, c_name, c_address FROM customer_few ORDER BY customer_key LIMIT 5; customer_key | c_name | c_address ---------------+--------------------+----------------------------------------- +--------------------------------------------------------------------- 3 | Customer#000000003 | MG9kdTD2WBHm 14 | Customer#000000014 | KXkletMlL2JQEA 30 | Customer#000000030 | nJDsELGAavU63Jl0c5NKsKfL8rIJQQkQnYL2QJY @@ -213,7 +213,7 @@ LINE 2: FROM customer_few ORDER BY customer_key LIMIT 5; CREATE TABLE cursor_me (x int, y int); SELECT create_distributed_table('cursor_me', 'x'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -222,13 +222,13 @@ DECLARE holdCursor CURSOR WITH HOLD FOR SELECT * FROM cursor_me WHERE x = 1 ORDER BY y; FETCH NEXT FROM holdCursor; x | y ----+---- +--------------------------------------------------------------------- 1 | 10 (1 row) FETCH FORWARD 3 FROM holdCursor; x | y ----+---- +--------------------------------------------------------------------- 1 | 11 1 | 12 1 | 13 @@ -236,13 +236,13 @@ FETCH FORWARD 3 FROM holdCursor; FETCH LAST FROM holdCursor; x | y ----+---- +--------------------------------------------------------------------- 1 | 19 (1 row) FETCH BACKWARD 3 FROM holdCursor; x | y ----+---- +--------------------------------------------------------------------- 1 | 18 1 | 17 1 | 16 @@ -250,7 +250,7 @@ FETCH BACKWARD 3 FROM holdCursor; FETCH FORWARD 3 FROM holdCursor; x | y ----+---- +--------------------------------------------------------------------- 1 | 17 1 | 18 1 | 19 @@ -286,7 +286,7 @@ END; $$ LANGUAGE plpgsql; SELECT cursor_plpgsql(4); cursor_plpgsql ----------------- 
+--------------------------------------------------------------------- 40 41 42 @@ -309,13 +309,13 @@ DECLARE holdCursor SCROLL CURSOR WITH HOLD FOR ORDER BY l_orderkey, l_linenumber; FETCH NEXT FROM holdCursor; l_orderkey | l_linenumber | l_quantity | l_discount -------------+--------------+------------+------------ +--------------------------------------------------------------------- 1 | 1 | 17.00 | 0.04 (1 row) FETCH FORWARD 5 FROM holdCursor; l_orderkey | l_linenumber | l_quantity | l_discount -------------+--------------+------------+------------ +--------------------------------------------------------------------- 1 | 2 | 36.00 | 0.09 1 | 3 | 8.00 | 0.10 1 | 4 | 28.00 | 0.09 @@ -325,13 +325,13 @@ FETCH FORWARD 5 FROM holdCursor; FETCH LAST FROM holdCursor; l_orderkey | l_linenumber | l_quantity | l_discount -------------+--------------+------------+------------ +--------------------------------------------------------------------- 14947 | 2 | 29.00 | 0.04 (1 row) FETCH BACKWARD 5 FROM holdCursor; l_orderkey | l_linenumber | l_quantity | l_discount -------------+--------------+------------+------------ +--------------------------------------------------------------------- 14947 | 1 | 14.00 | 0.09 14946 | 2 | 37.00 | 0.01 14946 | 1 | 38.00 | 0.00 @@ -347,13 +347,13 @@ DECLARE noHoldCursor SCROLL CURSOR FOR ORDER BY l_orderkey, l_linenumber; FETCH ABSOLUTE 5 FROM noHoldCursor; l_orderkey | l_linenumber | l_quantity | l_discount -------------+--------------+------------+------------ +--------------------------------------------------------------------- 1 | 5 | 24.00 | 0.10 (1 row) FETCH BACKWARD noHoldCursor; l_orderkey | l_linenumber | l_quantity | l_discount -------------+--------------+------------+------------ +--------------------------------------------------------------------- 1 | 4 | 28.00 | 0.09 (1 row) diff --git a/src/test/regress/expected/multi_view.out b/src/test/regress/expected/multi_view.out index e39bb4bba..8919aaf83 100644 --- a/src/test/regress/expected/multi_view.out +++ b/src/test/regress/expected/multi_view.out @@ -7,13 +7,13 @@ -- into select, multi row insert via copy commands. 
SELECT count(*) FROM lineitem_hash_part; count -------- +--------------------------------------------------------------------- 12000 (1 row) SELECT count(*) FROM orders_hash_part; count -------- +--------------------------------------------------------------------- 2985 (1 row) @@ -22,14 +22,14 @@ CREATE VIEW priority_orders AS SELECT * FROM orders_hash_part WHERE o_orderprior -- aggregate pushdown SELECT o_orderpriority, count(*) FROM priority_orders GROUP BY 1 ORDER BY 2, 1; o_orderpriority | count ------------------+------- +--------------------------------------------------------------------- 2-HIGH | 593 1-URGENT | 604 (2 rows) SELECT o_orderpriority, count(*) FROM orders_hash_part WHERE o_orderpriority < '3-MEDIUM' GROUP BY 1 ORDER BY 2,1; o_orderpriority | count ------------------+------- +--------------------------------------------------------------------- 2-HIGH | 593 1-URGENT | 604 (2 rows) @@ -37,7 +37,7 @@ SELECT o_orderpriority, count(*) FROM orders_hash_part WHERE o_orderpriority < -- filters SELECT o_orderpriority, count(*) as all, count(*) FILTER (WHERE o_orderstatus ='F') as fullfilled FROM priority_orders GROUP BY 1 ORDER BY 2, 1; o_orderpriority | all | fullfilled ------------------+-----+------------ +--------------------------------------------------------------------- 2-HIGH | 593 | 271 1-URGENT | 604 | 280 (2 rows) @@ -45,7 +45,7 @@ SELECT o_orderpriority, count(*) as all, count(*) FILTER (WHERE o_orderstatus =' -- having SELECT o_orderdate, count(*) from priority_orders group by 1 having (count(*) > 3) order by 2 desc, 1 desc; o_orderdate | count --------------+------- +--------------------------------------------------------------------- 08-20-1996 | 5 10-10-1994 | 4 05-05-1994 | 4 @@ -56,7 +56,7 @@ SELECT o_orderdate, count(*) from priority_orders group by 1 having (count(*) > -- having with filters SELECT o_orderdate, count(*) as all, count(*) FILTER(WHERE o_orderstatus = 'F') from priority_orders group by 1 having (count(*) > 3) order by 2 desc, 1 desc; o_orderdate | all | count --------------+-----+------- +--------------------------------------------------------------------- 08-20-1996 | 5 | 0 10-10-1994 | 4 | 4 05-05-1994 | 4 | 4 @@ -67,7 +67,7 @@ SELECT o_orderdate, count(*) as all, count(*) FILTER(WHERE o_orderstatus = 'F') -- limit SELECT o_orderkey, o_totalprice from orders_hash_part order by 2 desc, 1 asc limit 5 ; o_orderkey | o_totalprice -------------+-------------- +--------------------------------------------------------------------- 4421 | 401055.62 10209 | 400191.77 11142 | 395039.05 @@ -77,14 +77,14 @@ SELECT o_orderkey, o_totalprice from orders_hash_part order by 2 desc, 1 asc lim SELECT o_orderkey, o_totalprice from priority_orders order by 2 desc, 1 asc limit 1 ; o_orderkey | o_totalprice -------------+-------------- +--------------------------------------------------------------------- 14179 | 384265.43 (1 row) CREATE VIEW priority_lineitem AS SELECT li.* FROM lineitem_hash_part li JOIN priority_orders ON (l_orderkey = o_orderkey); SELECT l_orderkey, count(*) FROM priority_lineitem GROUP BY 1 ORDER BY 2 DESC, 1 LIMIT 5; l_orderkey | count -------------+------- +--------------------------------------------------------------------- 7 | 7 225 | 7 226 | 7 @@ -96,28 +96,28 @@ CREATE VIEW air_shipped_lineitems AS SELECT * FROM lineitem_hash_part WHERE l_sh -- join between view and table SELECT count(*) FROM orders_hash_part join air_shipped_lineitems ON (o_orderkey = l_orderkey); count -------- 
+--------------------------------------------------------------------- 1706 (1 row) -- join between views SELECT count(*) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey); count -------- +--------------------------------------------------------------------- 700 (1 row) -- count distinct on partition column is supported SELECT count(distinct o_orderkey) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey); count -------- +--------------------------------------------------------------------- 551 (1 row) -- count distinct on non-partition column is supported SELECT count(distinct o_orderpriority) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey); count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -126,7 +126,7 @@ SELECT count(distinct o_orderkey) FROM priority_orders join air_shipped_lineitem ON (o_orderkey = l_orderkey) WHERE (o_orderkey = 231); count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -135,35 +135,35 @@ SELECT distinct(o_orderkey) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey) WHERE (o_orderkey = 231); o_orderkey ------------- +--------------------------------------------------------------------- 231 (1 row) -- left join support depends on flattening of the query SELECT o_orderkey, l_orderkey FROM priority_orders left join air_shipped_lineitems ON (o_orderkey = l_orderkey) ORDER BY o_orderkey LIMIT 1; o_orderkey | l_orderkey -------------+------------ +--------------------------------------------------------------------- 2 | (1 row) -- however, this works SELECT count(*) FROM priority_orders left join lineitem_hash_part ON (o_orderkey = l_orderkey) WHERE l_shipmode ='AIR'; count -------- +--------------------------------------------------------------------- 700 (1 row) -- view on the inner side is supported SELECT count(*) FROM priority_orders right join lineitem_hash_part ON (o_orderkey = l_orderkey) WHERE l_shipmode ='AIR'; count -------- +--------------------------------------------------------------------- 1706 (1 row) -- view on the outer side is supported SELECT count(*) FROM lineitem_hash_part right join priority_orders ON (o_orderkey = l_orderkey) WHERE l_shipmode ='AIR'; count -------- +--------------------------------------------------------------------- 700 (1 row) @@ -171,7 +171,7 @@ SELECT count(*) FROM lineitem_hash_part right join priority_orders ON (o_orderke SELECT o_orderkey, l_linenumber FROM priority_orders left join air_shipped_lineitems ON (o_orderkey = l_orderkey) WHERE o_orderkey = 2; o_orderkey | l_linenumber -------------+-------------- +--------------------------------------------------------------------- 2 | (1 row) @@ -183,7 +183,7 @@ DEBUG: generating subplan 22_1 for subquery SELECT lineitem_hash_part.l_orderke DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT priority_orders.o_orderkey, priority_orders.o_custkey, priority_orders.o_orderstatus, priority_orders.o_totalprice, priority_orders.o_orderdate, priority_orders.o_orderpriority, priority_orders.o_clerk, priority_orders.o_shippriority, priority_orders.o_comment, air_shipped_lineitems.l_orderkey, air_shipped_lineitems.l_partkey, air_shipped_lineitems.l_suppkey, air_shipped_lineitems.l_linenumber, air_shipped_lineitems.l_quantity, air_shipped_lineitems.l_extendedprice, air_shipped_lineitems.l_discount, air_shipped_lineitems.l_tax, air_shipped_lineitems.l_returnflag, 
air_shipped_lineitems.l_linestatus, air_shipped_lineitems.l_shipdate, air_shipped_lineitems.l_commitdate, air_shipped_lineitems.l_receiptdate, air_shipped_lineitems.l_shipinstruct, air_shipped_lineitems.l_shipmode, air_shipped_lineitems.l_comment FROM ((SELECT orders_hash_part.o_orderkey, orders_hash_part.o_custkey, orders_hash_part.o_orderstatus, orders_hash_part.o_totalprice, orders_hash_part.o_orderdate, orders_hash_part.o_orderpriority, orders_hash_part.o_clerk, orders_hash_part.o_shippriority, orders_hash_part.o_comment FROM public.orders_hash_part WHERE (orders_hash_part.o_orderpriority OPERATOR(pg_catalog.<) '3-MEDIUM'::bpchar)) priority_orders JOIN (SELECT intermediate_result.l_orderkey, intermediate_result.l_partkey, intermediate_result.l_suppkey, intermediate_result.l_linenumber, intermediate_result.l_quantity, intermediate_result.l_extendedprice, intermediate_result.l_discount, intermediate_result.l_tax, intermediate_result.l_returnflag, intermediate_result.l_linestatus, intermediate_result.l_shipdate, intermediate_result.l_commitdate, intermediate_result.l_receiptdate, intermediate_result.l_shipinstruct, intermediate_result.l_shipmode, intermediate_result.l_comment FROM read_intermediate_result('22_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint, l_partkey integer, l_suppkey integer, l_linenumber integer, l_quantity numeric(15,2), l_extendedprice numeric(15,2), l_discount numeric(15,2), l_tax numeric(15,2), l_returnflag character(1), l_linestatus character(1), l_shipdate date, l_commitdate date, l_receiptdate date, l_shipinstruct character(25), l_shipmode character(10), l_comment character varying(44))) air_shipped_lineitems ON ((priority_orders.o_custkey OPERATOR(pg_catalog.=) air_shipped_lineitems.l_suppkey))) ORDER BY priority_orders.o_orderkey DESC, priority_orders.o_custkey DESC, priority_orders.o_orderpriority DESC LIMIT 5 DEBUG: push down of limit count: 5 o_orderkey | o_custkey | o_orderstatus | o_totalprice | o_orderdate | o_orderpriority | o_clerk | o_shippriority | o_comment | l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment -------------+-----------+---------------+--------------+-------------+-----------------+-----------------+----------------+-------------------------------------------------------+------------+-----------+-----------+--------------+------------+-----------------+------------+-------+--------------+--------------+------------+--------------+---------------+---------------------------+------------+------------------------------------------- +--------------------------------------------------------------------- 14821 | 1435 | O | 322002.95 | 06-12-1998 | 2-HIGH | Clerk#000000630 | 0 | n packages are furiously ironic ideas. d | 1607 | 118923 | 1435 | 2 | 37.00 | 71851.04 | 0.05 | 0.02 | N | O | 02-27-1996 | 02-18-1996 | 03-16-1996 | NONE | AIR | alongside 14790 | 613 | O | 270163.54 | 08-21-1996 | 2-HIGH | Clerk#000000347 | 0 | p. regular deposits wake. final n | 2629 | 123076 | 613 | 2 | 31.00 | 34071.17 | 0.08 | 0.03 | N | O | 05-24-1998 | 05-26-1998 | 06-10-1998 | COLLECT COD | AIR | ate blithely bold, regular deposits. 
bold 14758 | 1225 | F | 37812.49 | 10-27-1993 | 2-HIGH | Clerk#000000687 | 0 | ages nag about the furio | 9156 | 176190 | 1225 | 2 | 22.00 | 27856.18 | 0.03 | 0.00 | R | F | 02-08-1994 | 04-01-1994 | 02-24-1994 | DELIVER IN PERSON | AIR | equests dete @@ -194,7 +194,7 @@ DEBUG: push down of limit count: 5 RESET client_min_messages; SELECT count(*) FROM priority_orders JOIN air_shipped_lineitems ON (o_custkey = l_suppkey); count -------- +--------------------------------------------------------------------- 192 (1 row) @@ -212,7 +212,7 @@ SELECT l_suppkey, count(*) FROM FROM lineitem_hash_part WHERE l_shipmode = 'AIR' GROUP BY l_suppkey, l_shipdate) supps GROUP BY l_suppkey ORDER BY 2 DESC, 1 LIMIT 5; l_suppkey | count ------------+------- +--------------------------------------------------------------------- 7680 | 4 160 | 3 1042 | 3 @@ -232,7 +232,7 @@ DETAIL: Subqueries without group by clause are not supported yet CREATE VIEW supp_count_view AS SELECT * FROM (SELECT l_suppkey, count(*) FROM lineitem_hash_part GROUP BY 1) s1; SELECT * FROM supp_count_view ORDER BY 2 DESC, 1 LIMIT 10; l_suppkey | count ------------+------- +--------------------------------------------------------------------- 6104 | 8 1868 | 6 5532 | 6 @@ -252,7 +252,7 @@ CREATE VIEW lineitems_by_shipping_method AS -- following will be supported via recursive planning SELECT * FROM lineitems_by_shipping_method ORDER BY 1,2 LIMIT 5; l_shipmode | cnt -------------+------ +--------------------------------------------------------------------- AIR | 1706 FOB | 1709 MAIL | 1739 @@ -270,7 +270,7 @@ CREATE VIEW lineitems_by_orderkey AS -- this should work since we're able to push down this query SELECT * FROM lineitems_by_orderkey ORDER BY 2 DESC, 1 ASC LIMIT 10; l_orderkey | count -------------+------- +--------------------------------------------------------------------- 7 | 7 68 | 7 129 | 7 @@ -286,7 +286,7 @@ SELECT * FROM lineitems_by_orderkey ORDER BY 2 DESC, 1 ASC LIMIT 10; -- it would also work since it is made router plannable SELECT * FROM lineitems_by_orderkey WHERE l_orderkey = 100; l_orderkey | count -------------+------- +--------------------------------------------------------------------- 100 | 5 (1 row) @@ -304,7 +304,7 @@ CREATE VIEW recent_users AS HAVING max(time) > '2017-11-23 16:20:33.264457'::timestamp order by 2 DESC; SELECT * FROM recent_users ORDER BY 2 DESC, 1 DESC; user_id | lastseen ----------+--------------------------------- +--------------------------------------------------------------------- 1 | Thu Nov 23 17:30:34.635085 2017 3 | Thu Nov 23 17:18:51.048758 2017 5 | Thu Nov 23 16:48:32.08896 2017 @@ -316,14 +316,14 @@ CREATE VIEW recent_events AS WHERE time > '2017-11-23 16:20:33.264457'::timestamp; SELECT count(*) FROM recent_events; count -------- +--------------------------------------------------------------------- 6 (1 row) -- count number of events of recent_users SELECT count(*) FROM recent_users ru JOIN events_table et ON (ru.user_id = et.user_id); count -------- +--------------------------------------------------------------------- 50 (1 row) @@ -335,7 +335,7 @@ SELECT ru.user_id, count(*) GROUP BY ru.user_id ORDER BY 2 DESC, 1; user_id | count ----------+------- +--------------------------------------------------------------------- 3 | 21 1 | 15 5 | 14 @@ -349,7 +349,7 @@ SELECT ru.user_id, count(*) GROUP BY ru.user_id ORDER BY 2 DESC, 1; user_id | count ----------+------- +--------------------------------------------------------------------- 3 | 21 1 | 15 5 | 14 @@ -365,7 +365,7 @@ 
SELECT * FROM ORDER BY 2 DESC, 1) s1 ORDER BY 2 DESC, 1; user_id | count ----------+------- +--------------------------------------------------------------------- 3 | 21 1 | 15 5 | 14 @@ -382,7 +382,7 @@ SELECT * FROM ORDER BY 2 DESC, 1) s1 ORDER BY 2 DESC, 1; user_id | count ----------+------- +--------------------------------------------------------------------- 1 | 24 3 | 23 5 | 7 @@ -392,7 +392,7 @@ ORDER BY 2 DESC, 1; -- recent users who has an event in recent events SELECT ru.user_id FROM recent_users ru JOIN recent_events re USING(user_id) GROUP BY ru.user_id ORDER BY ru.user_id; user_id ---------- +--------------------------------------------------------------------- 1 3 (2 rows) @@ -404,7 +404,7 @@ SELECT count(*) FROM ( FROM recent_events re LEFT JOIN recent_users ru USING(user_id)) reu WHERE recent_user IS NULL; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -413,7 +413,7 @@ SELECT count(*) FROM recent_events re LEFT JOIN recent_users ru ON(ru.user_id = re.user_id) WHERE ru.user_id IS NULL; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -421,7 +421,7 @@ SELECT count(*) -- users who has recent activity and they have an entry with value_1 is less than 3 SELECT ut.* FROM recent_users ru JOIN users_table ut USING (user_id) WHERE ut.value_1 < 3 ORDER BY 1,2; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 4 | 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 | @@ -452,7 +452,7 @@ SELECT ru.user_id, CASE WHEN et.user_id IS NULL THEN 'NO' ELSE 'YES' END as done ON(ru.user_id = et.user_id AND et.event_type = 6) ORDER BY 2 DESC, 1; user_id | done_event ----------+------------ +--------------------------------------------------------------------- 1 | YES 3 | NO 5 | NO @@ -467,7 +467,7 @@ SELECT * FROM ) s1 ORDER BY 2 DESC, 1; user_id | done_event ----------+------------ +--------------------------------------------------------------------- 1 | YES 3 | NO 5 | NO @@ -489,7 +489,7 @@ CREATE VIEW selected_users AS SELECT * FROM users_table WHERE value_1 >= 1 and v CREATE VIEW recent_selected_users AS SELECT su.* FROM selected_users su JOIN recent_users ru USING(user_id); SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER BY 1; user_id ---------- +--------------------------------------------------------------------- 1 3 5 @@ -498,7 +498,7 @@ SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER BY 1; -- this would be supported when we implement where partition_key in (subquery) support SELECT et.user_id, et.time FROM events_table et WHERE et.user_id IN (SELECT user_id FROM recent_selected_users) GROUP BY 1,2 ORDER BY 1 DESC,2 DESC LIMIT 5; user_id | time ----------+--------------------------------- +--------------------------------------------------------------------- 5 | Thu Nov 23 16:11:02.929469 2017 5 | Thu Nov 23 14:40:40.467511 2017 5 | Thu Nov 23 14:28:51.833214 2017 @@ -509,7 +509,7 @@ SELECT et.user_id, et.time FROM events_table et WHERE et.user_id IN (SELECT user -- it is supported when it is a router query SELECT count(*) FROM events_table et WHERE et.user_id IN (SELECT user_id FROM recent_selected_users WHERE user_id = 1); count -------- +--------------------------------------------------------------------- 15 (1 
row) @@ -519,7 +519,7 @@ UNION (SELECT user_id FROM selected_users) ORDER BY 1; user_id ---------- +--------------------------------------------------------------------- 1 2 3 @@ -537,7 +537,7 @@ SELECT * WHERE user_id < 2 AND user_id > 0 ORDER BY user_id; user_id ---------- +--------------------------------------------------------------------- 1 (1 row) @@ -550,7 +550,7 @@ SELECT * WHERE user_id < 2 AND user_id > 0 ORDER BY user_id; user_id ---------- +--------------------------------------------------------------------- 1 1 (2 rows) @@ -562,7 +562,7 @@ SELECT count(*) (SELECT user_id FROM selected_users) ) u WHERE user_id < 2 AND user_id > 0; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -574,7 +574,7 @@ SELECT count(*) (SELECT user_id FROM selected_users) ) u WHERE user_id < 2 AND user_id > 0; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -589,7 +589,7 @@ SELECT count(*) (SELECT user_id FROM (SELECT * FROM users_table WHERE value_1 >= 1 and value_1 < 3) bb) ) u WHERE user_id < 2 AND user_id > 0; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -603,7 +603,7 @@ SELECT count(*) (SELECT user_id FROM (SELECT * FROM users_table WHERE value_1 >= 1 and value_1 < 3) bb) ) u WHERE user_id < 2 AND user_id > 0; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -612,7 +612,7 @@ SELECT count(*) CREATE VIEW distinct_user_with_value_1_3 AS SELECT DISTINCT user_id FROM users_table WHERE value_1 = 3; SELECT * FROM distinct_user_with_value_1_3 ORDER BY user_id; user_id ---------- +--------------------------------------------------------------------- 1 2 3 @@ -626,7 +626,7 @@ SELECT * FROM distinct_user_with_value_1_3 ORDER BY user_id; CREATE VIEW distinct_value_1 AS SELECT DISTINCT value_1 FROM users_table WHERE value_2 = 3; SELECT * FROM distinct_value_1 ORDER BY 1 DESC LIMIT 5; value_1 ---------- +--------------------------------------------------------------------- 5 4 3 @@ -639,7 +639,7 @@ CREATE VIEW cte_view_1 AS WITH c1 AS (SELECT * FROM users_table WHERE value_1 = 3) SELECT * FROM c1 WHERE value_2 < 4 AND EXISTS (SELECT * FROM c1); SELECT * FROM cte_view_1 ORDER BY 1,2,3,4,5 LIMIT 5; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 4 | 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 | @@ -651,7 +651,7 @@ SELECT * FROM cte_view_1 ORDER BY 1,2,3,4,5 LIMIT 5; -- router planner can't detect it SELECT * FROM cte_view_1 WHERE user_id = 2 ORDER BY 1,2,3,4,5; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+--------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 4 | (1 row) @@ -660,7 +660,7 @@ CREATE VIEW cte_view_2 AS WITH c1 AS (SELECT * FROM users_table WHERE user_id = 2) SELECT * FROM c1 WHERE value_1 = 3; SELECT * FROM cte_view_2; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | 2 | Thu Nov 
23 13:52:54.83829 2017 | 3 | 1 | 4 | 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | @@ -671,20 +671,20 @@ CREATE VIEW router_view AS SELECT * FROM users_table WHERE user_id = 2; -- router plannable SELECT user_id FROM router_view GROUP BY 1; user_id ---------- +--------------------------------------------------------------------- 2 (1 row) -- join a router view SELECT * FROM (SELECT user_id FROM router_view GROUP BY 1) rv JOIN recent_events USING (user_id) ORDER BY 2 LIMIT 3; user_id | time ----------+--------------------------------- +--------------------------------------------------------------------- 2 | Thu Nov 23 17:26:14.563216 2017 (1 row) SELECT * FROM (SELECT user_id FROM router_view GROUP BY 1) rv JOIN (SELECT * FROM recent_events) re USING (user_id) ORDER BY 2 LIMIT 3; user_id | time ----------+--------------------------------- +--------------------------------------------------------------------- 2 | Thu Nov 23 17:26:14.563216 2017 (1 row) @@ -697,7 +697,7 @@ CREATE VIEW recent_10_users AS -- this is not supported since it has limit in it and subquery_pushdown is not set SELECT * FROM recent_10_users; user_id | lastseen ----------+--------------------------------- +--------------------------------------------------------------------- 1 | Thu Nov 23 17:30:34.635085 2017 3 | Thu Nov 23 17:18:51.048758 2017 5 | Thu Nov 23 16:48:32.08896 2017 @@ -719,7 +719,7 @@ DETAIL: Limit in subquery without limit in the outermost query is unsupported -- now both are supported when there is a limit on the outer most query SELECT * FROM recent_10_users ORDER BY lastseen DESC LIMIT 10; user_id | lastseen ----------+--------------------------------- +--------------------------------------------------------------------- 1 | Thu Nov 23 17:30:34.635085 2017 3 | Thu Nov 23 17:18:51.048758 2017 5 | Thu Nov 23 16:48:32.08896 2017 @@ -730,7 +730,7 @@ SELECT * FROM recent_10_users ORDER BY lastseen DESC LIMIT 10; SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id) ORDER BY et.time DESC LIMIT 10; user_id | time | event_type | value_2 | value_3 | value_4 ----------+---------------------------------+------------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Thu Nov 23 21:54:46.924477 2017 | 6 | 4 | 5 | 4 | Thu Nov 23 18:10:21.338399 2017 | 1 | 2 | 4 | 3 | Thu Nov 23 18:08:26.550729 2017 | 2 | 4 | 3 | @@ -748,7 +748,7 @@ VACUUM ANALYZE users_table; -- explain tests EXPLAIN (COSTS FALSE) SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER BY 1; QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Sort Sort Key: remote_scan.user_id -> HashAggregate @@ -780,7 +780,7 @@ EXPLAIN (COSTS FALSE) SELECT * WHERE user_id < 4 AND user_id > 1 ORDER BY user_id; QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Sort Sort Key: remote_scan.user_id -> Custom Scan (Citus Adaptive) @@ -808,7 +808,7 @@ EXPLAIN (COSTS FALSE) SELECT * EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id) ORDER BY et.time DESC LIMIT 10; QUERY PLAN ---------------------------------------------------------------------------------------------------------------- 
+--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan."time" DESC @@ -845,7 +845,7 @@ EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USIN SET citus.subquery_pushdown to ON; EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id) ORDER BY et.time DESC LIMIT 10; QUERY PLAN ---------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan."time" DESC @@ -888,13 +888,13 @@ CREATE TABLE large (id int, tenant_id int); CREATE TABLE small (id int, tenant_id int, unique(tenant_id)); SELECT create_distributed_table('large','tenant_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('small','tenant_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -912,7 +912,7 @@ ERROR: cannot modify views over distributed tables UPDATE large SET id=20 FROM small_view WHERE small_view.id=large.id; SELECT * FROM large order by 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 2 | 3 5 | 4 20 | 2 @@ -926,7 +926,7 @@ INSERT INTO small VALUES(14, 14); UPDATE large SET id=23 FROM (SELECT *, id*2 from small_view ORDER BY 1,2 LIMIT 5) as small_view WHERE small_view.id=large.id; SELECT * FROM large order by 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 2 | 3 5 | 4 20 | 2 @@ -940,7 +940,7 @@ INSERT INTO large VALUES(14, 14); UPDATE large SET id=27 FROM small_view WHERE small_view.tenant_id=large.tenant_id; SELECT * FROM large ORDER BY 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 27 | 2 27 | 3 27 | 4 @@ -955,7 +955,7 @@ INSERT INTO large VALUES(14, 14); UPDATE large SET id=28 FROM small_view WHERE small_view.id=large.id and small_view.tenant_id=14 and large.tenant_id=14; SELECT * FROM large ORDER BY 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 27 | 2 27 | 3 27 | 4 @@ -973,14 +973,14 @@ INSERT INTO small VALUES(99, 99); -- print the columns from the "view" as well to test "rewrite resjunk" behaviour UPDATE large SET id=36 FROM small_view WHERE small_view.id=large.id RETURNING large.id, large.tenant_id, small_view.tenant_id; id | tenant_id | tenant_id -----+-----------+----------- +--------------------------------------------------------------------- 36 | 14 | 14 36 | 78 | 99 (2 rows) SELECT * FROM large ORDER BY 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 27 | 2 27 | 3 27 | 4 @@ -995,7 +995,7 @@ SELECT * FROM large ORDER BY 1, 2; -- below statement should not update anything. 
so it should return empty UPDATE large SET id=46 FROM small_view WHERE small_view.id=large.id and large.id=15 RETURNING large.id, large.tenant_id; id | tenant_id -----+----------- +--------------------------------------------------------------------- (0 rows) -- we should still have identical rows for next test statements, then insert a new row to large table @@ -1004,7 +1004,7 @@ INSERT INTO large VALUES(14, 14); DELETE FROM large WHERE id in (SELECT id FROM small_view); SELECT * FROM large ORDER BY 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 27 | 2 27 | 3 27 | 4 @@ -1023,7 +1023,7 @@ WITH all_small_view_ids AS (SELECT id FROM small_view) DELETE FROM large WHERE id in (SELECT * FROM all_small_view_ids); SELECT * FROM large ORDER BY 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 27 | 2 27 | 3 27 | 4 @@ -1049,13 +1049,13 @@ CREATE TABLE large_partitioned_p2 PARTITION OF large_partitioned FOR VALUES FROM CREATE TABLE large_partitioned_p3 PARTITION OF large_partitioned FOR VALUES FROM (20) TO (100); SELECT create_distributed_table('large_partitioned','tenant_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('small','tenant_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1070,7 +1070,7 @@ ERROR: cannot modify views over distributed tables UPDATE large_partitioned SET id=27 FROM small_view WHERE small_view.tenant_id=large_partitioned.tenant_id; SELECT * FROM large_partitioned ORDER BY 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 6 | 5 26 | 32 27 | 2 @@ -1087,7 +1087,7 @@ INSERT INTO large_partitioned VALUES(14, 14); UPDATE large_partitioned SET id=28 FROM small_view WHERE small_view.id=large_partitioned.id and small_view.tenant_id=14 and large_partitioned.tenant_id=14; SELECT * FROM large_partitioned ORDER BY 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 6 | 5 26 | 32 27 | 2 @@ -1104,7 +1104,7 @@ INSERT INTO large_partitioned VALUES(14, 14); DELETE FROM large_partitioned WHERE tenant_id in (SELECT tenant_id FROM small_view); SELECT * FROM large_partitioned ORDER BY 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 6 | 5 26 | 32 29 | 15 @@ -1118,7 +1118,7 @@ WITH all_small_view_tenant_ids AS (SELECT tenant_id FROM small_view) DELETE FROM large_partitioned WHERE tenant_id in (SELECT * FROM all_small_view_tenant_ids); SELECT * FROM large_partitioned ORDER BY 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 6 | 5 26 | 32 29 | 15 @@ -1135,13 +1135,13 @@ CREATE TABLE large (id int, tenant_id int); CREATE TABLE small (id int, tenant_id int, unique(tenant_id)); SELECT create_distributed_table('large','tenant_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('small','tenant_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -1152,7 +1152,7 @@ CREATE VIEW small_view AS SELECT id, tenant_id FROM (SELECT *, id*2 FROM 
small W UPDATE large SET id=20 FROM small_view WHERE small_view.id=large.id; SELECT * FROM large order by 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 2 | 3 5 | 4 20 | 2 @@ -1166,7 +1166,7 @@ INSERT INTO small VALUES(14, 14); UPDATE large SET id=23 FROM (SELECT *, id*2 from small_view ORDER BY 1,2 LIMIT 5) as small_view WHERE small_view.id=large.id; SELECT * FROM large order by 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 2 | 3 5 | 4 20 | 2 @@ -1180,7 +1180,7 @@ INSERT INTO large VALUES(14, 14); UPDATE large SET id=27 FROM small_view WHERE small_view.tenant_id=large.tenant_id; SELECT * FROM large ORDER BY 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 27 | 2 27 | 3 27 | 4 @@ -1195,7 +1195,7 @@ INSERT INTO large VALUES(14, 14); UPDATE large SET id=28 FROM small_view WHERE small_view.id=large.id and small_view.tenant_id=14 and large.tenant_id=14; SELECT * FROM large ORDER BY 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 27 | 2 27 | 3 27 | 4 @@ -1213,13 +1213,13 @@ INSERT INTO small VALUES(99, 99); -- print the columns from the "view" as well to test "rewrite resjunk" behaviour UPDATE large SET id=36 FROM small_view WHERE small_view.id=large.id RETURNING large.id, large.tenant_id, small_view.tenant_id; id | tenant_id | tenant_id -----+-----------+----------- +--------------------------------------------------------------------- 36 | 14 | 14 (1 row) SELECT * FROM large ORDER BY 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 27 | 2 27 | 3 27 | 4 @@ -1234,7 +1234,7 @@ SELECT * FROM large ORDER BY 1, 2; -- below statement should not update anything. so it should return empty UPDATE large SET id=46 FROM small_view WHERE small_view.id=large.id and large.id=15 RETURNING large.id, large.tenant_id; id | tenant_id -----+----------- +--------------------------------------------------------------------- (0 rows) -- we should still have identical rows for next test statements, then insert a new row to large table @@ -1243,7 +1243,7 @@ INSERT INTO large VALUES(14, 14); DELETE FROM large WHERE id in (SELECT id FROM small_view); SELECT * FROM large ORDER BY 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 27 | 2 27 | 3 27 | 4 @@ -1262,7 +1262,7 @@ WITH all_small_view_ids AS (SELECT id FROM small_view) DELETE FROM large WHERE id in (SELECT * FROM all_small_view_ids); SELECT * FROM large ORDER BY 1, 2; id | tenant_id -----+----------- +--------------------------------------------------------------------- 27 | 2 27 | 3 27 | 4 diff --git a/src/test/regress/expected/multi_working_columns.out b/src/test/regress/expected/multi_working_columns.out index 38c05af49..01ee93857 100644 --- a/src/test/regress/expected/multi_working_columns.out +++ b/src/test/regress/expected/multi_working_columns.out @@ -7,7 +7,7 @@ -- grouping. 
SELECT l_quantity FROM lineitem ORDER BY l_shipdate, l_quantity LIMIT 20; l_quantity ------------- +--------------------------------------------------------------------- 38.00 13.00 15.00 @@ -34,7 +34,7 @@ SELECT l_quantity, count(*) as count FROM lineitem GROUP BY l_quantity, l_shipdate ORDER BY l_quantity, count LIMIT 20; l_quantity | count -------------+------- +--------------------------------------------------------------------- 1.00 | 1 1.00 | 1 1.00 | 1 @@ -61,7 +61,7 @@ SELECT l_quantity, l_shipdate, count(*) as count FROM lineitem GROUP BY l_quantity, l_shipdate ORDER BY l_quantity, count, l_shipdate LIMIT 20; l_quantity | l_shipdate | count -------------+------------+------- +--------------------------------------------------------------------- 1.00 | 02-07-1992 | 1 1.00 | 02-23-1992 | 1 1.00 | 03-17-1992 | 1 diff --git a/src/test/regress/expected/mx_foreign_key_to_reference_table.out b/src/test/regress/expected/mx_foreign_key_to_reference_table.out index a1d8d51ae..7abc9a1ad 100644 --- a/src/test/regress/expected/mx_foreign_key_to_reference_table.out +++ b/src/test/regress/expected/mx_foreign_key_to_reference_table.out @@ -31,26 +31,26 @@ ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (id) REFERENCE ALTER TABLE referencing_table ADD CONSTRAINT foreign_key_2 FOREIGN KEY (id) REFERENCES referenced_table2(test_column2) ON DELETE CASCADE; SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('referenced_table2'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SET search_path TO 'fkey_reference_table'; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1, 2; name | relid | refd_relid ------------------------+------------------------------------------------+------------------------------------------------ +--------------------------------------------------------------------- fkey_ref | fkey_reference_table.referencing_table | fkey_reference_table.referenced_table fkey_ref | fkey_reference_table.referencing_table | fkey_reference_table.referenced_table fkey_ref_7000002 | fkey_reference_table.referencing_table_7000002 | fkey_reference_table.referenced_table_7000000 diff --git a/src/test/regress/expected/non_colocated_join_order.out b/src/test/regress/expected/non_colocated_join_order.out index ba28a26c1..2c96af0c8 100644 --- a/src/test/regress/expected/non_colocated_join_order.out +++ b/src/test/regress/expected/non_colocated_join_order.out @@ -5,7 +5,7 @@ CREATE TABLE test_table_1(id int, value_1 int); SELECT master_create_distributed_table('test_table_1', 'id', 'append'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) @@ -14,7 +14,7 @@ SELECT master_create_distributed_table('test_table_1', 'id', 'append'); CREATE TABLE test_table_2(id int, value_1 int); SELECT master_create_distributed_table('test_table_2', 'id', 'append'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) 
@@ -27,7 +27,7 @@ SET client_min_messages to DEBUG1; SELECT count(*) FROM test_table_1, test_table_2 WHERE test_table_1.id = test_table_2.id; LOG: join order: [ "test_table_1" ][ local partition join "test_table_2" ] count -------- +--------------------------------------------------------------------- 6 (1 row) @@ -43,7 +43,7 @@ SET citus.enable_repartition_joins to ON; SELECT count(*) FROM test_table_1, test_table_2 WHERE test_table_1.id = test_table_2.id; LOG: join order: [ "test_table_1" ][ single range partition join "test_table_2" ] count -------- +--------------------------------------------------------------------- 9 (1 row) diff --git a/src/test/regress/expected/non_colocated_leaf_subquery_joins.out b/src/test/regress/expected/non_colocated_leaf_subquery_joins.out index 298093b9d..4373ea227 100644 --- a/src/test/regress/expected/non_colocated_leaf_subquery_joins.out +++ b/src/test/regress/expected/non_colocated_leaf_subquery_joins.out @@ -24,7 +24,7 @@ END; $BODY$ LANGUAGE plpgsql; SHOW log_error_verbosity; log_error_verbosity ---------------------- +--------------------------------------------------------------------- terse (1 row) @@ -39,7 +39,7 @@ WHERE DEBUG: generating subplan 1_1 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('1_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -55,7 +55,7 @@ DEBUG: generating subplan 3_1 for subquery SELECT users_table.user_id, random() DEBUG: generating subplan 3_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('3_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -76,7 +76,7 @@ WHERE DEBUG: generating subplan 6_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.=) ANY (SELECT 
intermediate_result.user_id FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -93,7 +93,7 @@ DEBUG: generating subplan 8_1 for CTE q1: SELECT user_id FROM public.users_tabl DEBUG: generating subplan 8_2 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) q1, (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('8_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) q1.user_id) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -105,7 +105,7 @@ DEBUG: generating subplan 11_1 for subquery SELECT users_table.user_id FROM pub DEBUG: generating subplan 11_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('11_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -145,7 +145,7 @@ DEBUG: generating subplan 14_4 for subquery SELECT DISTINCT ON ((e.event_type): DEBUG: generating subplan 14_5 for subquery SELECT t.event, array_agg(t.user_id) AS events_table FROM (SELECT intermediate_result.event, intermediate_result."time", intermediate_result.user_id FROM read_intermediate_result('14_4'::text, 'binary'::citus_copy_format) intermediate_result(event text, "time" timestamp without time zone, user_id integer)) t, public.users_table WHERE (users_table.value_1 OPERATOR(pg_catalog.=) (t.event)::integer) GROUP BY t.event DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT event, array_length(events_table, 1) AS array_length FROM (SELECT intermediate_result.event, intermediate_result.events_table FROM read_intermediate_result('14_5'::text, 'binary'::citus_copy_format) intermediate_result(event text, events_table integer[])) q ORDER BY (array_length(events_table, 1)) DESC, event valid -------- +--------------------------------------------------------------------- t (1 row) @@ -161,7 +161,7 @@ WHERE DEBUG: generating subplan 20_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table 
WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.value_1) valid -------- +--------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/non_colocated_subquery_joins.out b/src/test/regress/expected/non_colocated_subquery_joins.out index 15fdc52ac..3382f9013 100644 --- a/src/test/regress/expected/non_colocated_subquery_joins.out +++ b/src/test/regress/expected/non_colocated_subquery_joins.out @@ -44,7 +44,7 @@ $$); DEBUG: generating subplan 3_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT foo.value_2 FROM (SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.value_2 FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.value_2) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -64,7 +64,7 @@ $$); DEBUG: generating subplan 5_1 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 100) DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer))) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -83,7 +83,7 @@ $$); DEBUG: generating subplan 7_1 for subquery SELECT user_id FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) 2) DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.events_table WHERE (NOT (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -104,7 +104,7 @@ $$); DEBUG: generating subplan 9_1 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 3) DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE ((foo.user_id 
OPERATOR(pg_catalog.=) bar.user_id) AND (foo.event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('9_1'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer)))) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -124,7 +124,7 @@ $$); DEBUG: generating subplan 11_1 for subquery SELECT (users_table.user_id OPERATOR(pg_catalog./) 2) AS user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (foo.user_id OPERATOR(pg_catalog.=) ANY (SELECT events_table.user_id FROM public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.<) 10)))) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -145,7 +145,7 @@ DEBUG: generating subplan 13_1 for subquery SELECT (users_table.user_id OPERATO DEBUG: generating subplan 13_2 for subquery SELECT user_id FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 10) DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('13_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (NOT (foo.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('13_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -166,7 +166,7 @@ DEBUG: generating subplan 16_1 for subquery SELECT users_table.user_id, events_ DEBUG: generating subplan 16_2 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 4) DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (foo.event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('16_2'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer)))) valid -------- 
+--------------------------------------------------------------------- t (1 row) @@ -192,7 +192,7 @@ DEBUG: generating subplan 19_3 for subquery SELECT event_type FROM public.event DEBUG: generating subplan 19_4 for subquery SELECT foo.user_id, random() AS random FROM (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('19_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('19_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (foo.event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('19_3'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer)))) DEBUG: Plan 19 query after replacing subqueries and CTEs: SELECT foo_top.user_id, foo_top.random, events_table.user_id FROM (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('19_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) foo_top, public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) foo_top.user_id) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -223,7 +223,7 @@ $$); DEBUG: generating subplan 24_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) DEBUG: Plan 24 query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('24_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo5.value_1))) foo_top valid -------- +--------------------------------------------------------------------- t (1 row) @@ -254,7 +254,7 @@ $$); DEBUG: generating subplan 26_1 for subquery SELECT users_table.user_id, 
users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo5.user_id))) foo_top valid -------- +--------------------------------------------------------------------- t (1 row) @@ -284,7 +284,7 @@ DEBUG: generating subplan 28_1 for subquery SELECT users_table.user_id, users_t DEBUG: generating subplan 28_2 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) DEBUG: Plan 28 query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('28_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('28_2'::text, 
'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo5.value_1))) foo_top valid -------- +--------------------------------------------------------------------- t (1 row) @@ -315,7 +315,7 @@ DEBUG: generating subplan 31_1 for subquery SELECT users_table.user_id, users_t DEBUG: generating subplan 31_2 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('31_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo2.user_id OPERATOR(pg_catalog.=) foo5.value_1))) foo_top valid -------- +--------------------------------------------------------------------- t (1 row) @@ -348,7 +348,7 @@ DEBUG: generating subplan 34_1 for subquery SELECT users_table.user_id FROM pub DEBUG: generating subplan 34_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: Plan 34 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('34_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) foo_top JOIN (SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM 
read_intermediate_result('34_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) bar_top ON ((foo_top.user_id OPERATOR(pg_catalog.=) bar_top.user_id))) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -381,7 +381,7 @@ $$); DEBUG: generating subplan 37_1 for subquery SELECT foo.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo, (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) DEBUG: Plan 37 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT foo.user_id, foo.value_2 FROM (SELECT DISTINCT users_table.user_id, users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) foo_top JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('37_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar_top ON ((foo_top.value_2 OPERATOR(pg_catalog.=) bar_top.user_id))) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -413,7 +413,7 @@ DEBUG: generating subplan 39_1 for subquery SELECT DISTINCT users_table.user_id DEBUG: generating subplan 39_2 for subquery SELECT foo.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('39_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) DEBUG: Plan 39 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT foo.user_id, foo.value_2 FROM (SELECT DISTINCT users_table.user_id, users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) foo_top JOIN (SELECT intermediate_result.user_id FROM 
read_intermediate_result('39_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar_top ON ((foo_top.value_2 OPERATOR(pg_catalog.=) bar_top.user_id))) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -433,7 +433,7 @@ $$); DEBUG: generating subplan 42_1 for subquery SELECT events_table.user_id AS my_users FROM public.events_table, public.users_table WHERE (events_table.event_type OPERATOR(pg_catalog.=) users_table.user_id) DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT intermediate_result.my_users FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(my_users integer)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar valid -------- +--------------------------------------------------------------------- t (1 row) @@ -452,7 +452,7 @@ $$); DEBUG: generating subplan 44_1 for subquery SELECT events_table.event_type AS my_users, random() AS random FROM public.events_table, public.users_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id) DEBUG: Plan 44 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT intermediate_result.my_users, intermediate_result.random FROM read_intermediate_result('44_1'::text, 'binary'::citus_copy_format) intermediate_result(my_users integer, random double precision)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar valid -------- +--------------------------------------------------------------------- t (1 row) @@ -475,7 +475,7 @@ DEBUG: Plan 44 query after replacing subqueries and CTEs: SELECT count(*) AS co DEBUG: generating subplan 46_1 for subquery SELECT events_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) DEBUG: Plan 46 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT events_table.user_id AS my_users FROM public.events_table, (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('46_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) selected_users WHERE (events_table.event_type OPERATOR(pg_catalog.=) selected_users.user_id)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar valid -------- +--------------------------------------------------------------------- t (1 row) @@ -508,7 +508,7 @@ $$); DEBUG: generating subplan 48_1 for subquery SELECT value_2 FROM public.events_table DEBUG: Plan 48 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT events_table.user_id AS my_users FROM public.events_table, (SELECT events_table_1.user_id FROM public.users_table users_table_1, public.events_table events_table_1 WHERE ((users_table_1.user_id OPERATOR(pg_catalog.=) events_table_1.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM 
read_intermediate_result('48_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))))) selected_users WHERE (events_table.user_id OPERATOR(pg_catalog.=) selected_users.user_id)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar valid -------- +--------------------------------------------------------------------- t (1 row) @@ -529,7 +529,7 @@ WHERE DEBUG: generating subplan 50_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) DEBUG: Plan 50 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('50_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -546,7 +546,7 @@ DEBUG: generating subplan 52_1 for CTE q1: SELECT user_id FROM public.users_tab DEBUG: generating subplan 52_2 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: Plan 52 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('52_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) q1, (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('52_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) q1.user_id) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -562,7 +562,7 @@ SELECT true AS valid FROM explain_json_2($$ DEBUG: generating subplan 55_1 for CTE q1: SELECT user_id FROM public.users_table DEBUG: Plan 55 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('55_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) q1, (SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) q1.user_id) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -574,7 +574,7 @@ DEBUG: generating subplan 57_1 for subquery SELECT users_table.user_id FROM pub DEBUG: generating subplan 57_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan 57 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('57_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('57_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) 
valid -------- +--------------------------------------------------------------------- t (1 row) @@ -614,7 +614,7 @@ DEBUG: generating subplan 60_4 for subquery SELECT DISTINCT ON ((e.event_type): DEBUG: generating subplan 60_5 for subquery SELECT t.event, array_agg(t.user_id) AS events_table FROM (SELECT intermediate_result.event, intermediate_result."time", intermediate_result.user_id FROM read_intermediate_result('60_4'::text, 'binary'::citus_copy_format) intermediate_result(event text, "time" timestamp without time zone, user_id integer)) t, public.users_table WHERE (users_table.value_1 OPERATOR(pg_catalog.=) (t.event)::integer) GROUP BY t.event DEBUG: Plan 60 query after replacing subqueries and CTEs: SELECT event, array_length(events_table, 1) AS array_length FROM (SELECT intermediate_result.event, intermediate_result.events_table FROM read_intermediate_result('60_5'::text, 'binary'::citus_copy_format) intermediate_result(event text, events_table integer[])) q ORDER BY (array_length(events_table, 1)) DESC, event valid -------- +--------------------------------------------------------------------- t (1 row) @@ -642,7 +642,7 @@ $$); DEBUG: generating subplan 68_1 for subquery SELECT u1.value_1, u1.user_id, u1."time", u1.value_2, u1.value_3, u1.value_4, u2.user_id, u2."time", u2.value_2, u2.value_3, u2.value_4 FROM (public.users_table u1 JOIN public.users_table u2 USING (value_1)) DEBUG: Plan 68 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.value_1, intermediate_result.user_id, intermediate_result."time", intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4, intermediate_result.user_id_1 AS user_id, intermediate_result.time_1 AS "time", intermediate_result.value_2_1 AS value_2, intermediate_result.value_3_1 AS value_3, intermediate_result.value_4_1 AS value_4 FROM read_intermediate_result('68_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer, user_id integer, "time" timestamp without time zone, value_2 integer, value_3 double precision, value_4 bigint, user_id_1 integer, time_1 timestamp without time zone, value_2_1 integer, value_3_1 double precision, value_4_1 bigint)) a(value_1, user_id, "time", value_2, value_3, value_4, user_id_1, time_1, value_2_1, value_3_1, value_4_1) JOIN (SELECT users_table.value_1, random() AS random FROM public.users_table) u3 USING (value_1)) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -661,7 +661,7 @@ $$); DEBUG: generating subplan 70_1 for subquery SELECT value_2, random() AS random FROM public.users_table DEBUG: Plan 70 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.value_2, intermediate_result.random FROM read_intermediate_result('70_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, random double precision)) u1 JOIN public.events_table USING (value_2)) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -679,7 +679,7 @@ $$); DEBUG: generating subplan 72_1 for subquery SELECT value_2, random() AS random FROM public.users_table DEBUG: Plan 72 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT users_table.value_2, random() AS random FROM public.users_table) u1 LEFT JOIN (SELECT intermediate_result.value_2, intermediate_result.random FROM read_intermediate_result('72_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 
integer, random double precision)) u2 USING (value_2)) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -721,7 +721,7 @@ DEBUG: Plan 77 query after replacing subqueries and CTEs: SELECT intermediate_r DEBUG: generating subplan 76_1 for subquery SELECT users_table.user_id FROM public.users_table UNION SELECT users_table.user_id FROM public.users_table DEBUG: Plan 76 query after replacing subqueries and CTEs: SELECT a.user_id, foo.value_1 FROM ((SELECT intermediate_result.user_id FROM read_intermediate_result('76_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) a JOIN (SELECT users_table.value_1 FROM public.users_table) foo ON ((a.user_id OPERATOR(pg_catalog.=) foo.value_1))) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -745,7 +745,7 @@ DEBUG: Plan 81 query after replacing subqueries and CTEs: SELECT intermediate_r DEBUG: generating subplan 80_1 for subquery SELECT users_table.user_id FROM public.users_table UNION SELECT users_table.user_id FROM public.users_table DEBUG: Plan 80 query after replacing subqueries and CTEs: SELECT a.user_id, foo.user_id, foo."time", foo.value_1, foo.value_2, foo.value_3, foo.value_4 FROM ((SELECT intermediate_result.user_id FROM read_intermediate_result('80_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) a JOIN public.users_table foo ON ((a.user_id OPERATOR(pg_catalog.=) foo.value_1))) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -775,7 +775,7 @@ $$); DEBUG: generating subplan 84_1 for subquery SELECT value_1 FROM public.users_table DEBUG: Plan 84 query after replacing subqueries and CTEs: SELECT foo.user_id, a.user_id, bar.value_1 FROM (((SELECT users_table.user_id FROM public.users_table) foo JOIN (SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) UNION SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) a ON ((a.user_id OPERATOR(pg_catalog.=) foo.user_id))) JOIN (SELECT intermediate_result.value_1 FROM read_intermediate_result('84_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) bar ON ((foo.user_id OPERATOR(pg_catalog.=) bar.value_1))) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -819,7 +819,7 @@ DEBUG: generating subplan 89_1 for subquery SELECT event_type FROM public.event DEBUG: Plan 89 query after replacing subqueries and CTEs: SELECT count(*) AS cnt FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('89_1'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer))) DEBUG: Plan 86 query after replacing subqueries and CTEs: SELECT non_colocated_subquery.value_2, non_colocated_subquery_2.cnt FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('86_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) non_colocated_subquery, (SELECT intermediate_result.cnt FROM read_intermediate_result('86_2'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint)) non_colocated_subquery_2 WHERE (non_colocated_subquery.value_2 OPERATOR(pg_catalog.<>) non_colocated_subquery_2.cnt) valid -------- +--------------------------------------------------------------------- t (1 row) @@ 
-840,7 +840,7 @@ DEBUG: generating subplan 91_1 for subquery SELECT users_table_local.value_2 FR DEBUG: generating subplan 91_2 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12]))) DEBUG: Plan 91 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.value_2 FROM read_intermediate_result('91_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) bar, (SELECT intermediate_result.value_2 FROM read_intermediate_result('91_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) baz WHERE ((foo.value_2 OPERATOR(pg_catalog.=) bar.value_2) AND (foo.value_2 OPERATOR(pg_catalog.=) baz.value_2)) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -876,7 +876,7 @@ DEBUG: generating subplan 93_2 for subquery SELECT value_1 FROM public.users_ta DEBUG: generating subplan 93_3 for subquery SELECT value_2 FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.<) 2) DEBUG: Plan 93 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT users_table.user_id FROM public.users_table) foo JOIN (SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) UNION SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) a ON ((a.user_id OPERATOR(pg_catalog.=) foo.user_id))) JOIN (SELECT intermediate_result.value_1, intermediate_result.value_2 FROM read_intermediate_result('93_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer, value_2 integer)) bar ON ((foo.user_id OPERATOR(pg_catalog.=) bar.value_1))) WHERE ((bar.value_2 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_1 FROM read_intermediate_result('93_2'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer))) AND (bar.value_1 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('93_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) AND (foo.user_id OPERATOR(pg_catalog.=) ANY (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2])))))) valid -------- +--------------------------------------------------------------------- t (1 row) @@ -896,7 +896,7 @@ $$); DEBUG: generating subplan 97_1 for subquery SELECT user_id, value_2 FROM public.events_table DEBUG: Plan 97 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_reference_table users_table_ref, (SELECT users_table.user_id FROM public.users_table) foo, (SELECT intermediate_result.user_id, intermediate_result.value_2 FROM read_intermediate_result('97_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) bar WHERE ((users_table_ref.user_id OPERATOR(pg_catalog.=) foo.user_id) AND (foo.user_id OPERATOR(pg_catalog.=) bar.value_2)) valid -------- 
+--------------------------------------------------------------------- t (1 row) @@ -1048,13 +1048,13 @@ CREATE TABLE table2_p1 PARTITION OF table2 FOR VALUES FROM (1) TO (10); SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('table2','tenant_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('table1','tenant_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/partitioned_intermediate_results.out b/src/test/regress/expected/partitioned_intermediate_results.out index 560cba703..4d6c9dbb7 100644 --- a/src/test/regress/expected/partitioned_intermediate_results.out +++ b/src/test/regress/expected/partitioned_intermediate_results.out @@ -8,7 +8,7 @@ SELECT * FROM worker_partition_query_result('squares_hash', '{-2147483648,-1073741824,0,1073741824}'::text[], '{-1073741825,-1,1073741823,2147483647}'::text[], false); partition_index | rows_written | bytes_written ------------------+--------------+--------------- +--------------------------------------------------------------------- 0 | 4 | 21 1 | 3 | 14 2 | 1 | 5 @@ -19,7 +19,7 @@ SELECT hashint4(x), x, x2 FROM read_intermediate_result('squares_hash_0', 'text') AS res (x int, x2 int) ORDER BY x; hashint4 | x | x2 --------------+----+----- +--------------------------------------------------------------------- -1905060026 | 1 | 1 -1330264708 | 5 | 25 -2047600124 | 8 | 64 @@ -30,7 +30,7 @@ SELECT hashint4(x), x, x2 FROM read_intermediate_result('squares_hash_1', 'text') AS res (x int, x2 int) ORDER BY x; hashint4 | x | x2 --------------+---+---- +--------------------------------------------------------------------- -28094569 | 3 | 9 -1011077333 | 4 | 16 -978793473 | 7 | 49 @@ -40,7 +40,7 @@ SELECT hashint4(x), x, x2 FROM read_intermediate_result('squares_hash_2', 'text') AS res (x int, x2 int) ORDER BY x; hashint4 | x | x2 ------------+---+---- +--------------------------------------------------------------------- 566031088 | 6 | 36 (1 row) @@ -48,7 +48,7 @@ SELECT hashint4(x), x, x2 FROM read_intermediate_result('squares_hash_3', 'text') AS res (x int, x2 int) ORDER BY x; hashint4 | x | x2 -------------+---+---- +--------------------------------------------------------------------- 1134484726 | 2 | 4 1672378334 | 9 | 81 (2 rows) @@ -64,7 +64,7 @@ SELECT * FROM worker_partition_query_result('squares_range', '{20,40,60,100}'::text[], true /* binary format */); partition_index | rows_written | bytes_written ------------------+--------------+--------------- +--------------------------------------------------------------------- 0 | 4 | 93 1 | 2 | 57 2 | 1 | 39 @@ -75,7 +75,7 @@ SELECT x, x2 FROM read_intermediate_result('squares_range_0', 'binary') AS res (x int, x2 int) ORDER BY x; x | x2 ----+---- +--------------------------------------------------------------------- 1 | 1 2 | 4 3 | 9 @@ -86,7 +86,7 @@ SELECT x, x2 FROM read_intermediate_result('squares_range_1', 'binary') AS res (x int, x2 int) ORDER BY x; x | x2 ----+---- +--------------------------------------------------------------------- 5 | 25 6 | 36 (2 rows) @@ -95,7 +95,7 @@ SELECT x, x2 FROM read_intermediate_result('squares_range_2', 'binary') AS res (x int, x2 int) ORDER BY x; x | x2 ----+---- +--------------------------------------------------------------------- 7 | 49 (1 row) @@ -103,7 +103,7 @@ SELECT x, x2 FROM 
read_intermediate_result('squares_range_3', 'binary') AS res (x int, x2 int) ORDER BY x; x | x2 -----+----- +--------------------------------------------------------------------- 8 | 64 9 | 81 10 | 100 @@ -117,7 +117,7 @@ SELECT * FROM worker_partition_query_result('doubles_hash', '{-2147483648,-1073741824,0,1073741824}'::text[], '{-1073741825,-1,1073741823,2147483647}'::text[], false); partition_index | rows_written | bytes_written ------------------+--------------+--------------- +--------------------------------------------------------------------- 0 | 250199 | 3586179 1 | 249872 | 3581280 2 | 250278 | 3587487 @@ -129,7 +129,7 @@ SELECT count(*) FROM read_intermediate_results(ARRAY['doubles_hash_0', 'doubles_hash_2', 'doubles_hash_3'], 'text') AS res (x int, x2 int); count ---------- +--------------------------------------------------------------------- 1000000 (1 row) @@ -141,7 +141,7 @@ SELECT * FROM worker_partition_query_result('doubles_range', '{0,250001,500001,750001}'::text[], '{250000,500000,750000,1000000}'::text[], true); partition_index | rows_written | bytes_written ------------------+--------------+--------------- +--------------------------------------------------------------------- 0 | 250000 | 4500021 1 | 250000 | 4500021 2 | 250000 | 4500021 @@ -153,7 +153,7 @@ SELECT count(*) FROM read_intermediate_results(ARRAY['doubles_range_0', 'doubles_range_2', 'doubles_range_3'], 'binary') AS res (x int, x2 int); count ---------- +--------------------------------------------------------------------- 1000000 (1 row) @@ -365,7 +365,7 @@ SET citus.shard_count TO 32; CREATE TABLE t(a int, b int); SELECT create_distributed_table('t', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -379,7 +379,7 @@ SET citus.shard_count TO 1; CREATE TABLE t(a int, b int); SELECT create_distributed_table('t', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -393,7 +393,7 @@ SET citus.shard_count TO 17; CREATE TABLE t(a int, b int); SELECT create_distributed_table('t', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -407,7 +407,7 @@ SET citus.shard_count TO 8; CREATE TABLE t(a DATE, b int); SELECT create_distributed_table('t', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -421,7 +421,7 @@ SET citus.shard_count TO 8; CREATE TABLE t(a int4range, b int); SELECT create_distributed_table('t', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -434,7 +434,7 @@ DROP TABLE t; CREATE TABLE t(key int, value int); SELECT create_distributed_table('t', 'key', 'range'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -449,7 +449,7 @@ DROP TABLE t; CREATE TABLE t(key int, value int); SELECT create_distributed_table('t', 'key', 'range'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -462,7 +462,7 @@ DROP TABLE t; CREATE TABLE t(key int, value int); SELECT create_distributed_table('t', 'key', 'range'); create_distributed_table --------------------------- 
+--------------------------------------------------------------------- (1 row) @@ -479,7 +479,7 @@ SET citus.shard_count TO 8; CREATE TABLE t(key composite_key_type, value int); SELECT create_distributed_table('t', 'key', 'range'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -495,7 +495,7 @@ DROP TYPE composite_key_type; CREATE TABLE t(key int, value int); SELECT create_distributed_table('t', 'key', 'range'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/pg12.out b/src/test/regress/expected/pg12.out index 70afe9dc7..e7fa8af82 100644 --- a/src/test/regress/expected/pg12.out +++ b/src/test/regress/expected/pg12.out @@ -36,7 +36,7 @@ insert into gen2 (id, val1) values (1,4),(3,6),(5,2),(7,2); select create_distributed_table('gen1', 'id'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -47,7 +47,7 @@ insert into gen1 (id, val1) values (2,4),(4,6),(6,2),(8,2); insert into gen2 (id, val1) values (2,4),(4,6),(6,2),(8,2); select * from gen1 order by 1,2,3; id | val1 | val2 -----+------+------ +--------------------------------------------------------------------- 1 | 4 | 6 2 | 4 | 6 3 | 6 | 8 @@ -60,7 +60,7 @@ select * from gen1 order by 1,2,3; select * from gen2 order by 1,2,3; id | val1 | val2 -----+------+------ +--------------------------------------------------------------------- 1 | 4 | 6 2 | 4 | 6 3 | 6 | 8 @@ -80,7 +80,7 @@ vacuum (index_cleanup 1) gen1; create table cptest (id int, val int); select create_distributed_table('cptest', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -102,13 +102,13 @@ CREATE TABLE single_hash_repartition_first (id int, sum int, avg float); CREATE TABLE single_hash_repartition_second (id int primary key, sum int, avg float); SELECT create_distributed_table('single_hash_repartition_first', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('single_hash_repartition_second', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -132,7 +132,7 @@ FROM cte1, single_hash_repartition_second WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.id = 45; $Q$); coordinator_plan ------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 (2 rows) @@ -151,7 +151,7 @@ FROM cte1, single_hash_repartition_second WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.id = 45; $Q$); coordinator_plan ------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 (2 rows) @@ -171,7 +171,7 @@ FROM cte1, single_hash_repartition_second WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.id = 45; $Q$); coordinator_plan ------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) -> Distributed Subplan 7_1 -> 
Custom Scan (Citus Adaptive) @@ -194,7 +194,7 @@ FROM cte1, single_hash_repartition_second WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.id = 45; $Q$); coordinator_plan ------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 (2 rows) @@ -215,7 +215,7 @@ FROM cte1, single_hash_repartition_second WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.sum = 45; $Q$); coordinator_plan ------------------------------------------------- +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) -> Distributed Subplan 10_1 @@ -236,7 +236,7 @@ FROM cte1, single_hash_repartition_second WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.sum = 45; $Q$); coordinator_plan ------------------------------------------------- +--------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) -> Distributed Subplan 12_1 @@ -271,14 +271,14 @@ DETAIL: Key (key, collection_id)=(1, 1000) is not present in table "collections SELECT create_distributed_table('collections_list', 'key'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('collection_users', 'key'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -296,7 +296,7 @@ INSERT INTO test (x,y) SELECT i,i*3 from generate_series(1, 100) i; SELECT create_distributed_table('test', 'x'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -306,7 +306,7 @@ UPDATE test SET y = 15 WHERE x = 1; COMMIT AND CHAIN; SELECT * FROM test WHERE x = 1; x | y ----+---- +--------------------------------------------------------------------- 1 | 15 (1 row) @@ -316,7 +316,7 @@ UPDATE test SET y = 20 WHERE x = 1; ROLLBACK AND CHAIN; SELECT * FROM test WHERE x = 1; x | y ----+---- +--------------------------------------------------------------------- 1 | 15 (1 row) @@ -327,7 +327,7 @@ UPDATE test SET y = 25; COMMIT AND CHAIN; SELECT DISTINCT y FROM test; y ----- +--------------------------------------------------------------------- 25 (1 row) @@ -337,7 +337,7 @@ UPDATE test SET y = 30; ROLLBACK AND CHAIN; SELECT DISTINCT y FROM test; y ----- +--------------------------------------------------------------------- 25 (1 row) @@ -350,7 +350,7 @@ ERROR: cannot execute UPDATE in a read-only transaction COMMIT; SELECT DISTINCT y FROM test; y ----- +--------------------------------------------------------------------- 25 (1 row) @@ -361,7 +361,7 @@ ERROR: cannot execute UPDATE in a read-only transaction COMMIT; SELECT DISTINCT y FROM test; y ----- +--------------------------------------------------------------------- 25 (1 row) @@ -383,7 +383,7 @@ ERROR: Hash distributed partition columns may not use a non deterministic colla select create_distributed_table('col_test', 'id'); NOTICE: Copying data from local table... 
create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -393,7 +393,7 @@ select count(*) from col_test where val = 'asdf'; count -------- +--------------------------------------------------------------------- 3 (1 row) diff --git a/src/test/regress/expected/propagate_extension_commands.out b/src/test/regress/expected/propagate_extension_commands.out index 4286db521..90f2a01c7 100644 --- a/src/test/regress/expected/propagate_extension_commands.out +++ b/src/test/regress/expected/propagate_extension_commands.out @@ -7,27 +7,27 @@ CREATE EXTENSION seg; -- make sure that both the schema and the extension is distributed SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'seg'); count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_namespace WHERE nspname = 'extension''test'); count -------- +--------------------------------------------------------------------- 1 (1 row) CREATE TABLE test_table (key int, value seg); SELECT create_distributed_table('test_table', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) -- make sure that the table is also distributed now SELECT count(*) from pg_dist_partition where logicalrelid='extension''test.test_table'::regclass; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -35,7 +35,7 @@ CREATE TYPE two_segs AS (seg_1 seg, seg_2 seg); -- verify that the type that depends on the extension is also marked as distributed SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_type WHERE typname = 'two_segs' AND typnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'extension''test')); count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -47,20 +47,20 @@ BEGIN; CREATE TABLE dist_table (key int, value public.issn); SELECT create_distributed_table('dist_table', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) -- we can even run queries (sequentially) over the distributed table SELECT * FROM dist_table; key | value ------+------- +--------------------------------------------------------------------- (0 rows) INSERT INTO dist_table VALUES (1, public.issn('1436-4522')); INSERT INTO dist_table SELECT * FROM dist_table RETURNING *; key | value ------+----------- +--------------------------------------------------------------------- 1 | 1436-4522 (1 row) @@ -68,13 +68,13 @@ COMMIT; -- make sure that the extension is distributed even if we run create extension in a transaction block SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'isn'); count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'isn'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,1) (localhost,57638,t,1) (2 rows) @@ -83,7 +83,7 @@ CREATE TABLE ref_table (a public.issn); -- now, create a reference table relying on the data types SELECT create_reference_table('ref_table'); create_reference_table 
------------------------- +--------------------------------------------------------------------- (1 row) @@ -95,7 +95,7 @@ RESET client_min_messages; -- before updating the version, ensure the current version SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'isn'$$); run_command_on_workers -------------------------- +--------------------------------------------------------------------- (localhost,57637,t,1.1) (localhost,57638,t,1.1) (2 rows) @@ -105,7 +105,7 @@ ALTER EXTENSION isn UPDATE TO '1.2'; -- show that ALTER EXTENSION is propagated SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'isn'$$); run_command_on_workers -------------------------- +--------------------------------------------------------------------- (localhost,57637,t,1.2) (localhost,57638,t,1.2) (2 rows) @@ -113,7 +113,7 @@ SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extnam -- before changing the schema, ensure the current schmea SELECT run_command_on_workers($$SELECT nspname from pg_namespace where oid=(SELECT extnamespace FROM pg_extension WHERE extname = 'isn')$$); run_command_on_workers ------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,extension'test) (localhost,57638,t,extension'test) (2 rows) @@ -125,14 +125,14 @@ SET search_path TO public; -- make sure that the extension is distributed SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'isn'); count -------- +--------------------------------------------------------------------- 1 (1 row) -- show that the ALTER EXTENSION command is propagated SELECT run_command_on_workers($$SELECT nspname from pg_namespace where oid=(SELECT extnamespace FROM pg_extension WHERE extname = 'isn')$$); run_command_on_workers ----------------------------- +--------------------------------------------------------------------- (localhost,57637,t,public) (localhost,57638,t,public) (2 rows) @@ -155,7 +155,7 @@ DROP SCHEMA "extension'test" CASCADE; RESET client_min_messages; SELECT 1 from master_remove_node('localhost', :worker_2_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -164,13 +164,13 @@ CREATE EXTENSION seg; -- show that the extension is created on existing worker SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'seg'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,1) (1 row) SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$); run_command_on_workers -------------------------- +--------------------------------------------------------------------- (localhost,57637,t,1.3) (1 row) @@ -178,7 +178,7 @@ SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extnam CREATE TABLE ref_table_2 (x seg); SELECT create_reference_table('ref_table_2'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -186,21 +186,21 @@ SELECT create_reference_table('ref_table_2'); SELECT 1 from master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "ref_table_2" to the node localhost:xxxxx ?column? 
----------- +--------------------------------------------------------------------- 1 (1 row) -- show that the extension is created on both existing and new node SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'seg'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,1) (localhost,57638,t,1) (2 rows) SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$); run_command_on_workers -------------------------- +--------------------------------------------------------------------- (localhost,57637,t,1.3) (localhost,57638,t,1.3) (2 rows) @@ -208,13 +208,13 @@ SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extnam -- and similarly check for the reference table select count(*) from pg_dist_partition where partmethod='n' and logicalrelid='ref_table_2'::regclass; count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='ref_table_2'::regclass; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -227,14 +227,14 @@ ROLLBACK; -- make sure that the extension is not distributed SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'isn'); count -------- +--------------------------------------------------------------------- 0 (1 row) -- and the extension does not exist on workers SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'isn'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,0) (localhost,57638,t,0) (2 rows) @@ -262,7 +262,7 @@ SET client_min_messages TO WARNING; DROP EXTENSION pg_buffercache, isn CASCADE; SELECT count(*) FROM pg_extension WHERE extname IN ('pg_buffercache', 'isn'); count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -274,13 +274,13 @@ SET client_min_messages TO WARNING; DROP EXTENSION seg CASCADE; SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'seg'); count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'seg'$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,0) (localhost,57638,t,0) (2 rows) @@ -290,7 +290,7 @@ RESET client_min_messages; -- make sure that the extension is not avaliable anymore as a distributed object SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname IN ('seg', 'isn')); count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -302,7 +302,7 @@ BEGIN; CREATE TABLE some_random_table (a int); SELECT create_distributed_table('some_random_table', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -310,7 +310,7 @@ BEGIN; CREATE TABLE some_random_table_2 (a int, b seg); SELECT create_distributed_table('some_random_table_2', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -319,7 +319,7 @@ 
ROLLBACK; -- block is rollbacked, that's a shortcoming of dependency creation logic SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$); run_command_on_workers -------------------------- +--------------------------------------------------------------------- (localhost,57637,t,1.3) (localhost,57638,t,1.3) (2 rows) @@ -335,7 +335,7 @@ RESET client_min_messages; -- remove the node, we'll add back again SELECT 1 from master_remove_node('localhost', :worker_2_port); ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) @@ -350,7 +350,7 @@ BEGIN; CREATE TABLE t2 (a int, b test_type_2, c issn); SELECT create_distributed_table('t2', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -358,7 +358,7 @@ BEGIN; CREATE TABLE t3 (a int, b test_type_3); SELECT create_reference_table('t3'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -367,20 +367,20 @@ COMMIT; SELECT 1 from master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "t3" to the node localhost:xxxxx ?column? ----------- +--------------------------------------------------------------------- 1 (1 row) -- make sure that both extensions are created on both nodes SELECT count(*) FROM citus.pg_dist_object WHERE objid IN (SELECT oid FROM pg_extension WHERE extname IN ('seg', 'isn')); count -------- +--------------------------------------------------------------------- 2 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname IN ('seg', 'isn')$$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,2) (localhost,57638,t,2) (2 rows) diff --git a/src/test/regress/expected/propagate_set_commands.out b/src/test/regress/expected/propagate_set_commands.out index 8e34bcd96..4cb711c51 100644 --- a/src/test/regress/expected/propagate_set_commands.out +++ b/src/test/regress/expected/propagate_set_commands.out @@ -3,7 +3,7 @@ SET search_path TO propagate_set_commands; CREATE TABLE test (id int, value int); SELECT create_distributed_table('test', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -16,7 +16,7 @@ SET citus.select_opens_transaction_block TO on; BEGIN; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; current_setting ------------------ +--------------------------------------------------------------------- on (1 row) @@ -29,7 +29,7 @@ BEGIN; SET enable_hashagg TO false; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; current_setting ------------------ +--------------------------------------------------------------------- on (1 row) @@ -39,7 +39,7 @@ BEGIN; SET LOCAL exit_on_error TO on; SELECT current_setting('exit_on_error') FROM test WHERE id = 1; current_setting ------------------ +--------------------------------------------------------------------- off (1 row) @@ -49,14 +49,14 @@ BEGIN; SET LOCAL enable_hashagg TO false; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; current_setting ------------------ +--------------------------------------------------------------------- off (1 row) -- expand to new node, set should still apply SELECT current_setting('enable_hashagg') FROM test WHERE id = 3; 
current_setting ------------------ +--------------------------------------------------------------------- off (1 row) @@ -66,7 +66,7 @@ BEGIN; SET LOCAL enable_hashagg TO false; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; current_setting ------------------ +--------------------------------------------------------------------- off (1 row) @@ -74,14 +74,14 @@ SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; SET LOCAL enable_hashagg TO DEFAULT; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; current_setting ------------------ +--------------------------------------------------------------------- on (1 row) -- expand to new node, set to default should still apply SELECT current_setting('enable_hashagg') FROM test WHERE id = 3; current_setting ------------------ +--------------------------------------------------------------------- on (1 row) @@ -91,7 +91,7 @@ BEGIN; SET LOCAL enable_hashagg TO false; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; current_setting ------------------ +--------------------------------------------------------------------- off (1 row) @@ -99,7 +99,7 @@ SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; SET enable_hashagg TO DEFAULT; SELECT current_setting('enable_hashagg') FROM test WHERE id = 3; current_setting ------------------ +--------------------------------------------------------------------- off (1 row) @@ -109,7 +109,7 @@ BEGIN; SET LOCAL enable_hashagg TO false; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; current_setting ------------------ +--------------------------------------------------------------------- off (1 row) @@ -117,14 +117,14 @@ SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; RESET enable_hashagg; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; current_setting ------------------ +--------------------------------------------------------------------- on (1 row) -- expand to new node, reset should still apply SELECT current_setting('enable_hashagg') FROM test WHERE id = 3; current_setting ------------------ +--------------------------------------------------------------------- on (1 row) @@ -134,7 +134,7 @@ BEGIN; SET LOCAL enable_hashagg TO false; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; current_setting ------------------ +--------------------------------------------------------------------- off (1 row) @@ -143,7 +143,7 @@ RESET ALL; SET search_path = 'propagate_set_commands'; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; current_setting ------------------ +--------------------------------------------------------------------- on (1 row) @@ -151,7 +151,7 @@ SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; SET LOCAL enable_hashagg TO false; SELECT current_setting('enable_hashagg') FROM test WHERE id = 3; current_setting ------------------ +--------------------------------------------------------------------- on (1 row) diff --git a/src/test/regress/expected/recursive_dml_queries_mx.out b/src/test/regress/expected/recursive_dml_queries_mx.out index 4e2aed32b..d627a5ac1 100644 --- a/src/test/regress/expected/recursive_dml_queries_mx.out +++ b/src/test/regress/expected/recursive_dml_queries_mx.out @@ -5,21 +5,21 @@ SET citus.replication_model TO streaming; CREATE TABLE recursive_dml_queries_mx.distributed_table (tenant_id text, dept int, info jsonb); SELECT create_distributed_table('distributed_table', 'tenant_id'); create_distributed_table 
--------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE recursive_dml_queries_mx.second_distributed_table (tenant_id text, dept int, info jsonb); SELECT create_distributed_table('second_distributed_table', 'tenant_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE recursive_dml_queries_mx.reference_table (id text, name text); SELECT create_reference_table('reference_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -136,7 +136,7 @@ WHERE RETURNING distributed_table.*; tenant_id | dept | info ------------+------+------------------------ +--------------------------------------------------------------------- 50 | 50 | {"f1": 50, "f2": 2500} (1 row) @@ -157,7 +157,7 @@ WHERE RETURNING distributed_table.*; tenant_id | dept | info ------------+------+------------------------ +--------------------------------------------------------------------- 50 | 50 | {"f1": 50, "f2": 2500} (1 row) diff --git a/src/test/regress/expected/recursive_dml_with_different_planners_executors.out b/src/test/regress/expected/recursive_dml_with_different_planners_executors.out index f1a801af4..35f79d40d 100644 --- a/src/test/regress/expected/recursive_dml_with_different_planners_executors.out +++ b/src/test/regress/expected/recursive_dml_with_different_planners_executors.out @@ -3,21 +3,21 @@ SET search_path TO recursive_dml_with_different_planner_executors, public; CREATE TABLE distributed_table (tenant_id text, dept int, info jsonb); SELECT create_distributed_table('distributed_table', 'tenant_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE second_distributed_table (tenant_id text, dept int, info jsonb); SELECT create_distributed_table('second_distributed_table', 'tenant_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE reference_table (id text, name text); SELECT create_reference_table('reference_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/relation_access_tracking.out b/src/test/regress/expected/relation_access_tracking.out index 2a67c3005..89f47cd8e 100644 --- a/src/test/regress/expected/relation_access_tracking.out +++ b/src/test/regress/expected/relation_access_tracking.out @@ -1,6 +1,6 @@ ---- +--------------------------------------------------------------------- --- tests around access tracking within transaction blocks ---- +--------------------------------------------------------------------- CREATE SCHEMA access_tracking; SET search_path TO 'access_tracking'; CREATE OR REPLACE FUNCTION relation_select_access_mode(relationId Oid) @@ -55,42 +55,42 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE table_1 (key int, value int); SELECT create_distributed_table('table_1', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE table_2 (key int, value int); SELECT create_distributed_table('table_2', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 
row) CREATE TABLE table_3 (key int, value int); SELECT create_distributed_table('table_3', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE table_4 (key int, value int); SELECT create_distributed_table('table_4', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE table_5 (key int, value int); SELECT create_distributed_table('table_5', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE table_6 (key int, value int); SELECT create_reference_Table('table_6'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -105,13 +105,13 @@ BEGIN; CREATE TABLE table_7 (key int, value int); SELECT create_distributed_table('table_7', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_7') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------- +--------------------------------------------------------------------- table_7 | not_parallel_accessed | not_parallel_accessed | parallel_access (1 row) @@ -119,13 +119,13 @@ COMMIT; -- outisde the transaction blocks, the function always returns zero SELECT count(*) FROM table_1; count -------- +--------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name = 'table_1'; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) @@ -134,45 +134,45 @@ SELECT * FROM relation_acesses WHERE table_name = 'table_1'; BEGIN; SELECT * FROM relation_acesses WHERE table_name = 'table_1'; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) SELECT count(*) FROM table_1 WHERE key = 1; count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT * FROM relation_acesses WHERE table_name = 'table_1'; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) SELECT count(*) FROM table_1 WHERE key = 1 OR key = 2; count -------- +--------------------------------------------------------------------- 2 (1 row) SELECT * FROM relation_acesses WHERE table_name = 'table_1'; table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed (1 row) INSERT INTO 
table_1 VALUES (1,1); SELECT * FROM relation_acesses WHERE table_name = 'table_1'; table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed (1 row) INSERT INTO table_1 VALUES (1,1), (2,2); SELECT * FROM relation_acesses WHERE table_name = 'table_1'; table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed (1 row) @@ -180,7 +180,7 @@ BEGIN; -- now see that the other tables are not accessed at all SELECT * FROM relation_acesses WHERE table_name = 'table_1'; table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------- +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | parallel_access (1 row) @@ -190,39 +190,39 @@ ROLLBACK; BEGIN; SELECT count(*) FROM table_1 WHERE key = 1; count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT * FROM relation_acesses WHERE table_name = 'table_1'; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) SELECT count(*) FROM table_1 WHERE key = 2; count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT * FROM relation_acesses WHERE table_name = 'table_1'; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) INSERT INTO table_1 VALUES (1,1); SELECT * FROM relation_acesses WHERE table_name = 'table_1'; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) INSERT INTO table_1 VALUES (2,2); SELECT * FROM relation_acesses WHERE table_name = 'table_1'; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) @@ -232,7 +232,7 @@ BEGIN; ALTER TABLE table_1 ADD CONSTRAINT table_1_u UNIQUE (key); SELECT * FROM relation_acesses WHERE table_name = 'table_1'; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------- +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | parallel_access (1 row) @@ -248,13 +248,13 @@ BEGIN; table_3.key = table_4.key AND table_4.key = table_5.key AND table_1.key = 1; count -------- 
+--------------------------------------------------------------------- 1 (1 row) SELECT * FROM relation_acesses WHERE table_name LIKE 'table_%' ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed table_3 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -274,13 +274,13 @@ BEGIN; WHERE table_1.key = table_2.key; count -------- +--------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed (2 rows) @@ -297,13 +297,13 @@ BEGIN; WHERE table_1.key = table_2.key; count -------- +--------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (2 rows) @@ -324,13 +324,13 @@ BEGIN; table_3.key = table_4.key AND table_4.key = table_5.key ) as foo; count -------- +--------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name LIKE 'table_%' ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed table_3 | parallel_access | not_parallel_accessed | not_parallel_accessed @@ -348,7 +348,7 @@ BEGIN; UPDATE table_1 SET value = 15; SELECT * FROM relation_acesses WHERE table_name = 'table_1'; table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------+----------------------- +--------------------------------------------------------------------- table_1 | parallel_access | parallel_access | not_parallel_accessed (1 row) @@ -356,7 +356,7 @@ BEGIN; UPDATE table_2 SET value = 15; SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | parallel_access | parallel_access | not_parallel_accessed table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (2 rows) @@ -369,7 +369,7 @@ BEGIN; WHERE key IN (SELECT key FROM table_2 JOIN table_3 USING (key) WHERE table_2.value = 
15); SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | parallel_access | parallel_access | not_parallel_accessed table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed table_3 | parallel_access | not_parallel_accessed | not_parallel_accessed @@ -381,7 +381,7 @@ BEGIN; INSERT INTO table_2 SELECT * FROM table_1; SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_2 | not_parallel_accessed | parallel_access | not_parallel_accessed (2 rows) @@ -393,7 +393,7 @@ BEGIN; INSERT INTO table_2 SELECT * FROM table_1; SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (2 rows) @@ -404,7 +404,7 @@ BEGIN; INSERT INTO table_2 SELECT * FROM table_1 OFFSET 0; SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_2 | not_parallel_accessed | parallel_access | not_parallel_accessed (2 rows) @@ -425,13 +425,13 @@ BEGIN; OFFSET 0 ) as foo; count -------- +--------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed (2 rows) @@ -454,7 +454,7 @@ BEGIN; ) as foo; SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed table_3 | not_parallel_accessed | parallel_access | not_parallel_accessed @@ -480,7 +480,7 @@ BEGIN; ) as foo; SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- 
+--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed table_3 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -506,7 +506,7 @@ BEGIN; ) AND value IN (SELECT key FROM table_4); SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3', 'table_4') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed table_3 | parallel_access | parallel_access | not_parallel_accessed @@ -522,7 +522,7 @@ BEGIN; 3 3 SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed (1 row) @@ -532,7 +532,7 @@ BEGIN; COPY table_1 FROM STDIN WITH CSV; SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------+----------------------- +--------------------------------------------------------------------- table_1 | not_parallel_accessed | parallel_access | not_parallel_accessed (1 row) @@ -542,7 +542,7 @@ BEGIN; COPY table_1 FROM STDIN WITH CSV; SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) @@ -551,27 +551,27 @@ ROLLBACK; BEGIN; SELECT count(*) FROM table_6; count -------- +--------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_6'); table_name | select_access | dml_access | ddl_access -------------+------------------------+--------------+-------------- +--------------------------------------------------------------------- table_6 | reference_table_access | not_accessed | not_accessed (1 row) UPDATE table_6 SET value = 15; SELECT * FROM relation_acesses WHERE table_name IN ('table_6'); table_name | select_access | dml_access | ddl_access -------------+------------------------+------------------------+-------------- +--------------------------------------------------------------------- table_6 | reference_table_access | reference_table_access | not_accessed (1 row) ALTER TABLE table_6 ADD COLUMN x INT; SELECT * FROM relation_acesses WHERE table_name IN ('table_6'); table_name | select_access | dml_access | ddl_access -------------+------------------------+------------------------+------------------------ +--------------------------------------------------------------------- table_6 | reference_table_access | reference_table_access | reference_table_access (1 row) @@ -580,13 +580,13 @@ ROLLBACK; BEGIN; SELECT count(*) FROM table_1 JOIN table_6 USING(key); count -------- 
+--------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_6', 'table_1') ORDER BY 1,2; table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_6 | parallel_access | not_accessed | not_accessed (2 rows) @@ -597,7 +597,7 @@ BEGIN; TRUNCATE table_1; SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------- +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | parallel_access (1 row) @@ -608,7 +608,7 @@ BEGIN; TRUNCATE table_1; SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) @@ -618,7 +618,7 @@ BEGIN; TRUNCATE table_6; SELECT * FROM relation_acesses WHERE table_name IN ('table_6') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+---------------+--------------+------------------------ +--------------------------------------------------------------------- table_6 | not_accessed | not_accessed | reference_table_access (1 row) @@ -629,7 +629,7 @@ BEGIN; ALTER TABLE table_2 ADD CONSTRAINT table_2_u FOREIGN KEY (key) REFERENCES table_1(key); SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------- +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | parallel_access table_2 | not_parallel_accessed | not_parallel_accessed | parallel_access (2 rows) @@ -642,7 +642,7 @@ BEGIN; ALTER TABLE table_2 ADD CONSTRAINT table_2_u FOREIGN KEY (key) REFERENCES table_1(key); SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (2 rows) @@ -651,7 +651,7 @@ ROLLBACK; CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time); SELECT create_distributed_table('partitioning_test', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -660,7 +660,7 @@ BEGIN; CREATE TABLE partitioning_test_2009 PARTITION OF partitioning_test FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009') ORDER BY 1; table_name | select_access | dml_access | ddl_access 
-------------------------+-----------------------+-----------------------+----------------- +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | parallel_access partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | parallel_access (2 rows) @@ -672,7 +672,7 @@ BEGIN; ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2009 FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------- +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | parallel_access partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | parallel_access (2 rows) @@ -682,7 +682,7 @@ COMMIT; CREATE TABLE partitioning_test_2010 AS SELECT * FROM partitioning_test; SELECT create_distributed_table('partitioning_test_2010', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -690,7 +690,7 @@ BEGIN; ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2010 FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2010') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------- +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | parallel_access partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | parallel_access (2 rows) @@ -700,13 +700,13 @@ COMMIT; BEGIN; SELECT count(*) FROM partitioning_test; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------------------+-----------------+-----------------------+----------------------- +--------------------------------------------------------------------- partitioning_test | parallel_access | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | parallel_access | not_parallel_accessed | not_parallel_accessed partitioning_test_2010 | parallel_access | not_parallel_accessed | not_parallel_accessed @@ -718,13 +718,13 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT count(*) FROM partitioning_test; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2010 | not_parallel_accessed 
| not_parallel_accessed | not_parallel_accessed @@ -736,7 +736,7 @@ BEGIN; UPDATE partitioning_test SET time = now(); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------------------+-----------------+-----------------+----------------------- +--------------------------------------------------------------------- partitioning_test | parallel_access | parallel_access | not_parallel_accessed partitioning_test_2009 | parallel_access | parallel_access | not_parallel_accessed partitioning_test_2010 | parallel_access | parallel_access | not_parallel_accessed @@ -749,7 +749,7 @@ BEGIN; UPDATE partitioning_test SET time = now(); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -761,7 +761,7 @@ BEGIN; ALTER TABLE partitioning_test ADD COLUMN X INT; SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------- +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | parallel_access partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | parallel_access partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | parallel_access @@ -774,7 +774,7 @@ BEGIN; ALTER TABLE partitioning_test ADD COLUMN X INT; SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -785,13 +785,13 @@ ROLLBACK; BEGIN; SELECT count(*) FROM partitioning_test_2009; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- partitioning_test | parallel_access | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | parallel_access | not_parallel_accessed | not_parallel_accessed 
partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -803,13 +803,13 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT count(*) FROM partitioning_test_2009; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -821,7 +821,7 @@ BEGIN; UPDATE partitioning_test_2009 SET time = now(); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- partitioning_test | parallel_access | parallel_access | not_parallel_accessed partitioning_test_2009 | parallel_access | parallel_access | not_parallel_accessed partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -834,7 +834,7 @@ BEGIN; UPDATE partitioning_test_2009 SET time = now(); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -846,7 +846,7 @@ BEGIN; CREATE INDEX i1000000 ON partitioning_test_2009 (id); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | parallel_access partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | parallel_access partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -859,7 +859,7 @@ BEGIN; CREATE INDEX i1000000 ON partitioning_test_2009 (id); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- partitioning_test | not_parallel_accessed 
| not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -873,7 +873,7 @@ BEGIN; NOTICE: truncate cascades to table "table_2" SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------- +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | parallel_access table_2 | not_parallel_accessed | not_parallel_accessed | parallel_access (2 rows) @@ -884,13 +884,13 @@ BEGIN; WITH cte AS (SELECT count(*) FROM table_1) SELECT * FROM cte; count -------- +--------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed (1 row) @@ -901,13 +901,13 @@ BEGIN; WITH cte AS (SELECT count(*) FROM table_1) SELECT * FROM cte; count -------- +--------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) @@ -917,7 +917,7 @@ BEGIN; WITH cte_1 AS (INSERT INTO table_1 VALUES (1000,1000), (1001, 1001), (1002, 1002) RETURNING *) SELECT * FROM cte_1 ORDER BY 1; key | value -------+------- +--------------------------------------------------------------------- 1000 | 1000 1001 | 1001 1002 | 1002 @@ -925,7 +925,7 @@ BEGIN; SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------------+----------------------- +--------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) @@ -935,13 +935,13 @@ BEGIN; WITH cte_1 AS (UPDATE table_1 SET value = 15 RETURNING *) SELECT count(*) FROM cte_1 ORDER BY 1; count -------- +--------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------+----------------------- +--------------------------------------------------------------------- table_1 | parallel_access | parallel_access | not_parallel_accessed (1 row) @@ -951,13 +951,13 @@ BEGIN; WITH cte_1 AS (UPDATE table_1 SET value = 15 RETURNING *) SELECT count(*) FROM cte_1 ORDER BY 1; count -------- +--------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; table_name | select_access | dml_access | ddl_access 
-------------+-----------------+-----------------+----------------------- +--------------------------------------------------------------------- table_1 | parallel_access | parallel_access | not_parallel_accessed (1 row) @@ -971,13 +971,13 @@ BEGIN; SELECT create_distributed_table('table_3', 'key'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_3') ORDER BY 1; table_name | select_access | dml_access | ddl_access -------------+-----------------------+-----------------+----------------- +--------------------------------------------------------------------- table_3 | not_parallel_accessed | parallel_access | parallel_access (1 row) diff --git a/src/test/regress/expected/remove_coordinator.out b/src/test/regress/expected/remove_coordinator.out index f06ad8c6b..a863c6530 100644 --- a/src/test/regress/expected/remove_coordinator.out +++ b/src/test/regress/expected/remove_coordinator.out @@ -1,7 +1,7 @@ -- removing coordinator from pg_dist_node should update pg_dist_colocation SELECT master_remove_node('localhost', :master_port); master_remove_node --------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out index 23da69315..a8ed2b3fe 100644 --- a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out @@ -13,7 +13,7 @@ SET citus.log_local_commands TO ON; CREATE TABLE squares(a int, b int); SELECT create_reference_table('squares'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -22,7 +22,7 @@ INSERT INTO squares SELECT i, i * i FROM generate_series(1, 10) i; SELECT count(*) FROM squares; LOG: executing the command locally: SELECT count(*) AS count FROM replicate_ref_to_coordinator.squares_8000000 squares count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -30,7 +30,7 @@ LOG: executing the command locally: SELECT count(*) AS count FROM replicate_ref CREATE TABLE numbers(a int); SELECT create_reference_table('numbers'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -40,7 +40,7 @@ LOG: executing the command locally: INSERT INTO replicate_ref_to_coordinator.nu BEGIN; EXPLAIN INSERT INTO squares SELECT a, a*a FROM numbers; QUERY PLAN ------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) Task Count: 1 Tasks Shown: All @@ -53,7 +53,7 @@ EXPLAIN INSERT INTO squares SELECT a, a*a FROM numbers; INSERT INTO squares SELECT a, a*a FROM numbers; SELECT * FROM squares WHERE a >= 20 ORDER BY a; a | b -----+----- +--------------------------------------------------------------------- 20 | 400 21 | 441 (2 rows) @@ -62,7 +62,7 @@ ROLLBACK; BEGIN; EXPLAIN INSERT INTO numbers SELECT a FROM squares WHERE a < 3; QUERY PLAN ----------------------------------------------------------------------------------------------- 
+--------------------------------------------------------------------- Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) Task Count: 1 Tasks Shown: All @@ -76,7 +76,7 @@ EXPLAIN INSERT INTO numbers SELECT a FROM squares WHERE a < 3; INSERT INTO numbers SELECT a FROM squares WHERE a < 3; SELECT * FROM numbers ORDER BY a; a ----- +--------------------------------------------------------------------- 1 2 20 @@ -87,7 +87,7 @@ ROLLBACK; -- Make sure we hide shard tables ... SELECT citus_table_is_visible('numbers_8000001'::regclass::oid); citus_table_is_visible ------------------------- +--------------------------------------------------------------------- f (1 row) @@ -96,7 +96,7 @@ CREATE TABLE local_table(a int); INSERT INTO local_table VALUES (2), (4), (7), (20); EXPLAIN SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers; QUERY PLAN -------------------------------------------------------------------------------- +--------------------------------------------------------------------- Merge Join (cost=359.57..860.00 rows=32512 width=8) Merge Cond: (local_table.a = numbers_8000001.a) -> Sort (cost=179.78..186.16 rows=2550 width=4) @@ -109,7 +109,7 @@ EXPLAIN SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers; SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers ORDER BY 1; a | a -----+---- +--------------------------------------------------------------------- 20 | 20 (1 row) @@ -119,7 +119,7 @@ FROM local_table lt JOIN squares sq ON sq.a > lt.a and sq.b > 90 ORDER BY 1,2,3; a | a | b ----+----+----- +--------------------------------------------------------------------- 2 | 10 | 100 4 | 10 | 100 7 | 10 | 100 @@ -159,14 +159,14 @@ CONTEXT: SQL statement "SELECT local_table.a, numbers.a FROM local_table NATURA PL/pgSQL function test_reference_local_join_plpgsql_func() line 5 at PERFORM SELECT sum(a) FROM local_table; sum ------ +--------------------------------------------------------------------- 33 (1 row) SELECT sum(a) FROM numbers; LOG: executing the command locally: SELECT sum(a) AS sum FROM replicate_ref_to_coordinator.numbers_8000001 numbers sum ------ +--------------------------------------------------------------------- 41 (1 row) @@ -182,7 +182,7 @@ CREATE SCHEMA s1; CREATE TABLE s1.ref(a int); SELECT create_reference_table('s1.ref'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -239,14 +239,14 @@ WITH t AS (SELECT *, random() x FROM numbers) SELECT * FROM numbers, local_table WHERE EXISTS (SELECT * FROM t WHERE t.x = numbers.a); a | a ----+--- +--------------------------------------------------------------------- (0 rows) -- shouldn't plan locally even if distributed table is in CTE or subquery CREATE TABLE dist(a int); SELECT create_distributed_table('dist', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -275,7 +275,7 @@ EXPLAIN (COSTS FALSE) SELECT * FROM squares JOIN numbers_v ON squares.a = numbers_v.a; $Q$); coordinator_plan ------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 (2 rows) @@ -286,7 +286,7 @@ EXPLAIN (COSTS FALSE) SELECT * FROM squares JOIN local_table_v ON squares.a = local_table_v.a; $Q$); coordinator_plan ------------------------------------------------- +--------------------------------------------------------------------- Custom 
Scan (Citus Adaptive) -> Distributed Subplan 24_1 -> Seq Scan on local_table @@ -307,7 +307,7 @@ EXPLAIN (COSTS FALSE) SELECT * FROM squares JOIN numbers_v ON squares.a = numbers_v.a; $Q$); plan_is_distributed ---------------------- +--------------------------------------------------------------------- f (1 row) @@ -329,7 +329,7 @@ EXPLAIN (COSTS FALSE) SELECT abs(a.a) FROM local_table a, numbers b WHERE a.a = b.a; $Q$); plan_is_distributed ---------------------- +--------------------------------------------------------------------- f (1 row) @@ -338,7 +338,7 @@ EXPLAIN (COSTS FALSE) SELECT a.a FROM local_table a, numbers b WHERE a.a = b.a ORDER BY abs(a.a); $Q$); plan_is_distributed ---------------------- +--------------------------------------------------------------------- f (1 row) diff --git a/src/test/regress/expected/replicated_partitioned_table.out b/src/test/regress/expected/replicated_partitioned_table.out index fb6c5eb16..5df222dc8 100644 --- a/src/test/regress/expected/replicated_partitioned_table.out +++ b/src/test/regress/expected/replicated_partitioned_table.out @@ -29,7 +29,7 @@ SELECT create_distributed_table('collections', 'key'); NOTICE: Copying data from local table... NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -45,7 +45,7 @@ NOTICE: Copying data from local table... CREATE TABLE collections_5 AS SELECT * FROM collections LIMIT 0; SELECT create_distributed_table('collections_5', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -65,7 +65,7 @@ GROUP BY ORDER BY 1,2; logicalrelid | placement_count ----------------+----------------- +--------------------------------------------------------------------- collections | 8 collections_1 | 8 collections_2 | 8 @@ -82,7 +82,7 @@ FROM WHERE logicalrelid::text LIKE '%collections%'; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -146,7 +146,7 @@ HINT: Run the query on the parent table "collections" instead. 
CREATE TABLE fkey_test (key bigint PRIMARY KEY); SELECT create_distributed_table('fkey_test', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -169,13 +169,13 @@ ALTER TABLE collections ATTACH PARTITION collections_6 FOR VALUES IN ( 6 ); -- read queries works just fine SELECT count(*) FROM collections_1 WHERE key = 1; count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM collections_1 WHERE key != 1; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -186,7 +186,7 @@ CREATE TABLE collections_agg ( ); SELECT create_distributed_table('collections_agg', 'key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -213,7 +213,7 @@ SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('customer_engagements', 'id', 'hash'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -246,14 +246,14 @@ ROLLBACK; BEGIN; SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); master_copy_shard_placement ------------------------------ +--------------------------------------------------------------------- (1 row) ALTER TABLE customer_engagements ADD COLUMN value float DEFAULT 1.0; SELECT * FROM customer_engagements ORDER BY 1,2,3; id | event_id | value -----+----------+------- +--------------------------------------------------------------------- 1 | 1 | 1 1 | 2 | 1 2 | 1 | 1 @@ -264,14 +264,14 @@ ROLLBACK; BEGIN; SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); master_copy_shard_placement ------------------------------ +--------------------------------------------------------------------- (1 row) INSERT INTO customer_engagements VALUES (1, 1); SELECT count(*) FROM customer_engagements; count -------- +--------------------------------------------------------------------- 5 (1 row) diff --git a/src/test/regress/expected/row_types.out b/src/test/regress/expected/row_types.out index 29d59d950..51a23c9ac 100644 --- a/src/test/regress/expected/row_types.out +++ b/src/test/regress/expected/row_types.out @@ -4,7 +4,7 @@ SET search_path TO row_types; CREATE TABLE test (x int, y int); SELECT create_distributed_table('test','x'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -16,7 +16,7 @@ END; $$ language plpgsql; SELECT create_distributed_function('table_returner(int)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -30,7 +30,7 @@ END; $$ language plpgsql; SELECT create_distributed_function('record_returner(int)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -43,7 +43,7 @@ END; $$ language plpgsql; SELECT create_distributed_function('identity_returner(anyelement)'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -51,7 +51,7 @@ INSERT INTO test VALUES (1,2), (1,3), (2,2), (2,3); -- multi-shard queries support row types SELECT (x,y) 
FROM test ORDER BY x, y; row -------- +--------------------------------------------------------------------- (1,2) (1,3) (2,2) @@ -60,7 +60,7 @@ SELECT (x,y) FROM test ORDER BY x, y; SELECT (x,y) FROM test GROUP BY x, y ORDER BY x, y; row -------- +--------------------------------------------------------------------- (1,2) (1,3) (2,2) @@ -69,7 +69,7 @@ SELECT (x,y) FROM test GROUP BY x, y ORDER BY x, y; SELECT ARRAY[NULL,(x,(y,x)),NULL,(y,(x,y))] FROM test ORDER BY x, y; array ---------------------------------------------- +--------------------------------------------------------------------- {NULL,"(1,\"(2,1)\")",NULL,"(2,\"(1,2)\")"} {NULL,"(1,\"(3,1)\")",NULL,"(3,\"(1,3)\")"} {NULL,"(2,\"(2,2)\")",NULL,"(2,\"(2,2)\")"} @@ -78,7 +78,7 @@ SELECT ARRAY[NULL,(x,(y,x)),NULL,(y,(x,y))] FROM test ORDER BY x, y; SELECT ARRAY[[(x,(y,x))],[(x,(x,y))]] FROM test ORDER BY x, y; array ---------------------------------------- +--------------------------------------------------------------------- {{"(1,\"(2,1)\")"},{"(1,\"(1,2)\")"}} {{"(1,\"(3,1)\")"},{"(1,\"(1,3)\")"}} {{"(2,\"(2,2)\")"},{"(2,\"(2,2)\")"}} @@ -87,7 +87,7 @@ SELECT ARRAY[[(x,(y,x))],[(x,(x,y))]] FROM test ORDER BY x, y; select distinct (x,y) AS foo, x, y FROM test ORDER BY x, y; foo | x | y --------+---+--- +--------------------------------------------------------------------- (1,2) | 1 | 2 (1,3) | 1 | 3 (2,2) | 2 | 2 @@ -96,7 +96,7 @@ select distinct (x,y) AS foo, x, y FROM test ORDER BY x, y; SELECT table_returner(x) FROM test ORDER BY x, y; table_returner ----------------- +--------------------------------------------------------------------- (1,1) (1,1) (2,2) @@ -105,7 +105,7 @@ SELECT table_returner(x) FROM test ORDER BY x, y; SELECT record_returner(x) FROM test ORDER BY x, y; record_returner ------------------ +--------------------------------------------------------------------- (2,returned) (2,returned) (3,returned) @@ -114,7 +114,7 @@ SELECT record_returner(x) FROM test ORDER BY x, y; SELECT NULLIF((x, y), (y, x)) FROM test ORDER BY x, y; nullif --------- +--------------------------------------------------------------------- (1,2) (1,3) @@ -123,7 +123,7 @@ SELECT NULLIF((x, y), (y, x)) FROM test ORDER BY x, y; SELECT LEAST((x, y), (y, x)) FROM test ORDER BY x, y; least -------- +--------------------------------------------------------------------- (1,2) (1,3) (2,2) @@ -132,7 +132,7 @@ SELECT LEAST((x, y), (y, x)) FROM test ORDER BY x, y; SELECT GREATEST((x, y), (y, x)) FROM test ORDER BY x, y; greatest ----------- +--------------------------------------------------------------------- (2,1) (3,1) (2,2) @@ -141,7 +141,7 @@ SELECT GREATEST((x, y), (y, x)) FROM test ORDER BY x, y; SELECT COALESCE(NULL, (x, y), (y, x)) FROM test ORDER BY x, y; coalesce ----------- +--------------------------------------------------------------------- (1,2) (1,3) (2,2) @@ -150,7 +150,7 @@ SELECT COALESCE(NULL, (x, y), (y, x)) FROM test ORDER BY x, y; SELECT CASE x WHEN 2 THEN (x, y) ELSE (y, x) END FROM test ORDER BY x, y; row -------- +--------------------------------------------------------------------- (2,1) (3,1) (2,2) @@ -159,7 +159,7 @@ SELECT CASE x WHEN 2 THEN (x, y) ELSE (y, x) END FROM test ORDER BY x, y; SELECT CASE x WHEN 2 THEN (x, y) END FROM test ORDER BY x, y; case -------- +--------------------------------------------------------------------- (2,2) @@ -181,91 +181,91 @@ ERROR: input of anonymous composite types is not implemented -- router queries support row types SELECT (x,y) FROM test WHERE x = 1 ORDER BY x, y; row -------- 
+--------------------------------------------------------------------- (1,2) (1,3) (2 rows) SELECT (x,y) AS foo FROM test WHERE x = 1 ORDER BY x, y; foo -------- +--------------------------------------------------------------------- (1,2) (1,3) (2 rows) SELECT ARRAY[NULL,(x,(y,x)),NULL,(y,(x,y))] FROM test WHERE x = 1 ORDER BY x, y; array ---------------------------------------------- +--------------------------------------------------------------------- {NULL,"(1,\"(2,1)\")",NULL,"(2,\"(1,2)\")"} {NULL,"(1,\"(3,1)\")",NULL,"(3,\"(1,3)\")"} (2 rows) SELECT ARRAY[[(x,(y,x))],[(x,(x,y))]] FROM test WHERE x = 1 ORDER BY x, y; array ---------------------------------------- +--------------------------------------------------------------------- {{"(1,\"(2,1)\")"},{"(1,\"(1,2)\")"}} {{"(1,\"(3,1)\")"},{"(1,\"(1,3)\")"}} (2 rows) select distinct (x,y) AS foo, x, y FROM test WHERE x = 1 ORDER BY x, y; foo | x | y --------+---+--- +--------------------------------------------------------------------- (1,2) | 1 | 2 (1,3) | 1 | 3 (2 rows) SELECT table_returner(x) FROM test WHERE x = 1 ORDER BY x, y; table_returner ----------------- +--------------------------------------------------------------------- (1,1) (1,1) (2 rows) SELECT record_returner(x) FROM test WHERE x = 1 ORDER BY x, y; record_returner ------------------ +--------------------------------------------------------------------- (2,returned) (2,returned) (2 rows) SELECT NULLIF((x, y), (y, x)) FROM test WHERE x = 1 ORDER BY x, y; nullif --------- +--------------------------------------------------------------------- (1,2) (1,3) (2 rows) SELECT LEAST((x, y), (y, x)) FROM test WHERE x = 1 ORDER BY x, y; least -------- +--------------------------------------------------------------------- (1,2) (1,3) (2 rows) SELECT GREATEST((x, y), (y, x)) FROM test WHERE x = 1 ORDER BY x, y; greatest ----------- +--------------------------------------------------------------------- (2,1) (3,1) (2 rows) SELECT COALESCE(NULL, (x, y), (y, x)) FROM test WHERE x = 1 ORDER BY x, y; coalesce ----------- +--------------------------------------------------------------------- (1,2) (1,3) (2 rows) SELECT CASE x WHEN 2 THEN (x, y) ELSE (y, x) END FROM test WHERE x = 1 ORDER BY x, y; row -------- +--------------------------------------------------------------------- (2,1) (3,1) (2 rows) SELECT CASE x WHEN 2 THEN (x, y) END FROM test WHERE x = 1 ORDER BY x, y; case ------- +--------------------------------------------------------------------- (2 rows) @@ -285,14 +285,14 @@ ERROR: input of anonymous composite types is not implemented -- nested row expressions SELECT (x,(x,y)) AS foo FROM test WHERE x = 1 ORDER BY x, y; foo -------------- +--------------------------------------------------------------------- (1,"(1,2)") (1,"(1,3)") (2 rows) SELECT (x,record_returner(x)) FROM test WHERE x = 1 ORDER BY x, y; row --------------------- +--------------------------------------------------------------------- (1,"(2,returned)") (1,"(2,returned)") (2 rows) @@ -304,42 +304,42 @@ ERROR: input of anonymous composite types is not implemented PREPARE rec(int) AS SELECT (x,y*$1) FROM test WHERE x = $1 ORDER BY x, y; EXECUTE rec(1); row -------- +--------------------------------------------------------------------- (1,2) (1,3) (2 rows) EXECUTE rec(1); row -------- +--------------------------------------------------------------------- (1,2) (1,3) (2 rows) EXECUTE rec(1); row -------- +--------------------------------------------------------------------- (1,2) (1,3) (2 rows) EXECUTE rec(1); row 
-------- +--------------------------------------------------------------------- (1,2) (1,3) (2 rows) EXECUTE rec(1); row -------- +--------------------------------------------------------------------- (1,2) (1,3) (2 rows) EXECUTE rec(1); row -------- +--------------------------------------------------------------------- (1,2) (1,3) (2 rows) diff --git a/src/test/regress/expected/sequential_modifications.out b/src/test/regress/expected/sequential_modifications.out index 53e057836..53b47f9d7 100644 --- a/src/test/regress/expected/sequential_modifications.out +++ b/src/test/regress/expected/sequential_modifications.out @@ -65,21 +65,21 @@ CREATE OR REPLACE FUNCTION set_local_multi_shard_modify_mode_to_sequential() ALTER SYSTEM SET citus.recover_2pc_interval TO -1; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) CREATE TABLE test_table(a int, b int); SELECT create_distributed_table('test_table', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) -- not useful if not in transaction SELECT set_local_multi_shard_modify_mode_to_sequential(); set_local_multi_shard_modify_mode_to_sequential -------------------------------------------------- +--------------------------------------------------------------------- (1 row) @@ -88,14 +88,14 @@ SELECT set_local_multi_shard_modify_mode_to_sequential(); SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) ALTER TABLE test_table ADD CONSTRAINT a_check CHECK(a > 0); SELECT distributed_2PCs_are_equal_to_worker_count(); distributed_2pcs_are_equal_to_worker_count --------------------------------------------- +--------------------------------------------------------------------- t (1 row) @@ -104,14 +104,14 @@ SELECT distributed_2PCs_are_equal_to_worker_count(); SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) ALTER TABLE test_table ADD CONSTRAINT b_check CHECK(b > 0); SELECT distributed_2PCs_are_equal_to_placement_count(); distributed_2pcs_are_equal_to_placement_count ------------------------------------------------ +--------------------------------------------------------------------- t (1 row) @@ -120,14 +120,14 @@ SET citus.multi_shard_commit_protocol TO '1pc'; SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) ALTER TABLE test_table ADD CONSTRAINT c_check CHECK(a > 0); SELECT no_distributed_2PCs(); no_distributed_2pcs ---------------------- +--------------------------------------------------------------------- t (1 row) @@ -135,21 +135,21 @@ SET citus.multi_shard_commit_protocol TO '1pc'; SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) ALTER TABLE test_table ADD CONSTRAINT d_check CHECK(a > 0); SELECT no_distributed_2PCs(); no_distributed_2pcs 
---------------------- +--------------------------------------------------------------------- t (1 row) CREATE TABLE ref_test(a int); SELECT create_reference_table('ref_test'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -158,14 +158,14 @@ SET citus.multi_shard_commit_protocol TO '1pc'; SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) CREATE INDEX ref_test_seq_index ON ref_test(a); SELECT distributed_2PCs_are_equal_to_worker_count(); distributed_2pcs_are_equal_to_worker_count --------------------------------------------- +--------------------------------------------------------------------- t (1 row) @@ -173,14 +173,14 @@ SELECT distributed_2PCs_are_equal_to_worker_count(); SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) CREATE INDEX ref_test_seq_index_2 ON ref_test(a); SELECT distributed_2PCs_are_equal_to_worker_count(); distributed_2pcs_are_equal_to_worker_count --------------------------------------------- +--------------------------------------------------------------------- t (1 row) @@ -190,7 +190,7 @@ SET citus.shard_replication_factor TO 2; CREATE TABLE test_table_rep_2 (a int); SELECT create_distributed_table('test_table_rep_2', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -199,28 +199,28 @@ SET citus.multi_shard_commit_protocol TO '1pc'; SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) CREATE INDEX test_table_rep_2_i_1 ON test_table_rep_2(a); SELECT no_distributed_2PCs(); no_distributed_2pcs ---------------------- +--------------------------------------------------------------------- t (1 row) SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) CREATE INDEX test_table_rep_2_i_2 ON test_table_rep_2(a); SELECT no_distributed_2PCs(); no_distributed_2pcs ---------------------- +--------------------------------------------------------------------- t (1 row) @@ -229,28 +229,28 @@ SET citus.multi_shard_commit_protocol TO '2pc'; SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) CREATE INDEX test_table_rep_2_i_3 ON test_table_rep_2(a); SELECT distributed_2PCs_are_equal_to_worker_count(); distributed_2pcs_are_equal_to_worker_count --------------------------------------------- +--------------------------------------------------------------------- t (1 row) SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) CREATE INDEX 
test_table_rep_2_i_4 ON test_table_rep_2(a); SELECT distributed_2PCs_are_equal_to_placement_count(); distributed_2pcs_are_equal_to_placement_count ------------------------------------------------ +--------------------------------------------------------------------- t (1 row) @@ -260,7 +260,7 @@ SET citus.multi_shard_commit_protocol TO '2pc'; SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -268,7 +268,7 @@ CREATE INDEX CONCURRENTLY test_table_rep_2_i_5 ON test_table_rep_2(a); -- we shouldn't see any distributed transactions SELECT no_distributed_2PCs(); no_distributed_2pcs ---------------------- +--------------------------------------------------------------------- t (1 row) @@ -276,7 +276,7 @@ SET citus.multi_shard_commit_protocol TO '2pc'; SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -284,7 +284,7 @@ CREATE INDEX CONCURRENTLY test_table_rep_2_i_6 ON test_table_rep_2(a); -- we shouldn't see any distributed transactions SELECT no_distributed_2PCs(); no_distributed_2pcs ---------------------- +--------------------------------------------------------------------- t (1 row) @@ -294,7 +294,7 @@ INSERT INTO test_seq_truncate SELECT i FROM generate_series(0, 100) i; SELECT create_distributed_table('test_seq_truncate', 'a'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -302,14 +302,14 @@ NOTICE: Copying data from local table... 
SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) TRUNCATE test_seq_truncate; SELECT distributed_2PCs_are_equal_to_placement_count(); distributed_2pcs_are_equal_to_placement_count ------------------------------------------------ +--------------------------------------------------------------------- t (1 row) @@ -317,14 +317,14 @@ SELECT distributed_2PCs_are_equal_to_placement_count(); SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) TRUNCATE test_seq_truncate; SELECT distributed_2PCs_are_equal_to_worker_count(); distributed_2pcs_are_equal_to_worker_count --------------------------------------------- +--------------------------------------------------------------------- t (1 row) @@ -333,7 +333,7 @@ CREATE TABLE test_seq_truncate_rep_2 (a int); SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('test_seq_truncate_rep_2', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -341,28 +341,28 @@ INSERT INTO test_seq_truncate_rep_2 SELECT i FROM generate_series(0, 100) i; SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) TRUNCATE test_seq_truncate_rep_2; SELECT distributed_2PCs_are_equal_to_worker_count(); distributed_2pcs_are_equal_to_worker_count --------------------------------------------- +--------------------------------------------------------------------- t (1 row) SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) TRUNCATE test_seq_truncate_rep_2; SELECT distributed_2PCs_are_equal_to_placement_count(); distributed_2pcs_are_equal_to_placement_count ------------------------------------------------ +--------------------------------------------------------------------- t (1 row) @@ -372,7 +372,7 @@ CREATE TABLE multi_shard_modify_test ( t_value integer not null); SELECT create_distributed_table('multi_shard_modify_test', 't_key'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -380,14 +380,14 @@ SELECT create_distributed_table('multi_shard_modify_test', 't_key'); SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) DELETE FROM multi_shard_modify_test; SELECT distributed_2PCs_are_equal_to_placement_count(); distributed_2pcs_are_equal_to_placement_count ------------------------------------------------ +--------------------------------------------------------------------- t (1 row) @@ -395,14 +395,14 @@ SELECT distributed_2PCs_are_equal_to_placement_count(); SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); recover_prepared_transactions 
-------------------------------- +--------------------------------------------------------------------- 0 (1 row) DELETE FROM multi_shard_modify_test; SELECT distributed_2PCs_are_equal_to_worker_count(); distributed_2pcs_are_equal_to_worker_count --------------------------------------------- +--------------------------------------------------------------------- t (1 row) @@ -413,7 +413,7 @@ BEGIN; -- now switch to sequential mode to enable a successful TRUNCATE SELECT set_local_multi_shard_modify_mode_to_sequential(); set_local_multi_shard_modify_mode_to_sequential -------------------------------------------------- +--------------------------------------------------------------------- (1 row) @@ -422,7 +422,7 @@ COMMIT; -- see that all the data successfully removed SELECT count(*) FROM multi_shard_modify_test; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -431,28 +431,28 @@ SELECT count(*) FROM multi_shard_modify_test; SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) INSERT INTO multi_shard_modify_test SELECT * FROM multi_shard_modify_test; SELECT distributed_2PCs_are_equal_to_worker_count(); distributed_2pcs_are_equal_to_worker_count --------------------------------------------- +--------------------------------------------------------------------- t (1 row) SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) INSERT INTO multi_shard_modify_test SELECT * FROM multi_shard_modify_test; SELECT distributed_2PCs_are_equal_to_placement_count(); distributed_2pcs_are_equal_to_placement_count ------------------------------------------------ +--------------------------------------------------------------------- t (1 row) @@ -463,7 +463,7 @@ BEGIN; -- now switch to sequential mode to enable a successful INSERT .. 
SELECT SELECT set_local_multi_shard_modify_mode_to_sequential(); set_local_multi_shard_modify_mode_to_sequential -------------------------------------------------- +--------------------------------------------------------------------- (1 row) @@ -472,7 +472,7 @@ COMMIT; -- see that all the data successfully inserted SELECT count(*) FROM multi_shard_modify_test; count -------- +--------------------------------------------------------------------- 210 (1 row) @@ -480,14 +480,14 @@ ALTER SYSTEM SET citus.recover_2pc_interval TO DEFAULT; SET citus.shard_replication_factor TO DEFAULT; SELECT pg_reload_conf(); pg_reload_conf ----------------- +--------------------------------------------------------------------- t (1 row) -- The following tests are added to test if create_distributed_table honors sequential mode SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -497,7 +497,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT create_distributed_table('test_seq_multi_shard_update', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -506,7 +506,7 @@ BEGIN; COMMIT; SELECT distributed_2PCs_are_equal_to_worker_count(); distributed_2pcs_are_equal_to_worker_count --------------------------------------------- +--------------------------------------------------------------------- t (1 row) @@ -514,7 +514,7 @@ DROP TABLE test_seq_multi_shard_update; -- Check if truncate works properly after create_distributed_table in sequential mode SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -523,7 +523,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT create_distributed_table('test_seq_truncate_after_create', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -532,7 +532,7 @@ BEGIN; COMMIT; SELECT distributed_2PCs_are_equal_to_worker_count(); distributed_2pcs_are_equal_to_worker_count --------------------------------------------- +--------------------------------------------------------------------- t (1 row) @@ -540,7 +540,7 @@ DROP TABLE test_seq_truncate_after_create; -- Check if drop table works properly after create_distributed_table in sequential mode SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -549,7 +549,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT create_distributed_table('test_seq_drop_table', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -557,14 +557,14 @@ BEGIN; COMMIT; SELECT distributed_2PCs_are_equal_to_worker_count(); distributed_2pcs_are_equal_to_worker_count --------------------------------------------- +--------------------------------------------------------------------- t (1 row) -- Check if copy errors out properly after create_distributed_table in sequential mode SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 
0 (1 row) @@ -573,7 +573,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT create_distributed_table('test_seq_copy', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -581,7 +581,7 @@ BEGIN; ROLLBACK; SELECT distributed_2PCs_are_equal_to_worker_count(); distributed_2pcs_are_equal_to_worker_count --------------------------------------------- +--------------------------------------------------------------------- f (1 row) @@ -589,7 +589,7 @@ DROP TABLE test_seq_copy; -- Check if DDL + CREATE INDEX works properly after create_distributed_table in sequential mode SELECT recover_prepared_transactions(); recover_prepared_transactions -------------------------------- +--------------------------------------------------------------------- 0 (1 row) @@ -598,7 +598,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT create_distributed_table('test_seq_ddl_index', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -608,7 +608,7 @@ BEGIN; COMMIT; SELECT distributed_2PCs_are_equal_to_worker_count(); distributed_2pcs_are_equal_to_worker_count --------------------------------------------- +--------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/set_operation_and_local_tables.out b/src/test/regress/expected/set_operation_and_local_tables.out index 08bfe49a1..ebe7eb458 100644 --- a/src/test/regress/expected/set_operation_and_local_tables.out +++ b/src/test/regress/expected/set_operation_and_local_tables.out @@ -3,14 +3,14 @@ SET search_path TO recursive_set_local, public; CREATE TABLE recursive_set_local.test (x int, y int); SELECT create_distributed_table('test', 'x'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE recursive_set_local.ref (a int, b int); SELECT create_reference_table('ref'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -29,7 +29,7 @@ DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT intermediate_re DEBUG: Creating router plan DEBUG: Plan is router executable x ---- +--------------------------------------------------------------------- (0 rows) -- we should be able to run set operations with generate series @@ -41,7 +41,7 @@ DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT intermediate_re DEBUG: Creating router plan DEBUG: Plan is router executable x ---- +--------------------------------------------------------------------- 2 1 (2 rows) @@ -57,7 +57,7 @@ DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT intermediate_re DEBUG: Creating router plan DEBUG: Plan is router executable x ---- +--------------------------------------------------------------------- 2 1 (2 rows) @@ -67,7 +67,7 @@ DEBUG: Plan is router executable DEBUG: Creating router plan DEBUG: Plan is router executable a ---- +--------------------------------------------------------------------- 3 2 (2 rows) @@ -92,7 +92,7 @@ DEBUG: Plan 12 query after replacing subqueries and CTEs: (SELECT intermediate_ DEBUG: Creating router plan DEBUG: Plan is router executable x ---- +--------------------------------------------------------------------- 4 3 2 @@ -110,7 +110,7 @@ DEBUG: Plan 14 query after replacing subqueries and 
CTEs: (SELECT intermediate_ DEBUG: Creating router plan DEBUG: Plan is router executable x ---- +--------------------------------------------------------------------- 4 3 (2 rows) @@ -131,7 +131,7 @@ DEBUG: Plan 16 query after replacing subqueries and CTEs: (SELECT cte_1.user_id DEBUG: Creating router plan DEBUG: Plan is router executable user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -166,7 +166,7 @@ DEBUG: generating subplan 19_4 for subquery (SELECT cte_1.x FROM (SELECT interm DEBUG: Plan 19 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.x FROM read_intermediate_result('19_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) foo, recursive_set_local.test WHERE (test.y OPERATOR(pg_catalog.=) foo.x) DEBUG: Router planner cannot handle multi-shard select queries count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -197,7 +197,7 @@ DEBUG: Plan 23 query after replacing subqueries and CTEs: SELECT count(*) AS co DEBUG: Creating router plan DEBUG: Plan is router executable count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -215,7 +215,7 @@ DEBUG: generating subplan 27_4 for subquery SELECT intermediate_result.x FROM r DEBUG: Plan 27 query after replacing subqueries and CTEs: SELECT x, y FROM recursive_set_local.test a WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('27_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer))) ORDER BY x, y DEBUG: Router planner cannot handle multi-shard select queries x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -236,7 +236,7 @@ DEBUG: Plan is router executable DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT x, y FROM recursive_set_local.test a WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT cte.x FROM (SELECT intermediate_result.x FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) cte)) ORDER BY x, y DEBUG: Router planner cannot handle multi-shard select queries x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -282,7 +282,7 @@ DEBUG: generating subplan 42_3 for subquery SELECT intermediate_result.x, inter DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT test.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('42_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u RIGHT JOIN recursive_set_local.test USING (x)) ORDER BY test.x, u.y DEBUG: Router planner cannot handle multi-shard select queries x | y | y ----+---+--- +--------------------------------------------------------------------- 1 | | 1 2 | | 2 (2 rows) @@ -300,7 +300,7 @@ DEBUG: generating subplan 45_3 for subquery SELECT intermediate_result.x, inter DEBUG: Plan 45 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('45_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_set_local.test USING (x)) ORDER BY u.x, u.y DEBUG: Router planner cannot handle multi-shard select queries x | y | y ----+---+--- +--------------------------------------------------------------------- (0 rows) -- set 
operations and the sublink can be recursively planned @@ -321,7 +321,7 @@ DEBUG: Plan 48 query after replacing subqueries and CTEs: SELECT x FROM (SELECT DEBUG: Creating router plan DEBUG: Plan is router executable x ---- +--------------------------------------------------------------------- 1 2 (2 rows) @@ -370,7 +370,7 @@ DEBUG: Plan 53 query after replacing subqueries and CTEs: SELECT intermediate_r DEBUG: Creating router plan DEBUG: Plan is router executable x ---- +--------------------------------------------------------------------- 2 1 (2 rows) diff --git a/src/test/regress/expected/set_operations.out b/src/test/regress/expected/set_operations.out index 9958d2426..a42e1ebf1 100644 --- a/src/test/regress/expected/set_operations.out +++ b/src/test/regress/expected/set_operations.out @@ -3,21 +3,21 @@ SET search_path TO recursive_union, public; CREATE TABLE recursive_union.test (x int, y int); SELECT create_distributed_table('test', 'x'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE recursive_union.ref (a int, b int); SELECT create_reference_table('ref'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE test_not_colocated (LIKE test); SELECT create_distributed_table('test_not_colocated', 'x', colocate_with := 'none'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -35,7 +35,7 @@ DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT intermediate_re DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -48,7 +48,7 @@ DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT intermediate_re DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -58,7 +58,7 @@ DEBUG: Plan is router executable DEBUG: Creating router plan DEBUG: Plan is router executable a | b ----+--- +--------------------------------------------------------------------- 2 | 2 3 | 3 (2 rows) @@ -73,7 +73,7 @@ DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT intermediate_re DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- 1 | 1 1 | 1 2 | 2 @@ -88,7 +88,7 @@ DEBUG: Plan 12 query after replacing subqueries and CTEs: SELECT intermediate_r DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 2 | 2 @@ -99,7 +99,7 @@ DEBUG: Plan is router executable DEBUG: Creating router plan DEBUG: Plan is router executable a | b ----+--- +--------------------------------------------------------------------- 2 | 2 2 | 2 3 | 3 @@ -116,7 +116,7 @@ DEBUG: Plan 15 query after replacing subqueries and CTEs: SELECT intermediate_r DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -129,7 +129,7 @@ DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT intermediate_r DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- 
+--------------------------------------------------------------------- 2 | 2 (1 row) @@ -137,7 +137,7 @@ DEBUG: Plan is router executable DEBUG: Creating router plan DEBUG: Plan is router executable a | b ----+--- +--------------------------------------------------------------------- 2 | 2 3 | 3 (2 rows) @@ -152,7 +152,7 @@ DEBUG: Plan 21 query after replacing subqueries and CTEs: SELECT intermediate_r DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -165,7 +165,7 @@ DEBUG: Plan 24 query after replacing subqueries and CTEs: SELECT intermediate_r DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- 2 | 2 (1 row) @@ -173,7 +173,7 @@ DEBUG: Plan is router executable DEBUG: Creating router plan DEBUG: Plan is router executable a | b ----+--- +--------------------------------------------------------------------- 2 | 2 3 | 3 (2 rows) @@ -188,7 +188,7 @@ DEBUG: Plan 27 query after replacing subqueries and CTEs: SELECT intermediate_r DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- (0 rows) (SELECT * FROM test) EXCEPT (SELECT * FROM ref) ORDER BY 1,2; @@ -199,7 +199,7 @@ DEBUG: Plan 30 query after replacing subqueries and CTEs: SELECT intermediate_r DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -207,7 +207,7 @@ DEBUG: Plan is router executable DEBUG: Creating router plan DEBUG: Plan is router executable a | b ----+--- +--------------------------------------------------------------------- (0 rows) (SELECT * FROM test) EXCEPT ALL (SELECT * FROM test) ORDER BY 1,2; @@ -220,7 +220,7 @@ DEBUG: Plan 33 query after replacing subqueries and CTEs: SELECT intermediate_r DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- (0 rows) (SELECT * FROM test) EXCEPT ALL (SELECT * FROM ref) ORDER BY 1,2; @@ -231,7 +231,7 @@ DEBUG: Plan 36 query after replacing subqueries and CTEs: SELECT intermediate_r DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -239,7 +239,7 @@ DEBUG: Plan is router executable DEBUG: Creating router plan DEBUG: Plan is router executable a | b ----+--- +--------------------------------------------------------------------- (0 rows) -- more complex set operation trees are supported @@ -262,7 +262,7 @@ DEBUG: Plan 39 query after replacing subqueries and CTEs: (((SELECT intermediat DEBUG: Creating router plan DEBUG: Plan is router executable x | y -----+---- +--------------------------------------------------------------------- 1 | 2 | 2 3 | 3 @@ -279,7 +279,7 @@ DEBUG: Plan is router executable SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM test)) u ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -297,7 +297,7 @@ DEBUG: Plan 43 query after replacing subqueries and CTEs: SELECT x, y FROM (SEL DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- 
+--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -313,7 +313,7 @@ DEBUG: Plan 47 query after replacing subqueries and CTEs: SELECT x, y FROM (SEL DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -323,7 +323,7 @@ SELECT * FROM ((SELECT * FROM ref) UNION (SELECT * FROM ref)) u ORDER BY 1,2; DEBUG: Creating router plan DEBUG: Plan is router executable a | b ----+--- +--------------------------------------------------------------------- 2 | 2 3 | 3 (2 rows) @@ -331,7 +331,7 @@ DEBUG: Plan is router executable SELECT * FROM ((SELECT * FROM test) UNION ALL (SELECT * FROM test)) u ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries x | y ----+--- +--------------------------------------------------------------------- 1 | 1 1 | 1 2 | 2 @@ -341,7 +341,7 @@ DEBUG: Router planner cannot handle multi-shard select queries SELECT * FROM ((SELECT x, y FROM test) UNION ALL (SELECT y, x FROM test)) u ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries x | y ----+--- +--------------------------------------------------------------------- 1 | 1 1 | 1 2 | 2 @@ -359,7 +359,7 @@ DEBUG: Plan 53 query after replacing subqueries and CTEs: SELECT x, y FROM (SEL DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 2 | 2 @@ -370,7 +370,7 @@ SELECT * FROM ((SELECT * FROM ref) UNION ALL (SELECT * FROM ref)) u ORDER BY 1,2 DEBUG: Creating router plan DEBUG: Plan is router executable a | b ----+--- +--------------------------------------------------------------------- 2 | 2 2 | 2 3 | 3 @@ -390,7 +390,7 @@ DEBUG: Plan 57 query after replacing subqueries and CTEs: SELECT x, y FROM (SEL DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -408,7 +408,7 @@ DEBUG: Plan 61 query after replacing subqueries and CTEs: SELECT x, y FROM (SEL DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -424,7 +424,7 @@ DEBUG: Plan 65 query after replacing subqueries and CTEs: SELECT x, y FROM (SEL DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- 2 | 2 (1 row) @@ -432,7 +432,7 @@ SELECT * FROM ((SELECT * FROM ref) INTERSECT (SELECT * FROM ref)) u ORDER BY 1,2 DEBUG: Creating router plan DEBUG: Plan is router executable a | b ----+--- +--------------------------------------------------------------------- 2 | 2 3 | 3 (2 rows) @@ -450,7 +450,7 @@ DEBUG: Plan 69 query after replacing subqueries and CTEs: SELECT x, y FROM (SEL DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- (0 rows) SELECT * FROM ((SELECT x, y FROM test) EXCEPT (SELECT y, x FROM test)) u ORDER BY 1,2; @@ -466,7 +466,7 @@ DEBUG: Plan 73 query after replacing subqueries and CTEs: SELECT x, y FROM (SEL DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- (0 rows) SELECT * FROM ((SELECT * FROM test) EXCEPT (SELECT * FROM ref)) u ORDER BY 1,2; @@ -480,7 
+480,7 @@ DEBUG: Plan 77 query after replacing subqueries and CTEs: SELECT x, y FROM (SEL DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- 1 | 1 (1 row) @@ -488,14 +488,14 @@ SELECT * FROM ((SELECT * FROM ref) EXCEPT (SELECT * FROM ref)) u ORDER BY 1,2; DEBUG: Creating router plan DEBUG: Plan is router executable a | b ----+--- +--------------------------------------------------------------------- (0 rows) -- unions can even be pushed down within a join SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM test)) u JOIN test USING (x) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries x | y | y ----+---+--- +--------------------------------------------------------------------- 1 | 1 | 1 2 | 2 | 2 (2 rows) @@ -503,7 +503,7 @@ DEBUG: Router planner cannot handle multi-shard select queries SELECT * FROM ((SELECT * FROM test) UNION ALL (SELECT * FROM test)) u LEFT JOIN test USING (x) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries x | y | y ----+---+--- +--------------------------------------------------------------------- 1 | 1 | 1 1 | 1 | 1 2 | 2 | 2 @@ -524,7 +524,7 @@ DEBUG: generating subplan 83_3 for subquery SELECT intermediate_result.x, inter DEBUG: Plan 83 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('83_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y DEBUG: Router planner cannot handle multi-shard select queries x | y | y ----+---+--- +--------------------------------------------------------------------- 1 | 1 | 1 2 | 2 | 2 (2 rows) @@ -556,7 +556,7 @@ DEBUG: generating subplan 91_3 for subquery SELECT intermediate_result.x, inter DEBUG: Plan 91 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('91_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y DEBUG: Router planner cannot handle multi-shard select queries x | y | y ----+---+--- +--------------------------------------------------------------------- 1 | 1 | 1 2 | 2 | 2 (2 rows) @@ -573,7 +573,7 @@ DEBUG: generating subplan 95_3 for subquery SELECT intermediate_result.x, inter DEBUG: Plan 95 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('95_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y DEBUG: Router planner cannot handle multi-shard select queries x | y | y ----+---+--- +--------------------------------------------------------------------- 1 | 1 | 1 2 | 2 | 2 (2 rows) @@ -582,7 +582,7 @@ DEBUG: Router planner cannot handle multi-shard select queries SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM test ORDER BY x)) u JOIN generate_series(1,10) x USING (x) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -601,7 +601,7 @@ DEBUG: Plan 100 query after replacing subqueries and CTEs: SELECT u.x, u.y FROM DEBUG: Creating router plan DEBUG: Plan is router 
executable x | y ----+--- +--------------------------------------------------------------------- (0 rows) -- subqueries in WHERE clause with set operations fails due to the current limitaions of recursive planning IN WHERE clause @@ -618,7 +618,7 @@ DEBUG: generating subplan 105_1 for subquery SELECT y FROM recursive_union.test DEBUG: Plan 105 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT test.x, test.y FROM recursive_union.test UNION SELECT test.x, test.y FROM recursive_union.test) foo WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.y FROM read_intermediate_result('105_1'::text, 'binary'::citus_copy_format) intermediate_result(y integer))) ORDER BY x DEBUG: Router planner cannot handle multi-shard select queries x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -640,7 +640,7 @@ DEBUG: Plan 107 query after replacing subqueries and CTEs: SELECT x, y FROM (SE DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -661,7 +661,7 @@ DEBUG: Plan 112 query after replacing subqueries and CTEs: SELECT x, y FROM (SE DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -680,7 +680,7 @@ DEBUG: Plan 117 query after replacing subqueries and CTEs: SELECT intermediate_ DEBUG: Creating router plan DEBUG: Plan is router executable x | y | rnk ----+---+----- +--------------------------------------------------------------------- 2 | 2 | 1 1 | 1 | 1 (2 rows) @@ -707,7 +707,7 @@ DEBUG: generating subplan 122_3 for subquery SELECT intermediate_result.x, inte DEBUG: Plan 122 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('122_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y DEBUG: Router planner cannot handle multi-shard select queries x | y | y ----+---+--- +--------------------------------------------------------------------- 2 | 2 | 2 (1 row) @@ -742,7 +742,7 @@ DEBUG: Plan 130 query after replacing subqueries and CTEs: SELECT x, y FROM (SE DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -751,7 +751,7 @@ DEBUG: Plan is router executable SELECT * FROM test a WHERE x IN (SELECT x FROM test b WHERE y = 1 UNION SELECT x FROM test c WHERE y = 2) ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -771,7 +771,7 @@ DEBUG: generating subplan 136_1 for subquery SELECT b.x FROM recursive_union.te DEBUG: Plan 136 query after replacing subqueries and CTEs: SELECT x, y FROM recursive_union.test a WHERE (NOT (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('136_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer)))) ORDER BY x, y DEBUG: Router planner cannot handle multi-shard select queries x | y ----+--- +--------------------------------------------------------------------- (0 rows) -- subquery union in WHERE clause without parition column equality is recursively planned @@ -787,7 +787,7 @@ 
DEBUG: generating subplan 140_3 for subquery SELECT intermediate_result.x FROM DEBUG: Plan 140 query after replacing subqueries and CTEs: SELECT x, y FROM recursive_union.test a WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('140_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer))) ORDER BY x, y DEBUG: Router planner cannot handle multi-shard select queries x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -821,7 +821,7 @@ DEBUG: Plan 146 query after replacing subqueries and CTEs: SELECT x, y FROM (SE DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- 2 | 2 1 | 1 (2 rows) @@ -840,7 +840,7 @@ DEBUG: Plan 150 query after replacing subqueries and CTEs: SELECT count(DISTINC DEBUG: Creating router plan DEBUG: Plan is router executable count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -857,7 +857,7 @@ DEBUG: Plan 154 query after replacing subqueries and CTEs: SELECT count(DISTINC DEBUG: Creating router plan DEBUG: Plan is router executable count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -875,7 +875,7 @@ DEBUG: Plan 158 query after replacing subqueries and CTEs: SELECT avg(DISTINCT DEBUG: Creating router plan DEBUG: Plan is router executable avg ------------------------- +--------------------------------------------------------------------- 1.50000000000000000000 (1 row) @@ -927,7 +927,7 @@ DEBUG: Plan 164 query after replacing subqueries and CTEs: SELECT intermediate_ DEBUG: Creating router plan DEBUG: Plan is router executable x ---- +--------------------------------------------------------------------- (0 rows) -- repartition is recursively planned with the set operation @@ -969,7 +969,7 @@ DEBUG: Plan 167 query after replacing subqueries and CTEs: SELECT intermediate_ DEBUG: Creating router plan DEBUG: Plan is router executable x ---- +--------------------------------------------------------------------- 2 1 (2 rows) @@ -990,7 +990,7 @@ DEBUG: Plan 170 query after replacing subqueries and CTEs: SELECT y FROM (SELEC DEBUG: Creating router plan DEBUG: Plan is router executable y ---- +--------------------------------------------------------------------- 2 1 (2 rows) @@ -1000,7 +1000,7 @@ CREATE VIEW set_view_pushdown AS (SELECT x FROM test) UNION (SELECT x FROM test) SELECT * FROM set_view_pushdown ORDER BY 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries x ---- +--------------------------------------------------------------------- 2 1 (2 rows) @@ -1019,7 +1019,7 @@ DEBUG: generating subplan 175_3 for subquery SELECT intermediate_result.x, inte DEBUG: Plan 175 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT u.x, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('175_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, test.y) set_view_recursive_second ORDER BY x, y DEBUG: Router planner cannot handle multi-shard select queries x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -1049,7 +1049,7 @@ DEBUG: Plan 179 query after replacing subqueries and CTEs: (SELECT intermediate DEBUG: Creating router plan DEBUG: Plan is router executable x ---- 
+--------------------------------------------------------------------- (0 rows) -- queries on non-colocated tables that would push down if they were not colocated are recursivelu planned @@ -1066,7 +1066,7 @@ DEBUG: Plan 188 query after replacing subqueries and CTEs: SELECT x, y FROM (SE DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -1084,7 +1084,7 @@ DEBUG: Plan 192 query after replacing subqueries and CTEs: SELECT x, y FROM (SE DEBUG: Creating router plan DEBUG: Plan is router executable x | y ----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) diff --git a/src/test/regress/expected/single_hash_repartition_join.out b/src/test/regress/expected/single_hash_repartition_join.out index 2f6e88b88..2f239e381 100644 --- a/src/test/regress/expected/single_hash_repartition_join.out +++ b/src/test/regress/expected/single_hash_repartition_join.out @@ -10,19 +10,19 @@ CREATE TABLE ref_table (id int, sum int, avg float); SET citus.shard_count TO 4; SELECT create_distributed_table('single_hash_repartition_first', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('single_hash_repartition_second', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('ref_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/sql_procedure.out b/src/test/regress/expected/sql_procedure.out index cdec3e3b7..513bbaa14 100644 --- a/src/test/regress/expected/sql_procedure.out +++ b/src/test/regress/expected/sql_procedure.out @@ -10,7 +10,7 @@ CREATE TABLE test_table(id integer , org_id integer); CREATE UNIQUE INDEX idx_table ON test_table(id, org_id); SELECT create_distributed_table('test_table','id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -23,7 +23,7 @@ $$; CALL test_procedure_delete_insert(2,3); SELECT * FROM test_table ORDER BY 1, 2; id | org_id -----+-------- +--------------------------------------------------------------------- 2 | 3 (1 row) @@ -41,7 +41,7 @@ ERROR: COMMIT is not allowed in a SQL function CONTEXT: SQL function "test_procedure_commit" during startup SELECT * FROM test_table ORDER BY 1, 2; id | org_id -----+-------- +--------------------------------------------------------------------- 2 | 3 (1 row) @@ -56,7 +56,7 @@ ERROR: ROLLBACK is not allowed in a SQL function CONTEXT: SQL function "test_procedure_rollback" during startup SELECT * FROM test_table ORDER BY 1, 2; id | org_id -----+-------- +--------------------------------------------------------------------- 2 | 3 (1 row) @@ -74,7 +74,7 @@ $$; CALL test_procedure_delete_insert(2,3); SELECT * FROM test_table ORDER BY 1, 2; id | org_id -----+-------- +--------------------------------------------------------------------- 2 | 3 (1 row) @@ -95,7 +95,7 @@ SQL statement "INSERT INTO test_table VALUES (tt_id, tt_org_id)" PL/pgSQL function test_procedure_modify_insert(integer,integer) line 5 at SQL statement SELECT * FROM test_table ORDER BY 1, 2; id | org_id -----+-------- +--------------------------------------------------------------------- 2 
| 12 (1 row) @@ -115,7 +115,7 @@ SQL statement "INSERT INTO test_table VALUES (tt_id, tt_org_id)" PL/pgSQL function test_procedure_modify_insert_commit(integer,integer) line 5 at SQL statement SELECT * FROM test_table ORDER BY 1, 2; id | org_id -----+-------- +--------------------------------------------------------------------- 2 | 30 (1 row) @@ -131,7 +131,7 @@ $$; CALL test_procedure_rollback(2,5); SELECT * FROM test_table ORDER BY 1, 2; id | org_id -----+-------- +--------------------------------------------------------------------- (0 rows) -- rollback is successfull when insert is on multiple rows @@ -146,7 +146,7 @@ $$; CALL test_procedure_rollback_2(12, 15); SELECT * FROM test_table ORDER BY 1, 2; id | org_id -----+-------- +--------------------------------------------------------------------- (0 rows) -- delete is rolled back, update is committed @@ -162,7 +162,7 @@ INSERT INTO test_table VALUES (1, 1), (2, 2); CALL test_procedure_rollback_3(2,15); SELECT * FROM test_table ORDER BY 1, 2; id | org_id -----+-------- +--------------------------------------------------------------------- 1 | 1 2 | 15 (2 rows) @@ -191,14 +191,14 @@ END; $$; SELECT * from test_table; id | org_id -----+-------- +--------------------------------------------------------------------- (0 rows) call test_procedure(1,1); call test_procedure(20, 20); SELECT * from test_table; id | org_id -----+-------- +--------------------------------------------------------------------- (0 rows) \set VERBOSITY terse diff --git a/src/test/regress/expected/ssl_by_default.out b/src/test/regress/expected/ssl_by_default.out index e05686368..50b7348ce 100644 --- a/src/test/regress/expected/ssl_by_default.out +++ b/src/test/regress/expected/ssl_by_default.out @@ -7,13 +7,13 @@ SHOW ssl_ciphers \gset SELECT :'ssl_ciphers' != 'none' AS hasssl; hasssl --------- +--------------------------------------------------------------------- t (1 row) SHOW ssl; ssl ------ +--------------------------------------------------------------------- on (1 row) @@ -21,14 +21,14 @@ SELECT run_command_on_workers($$ SHOW ssl; $$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,on) (localhost,57638,t,on) (2 rows) SHOW citus.node_conninfo; citus.node_conninfo ---------------------- +--------------------------------------------------------------------- sslmode=require (1 row) @@ -36,7 +36,7 @@ SELECT run_command_on_workers($$ SHOW citus.node_conninfo; $$); run_command_on_workers -------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,sslmode=require) (localhost,57638,t,sslmode=require) (2 rows) @@ -45,14 +45,14 @@ SELECT run_command_on_workers($$ SELECT ssl FROM pg_stat_ssl WHERE pid = pg_backend_pid(); $$); run_command_on_workers ------------------------- +--------------------------------------------------------------------- (localhost,57637,t,t) (localhost,57638,t,t) (2 rows) SHOW ssl_ciphers; ssl_ciphers ----------------------------- +--------------------------------------------------------------------- TLSv1.2+HIGH:!aNULL:!eNULL (1 row) @@ -60,7 +60,7 @@ SELECT run_command_on_workers($$ SHOW ssl_ciphers; $$); run_command_on_workers ------------------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,TLSv1.2+HIGH:!aNULL:!eNULL) (localhost,57638,t,TLSv1.2+HIGH:!aNULL:!eNULL) (2 rows) diff --git 
a/src/test/regress/expected/subqueries_deep.out b/src/test/regress/expected/subqueries_deep.out index e4a3b2ba2..ef6c09251 100644 --- a/src/test/regress/expected/subqueries_deep.out +++ b/src/test/regress/expected/subqueries_deep.out @@ -39,7 +39,7 @@ DEBUG: push down of limit count: 3 DEBUG: generating subplan 1_3 for subquery SELECT users_table.user_id FROM public.users_table, (SELECT intermediate_result.avg_val FROM read_intermediate_result('1_2'::text, 'binary'::citus_copy_format) intermediate_result(avg_val numeric)) baz WHERE (baz.avg_val OPERATOR(pg_catalog.<) (users_table.user_id)::numeric) ORDER BY users_table.user_id LIMIT 3 DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('1_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) sub1 ORDER BY user_id DESC user_id ---------- +--------------------------------------------------------------------- 3 (1 row) @@ -80,7 +80,7 @@ DEBUG: generating subplan 5_3 for subquery SELECT DISTINCT ON ((e.event_type):: DEBUG: generating subplan 5_4 for subquery SELECT t.event, array_agg(t.user_id) AS events_table FROM (SELECT intermediate_result.event, intermediate_result."time", intermediate_result.user_id FROM read_intermediate_result('5_3'::text, 'binary'::citus_copy_format) intermediate_result(event text, "time" timestamp without time zone, user_id integer)) t, public.users_table WHERE (users_table.value_1 OPERATOR(pg_catalog.=) (t.event)::integer) GROUP BY t.event DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT event, array_length(events_table, 1) AS array_length FROM (SELECT intermediate_result.event, intermediate_result.events_table FROM read_intermediate_result('5_4'::text, 'binary'::citus_copy_format) intermediate_result(event text, events_table integer[])) q ORDER BY (array_length(events_table, 1)) DESC, event event | array_length --------+-------------- +--------------------------------------------------------------------- 3 | 26 4 | 21 2 | 18 @@ -138,7 +138,7 @@ DEBUG: generating subplan 10_5 for subquery SELECT min(users_table.value_1) AS DEBUG: generating subplan 10_6 for subquery SELECT avg(level_6.min) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('10_5'::text, 'binary'::citus_copy_format) intermediate_result(min integer)) level_6, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_6.min) GROUP BY users_table.value_1 DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('10_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -198,7 +198,7 @@ DEBUG: generating subplan 17_6 for subquery SELECT avg(level_6.min) AS avg FROM DEBUG: generating subplan 17_7 for subquery SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('17_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.count FROM read_intermediate_result('17_7'::text, 'binary'::citus_copy_format) intermediate_result(count bigint))) user_id | time | value_1 | value_2 | value_3 | value_4 
----------+------+---------+---------+---------+--------- +--------------------------------------------------------------------- (0 rows) SET client_min_messages TO DEFAULT; diff --git a/src/test/regress/expected/subquery_and_cte.out b/src/test/regress/expected/subquery_and_cte.out index 1699905ff..64d48aa19 100644 --- a/src/test/regress/expected/subquery_and_cte.out +++ b/src/test/regress/expected/subquery_and_cte.out @@ -6,7 +6,7 @@ CREATE TABLE users_table_local AS SELECT * FROM users_table; CREATE TABLE dist_table (id int, value int); SELECT create_distributed_table('dist_table', 'id', colocate_with => 'users_table'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -48,7 +48,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 3_2 for subquery SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('3_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id) count -------- +--------------------------------------------------------------------- 1644 (1 row) @@ -63,7 +63,7 @@ FROM cte1, cte2 ORDER BY cte1.user_id, cte1.value_1, cte2.user_id, cte2.event_type LIMIT 5; user_id | value_1 | user_id | event_type ----------+---------+---------+------------ +--------------------------------------------------------------------- 1 | 1 | 1 | 0 1 | 1 | 1 | 0 1 | 1 | 1 | 1 @@ -85,7 +85,7 @@ DEBUG: generating subplan 8_1 for CTE cte1: SELECT user_id, "time", value_1, va DEBUG: generating subplan 8_2 for CTE cte2: SELECT user_id, "time", event_type, value_2, value_3, value_4 FROM subquery_and_ctes.events_table WHERE (user_id OPERATOR(pg_catalog.=) 6) DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT cte1.user_id, cte1.value_1, cte2.user_id, cte2.user_id FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) cte1, (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('8_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint)) cte2 ORDER BY cte1.user_id, cte1.value_1, cte2.user_id, cte2.event_type LIMIT 5 user_id | value_1 | user_id | user_id ----------+---------+---------+--------- +--------------------------------------------------------------------- 1 | 1 | 6 | 6 1 | 1 | 6 | 6 1 | 1 | 6 | 6 @@ -158,7 +158,7 @@ DEBUG: push down of limit count: 5 
DEBUG: generating subplan 17_2 for subquery SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('17_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, subquery_and_ctes.events_table WHERE ((foo.user_id OPERATOR(pg_catalog.=) cte.user_id) AND (events_table.user_id OPERATOR(pg_catalog.=) cte.user_id)) count -------- +--------------------------------------------------------------------- 30608 (1 row) @@ -188,7 +188,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 21_2 for subquery SELECT DISTINCT value_2 FROM subquery_and_ctes.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) ORDER BY value_2 LIMIT 5 DEBUG: Plan 21 query after replacing subqueries and CTEs: SELECT DISTINCT cte.user_id FROM subquery_and_ctes.users_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('21_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte WHERE ((users_table.user_id OPERATOR(pg_catalog.=) cte.user_id) AND (users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('21_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))) ORDER BY cte.user_id DESC user_id ---------- +--------------------------------------------------------------------- 4 3 2 @@ -218,7 +218,7 @@ DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT dist_cte.user_ DEBUG: generating subplan 25_2 for subquery SELECT DISTINCT user_id FROM subquery_and_ctes.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) DEBUG: Plan 25 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('25_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('25_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) ORDER BY user_id DESC user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -247,7 +247,7 @@ ORDER BY 1 DESC; DEBUG: generating subplan 29_1 for CTE cte: SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: Plan 29 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT cte.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('29_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte ORDER BY cte.user_id DESC) foo ORDER BY user_id DESC user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -286,7 +286,7 @@ ORDER BY 1 DESC; DEBUG: generating subplan 31_1 for 
CTE cte: SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT bar.user_id FROM (SELECT cte.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte ORDER BY cte.user_id DESC) foo, (SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) ORDER BY bar.user_id DESC user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -341,7 +341,7 @@ DEBUG: push down of limit count: 2 DEBUG: generating subplan 33_3 for subquery SELECT users_table.user_id, some_events.event_type FROM subquery_and_ctes.users_table, (SELECT cte.event_type, cte.user_id FROM (SELECT intermediate_result.event_type, intermediate_result.user_id FROM read_intermediate_result('33_2'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer, user_id integer)) cte ORDER BY cte.event_type DESC) some_events WHERE ((users_table.user_id OPERATOR(pg_catalog.=) some_events.user_id) AND (some_events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY some_events.event_type, users_table.user_id LIMIT 2 DEBUG: Plan 33 query after replacing subqueries and CTEs: SELECT DISTINCT bar.user_id FROM (SELECT cte.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('33_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte ORDER BY cte.user_id DESC) foo, (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('33_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) ORDER BY bar.user_id DESC LIMIT 5 user_id ---------- +--------------------------------------------------------------------- 1 (1 row) @@ -382,7 +382,7 @@ DEBUG: generating subplan 37_3 for subquery SELECT DISTINCT cte.user_id FROM su DEBUG: Plan 37 query after replacing subqueries and CTEs: SELECT foo.user_id, events_table.user_id, events_table."time", events_table.event_type, events_table.value_2, events_table.value_3, events_table.value_4 FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('37_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, subquery_and_ctes.events_table WHERE (foo.user_id OPERATOR(pg_catalog.=) events_table.value_2) ORDER BY events_table."time" DESC, events_table.user_id DESC, foo.user_id DESC LIMIT 5 DEBUG: push down of limit count: 5 user_id | user_id | time | event_type | value_2 | value_3 | value_4 ----------+---------+---------------------------------+------------+---------+---------+--------- +--------------------------------------------------------------------- 4 | 1 | Thu Nov 23 21:54:46.924477 2017 | 6 | 4 | 5 | 2 | 4 | Thu Nov 23 18:10:21.338399 2017 | 1 | 2 | 4 | 4 | 3 | Thu Nov 23 18:08:26.550729 2017 | 2 | 4 | 3 | @@ -433,7 +433,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 42_2 for subquery SELECT 
DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('42_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id) count -------- +--------------------------------------------------------------------- 432 (1 row) @@ -489,7 +489,7 @@ DEBUG: generating subplan 48_3 for subquery SELECT count(*) AS cnt FROM (SELECT DEBUG: Plan 48 query after replacing subqueries and CTEs: SELECT foo.cnt, users_table.user_id, users_table."time", users_table.value_1, users_table.value_2, users_table.value_3, users_table.value_4 FROM (SELECT intermediate_result.cnt FROM read_intermediate_result('48_3'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint)) foo, subquery_and_ctes.users_table WHERE (foo.cnt OPERATOR(pg_catalog.>) users_table.value_2) ORDER BY users_table."time" DESC, foo.cnt DESC, users_table.user_id DESC, users_table.value_1 DESC LIMIT 5 DEBUG: push down of limit count: 5 cnt | user_id | time | value_1 | value_2 | value_3 | value_4 ------+---------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 432 | 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | 432 | 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | 432 | 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 | diff --git a/src/test/regress/expected/subquery_basics.out b/src/test/regress/expected/subquery_basics.out index 2062ceaf3..cc56a2f4c 100644 --- a/src/test/regress/expected/subquery_basics.out +++ b/src/test/regress/expected/subquery_basics.out @@ -20,7 +20,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 1_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('1_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo ORDER BY user_id DESC user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -46,7 +46,7 @@ FROM DEBUG: generating subplan 3_1 for subquery SELECT DISTINCT users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.value_1 DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT value_1 FROM (SELECT intermediate_result.value_1 FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) foo ORDER BY value_1 DESC value_1 ---------- +--------------------------------------------------------------------- 5 4 3 @@ -74,7 +74,7 @@ 
FROM DEBUG: generating subplan 5_1 for subquery SELECT users_table.value_2, avg(users_table.value_1) AS avg FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) GROUP BY users_table.value_2 ORDER BY users_table.value_2 DESC DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT value_2, avg FROM (SELECT intermediate_result.value_2, intermediate_result.avg FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, avg numeric)) foo ORDER BY avg DESC, value_2 value_2 | avg ----------+-------------------- +--------------------------------------------------------------------- 4 | 2.8453608247422680 2 | 2.6833855799373041 5 | 2.6238938053097345 @@ -105,7 +105,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 7_1 for subquery SELECT value_2 FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) ORDER BY value_2 DESC LIMIT 5 DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT foo.value_2, bar.i FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT i.i FROM generate_series(0, 100) i(i)) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.i) ORDER BY bar.i DESC, foo.value_2 value_2 | i ----------+--- +--------------------------------------------------------------------- 5 | 5 5 | 5 5 | 5 @@ -127,7 +127,7 @@ FROM DEBUG: generating subplan 9_1 for subquery SELECT count(*) AS count FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT count FROM (SELECT intermediate_result.count FROM read_intermediate_result('9_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) foo count -------- +--------------------------------------------------------------------- 87 (1 row) @@ -147,7 +147,7 @@ FROM DEBUG: generating subplan 11_1 for subquery SELECT sum(user_id) AS sum FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) HAVING (min(value_2) OPERATOR(pg_catalog.>) 2) DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT sum FROM (SELECT intermediate_result.sum FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint)) foo sum ------ +--------------------------------------------------------------------- (0 rows) -- multiple subqueries in FROM clause should be replaced @@ -181,7 +181,7 @@ DEBUG: generating subplan 13_1 for subquery SELECT users_table.value_2 FROM pub DEBUG: generating subplan 13_2 for subquery SELECT users_table.value_3 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) GROUP BY users_table.value_3 ORDER BY users_table.value_3 DESC DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT foo.value_2, bar.value_3 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('13_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT intermediate_result.value_3 FROM read_intermediate_result('13_2'::text, 'binary'::citus_copy_format) intermediate_result(value_3 double precision)) bar WHERE ((foo.value_2)::double precision 
OPERATOR(pg_catalog.=) bar.value_3) ORDER BY bar.value_3 DESC, foo.value_2 value_2 | value_3 ----------+--------- +--------------------------------------------------------------------- 5 | 5 4 | 4 3 | 3 @@ -220,7 +220,7 @@ DEBUG: generating subplan 16_1 for subquery SELECT users_table.value_2 FROM pub DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT DISTINCT ON (bar.citus) bar.citus, foo.postgres, (bar.citus OPERATOR(pg_catalog.+) 1) AS c1, (foo.postgres OPERATOR(pg_catalog.-) 1) AS p1 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo(postgres), (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) ORDER BY users_table.user_id DESC) bar(citus) WHERE (foo.postgres OPERATOR(pg_catalog.=) bar.citus) ORDER BY bar.citus DESC, foo.postgres DESC LIMIT 3 DEBUG: push down of limit count: 3 citus | postgres | c1 | p1 --------+----------+----+---- +--------------------------------------------------------------------- 5 | 5 | 6 | 4 4 | 4 | 5 | 3 3 | 3 | 4 | 2 @@ -257,7 +257,7 @@ DEBUG: generating subplan 18_1 for subquery SELECT users_table.value_2 FROM pub DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT foo.value_2, bar.user_id FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('18_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) ORDER BY users_table.user_id DESC) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) ORDER BY bar.user_id DESC, foo.value_2 DESC LIMIT 3 DEBUG: push down of limit count: 3 value_2 | user_id ----------+--------- +--------------------------------------------------------------------- 5 | 5 5 | 5 5 | 5 @@ -273,7 +273,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 20_1 for subquery SELECT DISTINCT value_2 FROM public.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) ORDER BY value_2 LIMIT 5 DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) ORDER BY user_id DESC user_id ---------- +--------------------------------------------------------------------- 4 3 2 @@ -303,7 +303,7 @@ DEBUG: generating subplan 22_1 for subquery SELECT user_id, event_type FROM pub DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM (SELECT users_table.user_id FROM public.users_table, (SELECT bar.event_type, bar.user_id FROM (SELECT foo.event_type, users_table_1.user_id FROM public.users_table users_table_1, (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('22_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) users_table_1.user_id)) bar) baz WHERE (baz.user_id OPERATOR(pg_catalog.=) users_table.user_id)) sub1 ORDER BY user_id DESC LIMIT 3 DEBUG: 
push down of limit count: 3 user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -339,7 +339,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 24_1 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 1) AND (events_table.event_type OPERATOR(pg_catalog.<=) 3) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))) AND (NOT (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 3) AND (events_table.event_type OPERATOR(pg_catalog.<=) 4) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))))) LIMIT 5 DEBUG: Plan 24 query after replacing subqueries and CTEs: SELECT user_id, array_length(events_table, 1) AS array_length FROM (SELECT t.user_id, array_agg(t.event ORDER BY t."time") AS events_table FROM (SELECT u.user_id, (e.event_type)::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('24_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) t GROUP BY t.user_id) q ORDER BY (array_length(events_table, 1)) DESC, user_id user_id | array_length ----------+-------------- +--------------------------------------------------------------------- 5 | 364 (1 row) @@ -417,7 +417,7 @@ DEBUG: push down of limit count: 10 DEBUG: generating subplan 26_1 for subquery SELECT user_id, count(*) AS count_pay FROM public.users_table WHERE ((user_id OPERATOR(pg_catalog.>=) 1) AND (user_id OPERATOR(pg_catalog.<=) 3) AND (value_1 OPERATOR(pg_catalog.>) 3) AND (value_1 OPERATOR(pg_catalog.<) 5)) GROUP BY user_id HAVING (count(*) OPERATOR(pg_catalog.>) 1) LIMIT 10 DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT subquery_top.user_id FROM (SELECT subquery_1.user_id, subquery_2.count_pay FROM ((SELECT users_table_1.user_id, 'action=>1'::text AS event, events_table."time" FROM public.users_table users_table_1, public.events_table WHERE ((users_table_1.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.>=) 1) AND (users_table_1.user_id OPERATOR(pg_catalog.<=) 3) AND (events_table.event_type OPERATOR(pg_catalog.>) 1) AND (events_table.event_type OPERATOR(pg_catalog.<) 3)) UNION SELECT users_table_1.user_id, 'action=>2'::text AS event, events_table."time" FROM public.users_table users_table_1, public.events_table WHERE ((users_table_1.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.>=) 1) AND (users_table_1.user_id OPERATOR(pg_catalog.<=) 3) AND (events_table.event_type OPERATOR(pg_catalog.>) 2) AND (events_table.event_type OPERATOR(pg_catalog.<) 4))) subquery_1 LEFT JOIN (SELECT intermediate_result.user_id, intermediate_result.count_pay FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, count_pay bigint)) subquery_2 ON ((subquery_1.user_id OPERATOR(pg_catalog.=) subquery_2.user_id))) GROUP BY 
subquery_1.user_id, subquery_2.count_pay) subquery_top GROUP BY subquery_top.count_pay, subquery_top.user_id)) GROUP BY user_id HAVING ((count(*) OPERATOR(pg_catalog.>) 1) AND (sum(value_2) OPERATOR(pg_catalog.>) 29)) ORDER BY user_id user_id ---------- +--------------------------------------------------------------------- 2 3 (2 rows) @@ -441,6 +441,6 @@ DEBUG: generating subplan 28_1 for subquery SELECT users_table.user_id FROM pub DEBUG: generating subplan 28_2 for subquery SELECT count(*) AS count FROM public.users_table GROUP BY user_id DEBUG: Plan 28 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('28_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.count FROM read_intermediate_result('28_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint))) user_id ---------- +--------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/subquery_complex_target_list.out b/src/test/regress/expected/subquery_complex_target_list.out index 582d98506..7ffb27f5c 100644 --- a/src/test/regress/expected/subquery_complex_target_list.out +++ b/src/test/regress/expected/subquery_complex_target_list.out @@ -20,7 +20,7 @@ DEBUG: push down of limit count: 20 DEBUG: generating subplan 1_1 for subquery SELECT user_id FROM public.users_table GROUP BY user_id ORDER BY (count(*)) DESC LIMIT 20 DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT event_type, count(DISTINCT value_2) AS count FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('1_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) GROUP BY event_type ORDER BY event_type DESC, (count(DISTINCT value_2)) DESC LIMIT 3 event_type | count -------------+------- +--------------------------------------------------------------------- 6 | 1 5 | 3 4 | 6 @@ -36,7 +36,7 @@ ORDER BY 1 DESC, 2 DESC, 3 DESC LIMIT 5; DEBUG: generating subplan 3_1 for subquery SELECT user_id, value_1, value_2 FROM public.users_table OFFSET 0 DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT x, y, value_2 FROM (SELECT intermediate_result.user_id, intermediate_result.value_1, intermediate_result.value_2 FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer, value_2 integer)) foo(x, y, value_2) ORDER BY x DESC, y DESC, value_2 DESC LIMIT 5 x | y | value_2 ----+---+--------- +--------------------------------------------------------------------- 6 | 5 | 2 6 | 5 | 0 6 | 3 | 2 @@ -76,7 +76,7 @@ DEBUG: push down of limit count: 4 DEBUG: generating subplan 5_4 for subquery SELECT user_id, sum(DISTINCT value_2) AS sum FROM public.users_table GROUP BY user_id ORDER BY user_id DESC LIMIT 4 DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT DISTINCT ON (foo.avg) foo.avg, bar.cnt_1, baz.cnt_2, bat.sum FROM (SELECT intermediate_result.avg FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) foo, (SELECT intermediate_result.cnt_1 FROM read_intermediate_result('5_2'::text, 'binary'::citus_copy_format) intermediate_result(cnt_1 bigint)) bar, (SELECT intermediate_result.cnt_2 FROM read_intermediate_result('5_3'::text, 'binary'::citus_copy_format) intermediate_result(cnt_2 bigint)) baz, 
(SELECT intermediate_result.user_id, intermediate_result.sum FROM read_intermediate_result('5_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, sum bigint)) bat, public.events_table WHERE ((foo.avg OPERATOR(pg_catalog.<>) (bar.cnt_1)::numeric) AND (baz.cnt_2 OPERATOR(pg_catalog.=) events_table.event_type)) ORDER BY foo.avg DESC avg | cnt_1 | cnt_2 | sum ---------------------+-------+-------+----- +--------------------------------------------------------------------- 3.5000000000000000 | 6 | 6 | 10 (1 row) @@ -119,7 +119,7 @@ DEBUG: push down of limit count: 3 DEBUG: generating subplan 10_3 for subquery SELECT min("time") AS min, max("time") AS max, count("time") AS count, count(*) FILTER (WHERE (user_id OPERATOR(pg_catalog.=) 3)) AS cnt_with_filter, count(*) FILTER (WHERE ((user_id)::text OPERATOR(pg_catalog.~~) '%3%'::text)) AS cnt_with_filter_2 FROM public.users_table ORDER BY (min("time")) DESC LIMIT 3 DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT foo."?column?", foo."?column?_1" AS "?column?", foo.sum, foo.count, foo.avg, bar."?column?", bar."?column?_1" AS "?column?", bar.sum, bar.count, bar.avg, baz.min, baz.max, baz.count, baz.cnt_with_filter, baz.cnt_with_filter_2 FROM (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?", intermediate_result.sum, intermediate_result.count, intermediate_result.avg FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer, sum bigint, count double precision, avg bigint)) foo("?column?", "?column?_1", sum, count, avg), (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?", intermediate_result.sum, intermediate_result.count, intermediate_result.avg FROM read_intermediate_result('10_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" double precision, "?column?_1" double precision, sum double precision, count bigint, avg double precision)) bar("?column?", "?column?_1", sum, count, avg), (SELECT intermediate_result.min, intermediate_result.max, intermediate_result.count, intermediate_result.cnt_with_filter, intermediate_result.cnt_with_filter_2 FROM read_intermediate_result('10_3'::text, 'binary'::citus_copy_format) intermediate_result(min timestamp without time zone, max timestamp without time zone, count bigint, cnt_with_filter bigint, cnt_with_filter_2 bigint)) baz ORDER BY foo."?column?" DESC ?column? | ?column? | sum | count | avg | ?column? | ?column? 
| sum | count | avg | min | max | count | cnt_with_filter | cnt_with_filter_2 -----------+----------+-----+-------+-----+----------+----------+-----+-------+-----------------+---------------------------------+---------------------------------+-------+-----------------+------------------- +--------------------------------------------------------------------- 2 | 3 | 376 | 101 | 4 | 0 | 2.5 | 273 | 101 | 2.7029702970297 | Wed Nov 22 18:19:49.944985 2017 | Thu Nov 23 17:30:34.635085 2017 | 101 | 17 | 17 (1 row) @@ -174,7 +174,7 @@ DEBUG: push down of limit count: 25 DEBUG: generating subplan 14_4 for subquery SELECT COALESCE(value_3, (20)::double precision) AS count_pay FROM public.users_table ORDER BY COALESCE(value_3, (20)::double precision) OFFSET 20 LIMIT 5 DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT DISTINCT ON (foo.avg) foo.avg, bar.cnt_1, baz.cnt_2, baz.cnt_3, baz.sum_1, baz.l_year, baz.pos, tar.count_pay FROM (SELECT intermediate_result.avg FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) foo, (SELECT intermediate_result.cnt_1 FROM read_intermediate_result('14_2'::text, 'binary'::citus_copy_format) intermediate_result(cnt_1 double precision)) bar, (SELECT intermediate_result.cnt_2, intermediate_result.cnt_3, intermediate_result.sum_1, intermediate_result.l_year, intermediate_result.pos FROM read_intermediate_result('14_3'::text, 'binary'::citus_copy_format) intermediate_result(cnt_2 numeric, cnt_3 numeric, sum_1 bigint, l_year double precision, pos integer)) baz, (SELECT intermediate_result.count_pay FROM read_intermediate_result('14_4'::text, 'binary'::citus_copy_format) intermediate_result(count_pay double precision)) tar, public.events_table WHERE (((foo.avg)::double precision OPERATOR(pg_catalog.<>) bar.cnt_1) AND (baz.cnt_2 OPERATOR(pg_catalog.<>) (events_table.event_type)::numeric)) ORDER BY foo.avg DESC avg | cnt_1 | cnt_2 | cnt_3 | sum_1 | l_year | pos | count_pay --------------------------+------------------+--------------------+-------+-------+--------+-----+----------- +--------------------------------------------------------------------- 30.14666771571734992301 | 3308.14619815793 | 2.5000000000000000 | | 31 | 2017 | 0 | 1 (1 row) @@ -196,7 +196,7 @@ DEBUG: generating subplan 19_1 for subquery SELECT avg(value_3) AS avg FROM pub DEBUG: Plan 19 query after replacing subqueries and CTEs: SELECT DISTINCT ON (foo.avg) foo.avg, bar.avg2 FROM (SELECT intermediate_result.avg FROM read_intermediate_result('19_1'::text, 'binary'::citus_copy_format) intermediate_result(avg double precision)) foo, (SELECT avg(users_table.value_3) AS avg2 FROM public.users_table GROUP BY users_table.value_1, users_table.value_2, users_table.user_id) bar WHERE (foo.avg OPERATOR(pg_catalog.=) bar.avg2) ORDER BY foo.avg DESC, bar.avg2 DESC LIMIT 3 DEBUG: push down of limit count: 3 avg | avg2 ------+------ +--------------------------------------------------------------------- 5 | 5 4 | 4 3.5 | 3.5 @@ -255,7 +255,7 @@ DEBUG: generating subplan 21_2 for subquery SELECT value_2 FROM public.users_ta DEBUG: generating subplan 21_3 for subquery SELECT avg(user_id) AS avg FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.>) 2) GROUP BY value_2 HAVING (sum(value_1) OPERATOR(pg_catalog.>) 10) ORDER BY ((sum(value_3) OPERATOR(pg_catalog.-) (avg(value_1))::double precision) OPERATOR(pg_catalog.-) (COALESCE((array_upper(ARRAY[max(user_id)], 1) OPERATOR(pg_catalog.*) 5), 0))::double precision) DESC LIMIT 3 DEBUG: Plan 21 query 
after replacing subqueries and CTEs: SELECT a.user_id, b.value_2, c.avg FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('21_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) a, (SELECT intermediate_result.value_2 FROM read_intermediate_result('21_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) b, (SELECT intermediate_result.avg FROM read_intermediate_result('21_3'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) c WHERE (b.value_2 OPERATOR(pg_catalog.<>) a.user_id) ORDER BY c.avg DESC, b.value_2 DESC, a.user_id DESC LIMIT 5 user_id | value_2 | avg ----------+---------+-------------------- +--------------------------------------------------------------------- 4 | 5 | 4.1666666666666667 3 | 5 | 4.1666666666666667 5 | 4 | 4.1666666666666667 @@ -292,7 +292,7 @@ DEBUG: generating subplan 25_1 for subquery SELECT DISTINCT users_table.user_id DEBUG: generating subplan 25_2 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND false AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 DEBUG: Plan 25 query after replacing subqueries and CTEs: SELECT bar.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('25_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('25_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.>) bar.user_id) ORDER BY bar.user_id DESC user_id ---------- +--------------------------------------------------------------------- (0 rows) -- window functions tests, both is recursively planned @@ -338,7 +338,7 @@ DEBUG: generating subplan 28_1 for subquery SELECT user_id, "time", event_type, DEBUG: generating subplan 28_2 for subquery SELECT user_id, "time", event_type, value_2, value_3, value_4, rank() OVER my_win AS rnk FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.=) 3) WINDOW my_win AS (PARTITION BY event_type ORDER BY "time" DESC) DEBUG: Plan 28 query after replacing subqueries and CTEs: SELECT foo.user_id, foo."time", foo.rnk, bar.user_id, bar."time", bar.rnk FROM (SELECT foo_1.user_id, foo_1."time", foo_1.rnk FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4, intermediate_result.rnk FROM read_intermediate_result('28_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint, rnk bigint)) foo_1 ORDER BY foo_1.rnk DESC, foo_1.user_id DESC, foo_1."time" DESC) foo, (SELECT foo_1.user_id, foo_1."time", foo_1.rnk FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4, intermediate_result.rnk FROM read_intermediate_result('28_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint, rnk bigint)) foo_1 ORDER BY foo_1.rnk DESC, foo_1.user_id DESC, foo_1."time" DESC) bar WHERE 
(foo.user_id OPERATOR(pg_catalog.=) bar.user_id) ORDER BY foo.rnk DESC, foo."time" DESC, bar."time" LIMIT 5 user_id | time | rnk | user_id | time | rnk ----------+------+-----+---------+------+----- +--------------------------------------------------------------------- (0 rows) -- cursor test @@ -359,25 +359,25 @@ DEBUG: generating subplan 31_1 for subquery SELECT user_id FROM public.users_ta DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT event_type, count(DISTINCT value_2) AS count FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) GROUP BY event_type ORDER BY event_type DESC, (count(DISTINCT value_2)) DESC LIMIT 3 FETCH 1 FROM recursive_subquery; event_type | count -------------+------- +--------------------------------------------------------------------- 6 | 1 (1 row) FETCH 1 FROM recursive_subquery; event_type | count -------------+------- +--------------------------------------------------------------------- 5 | 3 (1 row) FETCH 1 FROM recursive_subquery; event_type | count -------------+------- +--------------------------------------------------------------------- 4 | 6 (1 row) FETCH 1 FROM recursive_subquery; event_type | count -------------+------- +--------------------------------------------------------------------- (0 rows) COMMIT; @@ -399,7 +399,7 @@ DEBUG: generating subplan 33_1 for subquery SELECT user_id FROM public.users_ta DEBUG: Plan 33 query after replacing subqueries and CTEs: SELECT event_type, count(DISTINCT value_2) AS count FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('33_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) GROUP BY event_type ORDER BY event_type DESC, (count(DISTINCT value_2)) DESC LIMIT 3 FETCH ALL FROM recursive_subquery; event_type | count -------------+------- +--------------------------------------------------------------------- 6 | 1 5 | 3 4 | 6 @@ -407,7 +407,7 @@ DEBUG: Plan 33 query after replacing subqueries and CTEs: SELECT event_type, co FETCH ALL FROM recursive_subquery; event_type | count -------------+------- +--------------------------------------------------------------------- (0 rows) COMMIT; diff --git a/src/test/regress/expected/subquery_executors.out b/src/test/regress/expected/subquery_executors.out index 43ab91eb6..1b7fa7fae 100644 --- a/src/test/regress/expected/subquery_executors.out +++ b/src/test/regress/expected/subquery_executors.out @@ -19,7 +19,7 @@ WHERE foo.value_2 = bar.user_id; DEBUG: generating subplan 2_1 for subquery SELECT value_2 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) 15) OFFSET 0 DEBUG: Plan 2 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('2_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -38,7 +38,7 @@ WHERE foo.counter = bar.user_id; DEBUG: generating subplan 4_1 for subquery SELECT user_id FROM public.users_table DEBUG: Plan 4 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.user_id, sum(users_table.value_2) OVER 
(PARTITION BY users_table.user_id) AS counter FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) 15)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.counter OPERATOR(pg_catalog.=) bar.user_id) count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -56,7 +56,7 @@ WHERE foo.value_2 = bar.user_id; DEBUG: generating subplan 6_1 for subquery SELECT value_2 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.<>) 15) OFFSET 0 DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) count -------- +--------------------------------------------------------------------- 1612 (1 row) @@ -75,7 +75,7 @@ WHERE foo.value_2 = bar.user_id; DEBUG: generating subplan 8_1 for subquery SELECT DISTINCT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (users_table.user_id OPERATOR(pg_catalog.<) 2)) DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) count -------- +--------------------------------------------------------------------- 58 (1 row) @@ -102,7 +102,7 @@ DEBUG: generating subplan 10_3 for subquery SELECT DISTINCT users_table.value_2 DEBUG: generating subplan 10_4 for subquery SELECT user_id FROM subquery_executor.users_table_local WHERE (user_id OPERATOR(pg_catalog.=) 2) DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('10_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar, (SELECT intermediate_result.value_2 FROM read_intermediate_result('10_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) baz, (SELECT intermediate_result.user_id FROM read_intermediate_result('10_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) baw WHERE ((foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) AND (baz.value_2 OPERATOR(pg_catalog.=) bar.user_id) AND (bar.user_id OPERATOR(pg_catalog.=) baw.user_id)) count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -122,7 +122,7 @@ DEBUG: generating subplan 14_1 for subquery SELECT value_2 FROM public.users_ta DEBUG: generating subplan 14_2 for subquery SELECT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) 2) OFFSET 0 DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT intermediate_result.user_id FROM 
read_intermediate_result('14_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) count -------- +--------------------------------------------------------------------- 18 (1 row) @@ -140,7 +140,7 @@ WHERE foo.value_2 = bar.user_id; DEBUG: generating subplan 17_1 for subquery SELECT value_2 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) 1) OFFSET 0 DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.<>) 2)) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) count -------- +--------------------------------------------------------------------- 103 (1 row) diff --git a/src/test/regress/expected/subquery_in_where.out b/src/test/regress/expected/subquery_in_where.out index 8a66110a0..d2ceae235 100644 --- a/src/test/regress/expected/subquery_in_where.out +++ b/src/test/regress/expected/subquery_in_where.out @@ -18,7 +18,7 @@ DEBUG: generating subplan 1_1 for CTE event_id: SELECT user_id AS events_user_i DEBUG: generating subplan 1_2 for subquery SELECT user_id FROM public.users_table DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('1_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) event_id WHERE (events_user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('1_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -47,7 +47,7 @@ DEBUG: generating subplan 6_1 for subquery SELECT 1 AS id, 2 AS value_1, 3 AS v DEBUG: generating subplan 6_2 for subquery SELECT user_id FROM public.events_table DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT id, value_1, value_3 FROM (SELECT intermediate_result.id, intermediate_result.value_1, intermediate_result.value_3 FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, value_1 integer, value_3 integer)) tt1 WHERE (id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('6_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) id | value_1 | value_3 -----+---------+--------- +--------------------------------------------------------------------- 1 | 2 | 3 2 | 3 | 4 (2 rows) @@ -80,7 +80,7 @@ DEBUG: generating subplan 8_4 for CTE event_id: SELECT user_id AS events_user_i DEBUG: generating subplan 8_5 for subquery SELECT events_user_id, events_time, event_type FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('8_4'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) event_id ORDER BY events_user_id, events_time, event_type LIMIT 10 DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT 
intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('8_5'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) sub_table WHERE (events_user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('8_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -138,7 +138,7 @@ DEBUG: generating subplan 14_4 for subquery SELECT value_1 FROM public.users_ta DEBUG: generating subplan 14_5 for subquery SELECT intermediate_result.user_id FROM read_intermediate_result('14_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION ALL SELECT intermediate_result.value_1 FROM read_intermediate_result('14_4'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer) DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('14_5'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) sub_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.events_user_id FROM read_intermediate_result('14_2'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer))) user_id ---------- +--------------------------------------------------------------------- 1 1 1 @@ -176,7 +176,7 @@ DEBUG: generating subplan 20_1 for subquery SELECT user_id AS events_user_id, " DEBUG: generating subplan 20_2 for subquery SELECT max((abs((user_id OPERATOR(pg_catalog.*) 1)) OPERATOR(pg_catalog.+) mod(user_id, 3))) AS val_1 FROM public.users_table GROUP BY user_id DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) sub_table WHERE (events_user_id OPERATOR(pg_catalog.<=) ANY (SELECT intermediate_result.val_1 FROM read_intermediate_result('20_2'::text, 'binary'::citus_copy_format) intermediate_result(val_1 integer))) count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -206,7 +206,7 @@ DEBUG: generating subplan 23_1 for subquery SELECT user_id AS events_user_id, " DEBUG: generating subplan 23_2 for subquery SELECT DISTINCT user_id FROM public.users_table GROUP BY user_id DEBUG: Plan 23 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('23_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) sub_table WHERE (events_user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('23_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -249,7 +249,7 @@ DEBUG: generating subplan 26_2 for subquery SELECT min(user_id) AS min FROM pub DEBUG: generating subplan 26_3 for subquery 
SELECT max(user_id) AS max FROM public.users_table GROUP BY user_id DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) sub_table WHERE ((events_user_id OPERATOR(pg_catalog.>=) ANY (SELECT intermediate_result.min FROM read_intermediate_result('26_2'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) AND (events_user_id OPERATOR(pg_catalog.<=) ANY (SELECT intermediate_result.max FROM read_intermediate_result('26_3'::text, 'binary'::citus_copy_format) intermediate_result(max integer)))) count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -292,7 +292,7 @@ DEBUG: generating subplan 30_2 for subquery SELECT min(user_id) AS min FROM pub DEBUG: generating subplan 30_3 for subquery SELECT max(value_2) AS max FROM public.users_table GROUP BY user_id DEBUG: Plan 30 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('30_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) sub_table WHERE ((events_user_id OPERATOR(pg_catalog.>=) ANY (SELECT intermediate_result.min FROM read_intermediate_result('30_2'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) AND (events_user_id OPERATOR(pg_catalog.<=) ANY (SELECT intermediate_result.max FROM read_intermediate_result('30_3'::text, 'binary'::citus_copy_format) intermediate_result(max integer)))) count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -331,7 +331,7 @@ DEBUG: generating subplan 35_2 for subquery SELECT value_2 FROM public.events_t DEBUG: Plan 35 query after replacing subqueries and CTEs: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('35_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) sub_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('35_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) DEBUG: Plan 34 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('34_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) cte count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -367,7 +367,7 @@ DEBUG: generating subplan 38_2 for subquery SELECT value_2 FROM public.events_t DEBUG: generating subplan 38_3 
for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('38_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) sub_table_1 WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('38_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) DEBUG: Plan 38 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('38_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) sub_table_2 count -------- +--------------------------------------------------------------------- 10 (1 row) @@ -423,7 +423,7 @@ DEBUG: generating subplan 42_4 for subquery SELECT t1.user_id, t2.user_id_2 FRO DEBUG: generating subplan 42_5 for subquery SELECT min(user_id) AS min FROM public.events_table GROUP BY user_id DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT sum(user_id) AS sum FROM (SELECT intermediate_result.user_id, intermediate_result.user_id_2 FROM read_intermediate_result('42_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, user_id_2 integer)) t3 WHERE (user_id OPERATOR(pg_catalog.>) ANY (SELECT intermediate_result.min FROM read_intermediate_result('42_5'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) sum ------ +--------------------------------------------------------------------- 18 (1 row) @@ -476,7 +476,7 @@ DEBUG: generating subplan 48_4 for subquery SELECT t1.user_id, t2.user_id_2 FRO DEBUG: generating subplan 48_5 for subquery SELECT 1, 2 FROM public.events_table WHERE (value_2 OPERATOR(pg_catalog.=) user_id) DEBUG: Plan 48 query after replacing subqueries and CTEs: SELECT sum(user_id) AS sum FROM (SELECT intermediate_result.user_id, intermediate_result.user_id_2 FROM read_intermediate_result('48_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, user_id_2 integer)) t3 WHERE (EXISTS (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('48_5'::text, 'binary'::citus_copy_format) intermediate_result("?column?" 
integer, "?column?_1" integer))) sum ------ +--------------------------------------------------------------------- 67 (1 row) @@ -530,7 +530,7 @@ DEBUG: generating subplan 54_4 for subquery SELECT t1.user_id, t2.user_id_2 FRO DEBUG: generating subplan 54_5 for subquery SELECT 1, 2 FROM public.events_table WHERE (value_2 OPERATOR(pg_catalog.=) (user_id OPERATOR(pg_catalog.+) 6)) DEBUG: Plan 54 query after replacing subqueries and CTEs: SELECT sum(user_id) AS sum FROM (SELECT intermediate_result.user_id, intermediate_result.user_id_2 FROM read_intermediate_result('54_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, user_id_2 integer)) t3 WHERE (NOT (EXISTS (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('54_5'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer)))) sum ------ +--------------------------------------------------------------------- 67 (1 row) @@ -558,7 +558,7 @@ DEBUG: push down of limit count: 10 DEBUG: generating subplan 60_2 for subquery SELECT user_id, value_1 FROM public.users_table ORDER BY user_id, value_1 LIMIT 10 DEBUG: Plan 60 query after replacing subqueries and CTEs: SELECT user_id, value_1 FROM (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('60_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) t3 WHERE ((user_id, value_1) OPERATOR(pg_catalog.=) (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('60_1'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer))) user_id | value_1 ----------+--------- +--------------------------------------------------------------------- (0 rows) -- Recursively plan subquery in WHERE clause when the FROM clause has a subquery @@ -584,7 +584,7 @@ ORDER BY DEBUG: generating subplan 63_1 for subquery SELECT value_2 FROM public.events_table DEBUG: Plan 63 query after replacing subqueries and CTEs: SELECT generate_series FROM (SELECT generate_series.generate_series FROM generate_series(1, 10) generate_series(generate_series)) gst WHERE (generate_series OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('63_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) ORDER BY generate_series generate_series ------------------ +--------------------------------------------------------------------- 1 2 3 @@ -622,7 +622,7 @@ ORDER BY DEBUG: generating subplan 65_1 for subquery SELECT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT generate_series.generate_series FROM generate_series(1, 3) generate_series(generate_series))) DEBUG: Plan 65 query after replacing subqueries and CTEs: SELECT generate_series FROM (SELECT generate_series.generate_series FROM generate_series(1, 10) generate_series(generate_series)) gst WHERE (generate_series OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('65_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) ORDER BY generate_series generate_series ------------------ +--------------------------------------------------------------------- 1 2 3 @@ -650,7 +650,7 @@ DEBUG: generating subplan 67_1 for subquery SELECT id, value_1 FROM subquery_in DEBUG: generating subplan 67_2 for subquery SELECT user_id FROM public.users_table 
DEBUG: Plan 67 query after replacing subqueries and CTEs: SELECT id, value_1 FROM (SELECT intermediate_result.id, intermediate_result.value_1 FROM read_intermediate_result('67_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, value_1 integer)) sub_table WHERE (id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('67_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) id | value_1 -----+--------- +--------------------------------------------------------------------- 1 | 1 2 | 2 (2 rows) @@ -679,7 +679,7 @@ DEBUG: push down of limit count: 10 DEBUG: generating subplan 69_2 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table ORDER BY user_id LIMIT 10 DEBUG: Plan 69 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('69_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) sub_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.id FROM read_intermediate_result('69_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer))) count -------- +--------------------------------------------------------------------- 10 (1 row) diff --git a/src/test/regress/expected/subquery_local_tables.out b/src/test/regress/expected/subquery_local_tables.out index 56c0b6748..bbdbbf8dd 100644 --- a/src/test/regress/expected/subquery_local_tables.out +++ b/src/test/regress/expected/subquery_local_tables.out @@ -36,7 +36,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 3_2 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) ORDER BY users_table.user_id DESC LIMIT 5 DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('3_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) foo.user_id) ORDER BY foo.user_id DESC user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -70,7 +70,7 @@ FROM DEBUG: generating subplan 5_1 for subquery SELECT DISTINCT users_table_local.user_id FROM subquery_local_tables.users_table_local, subquery_local_tables.events_table_local WHERE ((users_table_local.user_id OPERATOR(pg_catalog.=) events_table_local.user_id) AND (events_table_local.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table_local.user_id DESC LIMIT 5 DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) 
events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) foo.user_id) ORDER BY foo.user_id DESC user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -88,7 +88,7 @@ DEBUG: generating subplan 6_1 for subquery SELECT DISTINCT value_2 FROM subquer DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) ORDER BY user_id LIMIT 5 DEBUG: push down of limit count: 5 user_id ---------- +--------------------------------------------------------------------- 1 2 3 @@ -120,7 +120,7 @@ DEBUG: generating subplan 7_1 for subquery SELECT user_id, event_type FROM subq DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM (SELECT users_table.user_id FROM public.users_table, (SELECT bar.event_type, bar.user_id FROM (SELECT foo.event_type, users_table_1.user_id FROM public.users_table users_table_1, (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) users_table_1.user_id)) bar) baz WHERE (baz.user_id OPERATOR(pg_catalog.=) users_table.user_id)) sub1 ORDER BY user_id DESC LIMIT 3 DEBUG: push down of limit count: 3 user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -159,7 +159,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 8_2 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT intermediate_result.user_id FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) AND (NOT (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 3) AND (events_table.event_type OPERATOR(pg_catalog.<=) 4) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))))) LIMIT 5 DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT user_id, array_length(events_table, 1) AS array_length FROM (SELECT t.user_id, array_agg(t.event ORDER BY t."time") AS events_table FROM (SELECT u.user_id, (e.event_type)::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('8_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) t GROUP BY t.user_id) q ORDER BY (array_length(events_table, 1)) DESC, user_id user_id | array_length ----------+-------------- +--------------------------------------------------------------------- 5 | 364 (1 row) @@ -236,7 +236,7 @@ ORDER BY 1; DEBUG: generating subplan 10_1 for subquery SELECT user_id, count(*) AS count_pay FROM subquery_local_tables.users_table_local WHERE ((user_id OPERATOR(pg_catalog.>=) 1) AND (user_id OPERATOR(pg_catalog.<=) 3) AND (value_1 OPERATOR(pg_catalog.>) 3) AND (value_1 OPERATOR(pg_catalog.<) 5)) GROUP BY user_id HAVING (count(*) OPERATOR(pg_catalog.>) 1) LIMIT 10 DEBUG: Plan 10 
query after replacing subqueries and CTEs: SELECT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT subquery_top.user_id FROM (SELECT subquery_1.user_id, subquery_2.count_pay FROM ((SELECT users_table_1.user_id, 'action=>1'::text AS event, events_table."time" FROM public.users_table users_table_1, public.events_table WHERE ((users_table_1.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.>=) 1) AND (users_table_1.user_id OPERATOR(pg_catalog.<=) 3) AND (events_table.event_type OPERATOR(pg_catalog.>) 1) AND (events_table.event_type OPERATOR(pg_catalog.<) 3)) UNION SELECT users_table_1.user_id, 'action=>2'::text AS event, events_table."time" FROM public.users_table users_table_1, public.events_table WHERE ((users_table_1.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.>=) 1) AND (users_table_1.user_id OPERATOR(pg_catalog.<=) 3) AND (events_table.event_type OPERATOR(pg_catalog.>) 2) AND (events_table.event_type OPERATOR(pg_catalog.<) 4))) subquery_1 LEFT JOIN (SELECT intermediate_result.user_id, intermediate_result.count_pay FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, count_pay bigint)) subquery_2 ON ((subquery_1.user_id OPERATOR(pg_catalog.=) subquery_2.user_id))) GROUP BY subquery_1.user_id, subquery_2.count_pay) subquery_top GROUP BY subquery_top.count_pay, subquery_top.user_id)) GROUP BY user_id HAVING ((count(*) OPERATOR(pg_catalog.>) 1) AND (sum(value_2) OPERATOR(pg_catalog.>) 29)) ORDER BY user_id user_id ---------- +--------------------------------------------------------------------- 2 3 (2 rows) diff --git a/src/test/regress/expected/subquery_partitioning.out b/src/test/regress/expected/subquery_partitioning.out index 856646b0c..dd8dbbe0f 100644 --- a/src/test/regress/expected/subquery_partitioning.out +++ b/src/test/regress/expected/subquery_partitioning.out @@ -20,7 +20,7 @@ SELECT create_distributed_table('partitioning_test', 'id'); NOTICE: Copying data from local table... NOTICE: Copying data from local table... 
create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -40,7 +40,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 3_1 for subquery SELECT DISTINCT id FROM subquery_and_partitioning.partitioning_test LIMIT 5 DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT id FROM (SELECT intermediate_result.id FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) foo ORDER BY id DESC id ----- +--------------------------------------------------------------------- 4 3 2 @@ -71,7 +71,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 5_2 for subquery SELECT DISTINCT "time" FROM subquery_and_partitioning.partitioning_test LIMIT 5 DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT foo.id, bar."time" FROM (SELECT intermediate_result.id FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) foo, (SELECT intermediate_result."time" FROM read_intermediate_result('5_2'::text, 'binary'::citus_copy_format) intermediate_result("time" date)) bar WHERE ((foo.id)::double precision OPERATOR(pg_catalog.=) date_part('day'::text, bar."time")) ORDER BY bar."time" DESC, foo.id id | time -----+------------ +--------------------------------------------------------------------- 3 | 03-03-2010 (1 row) @@ -100,7 +100,7 @@ DEBUG: generating subplan 8_1 for subquery SELECT DISTINCT "time" FROM subquery DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT foo."time", bar.id FROM (SELECT intermediate_result."time" FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result("time" date)) foo, (SELECT DISTINCT partitioning_test.id FROM subquery_and_partitioning.partitioning_test) bar WHERE (date_part('day'::text, foo."time") OPERATOR(pg_catalog.=) (bar.id)::double precision) ORDER BY bar.id DESC, foo."time" DESC LIMIT 3 DEBUG: push down of limit count: 3 time | id -------------+---- +--------------------------------------------------------------------- 03-03-2010 | 3 (1 row) @@ -130,7 +130,7 @@ DEBUG: generating subplan 10_1 for subquery SELECT DISTINCT "time" FROM subquer DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT foo."time", bar.id, partitioning_test.id, partitioning_test.value_1, partitioning_test."time" FROM (SELECT intermediate_result."time" FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result("time" date)) foo, (SELECT DISTINCT partitioning_test_1.id FROM subquery_and_partitioning.partitioning_test partitioning_test_1) bar, subquery_and_partitioning.partitioning_test WHERE ((date_part('day'::text, foo."time") OPERATOR(pg_catalog.=) (bar.id)::double precision) AND (partitioning_test.id OPERATOR(pg_catalog.=) bar.id)) ORDER BY bar.id DESC, foo."time" DESC LIMIT 3 DEBUG: push down of limit count: 3 time | id | id | value_1 | time -------------+----+----+---------+------------ +--------------------------------------------------------------------- 03-03-2010 | 3 | 3 | 3 | 11-22-2017 (1 row) @@ -142,7 +142,7 @@ WHERE DEBUG: generating subplan 12_1 for subquery SELECT DISTINCT date_part('day'::text, "time") AS date_part FROM subquery_and_partitioning.partitioning_test DEBUG: Plan 12 query after replacing subqueries and CTEs: SELECT DISTINCT id FROM subquery_and_partitioning.partitioning_test WHERE ((id)::double precision OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.date_part FROM 
read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(date_part double precision))) id ----- +--------------------------------------------------------------------- 3 (1 row) @@ -161,7 +161,7 @@ WHERE foo.value_1 = bar.user_id; DEBUG: generating subplan 14_1 for subquery SELECT DISTINCT p1.value_1 FROM subquery_and_partitioning.partitioning_test p1, subquery_and_partitioning.partitioning_test p2 WHERE (p1.id OPERATOR(pg_catalog.=) p2.value_1) DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_1 FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) foo, (SELECT users_table.user_id FROM public.users_table) bar WHERE (foo.value_1 OPERATOR(pg_catalog.=) bar.user_id) count -------- +--------------------------------------------------------------------- 47 (1 row) @@ -220,7 +220,7 @@ DEBUG: generating subplan 16_3 for subquery SELECT count(*) AS cnt FROM (SELECT DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT cnt, user_id, "time", value_1, value_2, value_3, value_4 FROM (SELECT foo.cnt, users_table.user_id, users_table."time", users_table.value_1, users_table.value_2, users_table.value_3, users_table.value_4 FROM (SELECT intermediate_result.cnt FROM read_intermediate_result('16_3'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint)) foo, public.users_table WHERE (foo.cnt OPERATOR(pg_catalog.>) users_table.value_2)) subquery_and_ctes ORDER BY "time" DESC, cnt DESC, user_id DESC, value_1 DESC LIMIT 5 DEBUG: push down of limit count: 5 cnt | user_id | time | value_1 | value_2 | value_3 | value_4 ------+---------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 105 | 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | 105 | 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | 105 | 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 | @@ -276,7 +276,7 @@ DEBUG: generating subplan 23_5 for subquery SELECT min(partitioning_test.value_ DEBUG: generating subplan 23_6 for subquery SELECT avg(level_6.min) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('23_5'::text, 'binary'::citus_copy_format) intermediate_result(min integer)) level_6, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_6.min) GROUP BY users_table.value_1 DEBUG: Plan 23 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('23_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar count -------- +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/subquery_prepared_statements.out b/src/test/regress/expected/subquery_prepared_statements.out index 3e8ae81cb..4a33969a8 100644 --- a/src/test/regress/expected/subquery_prepared_statements.out +++ b/src/test/regress/expected/subquery_prepared_statements.out @@ -4,7 +4,7 @@ CREATE SCHEMA subquery_prepared_statements; SELECT run_command_on_workers('CREATE SCHEMA subquery_prepared_statements;'); run_command_on_workers -------------------------------------- +--------------------------------------------------------------------- (localhost,57637,t,"CREATE SCHEMA") (localhost,57638,t,"CREATE SCHEMA") (2 rows) @@ -61,7 +61,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 1_1 for subquery SELECT 
DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('1_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC values_of_subquery --------------------- +--------------------------------------------------------------------- (6,4) (6,3) (6,2) @@ -71,7 +71,7 @@ DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT DISTINCT values EXECUTE subquery_prepare_without_param; values_of_subquery --------------------- +--------------------------------------------------------------------- (6,4) (6,3) (6,2) @@ -81,7 +81,7 @@ EXECUTE subquery_prepare_without_param; EXECUTE subquery_prepare_without_param; values_of_subquery --------------------- +--------------------------------------------------------------------- (6,4) (6,3) (6,2) @@ -91,7 +91,7 @@ EXECUTE subquery_prepare_without_param; EXECUTE subquery_prepare_without_param; values_of_subquery --------------------- +--------------------------------------------------------------------- (6,4) (6,3) (6,2) @@ -101,7 +101,7 @@ EXECUTE subquery_prepare_without_param; EXECUTE subquery_prepare_without_param; values_of_subquery --------------------- +--------------------------------------------------------------------- (6,4) (6,3) (6,2) @@ -111,7 +111,7 @@ EXECUTE subquery_prepare_without_param; EXECUTE subquery_prepare_without_param; values_of_subquery --------------------- +--------------------------------------------------------------------- (6,4) (6,3) (6,2) @@ -121,7 +121,7 @@ EXECUTE subquery_prepare_without_param; EXECUTE subquery_prepare_without_param; values_of_subquery --------------------- +--------------------------------------------------------------------- (6,4) (6,3) (6,2) @@ -134,7 +134,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 3_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('3_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC values_of_subquery --------------------- +--------------------------------------------------------------------- (2,4) (2,3) (2,2) @@ -147,7 +147,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 5_1 for subquery SELECT DISTINCT ROW(users_table.user_id, 
events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('5_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC values_of_subquery --------------------- +--------------------------------------------------------------------- (2,4) (2,3) (2,2) @@ -160,7 +160,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 7_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('7_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC values_of_subquery --------------------- +--------------------------------------------------------------------- (2,4) (2,3) (2,2) @@ -173,7 +173,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 9_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('9_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC values_of_subquery --------------------- +--------------------------------------------------------------------- (2,4) (2,3) (2,2) @@ -186,7 +186,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 11_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND 
(events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('11_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC values_of_subquery --------------------- +--------------------------------------------------------------------- (2,4) (2,3) (2,2) @@ -199,7 +199,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 14_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('14_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC values_of_subquery --------------------- +--------------------------------------------------------------------- (2,4) (2,3) (2,2) @@ -212,7 +212,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 16_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('16_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC values_of_subquery --------------------- +--------------------------------------------------------------------- (6,1) (5,1) (4,1) @@ -225,7 +225,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 18_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('18_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC values_of_subquery --------------------- 
+--------------------------------------------------------------------- (6,1) (5,1) (4,1) @@ -238,7 +238,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 20_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('20_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC values_of_subquery --------------------- +--------------------------------------------------------------------- (6,1) (5,1) (4,1) @@ -251,7 +251,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 22_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('22_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC values_of_subquery --------------------- +--------------------------------------------------------------------- (6,1) (5,1) (4,1) @@ -264,7 +264,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 24_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 24 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('24_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC values_of_subquery --------------------- +--------------------------------------------------------------------- (6,1) (5,1) (4,1) @@ -277,7 +277,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 27_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 27 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM 
(SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('27_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC values_of_subquery --------------------- +--------------------------------------------------------------------- (6,1) (5,1) (4,1) diff --git a/src/test/regress/expected/subquery_view.out b/src/test/regress/expected/subquery_view.out index 7724d8712..7a698b51a 100644 --- a/src/test/regress/expected/subquery_view.out +++ b/src/test/regress/expected/subquery_view.out @@ -23,7 +23,7 @@ ORDER BY 1 DESC LIMIT 5; DEBUG: generating subplan 3_1 for subquery SELECT DISTINCT users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.value_1 DESC DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT value_1 FROM (SELECT intermediate_result.value_1 FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) view_without_subquery ORDER BY value_1 DESC LIMIT 5 value_1 ---------- +--------------------------------------------------------------------- 5 4 3 @@ -50,7 +50,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 5_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) view_without_subquery_second ORDER BY user_id user_id ---------- +--------------------------------------------------------------------- 2 3 4 @@ -78,7 +78,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 7_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo ORDER BY foo.user_id DESC) subquery_limit ORDER BY user_id DESC user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -105,7 +105,7 @@ SELECT * FROM subquery_non_p_key_group_by ORDER BY 1 DESC; DEBUG: generating subplan 9_1 for subquery SELECT DISTINCT users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.value_1 DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT value_1 FROM (SELECT foo.value_1 FROM (SELECT intermediate_result.value_1 FROM read_intermediate_result('9_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) foo ORDER BY foo.value_1 DESC) subquery_non_p_key_group_by ORDER BY value_1 DESC value_1 
---------- +--------------------------------------------------------------------- 5 4 3 @@ -145,7 +145,7 @@ DEBUG: generating subplan 11_1 for subquery SELECT users_table.value_2 FROM pub DEBUG: generating subplan 11_2 for subquery SELECT users_table.value_3 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) GROUP BY users_table.value_3 ORDER BY users_table.value_3 DESC DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT value_2, value_3 FROM (SELECT foo.value_2, bar.value_3 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT intermediate_result.value_3 FROM read_intermediate_result('11_2'::text, 'binary'::citus_copy_format) intermediate_result(value_3 double precision)) bar WHERE ((foo.value_2)::double precision OPERATOR(pg_catalog.=) bar.value_3) ORDER BY bar.value_3 DESC, foo.value_2) final_query_router ORDER BY value_2 value_2 | value_3 ----------+--------- +--------------------------------------------------------------------- 0 | 0 1 | 1 2 | 2 @@ -194,7 +194,7 @@ DEBUG: generating subplan 14_2 for subquery SELECT foo.value_2, bar.user_id FRO DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT DISTINCT ON (users_table.value_2) users_table.value_2, users_table."time", users_table.value_3 FROM (SELECT intermediate_result.value_2, intermediate_result.user_id FROM read_intermediate_result('14_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, user_id integer)) final_query_realtime, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) final_query_realtime.user_id) ORDER BY users_table.value_2 DESC, users_table."time" DESC, users_table.value_3 DESC LIMIT 3 DEBUG: push down of limit count: 3 value_2 | time | value_3 ----------+---------------------------------+--------- +--------------------------------------------------------------------- 5 | Thu Nov 23 16:28:38.455322 2017 | 4 4 | Thu Nov 23 10:22:39.468816 2017 | 3 3 | Thu Nov 23 15:55:08.493462 2017 | 3 @@ -214,7 +214,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 17_1 for subquery SELECT DISTINCT value_2 FROM public.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) ORDER BY value_2 LIMIT 5 DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))) subquery_in_where ORDER BY user_id DESC user_id ---------- +--------------------------------------------------------------------- 4 3 2 @@ -257,7 +257,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 19_1 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 1) AND (events_table.event_type OPERATOR(pg_catalog.<=) 3) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))) AND (NOT (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE 
((events_table.event_type OPERATOR(pg_catalog.>) 3) AND (events_table.event_type OPERATOR(pg_catalog.<=) 4) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))))) LIMIT 5 DEBUG: Plan 19 query after replacing subqueries and CTEs: SELECT user_id, array_length FROM (SELECT q.user_id, array_length(q.events_table, 1) AS array_length FROM (SELECT t.user_id, array_agg(t.event ORDER BY t."time") AS events_table FROM (SELECT u.user_id, (e.event_type)::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('19_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) t GROUP BY t.user_id) q) subquery_from_from_where ORDER BY array_length DESC, user_id user_id | array_length ----------+-------------- +--------------------------------------------------------------------- 5 | 364 (1 row) @@ -291,7 +291,7 @@ DEBUG: generating subplan 21_1 for subquery SELECT user_id FROM public.users_ta DEBUG: Plan 21 query after replacing subqueries and CTEs: SELECT user_id, array_length FROM (SELECT q.user_id, array_length(q.events_table, 1) AS array_length FROM (SELECT t.user_id, array_agg(t.event ORDER BY t."time") AS events_table FROM (SELECT u.user_id, (e.event_type)::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('21_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) t GROUP BY t.user_id) q) subquery_from_from_where ORDER BY user_id DESC LIMIT 3 DEBUG: push down of limit count: 3 user_id | array_length ----------+-------------- +--------------------------------------------------------------------- 5 | 364 (1 row) @@ -315,7 +315,7 @@ DEBUG: generating subplan 23_1 for subquery SELECT DISTINCT users_table.value_2 DEBUG: generating subplan 23_2 for subquery SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('23_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) DEBUG: Plan 23 query after replacing subqueries and CTEs: SELECT count FROM (SELECT intermediate_result.count FROM read_intermediate_result('23_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) repartition_view count -------- +--------------------------------------------------------------------- 58 (1 row) @@ -347,7 +347,7 @@ DEBUG: generating subplan 26_4 for subquery SELECT user_id FROM subquery_view.u DEBUG: generating subplan 26_5 for subquery SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('26_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar, (SELECT intermediate_result.value_2 FROM read_intermediate_result('26_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) baz, (SELECT intermediate_result.user_id FROM read_intermediate_result('26_4'::text, 'binary'::citus_copy_format) 
intermediate_result(user_id integer)) baw WHERE ((foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) AND (baz.value_2 OPERATOR(pg_catalog.=) bar.user_id) AND (bar.user_id OPERATOR(pg_catalog.=) baw.user_id)) DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT count FROM (SELECT intermediate_result.count FROM read_intermediate_result('26_5'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) all_executors_view count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -406,7 +406,7 @@ DEBUG: generating subplan 31_3 for subquery SELECT count(*) AS cnt FROM (SELECT DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT cnt, user_id, "time", value_1, value_2, value_3, value_4 FROM (SELECT foo.cnt, users_table.user_id, users_table."time", users_table.value_1, users_table.value_2, users_table.value_3, users_table.value_4 FROM (SELECT intermediate_result.cnt FROM read_intermediate_result('31_3'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint)) foo, public.users_table WHERE (foo.cnt OPERATOR(pg_catalog.>) users_table.value_2)) subquery_and_ctes ORDER BY "time" DESC, cnt DESC, user_id DESC, value_1 DESC LIMIT 5 DEBUG: push down of limit count: 5 cnt | user_id | time | value_1 | value_2 | value_3 | value_4 ------+---------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 432 | 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | 432 | 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | 432 | 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 | @@ -450,7 +450,7 @@ DEBUG: generating subplan 38_3 for subquery SELECT DISTINCT cte.user_id FROM pu DEBUG: Plan 38 query after replacing subqueries and CTEs: SELECT "time", event_type, value_2, value_3 FROM (SELECT events_table."time", events_table.event_type, events_table.value_2, events_table.value_3 FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('38_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, public.events_table WHERE (foo.user_id OPERATOR(pg_catalog.=) events_table.value_2)) subquery_and_ctes_second ORDER BY value_2 DESC, event_type DESC, "time" DESC LIMIT 5 DEBUG: push down of limit count: 5 time | event_type | value_2 | value_3 ----------------------------------+------------+---------+--------- +--------------------------------------------------------------------- Thu Nov 23 21:54:46.924477 2017 | 6 | 4 | 5 Wed Nov 22 21:24:22.849224 2017 | 5 | 4 | 1 Wed Nov 22 21:05:25.194441 2017 | 5 | 4 | 1 @@ -511,7 +511,7 @@ DEBUG: generating subplan 43_6 for subquery SELECT avg(level_6.min) AS avg FROM DEBUG: generating subplan 43_7 for subquery SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('43_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar DEBUG: Plan 43 query after replacing subqueries and CTEs: SELECT count FROM (SELECT intermediate_result.count FROM read_intermediate_result('43_7'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) deep_subquery count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -549,7 +549,7 @@ DEBUG: push down of limit count: 9 DEBUG: generating subplan 51_2 for subquery SELECT result_of_view_is_also_recursively_planned.user_id, events_table.user_id, events_table."time", events_table.event_type, events_table.value_2, events_table.value_3, events_table.value_4 
FROM (SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('51_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo ORDER BY foo.user_id DESC) result_of_view_is_also_recursively_planned, public.events_table WHERE (events_table.value_2 OPERATOR(pg_catalog.=) result_of_view_is_also_recursively_planned.user_id) ORDER BY events_table."time" DESC OFFSET 4 LIMIT 5 DEBUG: Plan 51 query after replacing subqueries and CTEs: SELECT user_id, user_id_1 AS user_id, "time", event_type, value_2, value_3, value_4 FROM (SELECT intermediate_result.user_id, intermediate_result.user_id_1 AS user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('51_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, user_id_1 integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint)) foo(user_id, user_id_1, "time", event_type, value_2, value_3, value_4) ORDER BY "time" DESC LIMIT 5 user_id | user_id | time | event_type | value_2 | value_3 | value_4 ----------+---------+---------------------------------+------------+---------+---------+--------- +--------------------------------------------------------------------- 2 | 3 | Thu Nov 23 16:44:41.903713 2017 | 4 | 2 | 2 | 2 | 5 | Thu Nov 23 16:11:02.929469 2017 | 4 | 2 | 0 | 4 | 5 | Thu Nov 23 14:40:40.467511 2017 | 1 | 4 | 1 | diff --git a/src/test/regress/expected/task_tracker_assign_task.out b/src/test/regress/expected/task_tracker_assign_task.out index d9dff929b..dcf336ed2 100644 --- a/src/test/regress/expected/task_tracker_assign_task.out +++ b/src/test/regress/expected/task_tracker_assign_task.out @@ -15,13 +15,13 @@ SELECT task_tracker_assign_task(:JobId, :SimpleTaskId, 'COPY (SELECT * FROM lineitem) TO ' '''base/pgsql_job_cache/job_401010/task_101101'''); task_tracker_assign_task --------------------------- +--------------------------------------------------------------------- (1 row) SELECT task_tracker_assign_task(:JobId, :RecoverableTaskId, :BadQueryString); task_tracker_assign_task --------------------------- +--------------------------------------------------------------------- (1 row) @@ -30,40 +30,40 @@ SELECT task_tracker_assign_task(:JobId, :RecoverableTaskId, :BadQueryString); -- not enough, the regression tests may fail on an overloaded box. 
SELECT pg_sleep(3.0); pg_sleep ----------- +--------------------------------------------------------------------- (1 row) SELECT task_tracker_task_status(:JobId, :SimpleTaskId); task_tracker_task_status --------------------------- +--------------------------------------------------------------------- 6 (1 row) SELECT task_tracker_task_status(:JobId, :RecoverableTaskId); task_tracker_task_status --------------------------- +--------------------------------------------------------------------- 5 (1 row) COPY :SimpleTaskTable FROM 'base/pgsql_job_cache/job_401010/task_101101'; SELECT COUNT(*) FROM :SimpleTaskTable; count -------- +--------------------------------------------------------------------- 12000 (1 row) SELECT COUNT(*) AS diff_lhs FROM ( :SelectAll FROM :SimpleTaskTable EXCEPT ALL :SelectAll FROM lineitem ) diff; diff_lhs ----------- +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) As diff_rhs FROM ( :SelectAll FROM lineitem EXCEPT ALL :SelectAll FROM :SimpleTaskTable ) diff; diff_rhs ----------- +--------------------------------------------------------------------- 0 (1 row) @@ -71,19 +71,19 @@ SELECT COUNT(*) As diff_rhs FROM ( :SelectAll FROM lineitem EXCEPT ALL -- the task's query string, and reschedules the updated task for execution. SELECT task_tracker_assign_task(:JobId, :RecoverableTaskId, :GoodQueryString); task_tracker_assign_task --------------------------- +--------------------------------------------------------------------- (1 row) SELECT pg_sleep(2.0); pg_sleep ----------- +--------------------------------------------------------------------- (1 row) SELECT task_tracker_task_status(:JobId, :RecoverableTaskId); task_tracker_task_status --------------------------- +--------------------------------------------------------------------- 6 (1 row) diff --git a/src/test/regress/expected/task_tracker_cleanup_job.out b/src/test/regress/expected/task_tracker_cleanup_job.out index 664084a77..112412a2f 100644 --- a/src/test/regress/expected/task_tracker_cleanup_job.out +++ b/src/test/regress/expected/task_tracker_cleanup_job.out @@ -8,25 +8,25 @@ SET citus.next_shard_id TO 1060000; -- Test worker_cleanup_job_schema_cache SELECT * FROM task_tracker_assign_task(2, 2, ''); task_tracker_assign_task --------------------------- +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_catalog.pg_namespace WHERE nspname = 'pg_merge_job_0002'; count -------- +--------------------------------------------------------------------- 1 (1 row) SELECT worker_cleanup_job_schema_cache(); worker_cleanup_job_schema_cache ---------------------------------- +--------------------------------------------------------------------- (1 row) SELECT count(*) FROM pg_catalog.pg_namespace WHERE nspname = 'pg_merge_job_0002'; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -36,44 +36,44 @@ SELECT task_tracker_assign_task(:JobId, :CompletedTaskId, 'COPY (SELECT * FROM lineitem) TO ' '''base/pgsql_job_cache/job_401010/task_801107'''); task_tracker_assign_task --------------------------- +--------------------------------------------------------------------- (1 row) SELECT task_tracker_assign_task(:JobId, :RunningTaskId, 'SELECT pg_sleep(100)'); task_tracker_assign_task --------------------------- +--------------------------------------------------------------------- (1 row) SELECT pg_sleep(2.0); pg_sleep ----------- 
+--------------------------------------------------------------------- (1 row) SELECT task_tracker_task_status(:JobId, :CompletedTaskId); task_tracker_task_status --------------------------- +--------------------------------------------------------------------- 6 (1 row) SELECT task_tracker_task_status(:JobId, :RunningTaskId); task_tracker_task_status --------------------------- +--------------------------------------------------------------------- 3 (1 row) SELECT isdir FROM pg_stat_file('base/pgsql_job_cache/job_401010/task_801107'); isdir -------- +--------------------------------------------------------------------- f (1 row) SELECT isdir FROM pg_stat_file('base/pgsql_job_cache/job_401010'); isdir -------- +--------------------------------------------------------------------- t (1 row) @@ -81,13 +81,13 @@ SELECT isdir FROM pg_stat_file('base/pgsql_job_cache/job_401010'); -- files, and connections associated with these tasks should all be cleaned up. SELECT task_tracker_cleanup_job(:JobId); task_tracker_cleanup_job --------------------------- +--------------------------------------------------------------------- (1 row) SELECT pg_sleep(1.0); pg_sleep ----------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/task_tracker_partition_task.out b/src/test/regress/expected/task_tracker_partition_task.out index 6d30e3d48..6dbf0f740 100644 --- a/src/test/regress/expected/task_tracker_partition_task.out +++ b/src/test/regress/expected/task_tracker_partition_task.out @@ -23,19 +23,19 @@ SELECT task_tracker_assign_task(:JobId, :PartitionTaskId, '401010, 801106, ''SELECT * FROM lineitem'', ' '''l_orderkey'', 20, ARRAY[1000, 3000]::_int8)'); task_tracker_assign_task --------------------------- +--------------------------------------------------------------------- (1 row) SELECT pg_sleep(4.0); pg_sleep ----------- +--------------------------------------------------------------------- (1 row) SELECT task_tracker_task_status(:JobId, :PartitionTaskId); task_tracker_task_status --------------------------- +--------------------------------------------------------------------- 6 (1 row) @@ -44,13 +44,13 @@ COPY :TablePart01 FROM :'Table_File_01'; COPY :TablePart02 FROM :'Table_File_02'; SELECT COUNT(*) FROM :TablePart00; count -------- +--------------------------------------------------------------------- 1004 (1 row) SELECT COUNT(*) FROM :TablePart02; count -------- +--------------------------------------------------------------------- 8970 (1 row) @@ -60,7 +60,7 @@ SELECT COUNT(*) AS diff_lhs_00 FROM ( :SelectAll FROM :TablePart00 EXCEPT ALL :SelectAll FROM lineitem WHERE :PartitionColumn < 1000 ) diff; diff_lhs_00 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -69,7 +69,7 @@ SELECT COUNT(*) AS diff_lhs_01 FROM ( :SelectAll FROM lineitem WHERE :PartitionColumn >= 1000 AND :PartitionColumn < 3000 ) diff; diff_lhs_01 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -77,7 +77,7 @@ SELECT COUNT(*) AS diff_lhs_02 FROM ( :SelectAll FROM :TablePart02 EXCEPT ALL :SelectAll FROM lineitem WHERE :PartitionColumn >= 3000 ) diff; diff_lhs_02 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -85,7 +85,7 @@ SELECT COUNT(*) AS diff_rhs_00 FROM ( :SelectAll FROM lineitem WHERE :PartitionColumn < 1000 EXCEPT ALL :SelectAll FROM :TablePart00 ) diff; diff_rhs_00 -------------- 
+--------------------------------------------------------------------- 0 (1 row) @@ -94,7 +94,7 @@ SELECT COUNT(*) AS diff_rhs_01 FROM ( :PartitionColumn < 3000 EXCEPT ALL :SelectAll FROM :TablePart01 ) diff; diff_rhs_01 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -102,7 +102,7 @@ SELECT COUNT(*) AS diff_rhs_02 FROM ( :SelectAll FROM lineitem WHERE :PartitionColumn >= 3000 EXCEPT ALL :SelectAll FROM :TablePart02 ) diff; diff_rhs_02 -------------- +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/upgrade_basic_after.out b/src/test/regress/expected/upgrade_basic_after.out index 70c1c3137..e4c53db00 100644 --- a/src/test/regress/expected/upgrade_basic_after.out +++ b/src/test/regress/expected/upgrade_basic_after.out @@ -2,7 +2,7 @@ SET search_path TO upgrade_basic, public, pg_catalog; BEGIN; SELECT * FROM pg_indexes WHERE schemaname = 'upgrade_basic' ORDER BY tablename; schemaname | tablename | indexname | tablespace | indexdef ----------------+-----------+-----------+------------+----------------------------------------------------------------- +--------------------------------------------------------------------- upgrade_basic | r | r_pkey | | CREATE UNIQUE INDEX r_pkey ON upgrade_basic.r USING btree (a) upgrade_basic | t | t_a_idx | | CREATE INDEX t_a_idx ON upgrade_basic.t USING hash (a) upgrade_basic | tp | tp_pkey | | CREATE UNIQUE INDEX tp_pkey ON upgrade_basic.tp USING btree (a) @@ -10,31 +10,31 @@ SELECT * FROM pg_indexes WHERE schemaname = 'upgrade_basic' ORDER BY tablename; SELECT nextval('pg_dist_shardid_seq') = MAX(shardid)+1 FROM pg_dist_shard; ?column? ----------- +--------------------------------------------------------------------- t (1 row) SELECT nextval('pg_dist_placement_placementid_seq') = MAX(placementid)+1 FROM pg_dist_placement; ?column? ----------- +--------------------------------------------------------------------- t (1 row) SELECT nextval('pg_dist_groupid_seq') = MAX(groupid)+1 FROM pg_dist_node; ?column? ----------- +--------------------------------------------------------------------- t (1 row) SELECT nextval('pg_dist_node_nodeid_seq') = MAX(nodeid)+1 FROM pg_dist_node; ?column? ----------- +--------------------------------------------------------------------- t (1 row) SELECT nextval('pg_dist_colocationid_seq') = MAX(colocationid)+1 FROM pg_dist_colocation; ?column? 
----------- +--------------------------------------------------------------------- t (1 row) @@ -51,7 +51,7 @@ SELECT sequence_name FROM information_schema.sequences 'pg_dist_colocationid_seq' ); sequence_name ---------------- +--------------------------------------------------------------------- (0 rows) SELECT logicalrelid FROM pg_dist_partition @@ -62,7 +62,7 @@ SELECT logicalrelid FROM pg_dist_partition AND relnamespace='upgrade_basic'::regnamespace ORDER BY logicalrelid; logicalrelid --------------- +--------------------------------------------------------------------- t tp t_ab @@ -80,7 +80,7 @@ SELECT tgrelid::regclass, tgfoid::regproc, tgisinternal, tgenabled, tgtype::int4 AND tgname LIKE 'truncate_trigger_%' ORDER BY tgrelid::regclass; tgrelid | tgfoid | tgisinternal | tgenabled | tgtype -----------+------------------------+--------------+-----------+---------- +--------------------------------------------------------------------- t | citus_truncate_trigger | t | O | 00100000 tp | citus_truncate_trigger | t | O | 00100000 t_ab | citus_truncate_trigger | t | O | 00100000 @@ -91,7 +91,7 @@ SELECT tgrelid::regclass, tgfoid::regproc, tgisinternal, tgenabled, tgtype::int4 SELECT * FROM t ORDER BY a; a ---- +--------------------------------------------------------------------- 1 2 3 @@ -101,14 +101,14 @@ SELECT * FROM t ORDER BY a; SELECT * FROM t WHERE a = 1; a ---- +--------------------------------------------------------------------- 1 (1 row) INSERT INTO t SELECT * FROM generate_series(10, 15); EXPLAIN (COSTS FALSE) SELECT * from t; QUERY PLAN ---------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 32 Tasks Shown: One of 32 @@ -119,7 +119,7 @@ EXPLAIN (COSTS FALSE) SELECT * from t; EXPLAIN (COSTS FALSE) SELECT * from t WHERE a = 1; QUERY PLAN ---------------------------------------------------------- +--------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All @@ -133,13 +133,13 @@ EXPLAIN (COSTS FALSE) SELECT * from t WHERE a = 1; SELECT * FROM t WHERE a = 10; a ----- +--------------------------------------------------------------------- 10 (1 row) SELECT * FROM t WHERE a = 11; a ----- +--------------------------------------------------------------------- 11 (1 row) @@ -147,7 +147,7 @@ COPY t FROM PROGRAM 'echo 20 && echo 21 && echo 22 && echo 23 && echo 24' WITH C ALTER TABLE t ADD COLUMN b int DEFAULT 10; SELECT * FROM t ORDER BY a; a | b -----+---- +--------------------------------------------------------------------- 1 | 10 2 | 10 3 | 10 @@ -169,7 +169,7 @@ SELECT * FROM t ORDER BY a; TRUNCATE TABLE t; SELECT * FROM T; a | b ----+--- +--------------------------------------------------------------------- (0 rows) DROP TABLE t; @@ -177,7 +177,7 @@ DROP TABLE t; -- verify that the table whose column is dropped before a pg_upgrade still works as expected. 
SELECT * FROM t_ab ORDER BY b; b ----- +--------------------------------------------------------------------- 11 22 33 @@ -185,13 +185,13 @@ SELECT * FROM t_ab ORDER BY b; SELECT * FROM t_ab WHERE b = 11; b ----- +--------------------------------------------------------------------- 11 (1 row) SELECT * FROM t_ab WHERE b = 22; b ----- +--------------------------------------------------------------------- 22 (1 row) @@ -199,7 +199,7 @@ SELECT * FROM t_ab WHERE b = 22; -- before the upgrade SELECT * FROM t2 ORDER BY a; a | b ----+---- +--------------------------------------------------------------------- 1 | 11 2 | 22 3 | 33 @@ -208,13 +208,13 @@ SELECT * FROM t2 ORDER BY a; SELECT create_distributed_table('t2', 'a'); NOTICE: Copying data from local table... create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT * FROM t2 ORDER BY a; a | b ----+---- +--------------------------------------------------------------------- 1 | 11 2 | 22 3 | 33 @@ -225,7 +225,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT * FROM r ORDER BY a; a ---- +--------------------------------------------------------------------- 1 2 3 @@ -235,7 +235,7 @@ SELECT * FROM r ORDER BY a; SELECT * FROM tr ORDER BY pk; pk | a -----+--- +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -246,7 +246,7 @@ SELECT * FROM tr ORDER BY pk; DELETE FROM r where a = 1; SELECT * FROM r ORDER BY a; a ---- +--------------------------------------------------------------------- 2 3 4 @@ -255,7 +255,7 @@ SELECT * FROM r ORDER BY a; SELECT * FROM tr ORDER BY pk; pk | a -----+--- +--------------------------------------------------------------------- 2 | 2 3 | 3 4 | 4 @@ -265,7 +265,7 @@ SELECT * FROM tr ORDER BY pk; UPDATE r SET a = 30 WHERE a = 3; SELECT * FROM r ORDER BY a; a ----- +--------------------------------------------------------------------- 2 4 5 @@ -274,7 +274,7 @@ SELECT * FROM r ORDER BY a; SELECT * FROM tr ORDER BY pk; pk | a -----+---- +--------------------------------------------------------------------- 2 | 2 3 | 30 4 | 4 @@ -285,7 +285,7 @@ SELECT * FROM tr ORDER BY pk; CREATE TABLE t3(a int, b int); SELECT create_distributed_table('t3', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -294,7 +294,7 @@ INSERT INTO t3 VALUES (2, 22); INSERT INTO t3 VALUES (3, 33); SELECT * FROM t3 ORDER BY a; a | b ----+---- +--------------------------------------------------------------------- 1 | 11 2 | 22 3 | 33 @@ -304,14 +304,14 @@ SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE logicalrelid = 't_append'::regclass ORDER BY shardminvalue, shardmaxvalue; shardminvalue | shardmaxvalue ----------------+--------------- +--------------------------------------------------------------------- 1 | 3 5 | 7 (2 rows) SELECT * FROM t_append ORDER BY id; id | value_1 -----+--------- +--------------------------------------------------------------------- 1 | 2 2 | 3 3 | 4 @@ -325,7 +325,7 @@ SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE logicalrelid = 't_append'::regclass ORDER BY shardminvalue, shardmaxvalue; shardminvalue | shardmaxvalue ----------------+--------------- +--------------------------------------------------------------------- 1 | 3 5 | 7 9 | 11 @@ -333,7 +333,7 @@ SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard SELECT * FROM t_append ORDER BY id; id | value_1 
-----+--------- +--------------------------------------------------------------------- 1 | 2 2 | 3 3 | 4 diff --git a/src/test/regress/expected/upgrade_basic_before.out b/src/test/regress/expected/upgrade_basic_before.out index ef63212cf..5a81db535 100644 --- a/src/test/regress/expected/upgrade_basic_before.out +++ b/src/test/regress/expected/upgrade_basic_before.out @@ -4,7 +4,7 @@ CREATE TABLE t(a int); CREATE INDEX ON t USING HASH (a); SELECT create_distributed_table('t', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -12,7 +12,7 @@ INSERT INTO t SELECT * FROM generate_series(1, 5); CREATE TABLE tp(a int PRIMARY KEY); SELECT create_distributed_table('tp', 'a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -23,7 +23,7 @@ INSERT INTO tp SELECT * FROM generate_series(1, 5); CREATE TABLE t_ab(a int, b int); SELECT create_distributed_table('t_ab', 'b'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -39,7 +39,7 @@ ALTER TABLE t_ab DROP a; CREATE TABLE r(a int PRIMARY KEY); SELECT create_reference_table('r'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -47,7 +47,7 @@ INSERT INTO r SELECT * FROM generate_series(1, 5); CREATE TABLE tr(pk int, a int REFERENCES r(a) ON DELETE CASCADE ON UPDATE CASCADE); SELECT create_distributed_table('tr', 'pk'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -55,7 +55,7 @@ INSERT INTO tr SELECT c, c FROM generate_series(1, 5) as c; CREATE TABLE t_append(id int, value_1 int); SELECT master_create_distributed_table('t_append', 'id', 'append'); master_create_distributed_table ---------------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/upgrade_distributed_function_after.out b/src/test/regress/expected/upgrade_distributed_function_after.out index 21d8456d3..b1515de0a 100644 --- a/src/test/regress/expected/upgrade_distributed_function_after.out +++ b/src/test/regress/expected/upgrade_distributed_function_after.out @@ -2,7 +2,7 @@ SET search_path TO upgrade_distributed_function_before, public; -- make sure that the metadata synced SELECT bool_and(metadatasynced) FROM pg_dist_node WHERE isactive AND noderole = 'primary'; bool_and - ---------- +--------------------------------------------------------------------- t (1 row) @@ -12,14 +12,14 @@ SET client_min_messages TO DEBUG1; SELECT count_values(11); DEBUG: pushing down the function call count_values - --------------------------------------------------------------------- +--------------------------------------------------------------------- 1 (1 row) SELECT count_values(12); DEBUG: pushing down the function call count_values - --------------------------------------------------------------------- +--------------------------------------------------------------------- 1 (1 row) diff --git a/src/test/regress/expected/upgrade_distributed_function_before.out b/src/test/regress/expected/upgrade_distributed_function_before.out index e82f86ba7..a5639468a 100644 --- a/src/test/regress/expected/upgrade_distributed_function_before.out +++ 
b/src/test/regress/expected/upgrade_distributed_function_before.out @@ -5,7 +5,7 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE t1 (a int PRIMARY KEY, b int); SELECT create_distributed_table('t1','a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -22,20 +22,20 @@ $$ $$ LANGUAGE plpgsql; SELECT create_distributed_function('count_values(int)', '$1', colocate_with:='t1'); create_distributed_function ------------------------------ +--------------------------------------------------------------------- (1 row) -- make sure that the metadata synced before running the queries SELECT wait_until_metadata_sync(); wait_until_metadata_sync --------------------------- +--------------------------------------------------------------------- (1 row) SELECT bool_and(metadatasynced) FROM pg_dist_node WHERE isactive AND noderole = 'primary'; bool_and ----------- +--------------------------------------------------------------------- t (1 row) @@ -43,14 +43,14 @@ SET client_min_messages TO DEBUG1; SELECT count_values(11); DEBUG: pushing down the function call count_values --------------- +--------------------------------------------------------------------- 1 (1 row) SELECT count_values(12); DEBUG: pushing down the function call count_values --------------- +--------------------------------------------------------------------- 1 (1 row) diff --git a/src/test/regress/expected/upgrade_rebalance_strategy_after.out b/src/test/regress/expected/upgrade_rebalance_strategy_after.out index 4bfe9ed4c..51795030d 100644 --- a/src/test/regress/expected/upgrade_rebalance_strategy_after.out +++ b/src/test/regress/expected/upgrade_rebalance_strategy_after.out @@ -1,6 +1,6 @@ SELECT * FROM pg_catalog.pg_dist_rebalance_strategy ORDER BY name; name | default_strategy | shard_cost_function | node_capacity_function | shard_allowed_on_node_function | default_threshold | minimum_threshold ------------------+------------------+-----------------------------------------+---------------------------------------------------+------------------------------------------+-------------------+------------------- +--------------------------------------------------------------------- by_disk_size | f | citus_shard_cost_by_disk_size | citus_node_capacity_1 | citus_shard_allowed_on_node_true | 0.1 | 0.01 by_shard_count | f | citus_shard_cost_1 | citus_node_capacity_1 | citus_shard_allowed_on_node_true | 0 | 0 custom_strategy | t | upgrade_rebalance_strategy.shard_cost_2 | upgrade_rebalance_strategy.capacity_high_worker_1 | upgrade_rebalance_strategy.only_worker_2 | 0.5 | 0.2 diff --git a/src/test/regress/expected/upgrade_rebalance_strategy_before.out b/src/test/regress/expected/upgrade_rebalance_strategy_before.out index 327d05ec2..65808f9e6 100644 --- a/src/test/regress/expected/upgrade_rebalance_strategy_before.out +++ b/src/test/regress/expected/upgrade_rebalance_strategy_before.out @@ -25,13 +25,13 @@ SELECT citus_add_rebalance_strategy( 0.2 ); citus_add_rebalance_strategy ------------------------------- +--------------------------------------------------------------------- (1 row) SELECT citus_set_default_rebalance_strategy('custom_strategy'); citus_set_default_rebalance_strategy --------------------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/upgrade_ref2ref_after.out b/src/test/regress/expected/upgrade_ref2ref_after.out index af339e44f..b00a021ef 100644 
--- a/src/test/regress/expected/upgrade_ref2ref_after.out +++ b/src/test/regress/expected/upgrade_ref2ref_after.out @@ -3,7 +3,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT * FROM ref_table_1 ORDER BY id; id | value -----+------- +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -13,7 +13,7 @@ SELECT * FROM ref_table_1 ORDER BY id; SELECT * FROM ref_table_2 ORDER BY id; id | value -----+------- +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -23,7 +23,7 @@ SELECT * FROM ref_table_2 ORDER BY id; SELECT * FROM ref_table_3 ORDER BY id; id | value -----+------- +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -33,7 +33,7 @@ SELECT * FROM ref_table_3 ORDER BY id; SELECT * FROM dist_table ORDER BY id; id | value -----+------- +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -44,7 +44,7 @@ SELECT * FROM dist_table ORDER BY id; UPDATE ref_table_1 SET id = 10 where id = 1; SELECT * FROM ref_table_1 ORDER BY id; id | value -----+------- +--------------------------------------------------------------------- 2 | 2 3 | 3 4 | 4 @@ -54,7 +54,7 @@ SELECT * FROM ref_table_1 ORDER BY id; SELECT * FROM ref_table_2 ORDER BY id; id | value -----+------- +--------------------------------------------------------------------- 1 | 10 2 | 2 3 | 3 @@ -64,7 +64,7 @@ SELECT * FROM ref_table_2 ORDER BY id; SELECT * FROM ref_table_3 ORDER BY id; id | value -----+------- +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -74,7 +74,7 @@ SELECT * FROM ref_table_3 ORDER BY id; SELECT * FROM dist_table ORDER BY id; id | value -----+------- +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -85,7 +85,7 @@ SELECT * FROM dist_table ORDER BY id; DELETE FROM ref_table_1 WHERE id = 4; SELECT * FROM ref_table_1 ORDER BY id; id | value -----+------- +--------------------------------------------------------------------- 2 | 2 3 | 3 5 | 5 @@ -94,7 +94,7 @@ SELECT * FROM ref_table_1 ORDER BY id; SELECT * FROM ref_table_2 ORDER BY id; id | value -----+------- +--------------------------------------------------------------------- 1 | 10 2 | 2 3 | 3 @@ -103,7 +103,7 @@ SELECT * FROM ref_table_2 ORDER BY id; SELECT * FROM ref_table_3 ORDER BY id; id | value -----+------- +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 @@ -112,7 +112,7 @@ SELECT * FROM ref_table_3 ORDER BY id; SELECT * FROM dist_table ORDER BY id; id | value -----+------- +--------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 diff --git a/src/test/regress/expected/upgrade_ref2ref_before.out b/src/test/regress/expected/upgrade_ref2ref_before.out index 46df5f0ec..9b8faf074 100644 --- a/src/test/regress/expected/upgrade_ref2ref_before.out +++ b/src/test/regress/expected/upgrade_ref2ref_before.out @@ -3,28 +3,28 @@ SET search_path TO upgrade_ref2ref, public; CREATE TABLE ref_table_1(id int PRIMARY KEY, value int); SELECT create_reference_table('ref_table_1'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE ref_table_2(id int PRIMARY KEY, value int REFERENCES ref_table_1(id) ON DELETE CASCADE ON UPDATE CASCADE); SELECT create_reference_table('ref_table_2'); create_reference_table ------------------------- 
+--------------------------------------------------------------------- (1 row) CREATE TABLE ref_table_3(id int PRIMARY KEY, value int REFERENCES ref_table_2(id) ON DELETE CASCADE ON UPDATE CASCADE); SELECT create_reference_table('ref_table_3'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE dist_table(id int PRIMARY KEY, value int REFERENCES ref_table_2(id) ON DELETE CASCADE ON UPDATE CASCADE); SELECT create_distributed_table('dist_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/upgrade_type_after.out b/src/test/regress/expected/upgrade_type_after.out index 2f353522e..6d6eb5496 100644 --- a/src/test/regress/expected/upgrade_type_after.out +++ b/src/test/regress/expected/upgrade_type_after.out @@ -5,7 +5,7 @@ SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; INSERT INTO tt VALUES (1, (2,3)::type1); SELECT * FROM tt; a | b ----+------- +--------------------------------------------------------------------- 1 | (2,3) 2 | (3,4) (2 rows) diff --git a/src/test/regress/expected/upgrade_type_before.out b/src/test/regress/expected/upgrade_type_before.out index b13bfa4b3..50bf00ddd 100644 --- a/src/test/regress/expected/upgrade_type_before.out +++ b/src/test/regress/expected/upgrade_type_before.out @@ -4,7 +4,7 @@ CREATE TYPE type1 AS (a int, b int); CREATE TABLE tt (a int PRIMARY KEY, b type1); SELECT create_distributed_table('tt','a'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/validate_constraint.out b/src/test/regress/expected/validate_constraint.out index a8e1e19dc..d52341f3f 100644 --- a/src/test/regress/expected/validate_constraint.out +++ b/src/test/regress/expected/validate_constraint.out @@ -56,21 +56,21 @@ CREATE VIEW constraint_validations AS CREATE TABLE referenced_table (id int UNIQUE, test_column int); SELECT create_reference_table('referenced_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE referencing_table (id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE constrained_table (id int, constrained_column int); SELECT create_distributed_table('constrained_table', 'constrained_column'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -112,7 +112,7 @@ SELECT * FROM constraint_validations ORDER BY 1, 2; Constraint | Validated? 
-------------------------+------------ +--------------------------------------------------------------------- validatable_constraint | t (1 row) @@ -120,7 +120,7 @@ SELECT * FROM constraint_validations_in_workers ORDER BY 1, 2; name | validated ---------------------------------+----------- +--------------------------------------------------------------------- validatable_constraint_8000009 | t validatable_constraint_8000010 | t validatable_constraint_8000011 | t diff --git a/src/test/regress/expected/window_functions.out b/src/test/regress/expected/window_functions.out index cef8ea196..3f838a4cc 100644 --- a/src/test/regress/expected/window_functions.out +++ b/src/test/regress/expected/window_functions.out @@ -12,7 +12,7 @@ ORDER BY 1 DESC, 2 DESC, 3 DESC LIMIT 5; user_id | count | rank ----------+-------+------ +--------------------------------------------------------------------- 6 | 10 | 1 6 | 10 | 1 6 | 10 | 1 @@ -31,7 +31,7 @@ GROUP BY ORDER BY 2 DESC NULLS LAST, 1 DESC; user_id | avg ----------+------------------ +--------------------------------------------------------------------- 2 | 3 4 | 2.82608695652174 3 | 2.70588235294118 @@ -58,7 +58,7 @@ GROUP BY ORDER BY 2 DESC, 1; user_id | max ----------+----- +--------------------------------------------------------------------- 1 | 5 3 | 5 3 | 5 @@ -111,7 +111,7 @@ ORDER BY 2 LIMIT 5; user_id | sum ----------+----- +--------------------------------------------------------------------- 1 | 13 1 | 13 1 | 13 @@ -135,7 +135,7 @@ ORDER BY 3 DESC, 2 DESC, 1 DESC LIMIT 5; user_id | value_1 | sum ----------+---------+----- +--------------------------------------------------------------------- 5 | 5 | 15 4 | 5 | 15 3 | 5 | 15 @@ -160,7 +160,7 @@ ORDER BY 2 DESC, 1 LIMIT 10; user_id | rank ----------+------ +--------------------------------------------------------------------- 5 | 6 2 | 5 4 | 5 @@ -186,7 +186,7 @@ HAVING count(*) > 4 ORDER BY 2 DESC, 1; user_id | rank ----------+------ +--------------------------------------------------------------------- 4 | 2 5 | 2 2 | 1 @@ -209,7 +209,7 @@ ORDER BY rnk DESC, 1 DESC LIMIT 10; user_id | rnk ----------+----- +--------------------------------------------------------------------- 3 | 121 5 | 118 2 | 116 @@ -235,7 +235,7 @@ ORDER BY rnk DESC, 1 DESC LIMIT 10; user_id | rnk ----------+----- +--------------------------------------------------------------------- 2 | 24 2 | 23 2 | 22 @@ -275,7 +275,7 @@ WINDOW ORDER BY 3 DESC, 2 DESC, 1 DESC; user_id | rnk | avg_val_2 ----------+-----+-------------------- +--------------------------------------------------------------------- 1 | 1 | 3.3750000000000000 3 | 2 | 3.1666666666666667 5 | 1 | 2.6666666666666667 @@ -314,7 +314,7 @@ ORDER BY cnt_with_filter_2 DESC NULLS LAST, filtered_count DESC NULLS LAST, datee DESC NULLS LAST, rnnk DESC, cnt2 DESC, cnt1 DESC, user_id DESC LIMIT 5; count | cnt1 | cnt2 | datee | rnnk | filtered_count | cnt_with_filter_2 --------+------+------+--------------------------+------+------------------------+------------------- +--------------------------------------------------------------------- 23 | 1 | 7 | Thu Nov 23 02:14:00 2017 | 6 | 0.00000000000000000000 | 72.7272727272727 10 | 1 | 3 | Wed Nov 22 23:01:00 2017 | 1 | 1.00000000000000000000 | 57.1428571428571 17 | 1 | 5 | Wed Nov 22 23:24:00 2017 | 8 | 3.0000000000000000 | 28.5714285714286 @@ -342,7 +342,7 @@ ORDER BY my_rank DESC, user_id DESC; user_id | my_rank | avg | mx_time ----------+---------+------------------------+--------------------------------- 
+--------------------------------------------------------------------- 6 | 1 | 3.0000000000000000 | Thu Nov 23 14:00:13.20013 2017 6 | 2 | 3.0000000000000000 | Thu Nov 23 11:16:13.106691 2017 6 | 1 | 3.0000000000000000 | Thu Nov 23 07:27:32.822068 2017 @@ -395,7 +395,7 @@ GROUP BY ORDER BY 4 DESC,3 DESC,2 DESC ,1 DESC; user_id | rank | dense_rank | cume_dist | percent_rank ----------+------+------------+-----------+-------------- +--------------------------------------------------------------------- 6 | 1 | 1 | 1 | 0 5 | 1 | 1 | 1 | 0 4 | 1 | 1 | 1 | 0 @@ -417,7 +417,7 @@ WHERE ORDER BY user_id, value_1, 3, 4; user_id | value_1 | array_agg | array_agg ----------+---------+-------------------------------------------------------+----------------------------------------------------- +--------------------------------------------------------------------- 3 | 0 | {0} | 3 | 1 | {0,1,1,1,1,1,1} | {0,1,1,1,1,1} 3 | 1 | {0,1,1,1,1,1,1} | {0,1,1,1,1,1} @@ -502,7 +502,7 @@ WINDOW ORDER BY user_id, value_1, 3, 4; user_id | value_1 | array_agg | array_agg ----------+---------+---------------------------------------+------------------------------------- +--------------------------------------------------------------------- 3 | 0 | {0,1,1,1,1,1,1} | {1,1,1,1,1,1} 3 | 1 | {0,1,1,1,1,1,1,2,2} | {0,1,1,1,1,1,2,2} 3 | 1 | {0,1,1,1,1,1,1,2,2} | {0,1,1,1,1,1,2,2} @@ -587,7 +587,7 @@ WINDOW ORDER BY user_id, value_1, 3, 4; user_id | value_1 | array_agg | array_agg ----------+---------+-----------+----------- +--------------------------------------------------------------------- 3 | 0 | {0,1} | {1} 3 | 1 | {0,1,1} | {0,1} 3 | 1 | {1,1,1} | {1,1} @@ -670,7 +670,7 @@ ORDER BY LIMIT 5; user_id | sum | event_type ----------+-----+------------ +--------------------------------------------------------------------- 4 | 4 | 4 3 | 4 | 4 2 | 4 | 4 @@ -691,7 +691,7 @@ ORDER BY LIMIT 10; user_id | sum ----------+----- +--------------------------------------------------------------------- 5 | 3 4 | 2 (2 rows) @@ -708,7 +708,7 @@ ORDER BY LIMIT 10; user_id | sum ----------+----- +--------------------------------------------------------------------- 4 | 2 5 | 3 (2 rows) @@ -725,7 +725,7 @@ ORDER BY LIMIT 10; user_id | sum ----------+----- +--------------------------------------------------------------------- 5 | 3 4 | 2 (2 rows) @@ -742,7 +742,7 @@ GROUP BY ORDER BY 3 DESC, 2 DESC, 1 DESC; user_id | avg | avg ----------+--------------------+------------------------ +--------------------------------------------------------------------- 6 | 2.1000000000000000 | 6.0000000000000000 5 | 2.6538461538461538 | 5.0000000000000000 4 | 2.7391304347826087 | 4.0000000000000000 @@ -765,7 +765,7 @@ ORDER BY 3 DESC, 2 DESC, 1 DESC; $Q$); coordinator_plan ------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Sort Sort Key: remote_scan.avg_1 DESC, remote_scan.avg DESC, remote_scan.user_id DESC -> HashAggregate @@ -785,7 +785,7 @@ GROUP BY ORDER BY user_id, value_2; user_id | ?column? | ?column? ----------+----------+-------------------- +--------------------------------------------------------------------- 1 | 5 | 3.2500000000000000 1 | 4 | 3.2500000000000000 1 | 6 | 3.2500000000000000 @@ -832,7 +832,7 @@ ORDER BY 2 DESC, 1 LIMIT 5; user_id | ?column? | ?column? 
----------+----------+-------------------- +--------------------------------------------------------------------- 4 | 28 | 3.5000000000000000 5 | 24 | 3.5000000000000000 2 | 17 | 3.5000000000000000 @@ -850,7 +850,7 @@ FROM GROUP BY user_id, value_2 ORDER BY user_id, value_2 DESC; user_id | avg | rank ----------+------------------------+------ +--------------------------------------------------------------------- 1 | 3.6666666666666667 | 4 1 | 2.5000000000000000 | 3 1 | 3.0000000000000000 | 2 @@ -895,7 +895,7 @@ FROM GROUP BY user_id, value_2 ORDER BY user_id, avg(value_1) DESC; user_id | avg | rank ----------+------------------------+------ +--------------------------------------------------------------------- 1 | 4.0000000000000000 | 1 1 | 3.6666666666666667 | 2 1 | 3.0000000000000000 | 3 @@ -940,7 +940,7 @@ FROM GROUP BY user_id, value_2 ORDER BY user_id, avg(value_1) DESC; QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- Sort Sort Key: remote_scan.user_id, ((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1))) DESC -> HashAggregate @@ -968,7 +968,7 @@ FROM GROUP BY user_id, value_2 ORDER BY user_id, avg(value_1) DESC; user_id | avg | rank ----------+------------------------+------ +--------------------------------------------------------------------- 1 | 4.0000000000000000 | 1 1 | 3.6666666666666667 | 2 1 | 3.0000000000000000 | 3 @@ -1015,7 +1015,7 @@ GROUP BY user_id, value_2 ORDER BY user_id, avg(value_1) DESC LIMIT 5; QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.user_id, ((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1))) DESC @@ -1045,7 +1045,7 @@ GROUP BY user_id, value_2 ORDER BY user_id, avg(value_1) DESC LIMIT 5; QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------ +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.user_id, ((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1))) DESC @@ -1075,7 +1075,7 @@ GROUP BY user_id, value_2 ORDER BY user_id, avg(value_1) DESC LIMIT 5; QUERY PLAN ---------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.user_id, ((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1))) DESC @@ -1105,7 +1105,7 @@ GROUP BY user_id, value_2 ORDER BY user_id, avg(value_1) DESC LIMIT 5; QUERY PLAN ---------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan.user_id, ((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1))) DESC diff --git a/src/test/regress/expected/with_basics.out b/src/test/regress/expected/with_basics.out index 30bf7a169..c4f85b577 100644 --- a/src/test/regress/expected/with_basics.out +++ b/src/test/regress/expected/with_basics.out @@ -7,7 +7,7 @@ WITH cte AS ( ) SELECT * FROM cte; user_id | value_2 ----------+--------- 
+--------------------------------------------------------------------- 1 | 0 1 | 2 1 | 3 @@ -30,7 +30,7 @@ ORDER BY LIMIT 5; value_2 ---------- +--------------------------------------------------------------------- 0 0 0 @@ -50,7 +50,7 @@ WITH cte_1 AS ( ) SELECT value_2 FROM users_table WHERE user_id IN (SELECT user_id FROM cte_1) ORDER BY value_2 LIMIT 1; value_2 ---------- +--------------------------------------------------------------------- 0 (1 row) @@ -72,7 +72,7 @@ ORDER BY LIMIT 5; max | value_2 ------+--------- +--------------------------------------------------------------------- 5 | 5 6 | 5 6 | 5 @@ -88,7 +88,7 @@ SELECT user_id FROM ( SELECT user_id FROM cte WHERE value_2 > 0 ) a ORDER BY 1 LIMIT 3; user_id ---------- +--------------------------------------------------------------------- 2 2 2 @@ -116,7 +116,7 @@ WITH cte AS ( ) SELECT (SELECT * FROM cte ORDER BY 1 LIMIT 1); user_id ---------- +--------------------------------------------------------------------- 1 (1 row) @@ -135,7 +135,7 @@ ORDER BY 1, 2 LIMIT 5; user_id | count ----------+------- +--------------------------------------------------------------------- 1 | 7 2 | 18 3 | 17 @@ -161,7 +161,7 @@ ORDER BY 1, 2 LIMIT 5; twice | min --------+----- +--------------------------------------------------------------------- 6 | 3 8 | 4 10 | 5 @@ -183,7 +183,7 @@ ORDER BY LIMIT 1; user_id ---------- +--------------------------------------------------------------------- 2 (1 row) @@ -202,7 +202,7 @@ ORDER BY LIMIT 1; user_id ---------- +--------------------------------------------------------------------- 2 (1 row) @@ -217,7 +217,7 @@ FROM JOIN users_table USING (user_id); user_id ---------- +--------------------------------------------------------------------- 6 6 6 @@ -236,7 +236,7 @@ WITH top_ten(id, val1) AS ( ) SELECT * FROM top_ten; id | val1 -----+------ +--------------------------------------------------------------------- 6 | 5 6 | 5 5 | 5 @@ -263,7 +263,7 @@ WITH top_ten(id) AS ( ) SELECT * FROM top_ten ORDER BY value_1 DESC; id | value_1 -----+--------- +--------------------------------------------------------------------- 6 | 5 6 | 5 5 | 5 @@ -282,7 +282,7 @@ WITH top_ten(id, val, val_mul, val_sum) AS ( ) SELECT * FROM top_ten ORDER BY id DESC, val_mul DESC, (val_sum + 1) DESC; id | val | val_mul | val_sum -----+-----+---------+--------- +--------------------------------------------------------------------- 6 | 5 | 10 | 7 6 | 5 | 10 | 5 5 | 5 | 10 | 10 @@ -301,7 +301,7 @@ WITH top_ten(id, val, val_mul, val_sum) AS ( ) SELECT id, val, id * val, val_sum * 2, val_sum + val_sum FROM top_ten ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC; id | val | ?column? | ?column? | ?column? 
-----+-----+----------+----------+---------- +--------------------------------------------------------------------- 6 | 5 | 30 | 14 | 14 6 | 5 | 30 | 10 | 10 5 | 5 | 25 | 20 | 20 @@ -322,7 +322,7 @@ SELECT id, count(*), avg(val), max(val_mul), min(val_sum) FROM top_ten GROUP BY id ORDER BY 2 DESC, 1 DESC; id | count | avg | max | min -----+-------+--------------------+-----+----- +--------------------------------------------------------------------- 5 | 26 | 2.6538461538461538 | 10 | 2 4 | 23 | 2.7391304347826087 | 10 | 0 2 | 18 | 2.3333333333333333 | 8 | 1 @@ -346,7 +346,7 @@ ORDER BY LIMIT 5; user_id ---------- +--------------------------------------------------------------------- 6 6 6 @@ -369,7 +369,7 @@ ORDER BY LIMIT 5; user_id ---------- +--------------------------------------------------------------------- 6 6 6 @@ -392,7 +392,7 @@ ORDER BY LIMIT 5; user_id ---------- +--------------------------------------------------------------------- 6 6 6 @@ -415,7 +415,7 @@ ORDER BY LIMIT 5; user_id ---------- +--------------------------------------------------------------------- 6 6 6 @@ -441,7 +441,7 @@ ORDER BY LIMIT 5; user_id ---------- +--------------------------------------------------------------------- 6 6 6 @@ -485,7 +485,7 @@ ORDER BY LIMIT 5; user_id ---------- +--------------------------------------------------------------------- 1 1 1 @@ -523,7 +523,7 @@ ORDER BY LIMIT 5; user_id ---------- +--------------------------------------------------------------------- 1 1 1 @@ -548,7 +548,7 @@ ORDER BY LIMIT 5; user_id | sum ----------+----- +--------------------------------------------------------------------- 3 | 651 2 | 552 4 | 544 @@ -568,7 +568,7 @@ ORDER BY LIMIT 10; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Wed Nov 22 18:49:42.327403 2017 | 3 | 2 | 1 | 1 | Wed Nov 22 19:03:01.772353 2017 | 4 | 1 | 2 | 1 | Wed Nov 22 19:07:03.846437 2017 | 1 | 2 | 5 | @@ -592,7 +592,7 @@ ORDER BY LIMIT 10; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Wed Nov 22 18:49:42.327403 2017 | 3 | 2 | 1 | 1 | Wed Nov 22 19:03:01.772353 2017 | 4 | 1 | 2 | 1 | Wed Nov 22 19:07:03.846437 2017 | 1 | 2 | 5 | @@ -618,7 +618,7 @@ ORDER BY LIMIT 5; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | @@ -634,7 +634,7 @@ cte_2 AS ( ) SELECT * FROM cte UNION ALL SELECT * FROM cte_2; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | @@ -736,7 +736,7 @@ WITH cte_user AS ( ) SELECT user_id, sum(value_2) FROM cte_user GROUP BY 1 ORDER BY 1, 2; user_id | sum ----------+------ +--------------------------------------------------------------------- 1 | 294 2 | 1026 3 | 782 @@ 
-747,7 +747,7 @@ SELECT user_id, sum(value_2) FROM cte_user GROUP BY 1 ORDER BY 1, 2; SELECT * FROM cte_view ORDER BY 1, 2 LIMIT 5; user_id | value_1 ----------+--------- +--------------------------------------------------------------------- 1 | 5 2 | 4 3 | 5 @@ -761,7 +761,7 @@ WITH cte_user_with_view AS ) SELECT user_id, value_1 FROM cte_user_with_view ORDER BY 1, 2 LIMIT 10 OFFSET 2; user_id | value_1 ----------+--------- +--------------------------------------------------------------------- (0 rows) DROP VIEW basic_view; diff --git a/src/test/regress/expected/with_dml.out b/src/test/regress/expected/with_dml.out index 89543cef2..7fea4642c 100644 --- a/src/test/regress/expected/with_dml.out +++ b/src/test/regress/expected/with_dml.out @@ -3,21 +3,21 @@ SET search_path TO with_dml, public; CREATE TABLE with_dml.distributed_table (tenant_id text PRIMARY KEY, dept int); SELECT create_distributed_table('distributed_table', 'tenant_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE with_dml.second_distributed_table (tenant_id text, dept int); SELECT create_distributed_table('second_distributed_table', 'tenant_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE with_dml.reference_table (id text, name text); SELECT create_reference_table('reference_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/with_executors.out b/src/test/regress/expected/with_executors.out index 5e7955c94..7ce963c2d 100644 --- a/src/test/regress/expected/with_executors.out +++ b/src/test/regress/expected/with_executors.out @@ -16,7 +16,7 @@ WITH cte AS ( ) SELECT count(*) FROM cte; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -34,7 +34,7 @@ WITH cte AS ( ) SELECT * FROM cte ORDER BY 1; user_id ---------- +--------------------------------------------------------------------- 1 2 3 @@ -64,7 +64,7 @@ ORDER BY LIMIT 10; id_1 | id_2 | user_id | time | value_1 | value_2 | value_3 | value_4 -------+------+---------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 3 | 6 | 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 4 | 3 | 6 | 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 | 3 | 6 | 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 | @@ -92,7 +92,7 @@ WITH cte AS ( ) SELECT * FROM cte ORDER BY 2 LIMIT 5; uid | event_type ------+------------ +--------------------------------------------------------------------- 1 | 0 1 | 0 1 | 0 @@ -106,7 +106,7 @@ WITH real_time_cte AS ( ) SELECT * FROM real_time_cte ORDER BY 1, 2, 3, 4, 5, 6 LIMIT 10; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | @@ -136,7 +136,7 @@ WITH cte AS ( ) SELECT * FROM cte WHERE uid=1 ORDER BY 2 LIMIT 5; uid | event_type ------+------------ +--------------------------------------------------------------------- 1 | 0 1 | 0 1 | 0 @@ -187,7 +187,7 @@ ORDER BY 1, 2, 3, 4 LIMIT 10; uid_1 | uid_2 
| value_2 | value_3 --------+-------+---------+--------- +--------------------------------------------------------------------- 1 | 1 | 0 | 0 1 | 1 | 0 | 0 1 | 1 | 0 | 0 @@ -238,7 +238,7 @@ WITH cte AS ( ) SELECT DISTINCT uid_1, time, value_3 FROM cte ORDER BY 1, 2, 3 LIMIT 20; uid_1 | time | value_3 --------+---------------------------------+--------- +--------------------------------------------------------------------- 2 | Wed Nov 22 18:19:49.944985 2017 | 1 (1 row) @@ -280,7 +280,7 @@ WITH cte AS ( ) SELECT DISTINCT uid_1, val_3 FROM cte join events_table on cte.val_3=events_table.event_type ORDER BY 1, 2; uid_1 | val_3 --------+------- +--------------------------------------------------------------------- 2 | 1 (1 row) @@ -313,7 +313,7 @@ FROM WHERE cte.count=user_id and user_id=5; row_number | count -------------+------- +--------------------------------------------------------------------- 1 | 0 (1 row) @@ -335,7 +335,7 @@ WITH cte AS ( ) SELECT count(*) FROM cte, users_table where cte.count=user_id; count -------- +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/with_join.out b/src/test/regress/expected/with_join.out index 3e7f6ff69..105dd926b 100644 --- a/src/test/regress/expected/with_join.out +++ b/src/test/regress/expected/with_join.out @@ -4,7 +4,7 @@ SET citus.next_shard_id TO 1501000; CREATE TABLE with_join.reference_table(user_id int); SELECT create_reference_table('with_join.reference_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -37,7 +37,7 @@ GROUP BY ORDER BY 2 DESC, 1; user_id | count ----------+------- +--------------------------------------------------------------------- 3 | 30168 4 | 27768 2 | 25327 @@ -73,7 +73,7 @@ GROUP BY ORDER BY 2 DESC, 1; user_id | count ----------+------- +--------------------------------------------------------------------- 2 | 67507 4 | 23040 3 | 14580 @@ -143,7 +143,7 @@ ORDER BY LIMIT 5; uid ------ +--------------------------------------------------------------------- 6 5 4 @@ -182,7 +182,7 @@ ORDER BY LIMIT 5; user_id | time | event_type ----------+---------------------------------+------------ +--------------------------------------------------------------------- 1 | Wed Nov 22 22:51:43.132261 2017 | 0 1 | Wed Nov 22 22:51:43.132261 2017 | 0 1 | Wed Nov 22 22:51:43.132261 2017 | 1 @@ -205,7 +205,7 @@ ORDER BY LIMIT 5; user_id | time | event_type ----------+---------------------------------+------------ +--------------------------------------------------------------------- 1 | Thu Nov 23 09:26:42.145043 2017 | 0 1 | Thu Nov 23 09:26:42.145043 2017 | 0 1 | Thu Nov 23 09:26:42.145043 2017 | 1 @@ -260,7 +260,7 @@ ORDER BY LIMIT 5; row_number | user_id -------------+--------- +--------------------------------------------------------------------- 2 | 6 1 | (2 rows) @@ -273,25 +273,25 @@ CREATE TABLE reference_1 (col1 int, col2 int); CREATE TABLE reference_2(col1 int, col2 int); SELECT create_distributed_table('distributed_1','distrib_col'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_distributed_table('distributed_2','distrib_col'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT create_reference_table('reference_1'); create_reference_table ------------------------- 
+--------------------------------------------------------------------- (1 row) SELECT create_reference_table('reference_2'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -304,7 +304,7 @@ LEFT JOIN reference_1 AS r1 ON d1.col2=r1.col2 LEFT JOIN reference_2 AS r2 ON r2.col1 = r1.col1 join (select distrib_col,count(*) from distributed_2 group by distrib_col) d2 ON d2.distrib_col=d1.distrib_col; count -------- +--------------------------------------------------------------------- 1734 (1 row) @@ -314,7 +314,7 @@ LEFT JOIN reference_1 AS r1 ON d1.col2=r1.col2 LEFT JOIN reference_2 AS r2 ON r2.col1 = r1.col1 join d2 ON d2.distrib_col=d1.distrib_col; count -------- +--------------------------------------------------------------------- 1734 (1 row) @@ -324,7 +324,7 @@ LEFT JOIN reference_1 AS r1 ON d1.col2=r1.col2 LEFT JOIN reference_2 AS r2 ON r2.col1 = r1.col1 join d2 ON d2.distrib_col=d1.distrib_col; count -------- +--------------------------------------------------------------------- 87584 (1 row) @@ -334,7 +334,7 @@ LEFT JOIN reference_1 AS r1 ON d1.col2=r1.col2 LEFT JOIN reference_2 AS r2 ON r2.col1 = r1.col1 join cte_1 ON cte_1.col1=d1.distrib_col; count -------- +--------------------------------------------------------------------- 86181 (1 row) diff --git a/src/test/regress/expected/with_modifying.out b/src/test/regress/expected/with_modifying.out index 41306b7d7..8eff622b1 100644 --- a/src/test/regress/expected/with_modifying.out +++ b/src/test/regress/expected/with_modifying.out @@ -5,14 +5,14 @@ SET search_path TO with_modifying, public; CREATE TABLE with_modifying.modify_table (id int, val int); SELECT create_distributed_table('modify_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE with_modifying.users_table (LIKE public.users_table INCLUDING ALL); SELECT create_distributed_table('with_modifying.users_table', 'user_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -20,14 +20,14 @@ INSERT INTO with_modifying.users_table SELECT * FROM public.users_table; CREATE TABLE with_modifying.summary_table (id int, counter int); SELECT create_distributed_table('summary_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE with_modifying.anchor_table (id int); SELECT create_reference_table('anchor_table'); create_reference_table ------------------------- +--------------------------------------------------------------------- (1 row) @@ -42,7 +42,7 @@ FROM ORDER BY user_id; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | | | | | 2 | | | | | 3 | | | | | @@ -61,7 +61,7 @@ ORDER BY time LIMIT 10; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 41 | 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 41 | 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 41 | @@ -86,7 +86,7 @@ ORDER BY time LIMIT 10; user_id | time | value_1 | value_2 | value_3 | value_4 
----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 42 | 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 42 | 2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 42 | @@ -112,7 +112,7 @@ ORDER BY time LIMIT 10; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 6 | Wed Nov 22 20:15:53.317797 2017 | 1 | 1 | 42 | 6 | Wed Nov 22 23:01:24.82289 2017 | 2 | 4 | 1 | 6 | Thu Nov 23 00:07:11.068353 2017 | 1 | 1 | 42 | @@ -138,7 +138,7 @@ ORDER BY time LIMIT 10; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 41 | 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 41 | 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 41 | @@ -163,7 +163,7 @@ ORDER BY time LIMIT 10; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 42 | 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 | 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 | @@ -233,7 +233,7 @@ WITH raw_data AS ( INSERT INTO summary_table SELECT id, COUNT(*) AS counter FROM raw_data GROUP BY id; SELECT * FROM summary_table ORDER BY id; id | counter -----+--------- +--------------------------------------------------------------------- 2 | 20 3 | 38 4 | 24 @@ -242,7 +242,7 @@ SELECT * FROM summary_table ORDER BY id; SELECT COUNT(*) FROM modify_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -253,7 +253,7 @@ WITH raw_data AS ( INSERT INTO summary_table SELECT id, COUNT(*) AS counter FROM raw_data GROUP BY id; SELECT * FROM summary_table ORDER BY id, counter; id | counter -----+--------- +--------------------------------------------------------------------- 1 | 1 2 | 1 2 | 20 @@ -265,7 +265,7 @@ SELECT * FROM summary_table ORDER BY id, counter; SELECT COUNT(*) FROM modify_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -274,7 +274,7 @@ WITH insert_reference AS ( ) SELECT id FROM insert_reference ORDER BY id; id ----- +--------------------------------------------------------------------- 1 2 (2 rows) @@ -299,13 +299,13 @@ GROUP BY id; SELECT COUNT(*) FROM modify_table; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT * FROM summary_table ORDER BY id, counter; id | counter -----+--------- +--------------------------------------------------------------------- 1 | 1 2 | 21 3 | 39 @@ -322,13 +322,13 @@ raw_data AS ( INSERT INTO summary_table SELECT id, COUNT(*) AS counter FROM raw_data GROUP BY id; SELECT COUNT(*) FROM modify_table; count -------- +--------------------------------------------------------------------- 3 (1 row) SELECT * FROM summary_table ORDER BY id, counter; id | counter -----+--------- +--------------------------------------------------------------------- 1 | 1 1 | 1 2 | 21 @@ -344,7 +344,7 @@ WITH summary_data AS ( INSERT INTO summary_table SELECT id, SUM(counter) AS counter FROM 
summary_data GROUP BY id; SELECT * FROM summary_table ORDER BY id, counter; id | counter -----+--------- +--------------------------------------------------------------------- 1 | 2 2 | 21 3 | 39 @@ -354,7 +354,7 @@ SELECT * FROM summary_table ORDER BY id, counter; SELECT * FROM modify_table ORDER BY id, val; id | val -----+----- +--------------------------------------------------------------------- 1 | 2 2 | 4 3 | 6 @@ -362,7 +362,7 @@ SELECT * FROM modify_table ORDER BY id, val; SELECT * FROM anchor_table ORDER BY id; id ----- +--------------------------------------------------------------------- 1 2 (2 rows) @@ -386,7 +386,7 @@ update_data AS ( ) SELECT count(*) FROM update_data; count -------- +--------------------------------------------------------------------- 3 (1 row) @@ -400,7 +400,7 @@ update_data AS ( ) SELECT count(*) FROM update_data; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -414,7 +414,7 @@ WITH update_data AS ( ) SELECT COUNT(*) FROM update_data; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -423,7 +423,7 @@ WITH delete_rows AS ( ) SELECT * FROM delete_rows ORDER BY id, val; id | val -----+----- +--------------------------------------------------------------------- 21 | 300 22 | 200 23 | 100 @@ -434,7 +434,7 @@ WITH delete_rows AS ( ) SELECT * FROM delete_rows ORDER BY id, counter; id | counter -----+--------- +--------------------------------------------------------------------- 11 | 1 12 | 1 13 | 1 @@ -451,14 +451,14 @@ WITH insert_reference AS ( ) SELECT id FROM insert_reference ORDER BY id; id ----- +--------------------------------------------------------------------- 3 4 (2 rows) SELECT * FROM summary_table ORDER BY id, counter; id | counter -----+--------- +--------------------------------------------------------------------- 1 | 1 1 | 2 2 | 1 @@ -471,12 +471,12 @@ SELECT * FROM summary_table ORDER BY id, counter; SELECT * FROM modify_table ORDER BY id, val; id | val -----+----- +--------------------------------------------------------------------- (0 rows) SELECT * FROM anchor_table ORDER BY id; id ----- +--------------------------------------------------------------------- 1 2 3 @@ -486,7 +486,7 @@ SELECT * FROM anchor_table ORDER BY id; ROLLBACK; SELECT * FROM summary_table ORDER BY id, counter; id | counter -----+--------- +--------------------------------------------------------------------- 1 | 2 2 | 21 3 | 39 @@ -496,7 +496,7 @@ SELECT * FROM summary_table ORDER BY id, counter; SELECT * FROM modify_table ORDER BY id, val; id | val -----+----- +--------------------------------------------------------------------- 1 | 2 2 | 4 3 | 6 @@ -504,7 +504,7 @@ SELECT * FROM modify_table ORDER BY id, val; SELECT * FROM anchor_table ORDER BY id; id ----- +--------------------------------------------------------------------- 1 2 (2 rows) @@ -515,7 +515,7 @@ WITH deleted_rows AS ( ) SELECT * FROM deleted_rows; id | val -----+----- +--------------------------------------------------------------------- 1 | 2 (1 row) @@ -524,7 +524,7 @@ WITH deleted_rows AS ( ) SELECT * FROM deleted_rows; id | val -----+----- +--------------------------------------------------------------------- 2 | 4 (1 row) @@ -536,7 +536,7 @@ deleted_rows AS ( ) SELECT * FROM deleted_rows; id | val -----+----- +--------------------------------------------------------------------- (0 rows) WITH deleted_rows AS ( @@ -544,7 +544,7 @@ WITH deleted_rows AS ( ) SELECT * FROM deleted_rows; id | val 
-----+----- +--------------------------------------------------------------------- 3 | 6 (1 row) @@ -556,7 +556,7 @@ deleted_rows AS ( ) SELECT * FROM deleted_rows; id | val -----+----- +--------------------------------------------------------------------- (0 rows) WITH deleted_rows AS ( @@ -597,7 +597,7 @@ raw_data AS ( ) SELECT COUNT(*) FROM raw_data; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -610,7 +610,7 @@ raw_data AS ( ) SELECT * FROM raw_data ORDER BY val; id | val -----+----- +--------------------------------------------------------------------- 1 | 2 1 | 6 (2 rows) @@ -623,13 +623,13 @@ raw_data AS ( ) SELECT * FROM raw_data ORDER BY val; id | val -----+----- +--------------------------------------------------------------------- 2 | 0 (1 row) SELECT * FROM modify_table ORDER BY id, val; id | val -----+----- +--------------------------------------------------------------------- 2 | 0 3 | 5 (2 rows) @@ -646,7 +646,7 @@ raw_data AS ( ) SELECT * FROM raw_data ORDER BY val; id | val -----+----- +--------------------------------------------------------------------- (0 rows) -- Test with replication factor 2 @@ -655,7 +655,7 @@ DROP TABLE modify_table; CREATE TABLE with_modifying.modify_table (id int, val int); SELECT create_distributed_table('modify_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -664,19 +664,19 @@ DROP TABLE summary_table; CREATE TABLE with_modifying.summary_table (id int, counter int); SELECT create_distributed_table('summary_table', 'id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) SELECT COUNT(*) FROM modify_table; count -------- +--------------------------------------------------------------------- 107 (1 row) SELECT * FROM summary_table ORDER BY id, counter; id | counter -----+--------- +--------------------------------------------------------------------- (0 rows) WITH raw_data AS ( @@ -685,13 +685,13 @@ WITH raw_data AS ( INSERT INTO summary_table SELECT id, COUNT(*) AS counter FROM raw_data GROUP BY id; SELECT COUNT(*) FROM modify_table; count -------- +--------------------------------------------------------------------- 0 (1 row) SELECT * FROM summary_table ORDER BY id, counter; id | counter -----+--------- +--------------------------------------------------------------------- 1 | 8 2 | 19 3 | 18 @@ -706,7 +706,7 @@ BEGIN; INSERT INTO modify_table (id) VALUES (10000); WITH test_cte AS (SELECT count(*) FROM modify_table) SELECT * FROM test_cte; count -------- +--------------------------------------------------------------------- 1 (1 row) @@ -715,7 +715,7 @@ ROLLBACK; WITH first_query AS (INSERT INTO modify_table (id) VALUES (10001)), second_query AS (SELECT * FROM modify_table) SELECT count(*) FROM second_query; count -------- +--------------------------------------------------------------------- 1 (1 row) diff --git a/src/test/regress/expected/with_nested.out b/src/test/regress/expected/with_nested.out index 6002b27e8..97051124a 100644 --- a/src/test/regress/expected/with_nested.out +++ b/src/test/regress/expected/with_nested.out @@ -15,7 +15,7 @@ cte_2 AS ( ) SELECT user_id FROM cte_2 LIMIT 1; user_id ---------- +--------------------------------------------------------------------- 2 (1 row) @@ -50,7 +50,7 @@ ORDER BY 1, 2 LIMIT 20; user_id | event_type ----------+------------ 
+--------------------------------------------------------------------- 1 | 0 1 | 0 1 | 0 @@ -152,7 +152,7 @@ FROM GROUP BY 1; uid | avg | sum | sum ------+------------------------+-----+----- +--------------------------------------------------------------------- 1 | 1.00000000000000000000 | 3 | 72 (1 row) @@ -229,7 +229,7 @@ ORDER BY LIMIT 20; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | @@ -319,7 +319,7 @@ ORDER BY LIMIT 20; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | @@ -397,7 +397,7 @@ cte2 AS ( ) SELECT * FROM cte2; user_id | time | value_1 | min ----------+---------------------------------+---------+----- +--------------------------------------------------------------------- 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 5 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 5 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 5 diff --git a/src/test/regress/expected/with_partitioning.out b/src/test/regress/expected/with_partitioning.out index 95c415b3c..2b151b984 100644 --- a/src/test/regress/expected/with_partitioning.out +++ b/src/test/regress/expected/with_partitioning.out @@ -17,7 +17,7 @@ SELECT create_distributed_table('with_partitioning.partitioning_test', 'id'); NOTICE: Copying data from local table... NOTICE: Copying data from local table... 
create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -27,7 +27,7 @@ WITH cte AS ( ) SELECT DISTINCT ON (id) id, cte.time FROM cte join partitioning_test on cte.time::date=partitioning_test.time ORDER BY 1, 2 LIMIT 3; id | time -----+--------------------------------- +--------------------------------------------------------------------- 1 | Thu Nov 23 00:07:11.068353 2017 3 | Wed Nov 22 18:19:49.944985 2017 (2 rows) @@ -38,7 +38,7 @@ WITH cte AS ( ) SELECT DISTINCT ON (id) id, cte.time FROM cte join partitioning_test on cte.time::date=partitioning_test.time WHERE partitioning_test.time >'2017-11-20' ORDER BY 1, 2 LIMIT 3; id | time -----+--------------------------------- +--------------------------------------------------------------------- 1 | Thu Nov 23 00:07:11.068353 2017 3 | Wed Nov 22 18:19:49.944985 2017 (2 rows) @@ -58,7 +58,7 @@ cte_joined_2 AS ( ) SELECT DISTINCT ON (event_type) event_type, cte_joined_2.user_id FROM events_table join cte_joined_2 on (cte_joined_2.time=events_table.time::date) ORDER BY 1, 2 LIMIT 10 OFFSET 2; event_type | user_id -------------+--------- +--------------------------------------------------------------------- 2 | 1 3 | 1 4 | 1 @@ -82,7 +82,7 @@ cte_joined_2 AS ( ) SELECT DISTINCT ON (id) id, cte_joined_2.time FROM cte_joined_2 join partitioning_test on (cte_joined_2.time=partitioning_test.time) ORDER BY 1, 2; id | time -----+------------ +--------------------------------------------------------------------- 1 | 11-23-2017 3 | 11-22-2017 (2 rows) diff --git a/src/test/regress/expected/with_prepare.out b/src/test/regress/expected/with_prepare.out index 4a1a55129..58c9191a3 100644 --- a/src/test/regress/expected/with_prepare.out +++ b/src/test/regress/expected/with_prepare.out @@ -188,7 +188,7 @@ WHERE events_user_id IN (SELECT user_id FROM users_table); EXECUTE prepared_test_1; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | @@ -203,7 +203,7 @@ EXECUTE prepared_test_1; EXECUTE prepared_test_1; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | @@ -218,7 +218,7 @@ EXECUTE prepared_test_1; EXECUTE prepared_test_1; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | @@ -233,7 +233,7 @@ EXECUTE prepared_test_1; EXECUTE prepared_test_1; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 1 | Thu Nov 23 
11:44:57.515981 2017 | 4 | 3 | 4 | @@ -248,7 +248,7 @@ EXECUTE prepared_test_1; EXECUTE prepared_test_1; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | @@ -263,7 +263,7 @@ EXECUTE prepared_test_1; EXECUTE prepared_test_1; user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | @@ -278,7 +278,7 @@ EXECUTE prepared_test_1; EXECUTE prepared_test_2; user_id | sum ----------+----- +--------------------------------------------------------------------- 1 | 29 3 | 29 6 | 29 @@ -289,7 +289,7 @@ EXECUTE prepared_test_2; EXECUTE prepared_test_2; user_id | sum ----------+----- +--------------------------------------------------------------------- 1 | 29 3 | 29 6 | 29 @@ -300,7 +300,7 @@ EXECUTE prepared_test_2; EXECUTE prepared_test_2; user_id | sum ----------+----- +--------------------------------------------------------------------- 1 | 29 3 | 29 6 | 29 @@ -311,7 +311,7 @@ EXECUTE prepared_test_2; EXECUTE prepared_test_2; user_id | sum ----------+----- +--------------------------------------------------------------------- 1 | 29 3 | 29 6 | 29 @@ -322,7 +322,7 @@ EXECUTE prepared_test_2; EXECUTE prepared_test_2; user_id | sum ----------+----- +--------------------------------------------------------------------- 1 | 29 3 | 29 6 | 29 @@ -333,7 +333,7 @@ EXECUTE prepared_test_2; EXECUTE prepared_test_2; user_id | sum ----------+----- +--------------------------------------------------------------------- 1 | 29 3 | 29 6 | 29 @@ -344,7 +344,7 @@ EXECUTE prepared_test_2; EXECUTE prepared_test_3(1); user_id | sum ----------+-------- +--------------------------------------------------------------------- 1 | 10850 6 | 15500 3 | 52700 @@ -355,7 +355,7 @@ EXECUTE prepared_test_3(1); EXECUTE prepared_test_3(2); user_id | sum ----------+------- +--------------------------------------------------------------------- 1 | 10850 6 | 15500 5 | 20150 @@ -366,27 +366,27 @@ EXECUTE prepared_test_3(2); EXECUTE prepared_test_3(3); user_id | sum ----------+----- +--------------------------------------------------------------------- (0 rows) EXECUTE prepared_test_3(4); user_id | sum ----------+----- +--------------------------------------------------------------------- (0 rows) EXECUTE prepared_test_3(5); user_id | sum ----------+----- +--------------------------------------------------------------------- (0 rows) EXECUTE prepared_test_3(6); user_id | sum ----------+----- +--------------------------------------------------------------------- (0 rows) EXECUTE prepared_test_4(1, 2, 3); user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | @@ -401,7 +401,7 @@ EXECUTE prepared_test_4(1, 2, 3); EXECUTE prepared_test_4(2, 
3, 4); user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | @@ -416,7 +416,7 @@ EXECUTE prepared_test_4(2, 3, 4); EXECUTE prepared_test_4(3, 4, 5); user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | @@ -431,7 +431,7 @@ EXECUTE prepared_test_4(3, 4, 5); EXECUTE prepared_test_4(4, 5, 6); user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | @@ -446,7 +446,7 @@ EXECUTE prepared_test_4(4, 5, 6); EXECUTE prepared_test_4(5, 6, 7); user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | 2 | Thu Nov 23 11:48:24.943542 2017 | 0 | 5 | 5 | 3 | Thu Nov 23 04:01:08.04806 2017 | 5 | 5 | 3 | @@ -461,12 +461,12 @@ EXECUTE prepared_test_4(5, 6, 7); EXECUTE prepared_test_4(6, 7, 8); user_id | time | value_1 | value_2 | value_3 | value_4 ----------+------+---------+---------+---------+--------- +--------------------------------------------------------------------- (0 rows) EXECUTE prepared_test_5(1, 2, 3); user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | @@ -481,7 +481,7 @@ EXECUTE prepared_test_5(1, 2, 3); EXECUTE prepared_test_5(2, 3, 4); user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | @@ -496,7 +496,7 @@ EXECUTE prepared_test_5(2, 3, 4); EXECUTE prepared_test_5(3, 4, 5); user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 4 | 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 | 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 | @@ -511,7 +511,7 @@ EXECUTE prepared_test_5(3, 4, 5); EXECUTE prepared_test_5(4, 5, 6); user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- 
+--------------------------------------------------------------------- 4 | Wed Nov 22 21:33:03.616802 2017 | 5 | 4 | 1 | 4 | Wed Nov 22 23:48:11.949567 2017 | 2 | 0 | 0 | 4 | Wed Nov 22 23:59:46.493416 2017 | 3 | 1 | 3 | @@ -526,7 +526,7 @@ EXECUTE prepared_test_5(4, 5, 6); EXECUTE prepared_test_5(5, 6, 7); user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 5 | Wed Nov 22 20:43:18.667473 2017 | 0 | 3 | 2 | 5 | Wed Nov 22 21:02:07.575129 2017 | 2 | 0 | 2 | 5 | Wed Nov 22 22:10:24.315371 2017 | 1 | 2 | 1 | @@ -541,7 +541,7 @@ EXECUTE prepared_test_5(5, 6, 7); EXECUTE prepared_test_5(6, 7, 8); user_id | time | value_1 | value_2 | value_3 | value_4 ----------+---------------------------------+---------+---------+---------+--------- +--------------------------------------------------------------------- 6 | Wed Nov 22 20:15:53.317797 2017 | 1 | 1 | 1 | 6 | Wed Nov 22 23:01:24.82289 2017 | 2 | 4 | 1 | 6 | Thu Nov 23 00:07:11.068353 2017 | 1 | 1 | 4 | @@ -556,73 +556,73 @@ EXECUTE prepared_test_5(6, 7, 8); EXECUTE prepared_test_6; count -------- +--------------------------------------------------------------------- 101 (1 row) EXECUTE prepared_test_6; count -------- +--------------------------------------------------------------------- 101 (1 row) EXECUTE prepared_test_6; count -------- +--------------------------------------------------------------------- 101 (1 row) EXECUTE prepared_test_6; count -------- +--------------------------------------------------------------------- 101 (1 row) EXECUTE prepared_test_6; count -------- +--------------------------------------------------------------------- 101 (1 row) EXECUTE prepared_test_6; count -------- +--------------------------------------------------------------------- 101 (1 row) EXECUTE prepared_partition_column_insert(1); user_id | time | value_1 | value_2 | value_3 | value_4 ----------+------+---------+---------+---------+--------- +--------------------------------------------------------------------- 1 | | | | | (1 row) EXECUTE prepared_partition_column_insert(2); user_id | time | value_1 | value_2 | value_3 | value_4 ----------+------+---------+---------+---------+--------- +--------------------------------------------------------------------- 2 | | | | | (1 row) EXECUTE prepared_partition_column_insert(3); user_id | time | value_1 | value_2 | value_3 | value_4 ----------+------+---------+---------+---------+--------- +--------------------------------------------------------------------- 3 | | | | | (1 row) EXECUTE prepared_partition_column_insert(4); user_id | time | value_1 | value_2 | value_3 | value_4 ----------+------+---------+---------+---------+--------- +--------------------------------------------------------------------- 4 | | | | | (1 row) EXECUTE prepared_partition_column_insert(5); user_id | time | value_1 | value_2 | value_3 | value_4 ----------+------+---------+---------+---------+--------- +--------------------------------------------------------------------- 5 | | | | | (1 row) EXECUTE prepared_partition_column_insert(6); user_id | time | value_1 | value_2 | value_3 | value_4 ----------+------+---------+---------+---------+--------- +--------------------------------------------------------------------- 6 | | | | | (1 row) diff --git a/src/test/regress/expected/with_set_operations.out b/src/test/regress/expected/with_set_operations.out index bb82c67c3..39425e5da 100644 
--- a/src/test/regress/expected/with_set_operations.out +++ b/src/test/regress/expected/with_set_operations.out @@ -12,7 +12,7 @@ DEBUG: generating subplan 1_1 for CTE cte_1: SELECT user_id FROM public.users_t DEBUG: generating subplan 1_2 for CTE cte_2: SELECT user_id FROM public.events_table DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('1_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('1_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 ORDER BY 1 DESC user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -35,7 +35,7 @@ DEBUG: generating subplan 4_2 for CTE cte_2: SELECT user_id FROM public.events_ DEBUG: generating subplan 4_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('4_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 DEBUG: Plan 4 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('4_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo count -------- +--------------------------------------------------------------------- 6 (1 row) @@ -50,7 +50,7 @@ DEBUG: generating subplan 8_2 for CTE cte_2: SELECT user_id FROM public.events_ DEBUG: generating subplan 8_3 for CTE cte_3: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('8_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('8_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_3 ORDER BY user_id DESC user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -69,7 +69,7 @@ DEBUG: generating subplan 13_2 for subquery SELECT user_id FROM public.users_ta DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('13_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('13_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) DEBUG: Plan 12 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 ORDER BY user_id DESC user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -89,7 +89,7 @@ DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT intermediate_r DEBUG: generating subplan 16_2 for subquery SELECT user_id FROM public.users_table DEBUG: Plan 16 query after replacing subqueries and CTEs: 
SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('16_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) ORDER BY 1 DESC user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -112,7 +112,7 @@ DEBUG: generating subplan 21_2 for subquery SELECT user_id FROM public.events_t DEBUG: generating subplan 21_3 for subquery SELECT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.>) 4) DEBUG: Plan 21 query after replacing subqueries and CTEs: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('21_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 INTERSECT (SELECT intermediate_result.user_id FROM read_intermediate_result('21_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) EXCEPT SELECT intermediate_result.user_id FROM read_intermediate_result('21_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) ORDER BY 1 DESC user_id ---------- +--------------------------------------------------------------------- 2 1 (2 rows) @@ -123,7 +123,7 @@ DEBUG: generating subplan 27_1 for CTE cte_1: SELECT user_id FROM public.events DEBUG: generating subplan 27_2 for subquery SELECT user_id FROM public.users_table DEBUG: Plan 27 query after replacing subqueries and CTEs: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('27_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('27_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) ORDER BY 1 user_id ---------- +--------------------------------------------------------------------- 1 2 (2 rows) @@ -145,7 +145,7 @@ DEBUG: generating subplan 31_2 for subquery SELECT user_id FROM public.events_t DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('31_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) DEBUG: Plan 30 query after replacing subqueries and CTEs: SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('30_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1) foo WHERE (users_table.user_id OPERATOR(pg_catalog.=) foo.user_id) ORDER BY users_table.user_id DESC user_id ---------- +--------------------------------------------------------------------- 1 (1 row) @@ -167,7 +167,7 @@ DEBUG: generating subplan 34_2 for CTE cte_2: SELECT user_id FROM public.events DEBUG: generating subplan 34_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('34_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('34_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 DEBUG: Plan 34 query after replacing subqueries and CTEs: 
SELECT count(*) AS count FROM public.events_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('34_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) events_table.event_type) count -------- +--------------------------------------------------------------------- 95 (1 row) @@ -180,7 +180,7 @@ DEBUG: generating subplan 38_1 for subquery SELECT DISTINCT events_table.user_i DEBUG: generating subplan 38_2 for subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) DEBUG: Plan 38 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('38_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('38_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) ORDER BY 1 DESC user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -200,7 +200,7 @@ DEBUG: push down of limit count: 10 DEBUG: generating subplan 41_2 for subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) LIMIT 10 DEBUG: Plan 41 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('41_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('41_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) ORDER BY 1 DESC user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -230,7 +230,7 @@ DEBUG: generating subplan 46_2 for subquery SELECT DISTINCT events_table.user_i DEBUG: Plan 46 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('46_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('46_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) DEBUG: Plan 44 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('44_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) events_table, (SELECT foo_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('44_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo_1) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) events_table.value_2) count -------- +--------------------------------------------------------------------- 5 (1 row) @@ -251,7 +251,7 @@ DEBUG: generating subplan 49_3 for subquery SELECT DISTINCT events_table.user_i DEBUG: generating subplan 49_4 for subquery SELECT intermediate_result.user_id FROM read_intermediate_result('49_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('49_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) DEBUG: Plan 49 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT events_table_1.user_id FROM (SELECT intermediate_result.user_id FROM 
read_intermediate_result('49_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) events_table_1) events_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('49_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) events_table.user_id) count -------- +--------------------------------------------------------------------- 6 (1 row) @@ -262,7 +262,7 @@ DEBUG: generating subplan 54_1 for CTE cte_1: SELECT user_id FROM public.users_ DEBUG: generating subplan 54_2 for CTE cte_1: SELECT user_id FROM public.users_table DEBUG: Plan 54 query after replacing subqueries and CTEs: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('54_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('54_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 ORDER BY 1 DESC user_id ---------- +--------------------------------------------------------------------- 6 5 4 @@ -286,7 +286,7 @@ DEBUG: generating subplan 57_2 for CTE cte_1: SELECT user_id FROM public.users_ DEBUG: generating subplan 57_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('57_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('57_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 DEBUG: Plan 57 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('57_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, public.users_table WHERE (users_table.value_2 OPERATOR(pg_catalog.=) foo.user_id) count -------- +--------------------------------------------------------------------- 92 (1 row) @@ -297,7 +297,7 @@ DEBUG: generating subplan 61_1 for CTE cte_1: SELECT user_id, value_2 FROM publ DEBUG: generating subplan 61_2 for CTE cte_1: SELECT user_id, value_2 FROM public.users_table DEBUG: Plan 61 query after replacing subqueries and CTEs: SELECT cte_1.x, cte_1.value_2 FROM (SELECT intermediate_result.user_id AS x, intermediate_result.value_2 FROM read_intermediate_result('61_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) cte_1 UNION SELECT cte_1.x, cte_1.value_2 FROM (SELECT intermediate_result.user_id AS x, intermediate_result.value_2 FROM read_intermediate_result('61_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) cte_1 ORDER BY 1 DESC, 2 DESC LIMIT 5 x | value_2 ----+--------- +--------------------------------------------------------------------- 6 | 6 | 4 6 | 3 @@ -327,7 +327,7 @@ DEBUG: generating subplan 65_2 for subquery SELECT user_id FROM public.events_t DEBUG: Plan 65 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('65_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('65_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) DEBUG: Plan 64 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_table WHERE (value_2 
OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('64_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1)) ORDER BY (count(*)) DESC count -------- +--------------------------------------------------------------------- 92 (1 row) @@ -350,7 +350,7 @@ DEBUG: generating subplan 68_2 for CTE cte_2: SELECT user_id FROM public.events DEBUG: generating subplan 68_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('68_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('68_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 DEBUG: Plan 68 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_table WHERE (value_2 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('68_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) ORDER BY (count(*)) DESC count -------- +--------------------------------------------------------------------- 92 (1 row) @@ -378,7 +378,7 @@ DEBUG: generating subplan 72_2 for CTE cte_1: SELECT user_id FROM public.users_ DEBUG: generating subplan 72_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('72_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('72_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 DEBUG: Plan 72 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (SELECT users_table.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('72_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, public.users_table WHERE ((users_table.value_2 OPERATOR(pg_catalog.=) foo.user_id) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))) ORDER BY user_id DESC user_id ---------- +--------------------------------------------------------------------- 5 4 3 diff --git a/src/test/regress/expected/with_transactions.out b/src/test/regress/expected/with_transactions.out index 95cc90eaf..91c2f3257 100644 --- a/src/test/regress/expected/with_transactions.out +++ b/src/test/regress/expected/with_transactions.out @@ -6,14 +6,14 @@ SET citus.next_placement_id TO 800000; CREATE TABLE with_transactions.raw_table (tenant_id int, income float, created_at timestamptz); SELECT create_distributed_table('raw_table', 'tenant_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) CREATE TABLE with_transactions.second_raw_table (tenant_id int, income float, created_at timestamptz); SELECT create_distributed_table('second_raw_table', 'tenant_id'); create_distributed_table --------------------------- +--------------------------------------------------------------------- (1 row) @@ -43,13 +43,13 @@ ROLLBACK; -- see that both UPDATE and DELETE commands are rollbacked SELECT count(*) FROM raw_table; count -------- +--------------------------------------------------------------------- 101 (1 row) SELECT max(income) FROM raw_table; max ------- 
+--------------------------------------------------------------------- 1000 (1 row) @@ -57,7 +57,7 @@ SELECT max(income) FROM raw_table; BEGIN; SELECT count (*) FROM second_raw_table; count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -86,19 +86,19 @@ DEBUG: Plan 12 query after replacing subqueries and CTEs: UPDATE with_transacti -- make sure that everything committed SELECT count(*) FROM raw_table; count -------- +--------------------------------------------------------------------- 105 (1 row) SELECT count(*) FROM raw_table WHERE created_at = '2001-02-10 20:00:00'; count -------- +--------------------------------------------------------------------- 4 (1 row) SELECT count(*) FROM second_raw_table; count -------- +--------------------------------------------------------------------- 0 (1 row) @@ -114,7 +114,7 @@ DEBUG: generating subplan 17_1 for CTE ids_inserted: INSERT INTO with_transacti DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT income FROM with_transactions.second_raw_table WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT ids_inserted.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) ids_inserted)) ORDER BY income DESC LIMIT 3 DEBUG: push down of limit count: 3 income --------- +--------------------------------------------------------------------- (0 rows) ROLLBACK; @@ -136,7 +136,7 @@ END; $BODY$; SELECT count(*) FROM (SELECT run_ctes(s) FROM generate_series(1,current_setting('max_connections')::int+2) s) a; count -------- +--------------------------------------------------------------------- 102 (1 row) diff --git a/src/test/regress/expected/with_where.out b/src/test/regress/expected/with_where.out index 4e0704f86..00f62f9a6 100644 --- a/src/test/regress/expected/with_where.out +++ b/src/test/regress/expected/with_where.out @@ -26,7 +26,7 @@ IN FROM events); count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -56,7 +56,7 @@ WHERE users ); count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -86,7 +86,7 @@ WHERE users ); count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -117,7 +117,7 @@ WHERE users ); count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -146,7 +146,7 @@ IN SELECT * FROM events LIMIT 10 ); count -------- +--------------------------------------------------------------------- 101 (1 row) @@ -176,7 +176,7 @@ WHERE SELECT * FROM users LIMIT 10 ); count -------- +--------------------------------------------------------------------- 101 (1 row) diff --git a/src/test/regress/expected/worker_binary_data_partition.out b/src/test/regress/expected/worker_binary_data_partition.out index b07d7d66e..9c793079b 100644 --- a/src/test/regress/expected/worker_binary_data_partition.out +++ b/src/test/regress/expected/worker_binary_data_partition.out @@ -22,7 +22,7 @@ CREATE TABLE :Table_Name(textcolumn text, binarycolumn bytea); COPY :Table_Name FROM stdin; SELECT length(binarycolumn) FROM :Table_Name; length --------- +--------------------------------------------------------------------- 2 4 3 @@ -44,7 +44,7 @@ SELECT worker_range_partition_table(:JobId, :TaskId, :Select_Query_Text, :Partition_Column_Text, :Partition_Column_Type, ARRAY['aaa', 'some']::_text); worker_range_partition_table 
------------------------------- +--------------------------------------------------------------------- (1 row) @@ -61,7 +61,7 @@ SELECT COUNT(*) AS total_row_count FROM ( SELECT * FROM :Table_Part_01 UNION ALL SELECT * FROM :Table_Part_02 ) AS all_rows; total_row_count ------------------ +--------------------------------------------------------------------- 14 (1 row) @@ -72,7 +72,7 @@ SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_All FROM :Table_Name WHERE :Partition_Column IS NULL OR :Partition_Column < 'aaa' ) diff; diff_lhs_00 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -81,7 +81,7 @@ SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_All FROM :Table_Name WHERE :Partition_Column >= 'aaa' AND :Partition_Column < 'some' ) diff; diff_lhs_01 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -89,7 +89,7 @@ SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_All FROM :Table_Part_02 EXCEPT ALL :Select_All FROM :Table_Name WHERE :Partition_Column >= 'some' ) diff; diff_lhs_02 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -98,7 +98,7 @@ SELECT COUNT(*) AS diff_rhs_00 FROM ( :Partition_Column < 'aaa' EXCEPT ALL :Select_All FROM :Table_Part_00 ) diff; diff_rhs_00 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -107,7 +107,7 @@ SELECT COUNT(*) AS diff_rhs_01 FROM ( :Partition_Column < 'some' EXCEPT ALL :Select_All FROM :Table_Part_01 ) diff; diff_rhs_01 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -115,7 +115,7 @@ SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_All FROM :Table_Name WHERE :Partition_Column >= 'some' EXCEPT ALL :Select_All FROM :Table_Part_02 ) diff; diff_rhs_02 -------------- +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/worker_check_invalid_arguments.out b/src/test/regress/expected/worker_check_invalid_arguments.out index 81c7dce62..413031fdd 100644 --- a/src/test/regress/expected/worker_check_invalid_arguments.out +++ b/src/test/regress/expected/worker_check_invalid_arguments.out @@ -21,7 +21,7 @@ CREATE TABLE :Table_Name(textcolumn text, binarycolumn bytea); COPY :Table_Name FROM stdin; SELECT COUNT(*) FROM :Table_Name; count -------- +--------------------------------------------------------------------- 2 (1 row) @@ -53,7 +53,7 @@ SELECT worker_range_partition_table(:JobId, :TaskId, :Select_Query_Text, :Partition_Column_Name, :Partition_Column_Type, ARRAY['aaa', 'some']::_text); worker_range_partition_table ------------------------------- +--------------------------------------------------------------------- (1 row) @@ -93,7 +93,7 @@ SELECT worker_merge_files_into_table(:JobId, :TaskId, ARRAY['textcolumn', 'binarycolumn'], ARRAY['text', 'bytea']); worker_merge_files_into_table -------------------------------- +--------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/worker_hash_partition.out b/src/test/regress/expected/worker_hash_partition.out index c21c943d4..172d96a7e 100644 --- a/src/test/regress/expected/worker_hash_partition.out +++ b/src/test/regress/expected/worker_hash_partition.out @@ -27,7 +27,7 @@ SELECT worker_hash_partition_table(:JobId, :TaskId, :Select_Query_Text, :Partition_Column_Text, :Partition_Column_Type::regtype, ARRAY[-2147483648, -1073741824, 0, 1073741824]::int4[]); 
worker_hash_partition_table ------------------------------ +--------------------------------------------------------------------- (1 row) @@ -37,25 +37,25 @@ COPY :Table_Part_02 FROM :'Table_File_02'; COPY :Table_Part_03 FROM :'Table_File_03'; SELECT COUNT(*) FROM :Table_Part_00; count -------- +--------------------------------------------------------------------- 2885 (1 row) SELECT COUNT(*) FROM :Table_Part_01; count -------- +--------------------------------------------------------------------- 3009 (1 row) SELECT COUNT(*) FROM :Table_Part_02; count -------- +--------------------------------------------------------------------- 3104 (1 row) SELECT COUNT(*) FROM :Table_Part_03; count -------- +--------------------------------------------------------------------- 3002 (1 row) @@ -65,7 +65,7 @@ SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_All FROM :Table_Part_00 EXCEPT ALL :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 0) ) diff; diff_lhs_00 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -73,7 +73,7 @@ SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_All FROM :Table_Part_01 EXCEPT ALL :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 1) ) diff; diff_lhs_01 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -81,7 +81,7 @@ SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_All FROM :Table_Part_02 EXCEPT ALL :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 2) ) diff; diff_lhs_02 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -89,7 +89,7 @@ SELECT COUNT(*) AS diff_lhs_03 FROM ( :Select_All FROM :Table_Part_03 EXCEPT ALL :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 3) ) diff; diff_lhs_03 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -97,7 +97,7 @@ SELECT COUNT(*) AS diff_rhs_00 FROM ( :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 0) EXCEPT ALL :Select_All FROM :Table_Part_00 ) diff; diff_rhs_00 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -105,7 +105,7 @@ SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 1) EXCEPT ALL :Select_All FROM :Table_Part_01 ) diff; diff_rhs_01 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -113,7 +113,7 @@ SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 2) EXCEPT ALL :Select_All FROM :Table_Part_02 ) diff; diff_rhs_02 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -121,7 +121,7 @@ SELECT COUNT(*) AS diff_rhs_03 FROM ( :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 3) EXCEPT ALL :Select_All FROM :Table_Part_03 ) diff; diff_rhs_03 -------------- +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/worker_hash_partition_complex.out b/src/test/regress/expected/worker_hash_partition_complex.out index 4931d0032..12899e510 100644 --- a/src/test/regress/expected/worker_hash_partition_complex.out +++ b/src/test/regress/expected/worker_hash_partition_complex.out @@ -31,7 +31,7 @@ SELECT worker_hash_partition_table(:JobId, :TaskId, :Partition_Column_Text, :Partition_Column_Type, ARRAY[-2147483648, -1073741824, 0, 1073741824]::int4[]); worker_hash_partition_table ------------------------------ 
+--------------------------------------------------------------------- (1 row) @@ -42,13 +42,13 @@ COPY :Table_Part_02 FROM :'Table_File_02'; COPY :Table_Part_03 FROM :'Table_File_03'; SELECT COUNT(*) FROM :Table_Part_00; count -------- +--------------------------------------------------------------------- 1883 (1 row) SELECT COUNT(*) FROM :Table_Part_03; count -------- +--------------------------------------------------------------------- 1913 (1 row) @@ -59,7 +59,7 @@ SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 0) ) diff; diff_lhs_00 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -68,7 +68,7 @@ SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 1) ) diff; diff_lhs_01 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -77,7 +77,7 @@ SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 2) ) diff; diff_lhs_02 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -86,7 +86,7 @@ SELECT COUNT(*) AS diff_lhs_03 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 3) ) diff; diff_lhs_03 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -95,7 +95,7 @@ SELECT COUNT(*) AS diff_rhs_00 FROM ( (:Hash_Mod_Function = 0) EXCEPT ALL :Select_Columns FROM :Table_Part_00 ) diff; diff_rhs_00 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -104,7 +104,7 @@ SELECT COUNT(*) AS diff_rhs_01 FROM ( (:Hash_Mod_Function = 1) EXCEPT ALL :Select_Columns FROM :Table_Part_01 ) diff; diff_rhs_01 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -113,7 +113,7 @@ SELECT COUNT(*) AS diff_rhs_02 FROM ( (:Hash_Mod_Function = 2) EXCEPT ALL :Select_Columns FROM :Table_Part_02 ) diff; diff_rhs_02 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -122,7 +122,7 @@ SELECT COUNT(*) AS diff_rhs_03 FROM ( (:Hash_Mod_Function = 3) EXCEPT ALL :Select_Columns FROM :Table_Part_03 ) diff; diff_rhs_03 -------------- +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/worker_merge_hash_files.out b/src/test/regress/expected/worker_merge_hash_files.out index 89246b89f..ee361819e 100644 --- a/src/test/regress/expected/worker_merge_hash_files.out +++ b/src/test/regress/expected/worker_merge_hash_files.out @@ -16,7 +16,7 @@ SELECT worker_merge_files_into_table(:JobId, :TaskId, 'decimal(15, 2)', 'decimal(15, 2)', 'char(1)', 'char(1)', 'date', 'date', 'date', 'char(25)', 'char(10)', 'varchar(44)']::_text); worker_merge_files_into_table -------------------------------- +--------------------------------------------------------------------- (1 row) @@ -24,27 +24,27 @@ SELECT worker_merge_files_into_table(:JobId, :TaskId, -- partitioned. We then compute the difference of these two tables. 
SELECT COUNT(*) FROM :Task_Table_Name; count -------- +--------------------------------------------------------------------- 12000 (1 row) SELECT COUNT(*) FROM lineitem; count -------- +--------------------------------------------------------------------- 12000 (1 row) SELECT COUNT(*) AS diff_lhs FROM ( :Select_All FROM :Task_Table_Name EXCEPT ALL :Select_All FROM lineitem ) diff; diff_lhs ----------- +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs FROM ( :Select_All FROM lineitem EXCEPT ALL :Select_All FROM :Task_Table_Name ) diff; diff_rhs ----------- +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/worker_merge_range_files.out b/src/test/regress/expected/worker_merge_range_files.out index b39f52731..f7e46399f 100644 --- a/src/test/regress/expected/worker_merge_range_files.out +++ b/src/test/regress/expected/worker_merge_range_files.out @@ -16,7 +16,7 @@ SELECT worker_merge_files_into_table(:JobId, :TaskId, 'decimal(15, 2)', 'decimal(15, 2)', 'char(1)', 'char(1)', 'date', 'date', 'date', 'char(25)', 'char(10)', 'varchar(44)']::_text); worker_merge_files_into_table -------------------------------- +--------------------------------------------------------------------- (1 row) @@ -24,27 +24,27 @@ SELECT worker_merge_files_into_table(:JobId, :TaskId, -- partitioned. We then compute the difference of these two tables. SELECT COUNT(*) FROM :Task_Table_Name; count -------- +--------------------------------------------------------------------- 12000 (1 row) SELECT COUNT(*) FROM lineitem; count -------- +--------------------------------------------------------------------- 12000 (1 row) SELECT COUNT(*) AS diff_lhs FROM ( :Select_All FROM :Task_Table_Name EXCEPT ALL :Select_All FROM lineitem ) diff; diff_lhs ----------- +--------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs FROM ( :Select_All FROM lineitem EXCEPT ALL :Select_All FROM :Task_Table_Name ) diff; diff_rhs ----------- +--------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/worker_null_data_partition.out b/src/test/regress/expected/worker_null_data_partition.out index 08b3e0976..3a42eaabc 100644 --- a/src/test/regress/expected/worker_null_data_partition.out +++ b/src/test/regress/expected/worker_null_data_partition.out @@ -24,7 +24,7 @@ SELECT worker_range_partition_table(:JobId, :Range_TaskId, :Select_Query_Text, :Partition_Column_Text, :Partition_Column_Type, ARRAY[0, 10]::_int4); worker_range_partition_table ------------------------------- +--------------------------------------------------------------------- (1 row) @@ -34,13 +34,13 @@ COPY :Range_Table_Part_01 FROM :'Range_Table_File_01'; COPY :Range_Table_Part_02 FROM :'Range_Table_File_02'; SELECT COUNT(*) FROM :Range_Table_Part_00; count -------- +--------------------------------------------------------------------- 6 (1 row) SELECT COUNT(*) FROM :Range_Table_Part_02; count -------- +--------------------------------------------------------------------- 588 (1 row) @@ -51,7 +51,7 @@ SELECT COUNT(*) AS diff_lhs_00 FROM ( (:Select_All FROM supplier WHERE :Partition_Column < 0 OR :Partition_Column IS NULL) ) diff; diff_lhs_00 -------------- +--------------------------------------------------------------------- 0 (1 row) @@ -60,7 +60,7 @@ SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_All FROM supplier WHERE :Partition_Column >= 0 AND 
:Partition_Column < 10 ) diff;
 diff_lhs_01
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -68,7 +68,7 @@ SELECT COUNT(*) AS diff_rhs_02 FROM (
 :Select_All FROM supplier WHERE :Partition_Column >= 10 EXCEPT ALL
 :Select_All FROM :Range_Table_Part_02 ) diff;
 diff_rhs_02
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -77,7 +77,7 @@ SELECT COUNT(*) AS diff_rhs_00 FROM (
 :Partition_Column IS NULL) EXCEPT ALL
 :Select_All FROM :Range_Table_Part_00 ) diff;
 diff_rhs_00
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -86,7 +86,7 @@ SELECT COUNT(*) AS diff_rhs_01 FROM (
 :Partition_Column < 10 EXCEPT ALL
 :Select_All FROM :Range_Table_Part_01 ) diff;
 diff_rhs_01
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -94,7 +94,7 @@ SELECT COUNT(*) AS diff_rhs_02 FROM (
 :Select_All FROM supplier WHERE :Partition_Column >= 10 EXCEPT ALL
 :Select_All FROM :Range_Table_Part_02 ) diff;
 diff_rhs_02
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -117,7 +117,7 @@ SELECT worker_hash_partition_table(:JobId, :Hash_TaskId, :Select_Query_Text,
 :Partition_Column_Text, :Partition_Column_Type,
 ARRAY[-2147483648, -1073741824, 0, 1073741824]::int4[]);
 worker_hash_partition_table
------------------------------
+---------------------------------------------------------------------

 (1 row)

@@ -126,13 +126,13 @@ COPY :Hash_Table_Part_01 FROM :'Hash_Table_File_01';
 COPY :Hash_Table_Part_02 FROM :'Hash_Table_File_02';
 SELECT COUNT(*) FROM :Hash_Table_Part_00;
 count
--------
+---------------------------------------------------------------------
 282
 (1 row)

 SELECT COUNT(*) FROM :Hash_Table_Part_02;
 count
--------
+---------------------------------------------------------------------
 102
 (1 row)

@@ -143,7 +143,7 @@ SELECT COUNT(*) AS diff_lhs_00 FROM (
 (:Select_All FROM supplier WHERE (:Hash_Mod_Function = 0) OR
 :Partition_Column IS NULL) ) diff;
 diff_lhs_00
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -151,7 +151,7 @@ SELECT COUNT(*) AS diff_lhs_01 FROM (
 :Select_All FROM :Hash_Table_Part_01 EXCEPT ALL
 :Select_All FROM supplier WHERE (:Hash_Mod_Function = 1) ) diff;
 diff_lhs_01
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -159,7 +159,7 @@ SELECT COUNT(*) AS diff_lhs_02 FROM (
 :Select_All FROM :Hash_Table_Part_02 EXCEPT ALL
 :Select_All FROM supplier WHERE (:Hash_Mod_Function = 2) ) diff;
 diff_lhs_02
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -168,7 +168,7 @@ SELECT COUNT(*) AS diff_rhs_00 FROM (
 :Partition_Column IS NULL) EXCEPT ALL
 :Select_All FROM :Hash_Table_Part_00 ) diff;
 diff_rhs_00
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -176,7 +176,7 @@ SELECT COUNT(*) AS diff_rhs_01 FROM (
 :Select_All FROM supplier WHERE (:Hash_Mod_Function = 1) EXCEPT ALL
 :Select_All FROM :Hash_Table_Part_01 ) diff;
 diff_rhs_01
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -184,7 +184,7 @@ SELECT COUNT(*) AS diff_rhs_02 FROM (
 :Select_All FROM supplier WHERE (:Hash_Mod_Function = 2) EXCEPT ALL
 :Select_All FROM :Hash_Table_Part_02 ) diff;
 diff_rhs_02
--------------
+---------------------------------------------------------------------
 0
 (1 row)

diff --git a/src/test/regress/expected/worker_range_partition.out b/src/test/regress/expected/worker_range_partition.out
index bb4041ac3..9acd12002 100644
--- a/src/test/regress/expected/worker_range_partition.out
+++ b/src/test/regress/expected/worker_range_partition.out
@@ -23,7 +23,7 @@ SELECT worker_range_partition_table(:JobId, :TaskId, :Select_Query_Text,
 :Partition_Column_Text, :Partition_Column_Type,
 ARRAY[1, 3000, 12000]::_int8);
 worker_range_partition_table
-------------------------------
+---------------------------------------------------------------------

 (1 row)

@@ -33,13 +33,13 @@ COPY :Table_Part_02 FROM :'Table_File_02';
 COPY :Table_Part_03 FROM :'Table_File_03';
 SELECT COUNT(*) FROM :Table_Part_00;
 count
--------
+---------------------------------------------------------------------
 0
 (1 row)

 SELECT COUNT(*) FROM :Table_Part_03;
 count
--------
+---------------------------------------------------------------------
 3047
 (1 row)

@@ -49,7 +49,7 @@ SELECT COUNT(*) AS diff_lhs_00 FROM (
 :Select_All FROM :Table_Part_00 EXCEPT ALL
 :Select_All FROM lineitem WHERE :Partition_Column < 1 ) diff;
 diff_lhs_00
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -58,7 +58,7 @@ SELECT COUNT(*) AS diff_lhs_01 FROM (
 :Select_All FROM lineitem WHERE :Partition_Column >= 1 AND
 :Partition_Column < 3000 ) diff;
 diff_lhs_01
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -67,7 +67,7 @@ SELECT COUNT(*) AS diff_lhs_02 FROM (
 :Select_All FROM lineitem WHERE :Partition_Column >= 3000 AND
 :Partition_Column < 12000 ) diff;
 diff_lhs_02
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -75,7 +75,7 @@ SELECT COUNT(*) AS diff_lhs_03 FROM (
 :Select_All FROM :Table_Part_03 EXCEPT ALL
 :Select_All FROM lineitem WHERE :Partition_Column >= 12000 ) diff;
 diff_lhs_03
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -83,7 +83,7 @@ SELECT COUNT(*) AS diff_rhs_00 FROM (
 :Select_All FROM lineitem WHERE :Partition_Column < 1 EXCEPT ALL
 :Select_All FROM :Table_Part_00 ) diff;
 diff_rhs_00
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -92,7 +92,7 @@ SELECT COUNT(*) AS diff_rhs_01 FROM (
 :Partition_Column < 3000 EXCEPT ALL
 :Select_All FROM :Table_Part_01 ) diff;
 diff_rhs_01
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -101,7 +101,7 @@ SELECT COUNT(*) AS diff_rhs_02 FROM (
 :Partition_Column < 12000 EXCEPT ALL
 :Select_All FROM :Table_Part_02 ) diff;
 diff_rhs_02
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -109,7 +109,7 @@ SELECT COUNT(*) AS diff_rhs_03 FROM (
 :Select_All FROM lineitem WHERE :Partition_Column >= 12000 EXCEPT ALL
 :Select_All FROM :Table_Part_03 ) diff;
 diff_rhs_03
--------------
+---------------------------------------------------------------------
 0
 (1 row)

diff --git a/src/test/regress/expected/worker_range_partition_complex.out b/src/test/regress/expected/worker_range_partition_complex.out
index ff739a3c1..c39857389 100644
--- a/src/test/regress/expected/worker_range_partition_complex.out
+++ b/src/test/regress/expected/worker_range_partition_complex.out
@@ -28,7 +28,7 @@ SELECT worker_range_partition_table(:JobId, :TaskId,
 :Partition_Column_Text, :Partition_Column_Type,
 ARRAY[101, 12000, 18000]::_int4);
 worker_range_partition_table
-------------------------------
+---------------------------------------------------------------------

 (1 row)

@@ -39,13 +39,13 @@ COPY :Table_Part_02 FROM :'Table_File_02';
 COPY :Table_Part_03 FROM :'Table_File_03';
 SELECT COUNT(*) FROM :Table_Part_00;
 count
--------
+---------------------------------------------------------------------
 3
 (1 row)

 SELECT COUNT(*) FROM :Table_Part_03;
 count
--------
+---------------------------------------------------------------------
 7022
 (1 row)

@@ -56,7 +56,7 @@ SELECT COUNT(*) AS diff_lhs_00 FROM (
 :Select_Columns FROM lineitem WHERE :Select_Filters AND
 :Partition_Column < 101 ) diff;
 diff_lhs_00
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -66,7 +66,7 @@ SELECT COUNT(*) AS diff_lhs_01 FROM (
 :Partition_Column >= 101 AND
 :Partition_Column < 12000 ) diff;
 diff_lhs_01
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -76,7 +76,7 @@ SELECT COUNT(*) AS diff_lhs_02 FROM (
 :Partition_Column >= 12000 AND
 :Partition_Column < 18000 ) diff;
 diff_lhs_02
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -85,7 +85,7 @@ SELECT COUNT(*) AS diff_lhs_03 FROM (
 :Select_Columns FROM lineitem WHERE :Select_Filters AND
 :Partition_Column >= 18000 ) diff;
 diff_lhs_03
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -94,7 +94,7 @@ SELECT COUNT(*) AS diff_rhs_00 FROM (
 :Partition_Column < 101 EXCEPT ALL
 :Select_Columns FROM :Table_Part_00 ) diff;
 diff_rhs_00
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -104,7 +104,7 @@ SELECT COUNT(*) AS diff_rhs_01 FROM (
 :Partition_Column < 12000 EXCEPT ALL
 :Select_Columns FROM :Table_Part_01 ) diff;
 diff_rhs_01
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -114,7 +114,7 @@ SELECT COUNT(*) AS diff_rhs_02 FROM (
 :Partition_Column < 18000 EXCEPT ALL
 :Select_Columns FROM :Table_Part_02 ) diff;
 diff_rhs_02
--------------
+---------------------------------------------------------------------
 0
 (1 row)

@@ -123,7 +123,7 @@ SELECT COUNT(*) AS diff_rhs_03 FROM (
 :Partition_Column >= 18000 EXCEPT ALL
 :Select_Columns FROM :Table_Part_03 ) diff;
 diff_rhs_03
--------------
+---------------------------------------------------------------------
 0
 (1 row)
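Note: every hunk above applies the same mechanical change. The result-header separator that psql prints under a column name (whose width tracks the column's display width) is replaced in the expected files by a single fixed-width dashed separator, so expected output no longer has to change whenever a column's width does. A minimal sketch of how such a normalization could be written as a sed rule applied to test output before comparison follows; the regex, the rules file name, and the invocation are illustrative assumptions, not the patch's actual normalization rule:

# Hypothetical sketch: collapse any psql header-separator row (a line of
# dashes, optionally joined by '+' between columns) into one fixed-width
# dashed line matching the separator added throughout the hunks above.
s/^-[-+]*-$/---------------------------------------------------------------------/

Applied as, for example, sed -f rules.sed actual.out (with rules.sed a hypothetical rules file run over both actual and expected output), this makes the two sides agree on separator width regardless of how wide any individual result column is.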