From a448ca01bc6287468dd46663b047ad4b83b0056f Mon Sep 17 00:00:00 2001 From: Burak Velioglu Date: Wed, 22 Dec 2021 21:40:04 +0300 Subject: [PATCH] Add TODOs --- src/test/regress/sql/citus_local_tables_mx.sql | 1 + src/test/regress/sql/local_shard_execution_dropped_column.sql | 2 ++ src/test/regress/sql/multi_cluster_management.sql | 4 +--- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/test/regress/sql/citus_local_tables_mx.sql b/src/test/regress/sql/citus_local_tables_mx.sql index cb94ed086..d1841bd8e 100644 --- a/src/test/regress/sql/citus_local_tables_mx.sql +++ b/src/test/regress/sql/citus_local_tables_mx.sql @@ -35,6 +35,7 @@ FOR EACH ROW EXECUTE FUNCTION dummy_function(); -- Show that we can sync metadata successfully. That means, we create -- the function that trigger needs in mx workers too. +-- TODO: Metadata sync won't propagate objects anymore SELECT start_metadata_sync_to_node('localhost', :worker_1_port); CREATE EXTENSION seg; diff --git a/src/test/regress/sql/local_shard_execution_dropped_column.sql b/src/test/regress/sql/local_shard_execution_dropped_column.sql index 36effbf71..f2522d552 100644 --- a/src/test/regress/sql/local_shard_execution_dropped_column.sql +++ b/src/test/regress/sql/local_shard_execution_dropped_column.sql @@ -15,6 +15,8 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE t1 (a int, b int, c int UNIQUE); SELECT create_distributed_table('t1', 'c'); ALTER TABLE t1 DROP COLUMN b; + +-- Syncing metadata won't propagate t1 SELECT start_metadata_sync_to_node('localhost',:worker_1_port); SELECT start_metadata_sync_to_node('localhost',:worker_2_port); diff --git a/src/test/regress/sql/multi_cluster_management.sql b/src/test/regress/sql/multi_cluster_management.sql index 24d516351..bc53a7b01 100644 --- a/src/test/regress/sql/multi_cluster_management.sql +++ b/src/test/regress/sql/multi_cluster_management.sql @@ -78,10 +78,8 @@ SELECT master_get_active_worker_nodes(); -- try to disable a node which does not exist 
and see that an error is thrown SELECT citus_disable_node('localhost.noexist', 2345); -table pg_dist_node; -\d - -- drop the table without leaving a shard placement behind (messes up other tests) +-- TODO: Replicate the ref table multiple times SELECT master_activate_node('localhost', :worker_2_port); DROP TABLE test_reference_table, cluster_management_test;