Update activate node

velioglu/wo_seq_test_1
Burak Velioglu 2022-01-05 16:33:11 +03:00
parent c60f77bb99
commit 299043dfaa
8 changed files with 59 additions and 44 deletions

View File

@@ -389,6 +389,14 @@ ShouldSyncTableMetadata(Oid relationId)
bool citusTableWithNoDistKey =
IsCitusTableTypeCacheEntry(tableEntry, CITUS_TABLE_WITH_NO_DIST_KEY);
/*
* We shouldn't sync metadata for distributed tables created with master_create_distributed_table
*/
if (hashDistributed && tableEntry->colocationId == INVALID_COLOCATION_ID)
{
return false;
}
return ShouldSyncTableMetadataInternal(hashDistributed, citusTableWithNoDistKey);
}
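
A minimal sketch of what the new guard means in practice, assuming a table distributed through the legacy master_create_distributed_table API (the table and column names below are illustrative, not from this commit): such tables keep colocationid at 0, i.e. INVALID_COLOCATION_ID, in pg_dist_partition, so ShouldSyncTableMetadata now returns false for them and their metadata stays off the workers.

-- hypothetical session; names are placeholders
CREATE TABLE legacy_table (id int);
SELECT master_create_distributed_table('legacy_table', 'id', 'hash');
-- expected: colocationid = 0 (INVALID_COLOCATION_ID), so metadata is not synced
SELECT logicalrelid, colocationid
FROM pg_dist_partition
WHERE logicalrelid = 'legacy_table'::regclass;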

View File

@@ -510,10 +510,6 @@ citus_disable_node(PG_FUNCTION_ARGS)
workerNode->workerName,
nodePort)));
}
bool forceRemoteDelete = false;
DeleteAllReplicatedTablePlacementsFromNodeGroup(workerNode->groupId,
forceRemoteDelete);
}
TransactionModifiedNodeMetadata = true;
@@ -1190,7 +1186,7 @@ ActivateNode(char *nodeName, int nodePort)
*/
if (!NodeIsCoordinator(workerNode) && NodeIsPrimary(workerNode) && !workerNode->isActive)
{
bool forceRemoteDelete = true;
DeleteAllReplicatedTablePlacementsFromNodeGroup(workerNode->groupId,
forceRemoteDelete);
}
@@ -1212,6 +1208,7 @@ ActivateNode(char *nodeName, int nodePort)
}
SetUpDistributedTableWithDependencies(workerNode);
SetUpMultipleDistributedTableIntegrations(workerNode);
if (syncMetadata)
{
@@ -1220,7 +1217,6 @@ ActivateNode(char *nodeName, int nodePort)
if (!NodeIsCoordinator(workerNode) && NodeIsPrimary(workerNode))
{
ClearDistributedObjectsFromNode(workerNode);
SetUpMultipleDistributedTableIntegrations(workerNode);
SetUpObjectMetadata(workerNode);
}
}
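
At the SQL level, the reworked ActivateNode flow amounts to the following, sketched with an illustrative port (9700 is a placeholder, not from this commit): activation drops stale replicated placements for the node group, sets up dependencies and table integrations, and, when metadata sync is requested, clears leftover distributed objects before re-creating object metadata.

-- illustrative usage; the port is a placeholder
SELECT citus_activate_node('localhost', 9700);
-- expected: the node is marked active in pg_dist_node
SELECT isactive FROM pg_dist_node WHERE nodeport = 9700;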

View File

@@ -270,8 +270,7 @@ worker_drop_distributed_table_metadata_only(PG_FUNCTION_ARGS)
List *shardPlacementList = ShardPlacementListIncludingOrphanedPlacements(shardId);
if (shardPlacementList == NULL)
{
ereport(NOTICE, (errmsg("placement for relation with oid %d does not exist, skipping", relationId)));
PG_RETURN_VOID();
ereport(WARNING, (errmsg("placement for relation with oid %d does not exist, skipping", relationId)));
}
ShardPlacement *placement = NULL;

View File

@@ -71,6 +71,8 @@ extern void GetDependentSequencesWithRelation(Oid relationId, List **attnumList,
extern Oid GetAttributeTypeOid(Oid relationId, AttrNumber attnum);
#define DELETE_ALL_NODES "TRUNCATE pg_dist_node CASCADE"
#define DELETE_ALL_PLACEMENTS "TRUNCATE pg_dist_placement CASCADE"
#define DELETE_ALL_SHARDS "TRUNCATE pg_dist_shard CASCADE"
#define DELETE_ALL_DISTRIBUTED_OBJECTS "TRUNCATE citus.pg_dist_object"
#define REMOVE_ALL_CLUSTERED_TABLES_ONLY_COMMAND \
"SELECT worker_drop_distributed_table_only(logicalrelid::regclass::text) FROM pg_dist_partition"

View File

@@ -500,14 +500,7 @@ SELECT shardid, nodename, nodeport
WHERE logicalrelid = 'numbers_append'::regclass order by placementid;
-- add the node back
table pg_dist_node;
\c - - - :worker_1_port
table pg_dist_node;
table pg_dist_placement;
table pg_dist_shard;
table pg_dist_shard_placement;
\c - - - :master_port
set citus.log_remote_commands to true;
DROP FOREIGN TABLE foreign_table_to_distribute;
SELECT 1 FROM master_activate_node('localhost', :worker_1_port);
reset citus.log_remote_commands;
RESET client_min_messages;

View File

@@ -36,7 +36,6 @@ test: alter_database_owner
test: multi_test_catalog_views
test: multi_table_ddl
test: multi_sequence_default
test: turn_mx_off
test: multi_name_lengths
test: multi_name_resolution
test: multi_metadata_access

View File

@@ -76,10 +76,8 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;
-- re-add the nodes to the cluster
set citus.log_remote_commands to true;
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
reset citus.log_remote_commands;
-- verify that a table can be created after the extension has been dropped and recreated
CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);

View File

@@ -6,6 +6,12 @@
-- metadata changes to MX tables.
-- Turn metadata sync off at first
\c - - - :worker_1_port
table pg_dist_partition;
table pg_dist_node;
\c - - - :master_port
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
@@ -47,18 +53,14 @@ COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regcla
distribution_method citus.distribution_type)
IS 'define the table distribution functions';
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;
-- Create a test table with constraints and SERIAL and default from user defined sequence
CREATE SEQUENCE user_defined_seq;
CREATE TABLE mx_test_table (col_1 int UNIQUE, col_2 text NOT NULL, col_3 BIGSERIAL, col_4 BIGINT DEFAULT nextval('user_defined_seq'));
SELECT master_create_distributed_table('mx_test_table', 'col_1', 'hash');
SELECT master_create_worker_shards('mx_test_table', 8, 1);
set citus.shard_count to 8;
set citus.shard_replication_factor to 1;
SELECT create_distributed_table('mx_test_table', 'col_1');
reset citus.shard_count;
reset citus.shard_replication_factor;
-- Set the replication model of the test table to streaming replication so that it is
-- considered as an MX table
@@ -87,7 +89,7 @@ UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_tabl
SELECT unnest(master_metadata_snapshot()) order by 1;
-- Test start_metadata_sync_to_node UDF
-- Test start_metadata_sync_to_node and citus_activate_node UDFs
-- Ensure that hasmetadata=false for all nodes
SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true;
@@ -103,12 +105,27 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888;
-- Add a node to another cluster to make sure it's also synced
SELECT master_add_secondary_node('localhost', 8889, 'localhost', :worker_1_port, nodecluster => 'second-cluster');
-- Run start_metadata_sync_to_node and check that it marked hasmetadata for that worker
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
\c - - - :worker_1_port
table pg_dist_partition;
table pg_dist_node;
table pg_dist_shard;
table pg_dist_shard_placement;
\c - - - :master_port
-- Run start_metadata_sync_to_node and citus_activate_node and check that it marked hasmetadata for that worker
table pg_dist_partition;
\d
set citus.log_remote_commands to true;
set citus.worker_min_messages to debug5;
SELECT citus_activate_node('localhost', :worker_1_port);
reset citus.log_remote_commands;
reset citus.worker_min_messages;
SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_1_port;
-- Check that the metadata has been copied to the worker
\c - - - :worker_1_port
table pg_dist_partition;
SELECT * FROM pg_dist_local_group;
SELECT * FROM pg_dist_node ORDER BY nodeid;
SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid;
@@ -126,7 +143,7 @@ SELECT * FROM pg_dist_colocation ORDER BY colocationid;
-- Make sure that truncate trigger has been set for the MX table on worker
SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass;
-- Make sure that start_metadata_sync_to_node considers foreign key constraints
-- Make sure that citus_activate_node considers foreign key constraints
\c - - - :master_port
-- Since we're superuser, we can set the replication model to 'streaming' to
@@ -142,7 +159,7 @@ CREATE TABLE mx_testing_schema_2.fk_test_2 (col1 int, col2 int, col3 text,
SELECT create_distributed_table('mx_testing_schema.fk_test_1', 'col1');
SELECT create_distributed_table('mx_testing_schema_2.fk_test_2', 'col1');
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT citus_activate_node('localhost', :worker_1_port);
-- Check that foreign key metadata exists on the worker
\c - - - :worker_1_port
@@ -154,10 +171,10 @@ DROP TABLE mx_testing_schema.fk_test_1;
RESET citus.shard_replication_factor;
-- Check that repeated calls to start_metadata_sync_to_node has no side effects
-- Check that repeated calls to citus_activate_node have no side effects
\c - - - :master_port
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT citus_activate_node('localhost', :worker_1_port);
SELECT citus_activate_node('localhost', :worker_1_port);
\c - - - :worker_1_port
SELECT * FROM pg_dist_local_group;
SELECT * FROM pg_dist_node ORDER BY nodeid;
@@ -171,10 +188,11 @@ SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
relid = 'mx_testing_schema.mx_index'::regclass;
SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass;
-- Make sure that start_metadata_sync_to_node can be called inside a transaction and rollbacked
-- Make sure that citus_activate_node can be called inside a transaction and rolled back
\c - - - :master_port
BEGIN;
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
SELECT citus_activate_node('localhost', :worker_2_port);
ROLLBACK;
SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;
@@ -182,7 +200,7 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;
-- Check that the distributed table can be queried from the worker
\c - - - :master_port
SET citus.shard_replication_factor TO 1;
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT citus_activate_node('localhost', :worker_1_port);
CREATE TABLE mx_query_test (a int, b text, c int);
SELECT create_distributed_table('mx_query_test', 'a');
@@ -441,7 +459,7 @@ SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
-- sync table with serial column after create_distributed_table
CREATE TABLE mx_table_with_small_sequence(a int, b SERIAL, c SMALLSERIAL);
SELECT create_distributed_table('mx_table_with_small_sequence', 'a');
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT citus_activate_node('localhost', :worker_1_port);
DROP TABLE mx_table_with_small_sequence;
-- Show that create_distributed_table works with a serial column
@@ -486,7 +504,7 @@ SELECT nextval('mx_table_with_sequence_c_seq');
-- Check that adding a new metadata node sets the sequence space correctly
\c - - - :master_port
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
SELECT citus_activate_node('localhost', :worker_2_port);
\c - - - :worker_2_port
SELECT groupid FROM pg_dist_local_group;
@@ -531,7 +549,7 @@ DROP TABLE mx_table_with_small_sequence, mx_table_with_sequence;
\c - - - :master_port
-- Remove a node so that shards and sequences won't be created on table creation. Therefore,
-- we can test that start_metadata_sync_to_node can actually create the sequence with proper
-- we can test that citus_activate_node can actually create the sequence with proper
-- owner
CREATE TABLE pg_dist_placement_temp AS SELECT * FROM pg_dist_placement;
CREATE TABLE pg_dist_partition_temp AS SELECT * FROM pg_dist_partition;
@@ -557,7 +575,7 @@ SELECT create_distributed_table('mx_table', 'a');
\c - postgres - :master_port
SELECT master_add_node('localhost', :worker_2_port);
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
SELECT citus_activate_node('localhost', :worker_2_port);
\c - mx_user - :worker_1_port
SELECT nextval('mx_table_b_seq');
@@ -771,7 +789,7 @@ SELECT pg_reload_conf();
UPDATE pg_dist_node SET metadatasynced=true WHERE nodeport=:worker_1_port;
SELECT master_add_node('localhost', :worker_2_port);
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
SELECT citus_activate_node('localhost', :worker_2_port);
CREATE SEQUENCE mx_test_sequence_0;
CREATE SEQUENCE mx_test_sequence_1;
@@ -865,6 +883,8 @@ ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART :last_node_id;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id;
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;
-- Turn metadata sync back on at the end
-- Turn metadata sync back on and activate the nodes at the end
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
SELECT citus_activate_node('localhost', :worker_1_port);
SELECT citus_activate_node('localhost', :worker_2_port);