From 108114ab99e194fb2070eb54c95b068ea40939a2 Mon Sep 17 00:00:00 2001
From: Onder Kalaci
Date: Wed, 27 Apr 2016 10:36:03 +0300
Subject: [PATCH] Apply final code review feedback

- Fix O(n^2) loop to O(n)
- Collapse two if statements into a single one
- Some coding conventions feedback
---
 .../distributed/planner/multi_logical_optimizer.c |  4 ++--
 .../distributed/planner/multi_physical_planner.c  |  6 +++---
 src/backend/distributed/utils/metadata_cache.c    | 13 +++++--------
 3 files changed, 10 insertions(+), 13 deletions(-)

diff --git a/src/backend/distributed/planner/multi_logical_optimizer.c b/src/backend/distributed/planner/multi_logical_optimizer.c
index 6ce28687c..e68804d8c 100644
--- a/src/backend/distributed/planner/multi_logical_optimizer.c
+++ b/src/backend/distributed/planner/multi_logical_optimizer.c
@@ -2311,8 +2311,8 @@ TablePartitioningSupportsDistinct(List *tableNodeList, MultiExtendedOp *opNode,
 	 */
 	partitionMethod = PartitionMethod(relationId);
 
-	if (partitionMethod == DISTRIBUTE_BY_RANGE
-		|| partitionMethod == DISTRIBUTE_BY_HASH)
+	if (partitionMethod == DISTRIBUTE_BY_RANGE ||
+		partitionMethod == DISTRIBUTE_BY_HASH)
 	{
 		Var *tablePartitionColumn = tableNode->partitionColumn;
 		bool groupedByPartitionColumn = false;
diff --git a/src/backend/distributed/planner/multi_physical_planner.c b/src/backend/distributed/planner/multi_physical_planner.c
index 0d13273fe..9d927c99f 100644
--- a/src/backend/distributed/planner/multi_physical_planner.c
+++ b/src/backend/distributed/planner/multi_physical_planner.c
@@ -1965,9 +1965,9 @@ SubquerySqlTaskList(Job *job)
 		List *shardIntervalList = LoadShardIntervalList(relationId);
 		List *finalShardIntervalList = NIL;
 		ListCell *fragmentCombinationCell = NULL;
+		ListCell *shardIntervalCell = NULL;
 		uint32 tableId = rangeTableIndex + 1; /* tableId starts from 1 */
 		uint32 finalShardCount = 0;
-		uint32 shardIndex = 0;
 
 		if (opExpressionList != NIL)
 		{
@@ -1991,9 +1991,9 @@ SubquerySqlTaskList(Job *job)
 
 		fragmentCombinationCell = list_head(fragmentCombinationList);
 
-		for (shardIndex = 0; shardIndex < finalShardCount; shardIndex++)
+		foreach(shardIntervalCell, finalShardIntervalList)
 		{
-			ShardInterval *shardInterval = list_nth(finalShardIntervalList, shardIndex);
+			ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell);
 			RangeTableFragment *shardFragment = palloc0(fragmentSize);
 
 			shardFragment->fragmentReference = &(shardInterval->shardId);
diff --git a/src/backend/distributed/utils/metadata_cache.c b/src/backend/distributed/utils/metadata_cache.c
index 1c81daa2d..9c8a81edc 100644
--- a/src/backend/distributed/utils/metadata_cache.c
+++ b/src/backend/distributed/utils/metadata_cache.c
@@ -321,14 +321,6 @@ LookupDistTableCacheEntry(Oid relationId)
 											   shardIntervalArrayLength,
 											   shardIntervalCompareFunction);
 
-	/* check the shard distribution for hash partitioned tables */
-	if (partitionMethod == DISTRIBUTE_BY_HASH)
-	{
-		hasUniformHashDistribution =
-			HasUniformHashDistribution(sortedShardIntervalArray,
-									   shardIntervalArrayLength);
-	}
-
 	/* check if there exists any shard intervals with no min/max values */
 	hasUninitializedShardInterval =
 		HasUninitializedShardInterval(sortedShardIntervalArray, shardIntervalArrayLength);
@@ -347,6 +339,11 @@ LookupDistTableCacheEntry(Oid relationId)
 											  sizeof(FmgrInfo));
 
 		fmgr_info_copy(hashFunction, &(typeEntry->hash_proc_finfo), CacheMemoryContext);
+
+		/* check the shard distribution for hash partitioned tables */
+		hasUniformHashDistribution =
+			HasUniformHashDistribution(sortedShardIntervalArray,
+									   shardIntervalArrayLength);
 	}
 
 	cacheEntry = hash_search(DistTableCacheHash, hashKey, HASH_ENTER, NULL);
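
The O(n^2)-to-O(n) fix above relies on how PostgreSQL's pg_list worked in the server versions Citus targeted at the time: List was a linked list, so every list_nth() call walks from the head, and indexing inside a loop multiplies out to O(n^2), while foreach() keeps a ListCell cursor that advances once per iteration. Below is a minimal standalone sketch of the two patterns; ProcessElement() is a hypothetical stand-in for the per-shard fragment construction done in SubquerySqlTaskList(), not a function from the Citus source.

#include "postgres.h"
#include "nodes/pg_list.h"

/* hypothetical per-element work, standing in for building a shard fragment */
static void
ProcessElement(void *element)
{
	(void) element;
}

/* O(n^2): each list_nth() call is itself an O(n) walk from the list head */
static void
VisitQuadratic(List *elementList)
{
	int elementCount = list_length(elementList);
	int elementIndex = 0;

	for (elementIndex = 0; elementIndex < elementCount; elementIndex++)
	{
		ProcessElement(list_nth(elementList, elementIndex));
	}
}

/* O(n): foreach() advances one ListCell per iteration */
static void
VisitLinear(List *elementList)
{
	ListCell *elementCell = NULL;

	foreach(elementCell, elementList)
	{
		ProcessElement(lfirst(elementCell));
	}
}

(Since PostgreSQL 13, List is implemented as a resizable array and list_nth() is O(1), but foreach() remains the idiomatic traversal either way.)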
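
The metadata_cache.c hunks carry the commit's "collapse two if statements" item: HasUniformHashDistribution() is only meaningful for hash-partitioned tables, and, as the surrounding fmgr_info_copy(..., hash_proc_finfo, ...) context suggests, LookupDistTableCacheEntry() already has a branch taken only for DISTRIBUTE_BY_HASH tables, so the check moves into that existing branch and its standalone guard disappears.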