Added schemaname in Query

pull/7013/head
Shabnam Khan 2023-06-20 16:04:35 +05:30
parent 08d4a72867
commit 474739ba31
1 changed file with 29 additions and 21 deletions


@@ -10,6 +10,7 @@
 #include "distributed/distribution_column.h"
 #include "utils/builtins.h"
 #include "distributed/shard_split.h"
+#include "utils/lsyscache.h"
 PG_FUNCTION_INFO_V1(citus_auto_shard_split_start);
@@ -31,6 +32,7 @@ typedef struct ShardInfoData
 	char *distributionColumn;
 	char *datatype;
 	char *shardname;
+	char *schemaname;
 	Oid tableId;
 	Oid distributionColumnId;
 }ShardInfoData;
@@ -64,7 +66,7 @@ ErrorOnConcurrentOperation()
 /*
- * For a given SplitPoints , it creates the SQL query for the shard Splitting
+ * For a given SplitPoints , it creates the SQL query for the Shard Splitting
  */
 StringInfo
 GetShardSplitQuery(ShardInfo shardinfo, List *SplitPoints, char *shardSplitMode)
@@ -86,7 +88,8 @@ GetShardSplitQuery(ShardInfo shardinfo, List *SplitPoints , char* shardSplitMode
 	{
 		appendStringInfo(splitQuery, "%d,", shardinfo->nodeid);
 	}
-	appendStringInfo(splitQuery, "%d], %s)", shardinfo->nodeid, quote_literal_cstr(shardSplitMode));
+	appendStringInfo(splitQuery, "%d], %s)", shardinfo->nodeid, quote_literal_cstr(
+						 shardSplitMode));
 	return splitQuery;
 }
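
For reference, GetShardSplitQuery assembles a call to the existing citus_split_shard_by_split_points UDF; the opening of the statement is built in the part of the function not shown in this hunk. A sketch of the generated query, with a made-up shard id, split point, and node id:

    SELECT citus_split_shard_by_split_points(
        102008,              -- shardinfo->shardid (illustrative)
        ARRAY['536870911'],  -- the computed SplitPoints, rendered as text
        ARRAY[5, 5],         -- shardinfo->nodeid, repeated once per resulting shard
        'block_writes');     -- quote_literal_cstr(shardSplitMode)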
@@ -96,7 +99,8 @@ GetShardSplitQuery(ShardInfo shardinfo, List *SplitPoints , char* shardSplitMode
  * It creates a background job for citus_split_shard_by_split_points and executes it in background.
  */
 void
-ExecuteSplitBackgroundJob(int64 jobid, ShardInfo shardinfo, List *SplitPoints , char* shardSplitMode)
+ExecuteSplitBackgroundJob(int64 jobid, ShardInfo shardinfo, List *SplitPoints,
+						  char *shardSplitMode)
 {
 	StringInfo splitQuery = makeStringInfo();
 	splitQuery = GetShardSplitQuery(shardinfo, SplitPoints, shardSplitMode);
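
Since the split runs through Citus's background-job machinery (the same infrastructure citus_rebalance_start uses), a scheduled job can be inspected from SQL. Assuming this code path records into the standard catalogs, something like the following would show its progress (the job id is illustrative):

    -- State of the job created by CreateBackgroundJob
    SELECT job_id, state, description FROM pg_dist_background_job WHERE job_id = 11;
    -- The citus_split_shard_by_split_points command queued as its task
    SELECT task_id, status, command FROM pg_dist_background_task WHERE job_id = 11;
    -- Optionally block until the split finishes
    SELECT citus_job_wait(11);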
@@ -168,12 +172,13 @@ FindShardSplitPoints(ShardInfo shardinfo)
 	appendStringInfo(CommonValueQuery,
 					 "SELECT shardid , unnest(result::%s[]) from run_command_on_shards(%s,$$SELECT array_agg(val)"
 					 " FROM pg_stats s , unnest(most_common_vals::text::%s[],most_common_freqs) as res(val,freq)"
-					 " WHERE tablename = %s AND attname = %s AND freq > 0.2 $$)"
+					 " WHERE tablename = %s AND attname = %s AND schemaname = %s AND freq > 0.3 $$)"
 					 " WHERE result <> '' AND shardid = %ld;",
 					 shardinfo->datatype, quote_literal_cstr(shardinfo->tablename),
 					 shardinfo->datatype,
 					 quote_literal_cstr(shardinfo->shardname),
 					 quote_literal_cstr(shardinfo->distributionColumn),
+					 quote_literal_cstr(shardinfo->schemaname),
 					 shardinfo->shardid);
 	ereport(LOG, errmsg("%s", CommonValueQuery->data));
@@ -204,6 +209,7 @@ FindShardSplitPoints(ShardInfo shardinfo)
 										   tenantIdDatum);
 		hashedValue = DatumGetInt32(hashedValueDatum);
 		ereport(LOG, errmsg("%d", hashedValue));
 		/*Switching the memory context to store the unique SplitPoints in a list*/
 		MemoryContextSwitchTo(originalContext);
@@ -258,10 +264,11 @@ ScheduleShardSplit(ShardInfo shardinfo , char* shardSplitMode)
 	List *SplitPoints = FindShardSplitPoints(shardinfo);
 	if (list_length(SplitPoints) > 0)
 	{
-		// ErrorOnConcurrentOperation();
+		/* ErrorOnConcurrentOperation(); */
 		int64 jobId = CreateBackgroundJob("Automatic Shard Split",
 										  "Split using SplitPoints List");
-		ereport(LOG,errmsg("%s",GetShardSplitQuery(shardinfo,SplitPoints,shardSplitMode)->data));
+		ereport(LOG, errmsg("%s", GetShardSplitQuery(shardinfo, SplitPoints,
+													 shardSplitMode)->data));
 		ExecuteSplitBackgroundJob(jobId, shardinfo, SplitPoints, shardSplitMode);
 	}
 	else
@@ -299,10 +306,9 @@ citus_auto_shard_split_start(PG_FUNCTION_ARGS)
 		);
 	ereport(LOG, errmsg("%s", query->data));
-	char *shardSplitMode;
 	Oid shardTransferModeOid = PG_GETARG_OID(0);
 	Datum enumLabelDatum = DirectFunctionCall1(enum_out, shardTransferModeOid);
-	shardSplitMode = DatumGetCString(enumLabelDatum);
+	char *shardSplitMode = DatumGetCString(enumLabelDatum);
 	ereport(LOG, errmsg("%s", shardSplitMode));
 	if (SPI_connect() != SPI_OK_CONNECT)
 	{
@@ -354,6 +360,8 @@ citus_auto_shard_split_start(PG_FUNCTION_ARGS)
 												  distributionColumn);
 		shardinfo.datatype = format_type_be(shardinfo.distributionColumnId);
+		Oid schemaOid = get_rel_namespace(shardinfo.tableId);
+		shardinfo.schemaname = get_namespace_name(schemaOid);
 		ScheduleShardSplit(&shardinfo, shardSplitMode);
 		ereport(LOG, (errmsg(
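
Finally, a usage sketch. The SQL-level signature of citus_auto_shard_split_start is defined elsewhere in the PR, but given the PG_GETARG_OID(0) / enum_out handling above it presumably takes a citus.shard_transfer_mode argument, so starting an automatic split would look roughly like:

    -- Illustrative invocation; the exact SQL wrapper is not part of this diff
    SELECT citus_auto_shard_split_start('block_writes');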