Fix style

pull/5209/head
Sait Talha Nisanci 2021-08-25 17:17:16 +03:00
parent e1f5520e1a
commit 0b67fcf81d
34 changed files with 90 additions and 79 deletions

View File

@@ -451,7 +451,7 @@ workflows:
      - build:
          name: build-14
          pg_major: 14
          image_tag: '14beta3-dev202108191715'
      - check-style
      - check-sql-snapshots

View File

@@ -2019,7 +2019,6 @@ ColumnarProcessUtility(PlannedStmt *pstmt,
                       DestReceiver *dest,
                       QueryCompletionCompat *completionTag)
{
#if PG_VERSION_NUM >= PG_VERSION_14
    if (readOnlyTree)
    {

View File

@@ -251,7 +251,9 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency)
     */
    Assert(false);
    ereport(ERROR, (errmsg("unsupported object %s for distribution by citus",
-                          getObjectTypeDescription_compat(dependency, /* missingOk: */ false)),
+                          getObjectTypeDescription_compat(dependency,
+                                                          /* missingOk: */ false)),
                    errdetail(
                        "citus tries to recreate an unsupported object on its workers"),
                    errhint("please report a bug as this should not be happening")));

View File

@@ -1613,7 +1613,7 @@ PreprocessAlterFunctionDependsStmt(Node *node, const char *queryString,
     * workers
     */
    const char *functionName =
        getObjectIdentity_compat(&address, /* missingOk: */ false);
    ereport(ERROR, (errmsg("distrtibuted functions are not allowed to depend on an "
                           "extension"),
@@ -1933,9 +1933,9 @@ ErrorIfFunctionDependsOnExtension(const ObjectAddress *functionAddress)
    if (IsObjectAddressOwnedByExtension(functionAddress, &extensionAddress))
    {
        char *functionName =
            getObjectIdentity_compat(functionAddress, /* missingOk: */ false);
        char *extensionName =
            getObjectIdentity_compat(&extensionAddress, /* missingOk: */ false);
        ereport(ERROR, (errmsg("unable to create a distributed function from functions "
                               "owned by an extension"),

View File

@@ -212,9 +212,9 @@ DoLocalCopy(StringInfo buffer, Oid relationId, int64 shardId, CopyStmt *copyStat
    (void) addRangeTableEntryForRelation(pState, shard, AccessShareLock,
                                         NULL, false, false);
    CopyFromState cstate = BeginCopyFrom_compat(pState, shard, NULL, NULL, false,
                                                ReadFromLocalBufferCallback,
                                                copyStatement->attlist,
                                                copyStatement->options);
    CopyFrom(cstate);
    EndCopyFrom(cstate);

View File

@@ -525,13 +525,13 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionT
    /* initialize copy state to read from COPY data source */
    CopyFromState copyState = BeginCopyFrom_compat(NULL,
                                                   copiedDistributedRelation,
                                                   NULL,
                                                   copyStatement->filename,
                                                   copyStatement->is_program,
                                                   NULL,
                                                   copyStatement->attlist,
                                                   copyStatement->options);
    /* set up callback to identify error line number */
    errorCallback.callback = CopyFromErrorCallback;
@@ -627,13 +627,13 @@ CopyToNewShards(CopyStmt *copyStatement, QueryCompletionCompat *completionTag, O
    /* initialize copy state to read from COPY data source */
    CopyFromState copyState = BeginCopyFrom_compat(NULL,
                                                   distributedRelation,
                                                   NULL,
                                                   copyStatement->filename,
                                                   copyStatement->is_program,
                                                   NULL,
                                                   copyStatement->attlist,
                                                   copyStatement->options);
    CopyOutState copyOutState = (CopyOutState) palloc0(sizeof(CopyOutStateData));
    copyOutState->delim = (char *) delimiterCharacter;

View File

@@ -2448,16 +2448,16 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
#if PG_VERSION_NUM >= PG_VERSION_14
        case AT_SetCompression:
#endif
            {
                /*
                 * We will not perform any special check for:
                 * ALTER TABLE .. ALTER COLUMN .. SET NOT NULL
                 * ALTER TABLE .. REPLICA IDENTITY ..
                 * ALTER TABLE .. VALIDATE CONSTRAINT ..
                 * ALTER TABLE .. ALTER COLUMN .. SET COMPRESSION ..
                 */
                break;
            }
        case AT_SetRelOptions: /* SET (...) */
        case AT_ResetRelOptions: /* RESET (...) */

View File

@@ -810,15 +810,21 @@ deparse_shard_reindex_statement(ReindexStmt *origStmt, Oid distrelid, int64 shar
    }
}
/*
 * IsReindexWithParam_compat returns true if the given parameter
 * exists for the given reindexStmt.
 */
-bool IsReindexWithParam_compat(ReindexStmt* reindexStmt, char* param) {
+bool
+IsReindexWithParam_compat(ReindexStmt *reindexStmt, char *param)
+{
#if PG_VERSION_NUM < PG_VERSION_14
-    if (strcmp(param, "concurrently") == 0) {
+    if (strcmp(param, "concurrently") == 0)
+    {
        return reindexStmt->concurrent;
-    }else if (strcmp(param, "verbose") == 0) {
+    }
+    else if (strcmp(param, "verbose") == 0)
+    {
        return reindexStmt->options & REINDEXOPT_VERBOSE;
    }
    return false;
@@ -831,10 +837,9 @@ bool IsReindexWithParam_compat(ReindexStmt* reindexStmt, char* param) {
            return defGetBoolean(opt);
        }
    }
    return false;
#endif
}
}
/*
@@ -1292,6 +1297,7 @@ RoleSpecString(RoleSpec *spec, bool withQuoteIdentifier)
            quote_identifier(spec->rolename) :
            spec->rolename;
        }
#if PG_VERSION_NUM >= PG_VERSION_14
        case ROLESPEC_CURRENT_ROLE:
#endif
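
The rewritten IsReindexWithParam_compat above hides how REINDEX options are represented before and after PG14. As a hedged illustration of the statement forms it ends up classifying (my_table is a hypothetical name, not part of this commit):

-- illustrative only; my_table is a made-up table name
REINDEX (VERBOSE) TABLE my_table;                -- "verbose" option
REINDEX TABLE CONCURRENTLY my_table;             -- "concurrently", pre-PG14 spelling
REINDEX (VERBOSE, CONCURRENTLY) TABLE my_table;  -- PG14+ parenthesized options list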

View File

@@ -233,6 +233,7 @@ AppendStatTypes(StringInfo buf, CreateStatsStmt *stmt)
    appendStringInfoString(buf, ")");
}
#if PG_VERSION_NUM >= PG_VERSION_14
static void
AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
@@ -259,6 +260,8 @@ AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
        }
    }
}
#else
static void
AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
@@ -285,6 +288,8 @@ AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
        }
    }
}
#endif
static void

View File

@@ -2060,7 +2060,7 @@ get_with_clause(Query *query, deparse_context *context)
        if (PRETTY_INDENT(context))
            appendContextKeyword(context, "", 0, 0, 0);
        appendStringInfoChar(buf, ')');
        if (cte->search_clause)
        {
            bool first = true;
@@ -2100,7 +2100,7 @@ get_with_clause(Query *query, deparse_context *context)
            }
            appendStringInfo(buf, " SET %s", quote_identifier(cte->cycle_clause->cycle_mark_column));
            {
                Const *cmv = castNode(Const, cte->cycle_clause->cycle_mark_value);
                Const *cmd = castNode(Const, cte->cycle_clause->cycle_mark_default);
@@ -7771,7 +7771,7 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
            appendStringInfoString(buf, quote_identifier(colname));
        }
        appendStringInfoChar(buf, ')');
        if (j->join_using_alias)
            appendStringInfo(buf, " AS %s",
                             quote_identifier(j->join_using_alias->aliasname));

View File

@@ -411,8 +411,8 @@ ReadFileIntoTupleStore(char *fileName, char *copyFormat, TupleDesc tupleDescript
    copyOptions = lappend(copyOptions, copyOption);
    CopyFromState copyState = BeginCopyFrom_compat(NULL, stubRelation, NULL,
                                                   fileName, false, NULL,
                                                   NULL, copyOptions);
    while (true)
    {

View File

@@ -75,8 +75,12 @@ citus_unmark_object_distributed(PG_FUNCTION_ARGS)
    {
        ereport(ERROR, (errmsg("object still exists"),
                        errdetail("the %s \"%s\" still exists",
-                                 getObjectTypeDescription_compat(&address, /* missingOk: */ false),
-                                 getObjectIdentity_compat(&address, /* missingOk: */ false)),
+                                 getObjectTypeDescription_compat(&address,
+                                                                 /* missingOk: */ false),
+                                 getObjectIdentity_compat(&address,
+                                                          /* missingOk: */ false)),
                        errhint("drop the object via a DROP command")));
    }

View File

@@ -688,7 +688,6 @@ ShardsMatchingDeleteCriteria(Oid relationId, List *shardIntervalList,
    Assert(deleteCriteria != NULL);
    List *deleteCriteriaList = list_make1(deleteCriteria);
    /* walk over shard list and check if shards can be dropped */
    ShardInterval *shardInterval = NULL;

View File

@@ -286,9 +286,10 @@ ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es)
        PlannedStmt *plan = subPlan->plan;
        IntoClause *into = NULL;
        ParamListInfo params = NULL;
        /*
         * With PG14, we need to provide a string here,
         * for now we put an empty string, which is valid according to postgres.
         */
        char *queryString = pstrdup("");
        instr_time planduration;

View File

@@ -1890,9 +1890,10 @@ MasterAggregateExpression(Aggref *originalAggregate,
    if (aggregateType == AGGREGATE_ARRAY_AGG)
    {
#if PG_VERSION_NUM >= PG_VERSION_14
        /*
         * Postgres expects the type of the array here such as INT4ARRAYOID.
         * Hence we set it to workerReturnType. If we set this to
         * ANYCOMPATIBLEARRAYOID then we will get the following error:
         * "argument declared anycompatiblearray is not an array but type anycompatiblearray"
         */
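
The comment above is a consequence of PG14 switching array_cat (and with it Citus' array_cat_agg aggregate) from anyarray to anycompatiblearray. A small hedged check, not taken from this commit, that shows the version-specific signature:

-- illustrative queries, not part of the commit; array_cat is assumed to be
-- the state function behind array_cat_agg
SELECT pg_get_function_arguments('array_cat'::regproc);  -- anyarray pre-14, anycompatiblearray on PG14
SELECT array_cat(ARRAY[1, 2], ARRAY[3, 4]);               -- {1,2,3,4} either way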

View File

@@ -2054,7 +2054,8 @@ GetRestrictInfoListForRelation(RangeTblEntry *rangeTblEntry,
         * If the restriction involves multiple tables, we cannot add it to
         * input relation's expression list.
         */
-        Relids varnos = pull_varnos_compat(relationRestriction->plannerInfo, (Node *) restrictionClause);
+        Relids varnos = pull_varnos_compat(relationRestriction->plannerInfo,
+                                           (Node *) restrictionClause);
        if (bms_num_members(varnos) != 1)
        {
            continue;

View File

@@ -31,7 +31,7 @@ BEGIN
     * Citus extension, so we create that dependency here.
     * We are not using:
     * ALTER EXENSION citus DROP/CREATE AGGREGATE array_cat_agg
     * because we don't have an easy way to check if the aggregate
     * exists with anyarray type or anycompatiblearray type.
     */
    INSERT INTO pg_depend

View File

@@ -31,7 +31,7 @@ BEGIN
     * Citus extension, so we create that dependency here.
     * We are not using:
     * ALTER EXENSION citus DROP/CREATE AGGREGATE array_cat_agg
     * because we don't have an easy way to check if the aggregate
     * exists with anyarray type or anycompatiblearray type.
     */
    INSERT INTO pg_depend
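
The comment in both hunks above explains why the dependency is inserted directly into pg_depend. A minimal sketch of the row such an INSERT has to produce (assumed shape, not the literal statement from this commit): mark the aggregate's pg_proc entry as an extension member ('e') of citus so that DROP EXTENSION citus removes it too.

-- assumed shape, for illustration only
INSERT INTO pg_depend (classid, objid, objsubid, refclassid, refobjid, refobjsubid, deptype)
SELECT 'pg_proc'::regclass, p.oid, 0,
       'pg_extension'::regclass, e.oid, 0, 'e'
FROM pg_proc p, pg_extension e
WHERE p.proname = 'array_cat_agg'
  AND e.extname = 'citus';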

View File

@@ -5,8 +5,8 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_prepare_pg_upgrade()
AS $cppu$
BEGIN
    DELETE FROM pg_depend WHERE
        objid IN (SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') AND
        refobjid IN (select oid from pg_extension where extname = 'citus');
    /*
     * We are dropping the aggregates because postgres 14 changed
@@ -14,7 +14,7 @@ BEGIN
     * upgrading to pg14, spegifically when running pg_restore on
     * array_cat_agg we would get an error. So we drop the aggregate
     * and create the right one on citus_finish_pg_upgrade.
     */
    DROP AGGREGATE IF EXISTS array_cat_agg(anyarray);
    DROP AGGREGATE IF EXISTS array_cat_agg(anycompatiblearray);
    --

View File

@@ -5,8 +5,8 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_prepare_pg_upgrade()
AS $cppu$
BEGIN
    DELETE FROM pg_depend WHERE
        objid IN (SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') AND
        refobjid IN (select oid from pg_extension where extname = 'citus');
    /*
     * We are dropping the aggregates because postgres 14 changed
@@ -14,7 +14,7 @@ BEGIN
     * upgrading to pg14, spegifically when running pg_restore on
     * array_cat_agg we would get an error. So we drop the aggregate
     * and create the right one on citus_finish_pg_upgrade.
     */
    DROP AGGREGATE IF EXISTS array_cat_agg(anyarray);
    DROP AGGREGATE IF EXISTS array_cat_agg(anycompatiblearray);
    --
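
The two conditional drops above cover whichever signature happens to exist before pg_restore runs. A hedged sketch of the other half, the recreate that citus_finish_pg_upgrade is said to perform on PG14 (definition inferred from array_cat's PG14 signature, not copied from this commit):

-- hedged sketch; the exact definition lives in citus_finish_pg_upgrade
CREATE AGGREGATE array_cat_agg(anycompatiblearray)
    (SFUNC = array_cat, STYPE = anycompatiblearray);
-- to see which signature is currently installed:
SELECT oid::regprocedure FROM pg_proc WHERE proname = 'array_cat_agg';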

View File

@@ -50,7 +50,7 @@ FunctionOidExtended(const char *schemaName, const char *functionName, int argume
                                             argumentCount,
                                             argumentList,
                                             findVariadics,
                                             findDefaults,
                                             false,
                                             true);

View File

@@ -243,4 +243,4 @@ clean distclean maintainer-clean:
	rm -f $(output_files) $(input_files)
	rm -rf tmp_check/
all: create-tablespaces

View File

@@ -239,6 +239,7 @@ s/ERROR: ROLLBACK is not allowed in an SQL function/ERROR: ROLLBACK is not all
/.*Async Capable.*/d
/Parent Relationship/d
/Parent-Relationship/d
+s/function array_cat_agg\(anyarray\) anyarray/function array_cat_agg\(anycompatiblearray\) anycompatiblearray/g
s/function array_cat_agg\(anycompatiblearray\)/function array_cat_agg\(anyarray\)/g
s/TRIM\(BOTH FROM value\)/btrim\(value\)/g
s/pg14\.idx.*/pg14\.xxxxx/g

View File

@@ -20,4 +20,5 @@
/multi_mx_copy_data.out
/multi_outer_join.out
/multi_outer_join_reference.out
+/tablespace.out
/worker_copy.out

View File

@@ -139,7 +139,7 @@ SELECT * FROM multi_extension.print_extension_changes();
| function alter_role_if_exists(text,text) boolean
| function any_value(anyelement) anyelement
| function any_value_agg(anyelement,anyelement) anyelement
-| function array_cat_agg(anycompatiblearray) anycompatiblearray
+| function array_cat_agg(anyarray) anycompatiblearray
| function assign_distributed_transaction_id(integer,bigint,timestamp with time zone) void
| function authinfo_valid(text) boolean
| function broadcast_intermediate_result(text,text) bigint

View File

@@ -1,5 +0,0 @@
-CREATE TABLESPACE test_tablespace LOCATION '/home/talha/citus/src/test/regress/data/ts0';
-\c - - - :worker_1_port
-CREATE TABLESPACE test_tablespace LOCATION '/home/talha/citus/src/test/regress/data/ts1';
-\c - - - :worker_2_port
-CREATE TABLESPACE test_tablespace LOCATION '/home/talha/citus/src/test/regress/data/ts2';

View File

@@ -26,7 +26,7 @@ ORDER BY 1;
function alter_table_set_access_method(regclass,text)
function any_value(anyelement)
function any_value_agg(anyelement,anyelement)
-function array_cat_agg(anycompatiblearray)
+function array_cat_agg(anyarray)
function assign_distributed_transaction_id(integer,bigint,timestamp with time zone)
function authinfo_valid(text)
function broadcast_intermediate_result(text,text)
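
The expected output above lists every function owned by the citus extension. A hedged sketch of the kind of catalog query that produces such a list (the test's real query is not shown in this hunk):

-- illustrative query, not necessarily the one used by the test
SELECT p.oid::regprocedure AS "function"
FROM pg_proc p
JOIN pg_depend d ON d.classid = 'pg_proc'::regclass AND d.objid = p.oid
JOIN pg_extension e ON d.refclassid = 'pg_extension'::regclass AND d.refobjid = e.oid
WHERE e.extname = 'citus' AND d.deptype = 'e'
ORDER BY 1;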

View File

@@ -23,7 +23,7 @@ ORDER BY 1;
function alter_table_set_access_method(regclass,text)
function any_value(anyelement)
function any_value_agg(anyelement,anyelement)
-function array_cat_agg(anycompatiblearray)
+function array_cat_agg(anyarray)
function assign_distributed_transaction_id(integer,bigint,timestamp with time zone)
function authinfo_valid(text)
function broadcast_intermediate_result(text,text)

View File

@@ -20,4 +20,5 @@
/multi_mx_copy_data.sql
/multi_outer_join.sql
/multi_outer_join_reference.sql
+/tablespace.sql
/worker_copy.sql

View File

@@ -101,7 +101,7 @@ FROM
-- DISTINCT w/wout distribution key
-- there seems to be an issue with SELECT DISTINCT ROW with PG14
-- so we add an alternative output that gives an error, this should
-- be removed after the issue is fixed on PG14.
SELECT DISTINCT(col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col32, col33, col34, col35, col36, col37, col38)
FROM
    data_types_table
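
The comment above refers to DISTINCT over a row constructor rather than over a plain column list. A minimal hedged illustration with hypothetical names (t, a, b are not from the test):

-- illustrative only
SELECT DISTINCT (a, b) FROM t;   -- row-wise DISTINCT, the form flagged as problematic on PG14
SELECT DISTINCT a, b FROM t;     -- ordinary multi-column DISTINCT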

View File

@@ -60,4 +60,4 @@ SELECT success FROM run_command_on_workers('select pg_reload_conf()');
RESET enable_partitionwise_join;
DROP SCHEMA partition_wise_join CASCADE;

View File

@@ -114,7 +114,7 @@ INSERT INTO test_wal VALUES(3,33),(4,44),(5,55) RETURNING *;
-- this test has different output for pg14 and here we mostly test that
-- we don't get an error, hence we use explain_has_distributed_subplan.
SELECT public.explain_has_distributed_subplan(
$$
EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE)
WITH cte_1 AS (INSERT INTO test_wal VALUES(6,66),(7,77),(8,88) RETURNING *)
SELECT * FROM cte_1;

View File

@@ -98,7 +98,7 @@ CREATE EXTENSION seg;
-- show that the extension is created on existing worker
SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'seg'$$);
SELECT workers.result = pg_extension.extversion AS same_version
    FROM run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$) workers, pg_extension WHERE extname = 'seg';
-- now create the reference table
@@ -145,7 +145,7 @@ SELECT 1 from master_add_node('localhost', :worker_2_port);
-- show that the extension is created on both existing and new node
SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'seg'$$);
SELECT workers.result = pg_extension.extversion AS same_version
    FROM run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$) workers, pg_extension WHERE extname = 'seg';
-- check for the unpackaged extension to be created correctly
@@ -212,7 +212,7 @@ ROLLBACK;
-- show that the CREATE EXTENSION command propagated even if the transaction
-- block is rollbacked, that's a shortcoming of dependency creation logic
SELECT COUNT(DISTINCT workers.result)
    FROM run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$) workers;
-- drop the schema and all the objects

View File

@@ -1,5 +0,0 @@
-CREATE TABLESPACE test_tablespace LOCATION '/home/talha/citus/src/test/regress/data/ts0';
-\c - - - :worker_1_port
-CREATE TABLESPACE test_tablespace LOCATION '/home/talha/citus/src/test/regress/data/ts1';
-\c - - - :worker_2_port
-CREATE TABLESPACE test_tablespace LOCATION '/home/talha/citus/src/test/regress/data/ts2';