Fix style

pull/5209/head
Sait Talha Nisanci 2021-08-25 17:17:16 +03:00
parent e1f5520e1a
commit 0b67fcf81d
34 changed files with 90 additions and 79 deletions

View File

@@ -451,7 +451,7 @@ workflows:
- build:
name: build-14
pg_major: 14
image_tag: '14beta3-dev202108191715'
image_tag: '14beta3-dev202108191715'
- check-style
- check-sql-snapshots

View File

@@ -2019,7 +2019,6 @@ ColumnarProcessUtility(PlannedStmt *pstmt,
DestReceiver *dest,
QueryCompletionCompat *completionTag)
{
#if PG_VERSION_NUM >= PG_VERSION_14
if (readOnlyTree)
{

View File

@@ -251,7 +251,9 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency)
*/
Assert(false);
ereport(ERROR, (errmsg("unsupported object %s for distribution by citus",
getObjectTypeDescription_compat(dependency, /* missingOk: */ false)),
getObjectTypeDescription_compat(dependency,
/* missingOk: */ false)),
errdetail(
"citus tries to recreate an unsupported object on its workers"),
errhint("please report a bug as this should not be happening")));
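
The getObjectTypeDescription_compat and getObjectIdentity_compat wrappers used here exist because PostgreSQL 14 added a missing_ok argument to getObjectTypeDescription() and getObjectIdentity(). A minimal sketch of how such wrappers are typically defined, assuming they live in Citus's version-compatibility header (the definitions below are an assumption, not taken from this commit):

/* assumed sketch; the real definitions live in Citus's version-compatibility header */
#if PG_VERSION_NUM >= PG_VERSION_14
#define getObjectTypeDescription_compat(addr, missingOk) \
    getObjectTypeDescription(addr, missingOk)
#define getObjectIdentity_compat(addr, missingOk) \
    getObjectIdentity(addr, missingOk)
#else
/* pre-14 servers have no missing_ok parameter, so the argument is dropped */
#define getObjectTypeDescription_compat(addr, missingOk) \
    getObjectTypeDescription(addr)
#define getObjectIdentity_compat(addr, missingOk) \
    getObjectIdentity(addr)
#endif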

View File

@@ -1613,7 +1613,7 @@ PreprocessAlterFunctionDependsStmt(Node *node, const char *queryString,
* workers
*/
const char *functionName =
const char *functionName =
getObjectIdentity_compat(&address, /* missingOk: */ false);
ereport(ERROR, (errmsg("distrtibuted functions are not allowed to depend on an "
"extension"),
@@ -1933,9 +1933,9 @@ ErrorIfFunctionDependsOnExtension(const ObjectAddress *functionAddress)
if (IsObjectAddressOwnedByExtension(functionAddress, &extensionAddress))
{
char *functionName =
char *functionName =
getObjectIdentity_compat(functionAddress, /* missingOk: */ false);
char *extensionName =
char *extensionName =
getObjectIdentity_compat(&extensionAddress, /* missingOk: */ false);
ereport(ERROR, (errmsg("unable to create a distributed function from functions "
"owned by an extension"),

View File

@@ -212,9 +212,9 @@ DoLocalCopy(StringInfo buffer, Oid relationId, int64 shardId, CopyStmt *copyStat
(void) addRangeTableEntryForRelation(pState, shard, AccessShareLock,
NULL, false, false);
CopyFromState cstate = BeginCopyFrom_compat(pState, shard, NULL, NULL, false,
ReadFromLocalBufferCallback,
copyStatement->attlist,
copyStatement->options);
ReadFromLocalBufferCallback,
copyStatement->attlist,
copyStatement->options);
CopyFrom(cstate);
EndCopyFrom(cstate);
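
BeginCopyFrom_compat papers over the PostgreSQL 14 COPY changes: CopyState was split into CopyFromState/CopyToState and BeginCopyFrom() gained a whereClause parameter, which is why the call above passes NULL in the third position. A hedged sketch of the wrapper, assuming the usual pattern in Citus's version-compatibility header (pre-14 builds presumably also alias CopyFromState to the old CopyState type):

/* assumed sketch; eight arguments to match the call sites in this commit */
#if PG_VERSION_NUM >= PG_VERSION_14
#define BeginCopyFrom_compat(pstate, rel, whereClause, filename, isProgram, \
                             dataSourceCb, attNameList, options) \
    BeginCopyFrom(pstate, rel, whereClause, filename, isProgram, \
                  dataSourceCb, attNameList, options)
#else
/* older servers take no whereClause argument, so it is dropped here */
#define BeginCopyFrom_compat(pstate, rel, whereClause, filename, isProgram, \
                             dataSourceCb, attNameList, options) \
    BeginCopyFrom(pstate, rel, filename, isProgram, \
                  dataSourceCb, attNameList, options)
#endif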

View File

@@ -525,13 +525,13 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionT
/* initialize copy state to read from COPY data source */
CopyFromState copyState = BeginCopyFrom_compat(NULL,
copiedDistributedRelation,
NULL,
copyStatement->filename,
copyStatement->is_program,
NULL,
copyStatement->attlist,
copyStatement->options);
copiedDistributedRelation,
NULL,
copyStatement->filename,
copyStatement->is_program,
NULL,
copyStatement->attlist,
copyStatement->options);
/* set up callback to identify error line number */
errorCallback.callback = CopyFromErrorCallback;
@@ -627,13 +627,13 @@ CopyToNewShards(CopyStmt *copyStatement, QueryCompletionCompat *completionTag, O
/* initialize copy state to read from COPY data source */
CopyFromState copyState = BeginCopyFrom_compat(NULL,
distributedRelation,
NULL,
copyStatement->filename,
copyStatement->is_program,
NULL,
copyStatement->attlist,
copyStatement->options);
distributedRelation,
NULL,
copyStatement->filename,
copyStatement->is_program,
NULL,
copyStatement->attlist,
copyStatement->options);
CopyOutState copyOutState = (CopyOutState) palloc0(sizeof(CopyOutStateData));
copyOutState->delim = (char *) delimiterCharacter;

View File

@@ -2448,16 +2448,16 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
#if PG_VERSION_NUM >= PG_VERSION_14
case AT_SetCompression:
#endif
{
/*
* We will not perform any special check for:
* ALTER TABLE .. ALTER COLUMN .. SET NOT NULL
* ALTER TABLE .. REPLICA IDENTITY ..
* ALTER TABLE .. VALIDATE CONSTRAINT ..
* ALTER TABLE .. ALTER COLUMN .. SET COMPRESSION ..
*/
break;
}
{
/*
* We will not perform any special check for:
* ALTER TABLE .. ALTER COLUMN .. SET NOT NULL
* ALTER TABLE .. REPLICA IDENTITY ..
* ALTER TABLE .. VALIDATE CONSTRAINT ..
* ALTER TABLE .. ALTER COLUMN .. SET COMPRESSION ..
*/
break;
}
case AT_SetRelOptions: /* SET (...) */
case AT_ResetRelOptions: /* RESET (...) */

View File

@@ -810,15 +810,21 @@ deparse_shard_reindex_statement(ReindexStmt *origStmt, Oid distrelid, int64 shar
}
}
/*
* IsReindexWithParam_compat returns true if the given parameter
* exists for the given reindexStmt.
*/
bool IsReindexWithParam_compat(ReindexStmt* reindexStmt, char* param) {
bool
IsReindexWithParam_compat(ReindexStmt *reindexStmt, char *param)
{
#if PG_VERSION_NUM < PG_VERSION_14
if (strcmp(param, "concurrently") == 0) {
if (strcmp(param, "concurrently") == 0)
{
return reindexStmt->concurrent;
}else if (strcmp(param, "verbose") == 0) {
}
else if (strcmp(param, "verbose") == 0)
{
return reindexStmt->options & REINDEXOPT_VERBOSE;
}
return false;
@@ -831,10 +837,9 @@ bool IsReindexWithParam_compat(ReindexStmt* reindexStmt, char* param) {
return defGetBoolean(opt);
}
}
return false;
return false;
#endif
}
}
/*
@@ -1292,6 +1297,7 @@ RoleSpecString(RoleSpec *spec, bool withQuoteIdentifier)
quote_identifier(spec->rolename) :
spec->rolename;
}
#if PG_VERSION_NUM >= PG_VERSION_14
case ROLESPEC_CURRENT_ROLE:
#endif
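
IsReindexWithParam_compat, reformatted above, hides the PostgreSQL 14 change in how REINDEX options are stored: dedicated fields and REINDEXOPT_* flags before PG 14 versus a generic DefElem list read with defGetBoolean() on PG 14. A hypothetical call site, assuming a ReindexStmt *stmt is in scope:

/* hypothetical usage; the option names match the strings handled above */
if (IsReindexWithParam_compat(stmt, "concurrently"))
{
    ereport(NOTICE, (errmsg("REINDEX CONCURRENTLY requested")));
}
if (IsReindexWithParam_compat(stmt, "verbose"))
{
    ereport(NOTICE, (errmsg("REINDEX (VERBOSE) requested")));
}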

View File

@@ -233,6 +233,7 @@ AppendStatTypes(StringInfo buf, CreateStatsStmt *stmt)
appendStringInfoString(buf, ")");
}
#if PG_VERSION_NUM >= PG_VERSION_14
static void
AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
@@ -259,6 +260,8 @@ AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
}
}
}
#else
static void
AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
@@ -285,6 +288,8 @@ AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
}
}
}
#endif
static void

View File

@@ -2060,7 +2060,7 @@ get_with_clause(Query *query, deparse_context *context)
if (PRETTY_INDENT(context))
appendContextKeyword(context, "", 0, 0, 0);
appendStringInfoChar(buf, ')');
if (cte->search_clause)
{
bool first = true;
@@ -2100,7 +2100,7 @@ get_with_clause(Query *query, deparse_context *context)
}
appendStringInfo(buf, " SET %s", quote_identifier(cte->cycle_clause->cycle_mark_column));
{
Const *cmv = castNode(Const, cte->cycle_clause->cycle_mark_value);
Const *cmd = castNode(Const, cte->cycle_clause->cycle_mark_default);
@@ -7771,7 +7771,7 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
appendStringInfoString(buf, quote_identifier(colname));
}
appendStringInfoChar(buf, ')');
if (j->join_using_alias)
appendStringInfo(buf, " AS %s",
quote_identifier(j->join_using_alias->aliasname));

View File

@@ -411,8 +411,8 @@ ReadFileIntoTupleStore(char *fileName, char *copyFormat, TupleDesc tupleDescript
copyOptions = lappend(copyOptions, copyOption);
CopyFromState copyState = BeginCopyFrom_compat(NULL, stubRelation, NULL,
fileName, false, NULL,
NULL, copyOptions);
fileName, false, NULL,
NULL, copyOptions);
while (true)
{

View File

@@ -75,8 +75,12 @@ citus_unmark_object_distributed(PG_FUNCTION_ARGS)
{
ereport(ERROR, (errmsg("object still exists"),
errdetail("the %s \"%s\" still exists",
getObjectTypeDescription_compat(&address, /* missingOk: */ false),
getObjectIdentity_compat(&address, /* missingOk: */ false)),
getObjectTypeDescription_compat(&address,
/* missingOk: */ false),
getObjectIdentity_compat(&address,
/* missingOk: */ false)),
errhint("drop the object via a DROP command")));
}

View File

@@ -688,7 +688,6 @@ ShardsMatchingDeleteCriteria(Oid relationId, List *shardIntervalList,
Assert(deleteCriteria != NULL);
List *deleteCriteriaList = list_make1(deleteCriteria);
/* walk over shard list and check if shards can be dropped */
ShardInterval *shardInterval = NULL;

View File

@@ -286,9 +286,10 @@ ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es)
PlannedStmt *plan = subPlan->plan;
IntoClause *into = NULL;
ParamListInfo params = NULL;
/*
* With PG14, we need to provide a string here,
* for now we put an empty string, which is valid according to postgres.
* for now we put an empty string, which is valid according to postgres.
*/
char *queryString = pstrdup("");
instr_time planduration;

View File

@@ -1890,9 +1890,10 @@ MasterAggregateExpression(Aggref *originalAggregate,
if (aggregateType == AGGREGATE_ARRAY_AGG)
{
#if PG_VERSION_NUM >= PG_VERSION_14
/*
* Postgres expects the type of the array here such as INT4ARRAYOID.
* Hence we set it to workerReturnType. If we set this to
* Hence we set it to workerReturnType. If we set this to
* ANYCOMPATIBLEARRAYOID then we will get the following error:
* "argument declared anycompatiblearray is not an array but type anycompatiblearray"
*/

View File

@@ -2054,7 +2054,8 @@ GetRestrictInfoListForRelation(RangeTblEntry *rangeTblEntry,
* If the restriction involves multiple tables, we cannot add it to
* input relation's expression list.
*/
Relids varnos = pull_varnos_compat(relationRestriction->plannerInfo, (Node *) restrictionClause);
Relids varnos = pull_varnos_compat(relationRestriction->plannerInfo,
(Node *) restrictionClause);
if (bms_num_members(varnos) != 1)
{
continue;
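
pull_varnos_compat follows the same pattern: on PostgreSQL 14 pull_varnos() takes the PlannerInfo as its first argument, while older servers take only the node. A sketch of the presumed wrapper (an assumption about the compatibility header, not shown in this commit):

/* assumed sketch of the wrapper */
#if PG_VERSION_NUM >= PG_VERSION_14
#define pull_varnos_compat(root, node) pull_varnos(root, node)
#else
#define pull_varnos_compat(root, node) pull_varnos(node)
#endif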

View File

@@ -31,7 +31,7 @@ BEGIN
* Citus extension, so we create that dependency here.
* We are not using:
* ALTER EXTENSION citus DROP/CREATE AGGREGATE array_cat_agg
* because we don't have an easy way to check if the aggregate
* because we don't have an easy way to check if the aggregate
* exists with anyarray type or anycompatiblearray type.
*/
INSERT INTO pg_depend

View File

@@ -31,7 +31,7 @@ BEGIN
* Citus extension, so we create that dependency here.
* We are not using:
* ALTER EXTENSION citus DROP/CREATE AGGREGATE array_cat_agg
* because we don't have an easy way to check if the aggregate
* because we don't have an easy way to check if the aggregate
* exists with anyarray type or anycompatiblearray type.
*/
INSERT INTO pg_depend

View File

@@ -5,8 +5,8 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_prepare_pg_upgrade()
AS $cppu$
BEGIN
DELETE FROM pg_depend WHERE
objid IN (SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') AND
DELETE FROM pg_depend WHERE
objid IN (SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') AND
refobjid IN (select oid from pg_extension where extname = 'citus');
/*
* We are dropping the aggregates because postgres 14 changed
@@ -14,7 +14,7 @@ BEGIN
* upgrading to pg14, specifically when running pg_restore on
* array_cat_agg we would get an error. So we drop the aggregate
* and create the right one on citus_finish_pg_upgrade.
*/
*/
DROP AGGREGATE IF EXISTS array_cat_agg(anyarray);
DROP AGGREGATE IF EXISTS array_cat_agg(anycompatiblearray);
--

View File

@@ -5,8 +5,8 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_prepare_pg_upgrade()
AS $cppu$
BEGIN
DELETE FROM pg_depend WHERE
objid IN (SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') AND
DELETE FROM pg_depend WHERE
objid IN (SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') AND
refobjid IN (select oid from pg_extension where extname = 'citus');
/*
* We are dropping the aggregates because postgres 14 changed
@@ -14,7 +14,7 @@ BEGIN
* upgrading to pg14, specifically when running pg_restore on
* array_cat_agg we would get an error. So we drop the aggregate
* and create the right one on citus_finish_pg_upgrade.
*/
*/
DROP AGGREGATE IF EXISTS array_cat_agg(anyarray);
DROP AGGREGATE IF EXISTS array_cat_agg(anycompatiblearray);
--

View File

@@ -50,7 +50,7 @@ FunctionOidExtended(const char *schemaName, const char *functionName, int argume
argumentCount,
argumentList,
findVariadics,
findDefaults,
findDefaults,
false,
true);

View File

@@ -243,4 +243,4 @@ clean distclean maintainer-clean:
rm -f $(output_files) $(input_files)
rm -rf tmp_check/
all: create-tablespaces
all: create-tablespaces

View File

@@ -239,6 +239,7 @@ s/ERROR: ROLLBACK is not allowed in an SQL function/ERROR: ROLLBACK is not all
/.*Async Capable.*/d
/Parent Relationship/d
/Parent-Relationship/d
s/function array_cat_agg\(anyarray\) anyarray/function array_cat_agg\(anycompatiblearray\) anycompatiblearray/g
s/function array_cat_agg\(anycompatiblearray\)/function array_cat_agg\(anyarray\)/g
s/TRIM\(BOTH FROM value\)/btrim\(value\)/g
s/pg14\.idx.*/pg14\.xxxxx/g

View File

@@ -20,4 +20,5 @@
/multi_mx_copy_data.out
/multi_outer_join.out
/multi_outer_join_reference.out
/tablespace.out
/worker_copy.out

View File

@@ -139,7 +139,7 @@ SELECT * FROM multi_extension.print_extension_changes();
| function alter_role_if_exists(text,text) boolean
| function any_value(anyelement) anyelement
| function any_value_agg(anyelement,anyelement) anyelement
| function array_cat_agg(anycompatiblearray) anycompatiblearray
| function array_cat_agg(anyarray) anycompatiblearray
| function assign_distributed_transaction_id(integer,bigint,timestamp with time zone) void
| function authinfo_valid(text) boolean
| function broadcast_intermediate_result(text,text) bigint

View File

@@ -1,5 +0,0 @@
CREATE TABLESPACE test_tablespace LOCATION '/home/talha/citus/src/test/regress/data/ts0';
\c - - - :worker_1_port
CREATE TABLESPACE test_tablespace LOCATION '/home/talha/citus/src/test/regress/data/ts1';
\c - - - :worker_2_port
CREATE TABLESPACE test_tablespace LOCATION '/home/talha/citus/src/test/regress/data/ts2';

View File

@@ -26,7 +26,7 @@ ORDER BY 1;
function alter_table_set_access_method(regclass,text)
function any_value(anyelement)
function any_value_agg(anyelement,anyelement)
function array_cat_agg(anycompatiblearray)
function array_cat_agg(anyarray)
function assign_distributed_transaction_id(integer,bigint,timestamp with time zone)
function authinfo_valid(text)
function broadcast_intermediate_result(text,text)

View File

@@ -23,7 +23,7 @@ ORDER BY 1;
function alter_table_set_access_method(regclass,text)
function any_value(anyelement)
function any_value_agg(anyelement,anyelement)
function array_cat_agg(anycompatiblearray)
function array_cat_agg(anyarray)
function assign_distributed_transaction_id(integer,bigint,timestamp with time zone)
function authinfo_valid(text)
function broadcast_intermediate_result(text,text)

View File

@@ -20,4 +20,5 @@
/multi_mx_copy_data.sql
/multi_outer_join.sql
/multi_outer_join_reference.sql
/tablespace.sql
/worker_copy.sql

View File

@@ -101,7 +101,7 @@ FROM
-- DISTINCT w/wout distribution key
-- there seems to be an issue with SELECT DISTINCT ROW with PG14
-- so we add an alternative output that gives an error, this should
-- be removed after the issue is fixed on PG14.
-- be removed after the issue is fixed on PG14.
SELECT DISTINCT(col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col32, col33, col34, col35, col36, col37, col38)
FROM
data_types_table

View File

@@ -60,4 +60,4 @@ SELECT success FROM run_command_on_workers('select pg_reload_conf()');
RESET enable_partitionwise_join;
DROP SCHEMA partition_wise_join CASCADE;
DROP SCHEMA partition_wise_join CASCADE;

View File

@@ -114,7 +114,7 @@ INSERT INTO test_wal VALUES(3,33),(4,44),(5,55) RETURNING *;
-- this test has different output for pg14 and here we mostly test that
-- we don't get an error, hence we use explain_has_distributed_subplan.
SELECT public.explain_has_distributed_subplan(
$$
$$
EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE)
WITH cte_1 AS (INSERT INTO test_wal VALUES(6,66),(7,77),(8,88) RETURNING *)
SELECT * FROM cte_1;

View File

@@ -98,7 +98,7 @@ CREATE EXTENSION seg;
-- show that the extension is created on existing worker
SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'seg'$$);
SELECT workers.result = pg_extension.extversion AS same_version
SELECT workers.result = pg_extension.extversion AS same_version
FROM run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$) workers, pg_extension WHERE extname = 'seg';
-- now create the reference table
@@ -145,7 +145,7 @@ SELECT 1 from master_add_node('localhost', :worker_2_port);
-- show that the extension is created on both existing and new node
SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'seg'$$);
SELECT workers.result = pg_extension.extversion AS same_version
SELECT workers.result = pg_extension.extversion AS same_version
FROM run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$) workers, pg_extension WHERE extname = 'seg';
-- check for the unpackaged extension to be created correctly
@@ -212,7 +212,7 @@ ROLLBACK;
-- show that the CREATE EXTENSION command propagated even if the transaction
-- block is rolled back, that's a shortcoming of dependency creation logic
SELECT COUNT(DISTINCT workers.result)
SELECT COUNT(DISTINCT workers.result)
FROM run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$) workers;
-- drop the schema and all the objects

View File

@@ -1,5 +0,0 @@
CREATE TABLESPACE test_tablespace LOCATION '/home/talha/citus/src/test/regress/data/ts0';
\c - - - :worker_1_port
CREATE TABLESPACE test_tablespace LOCATION '/home/talha/citus/src/test/regress/data/ts1';
\c - - - :worker_2_port
CREATE TABLESPACE test_tablespace LOCATION '/home/talha/citus/src/test/regress/data/ts2';