mirror of https://github.com/citusdata/citus.git
Fix style
parent 42e41900b3
commit 23131a52cd
@@ -447,7 +447,7 @@ workflows:
       - build:
           name: build-14
           pg_major: 14
-          image_tag: '14beta3-dev202108191715'
+          image_tag: '14beta3-dev202108191715'

       - check-style
       - check-sql-snapshots
@@ -233,6 +233,7 @@ AppendStatTypes(StringInfo buf, CreateStatsStmt *stmt)
     appendStringInfoString(buf, ")");
 }
 
+
 #if PG_VERSION_NUM >= PG_VERSION_14
 static void
 AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
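The hunk above keeps two implementations of AppendColumnNames and selects one at compile time with a PG_VERSION_NUM guard, the usual Citus pattern for coping with parse-tree differences between PostgreSQL majors. A minimal sketch of that pattern, assuming Citus' pg_version_constants.h header; the helper name and clause text are illustrative, not taken from the diff:

#include "postgres.h"
#include "lib/stringinfo.h"

#include "distributed/pg_version_constants.h"

#if PG_VERSION_NUM >= PG_VERSION_14
static void
AppendExampleClause(StringInfo buf)
{
    /* PG14+ form of the clause */
    appendStringInfoString(buf, " EXAMPLE NEW FORM");
}
#else
static void
AppendExampleClause(StringInfo buf)
{
    /* pre-PG14 form of the clause */
    appendStringInfoString(buf, " EXAMPLE OLD FORM");
}
#endif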
@@ -251,6 +252,8 @@ AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
         }
     }
 }
+
+
 #else
 static void
 AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
@@ -277,6 +280,8 @@ AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
         }
     }
 }
+
+
 #endif
 
 static void
@@ -2060,7 +2060,7 @@ get_with_clause(Query *query, deparse_context *context)
         if (PRETTY_INDENT(context))
             appendContextKeyword(context, "", 0, 0, 0);
         appendStringInfoChar(buf, ')');
 
         if (cte->search_clause)
         {
             bool first = true;
@@ -2100,7 +2100,7 @@ get_with_clause(Query *query, deparse_context *context)
         }
 
         appendStringInfo(buf, " SET %s", quote_identifier(cte->cycle_clause->cycle_mark_column));
 
         {
             Const *cmv = castNode(Const, cte->cycle_clause->cycle_mark_value);
             Const *cmd = castNode(Const, cte->cycle_clause->cycle_mark_default);
@@ -7771,7 +7771,7 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
             appendStringInfoString(buf, quote_identifier(colname));
         }
         appendStringInfoChar(buf, ')');
 
         if (j->join_using_alias)
             appendStringInfo(buf, " AS %s",
                              quote_identifier(j->join_using_alias->aliasname));
@@ -286,9 +286,10 @@ ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es)
         PlannedStmt *plan = subPlan->plan;
         IntoClause *into = NULL;
         ParamListInfo params = NULL;
 
         /*
          * With PG14, we need to provide a string here,
-         * for now we put an empty string, which is valid according to postgres.
+         * for now we put an empty string, which is valid according to postgres.
          */
         char *queryString = pstrdup("");
         instr_time planduration;
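The comment in this hunk notes that PG14 expects a real (possibly empty) query string where older code could pass NULL, which is why the subplan EXPLAIN path builds pstrdup(""). A minimal sketch of that defensive pattern; the helper name is hypothetical and not part of the Citus code shown:

#include "postgres.h"

/*
 * Sketch: where PG14 expects a query string, hand over an empty palloc'd
 * string instead of a NULL pointer so both old and new servers are satisfied.
 */
static const char *
SafeQueryString(const char *maybeNull)
{
    return (maybeNull != NULL) ? maybeNull : pstrdup("");
}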
@@ -643,22 +643,27 @@ ModifyPartialQuerySupported(Query *queryTree, bool multiShardQuery,
     }
     else
     {
-        if (commandType == CMD_UPDATE) {
+        if (commandType == CMD_UPDATE)
+        {
             /*
-             * For
+             * For
              */
-            RangeTblEntry* resultRTE = ExtractResultRelationRTE(queryTree);
+            RangeTblEntry *resultRTE = ExtractResultRelationRTE(queryTree);
 
             /*
              * FirstLowInvalidHeapAttributeNumber is added as an offset to rte->updatedCols.
              * So we substract that to get the column no for an updated column that matches
              * resultRTE->updatedcols.
              */
-            int updatedColNoWithOffset = partitionColumn->varattno - FirstLowInvalidHeapAttributeNumber;
-            if (bms_is_member(updatedColNoWithOffset, resultRTE->updatedCols)) {
+            int updatedColNoWithOffset = partitionColumn->varattno -
+                                         FirstLowInvalidHeapAttributeNumber;
+            if (bms_is_member(updatedColNoWithOffset, resultRTE->updatedCols))
+            {
                 targetEntryPartitionColumn = true;
             }
-
-        }else if (targetEntry->resno == partitionColumn->varattno) {
+        }
+        else if (targetEntry->resno == partitionColumn->varattno)
+        {
             targetEntryPartitionColumn = true;
         }
     }
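The repeated comment spells out the indexing convention behind this check: rte->updatedCols stores attribute numbers shifted by FirstLowInvalidHeapAttributeNumber so that system columns (negative attnos) still fit in a bitmapset, and a Var's varattno has to be shifted the same way before probing the set. A minimal sketch of that lookup using standard PostgreSQL headers; the helper name is illustrative, not part of the diff:

#include "postgres.h"
#include "access/sysattr.h"    /* FirstLowInvalidHeapAttributeNumber */
#include "nodes/bitmapset.h"
#include "nodes/parsenodes.h"  /* RangeTblEntry */
#include "nodes/primnodes.h"   /* Var */

/*
 * Returns true if the given column is listed in rte->updatedCols.
 * The bitmapset is keyed by (attno - FirstLowInvalidHeapAttributeNumber).
 */
static bool
ColumnIsUpdated(RangeTblEntry *rte, Var *column)
{
    int offsetAttno = column->varattno - FirstLowInvalidHeapAttributeNumber;

    return bms_is_member(offsetAttno, rte->updatedCols);
}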
@@ -679,8 +684,8 @@ ModifyPartialQuerySupported(Query *queryTree, bool multiShardQuery,
                              NULL, NULL);
         }
 
-        //TODO:: targetEntry->resno is wrong here, we SHOULD think about
-        //TargetEntryChangesValue for update case based on 86dc90056dfdbd9d1b891718d2e5614e3e432f35.
+        /*TODO:: targetEntry->resno is wrong here, we SHOULD think about */
+        /*TargetEntryChangesValue for update case based on 86dc90056dfdbd9d1b891718d2e5614e3e432f35. */
         if (commandType == CMD_UPDATE && targetEntryPartitionColumn &&
             TargetEntryChangesValue(targetEntry, partitionColumn,
                                     queryTree->jointree))
@@ -1137,15 +1142,19 @@ ErrorIfOnConflictNotSupported(Query *queryTree)
 
     bool setTargetEntryPartitionColumn = false;
 
-    if (partitionColumn) {
-        RangeTblEntry* resultRTE = ExtractResultRelationRTE(queryTree);
+    if (partitionColumn)
+    {
+        RangeTblEntry *resultRTE = ExtractResultRelationRTE(queryTree);
 
         /*
-         * FirstLowInvalidHeapAttributeNumber is added as an offset to rte->updatedCols.
-         * So we substract that to get the column no for an updated column that matches
-         * resultRTE->updatedcols.
-         */
-        int updatedColNoWithOffset = partitionColumn->varattno - FirstLowInvalidHeapAttributeNumber;
-        if (bms_is_member(updatedColNoWithOffset, resultRTE->updatedCols)) {
+         * FirstLowInvalidHeapAttributeNumber is added as an offset to rte->updatedCols.
+         * So we substract that to get the column no for an updated column that matches
+         * resultRTE->updatedcols.
+         */
+        int updatedColNoWithOffset = partitionColumn->varattno -
+                                     FirstLowInvalidHeapAttributeNumber;
+        if (bms_is_member(updatedColNoWithOffset, resultRTE->updatedCols))
+        {
             setTargetEntryPartitionColumn = true;
         }
     }
 
@@ -5,8 +5,8 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_prepare_pg_upgrade()
 AS $cppu$
 BEGIN
 
-    DELETE FROM pg_depend WHERE
-    objid IN (SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') AND
+    DELETE FROM pg_depend WHERE
+    objid IN (SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') AND
     refobjid IN (select oid from pg_extension where extname = 'citus');
     /*
      * We are dropping the aggregates because postgres 14 changed
@@ -14,7 +14,7 @@ BEGIN
      * upgrading to pg14, spegifically when running pg_restore on
      * array_cat_agg we would get an error. So we drop the aggregate
      * and create the right one on citus_finish_pg_upgrade.
-     */
+     */
     DROP AGGREGATE IF EXISTS array_cat_agg(anyarray);
     DROP AGGREGATE IF EXISTS array_cat_agg(anycompatiblearray);
     --
@@ -5,8 +5,8 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_prepare_pg_upgrade()
 AS $cppu$
 BEGIN
 
-    DELETE FROM pg_depend WHERE
-    objid IN (SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') AND
+    DELETE FROM pg_depend WHERE
+    objid IN (SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') AND
     refobjid IN (select oid from pg_extension where extname = 'citus');
     /*
      * We are dropping the aggregates because postgres 14 changed
@@ -14,7 +14,7 @@ BEGIN
      * upgrading to pg14, spegifically when running pg_restore on
      * array_cat_agg we would get an error. So we drop the aggregate
      * and create the right one on citus_finish_pg_upgrade.
-     */
+     */
     DROP AGGREGATE IF EXISTS array_cat_agg(anyarray);
     DROP AGGREGATE IF EXISTS array_cat_agg(anycompatiblearray);
     --
@@ -140,7 +140,8 @@ ListToHashSet(List *itemList, Size keySize, bool isStringList)
         flags |= HASH_BLOBS;
     }
 #if PG_VERSION_NUM >= PG_VERSION_14
-    else {
+    else
+    {
         flags |= HASH_STRINGS;
     }
 #endif
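PG14 made the key type of a dynahash table explicit: hash_create() now expects HASH_STRINGS for C-string keys, while HASH_BLOBS remains the flag for fixed-size binary keys, hence the version-gated else branch above. A minimal sketch of that flag selection, assuming Citus' pg_version_constants.h; the table name and sizes are illustrative:

#include "postgres.h"
#include "utils/hsearch.h"

#include "distributed/pg_version_constants.h"

static HTAB *
CreateExampleHashSet(Size keySize, bool isStringKey)
{
    HASHCTL info;
    uint32 flags = HASH_ELEM | HASH_CONTEXT;

    memset(&info, 0, sizeof(info));
    info.keysize = keySize;
    info.entrysize = keySize;
    info.hcxt = CurrentMemoryContext;

    if (!isStringKey)
    {
        /* fixed-size binary keys */
        flags |= HASH_BLOBS;
    }
#if PG_VERSION_NUM >= PG_VERSION_14
    else
    {
        /* PG14+ requires string keys to be declared explicitly */
        flags |= HASH_STRINGS;
    }
#endif

    return hash_create("example hash set", 32, &info, flags);
}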
@@ -63,7 +63,7 @@ typedef struct DDLJob
 } DDLJob;
 
 
-extern void multi_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
+extern void multi_ProcessUtility(PlannedStmt * pstmt, const char *queryString,
 #if PG_VERSION_NUM >= PG_VERSION_14
                                  bool readOnlyTree,
 #endif
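The prototype above only grows the readOnlyTree parameter when building against PG14, matching the ProcessUtility hook signature change in that release. A minimal sketch of a hook-style declaration using the same compile-time gate; the function name is illustrative and the exact header list is an assumption, not part of the diff:

#include "postgres.h"
#include "nodes/params.h"
#include "nodes/plannodes.h"
#include "tcop/cmdtag.h"
#include "tcop/dest.h"
#include "tcop/utility.h"
#include "utils/queryenvironment.h"

#include "distributed/pg_version_constants.h"

/* A utility-hook style prototype that follows the PG14 signature change. */
extern void ExampleProcessUtility(PlannedStmt *pstmt,
                                  const char *queryString,
#if PG_VERSION_NUM >= PG_VERSION_14
                                  bool readOnlyTree,
#endif
                                  ProcessUtilityContext context,
                                  ParamListInfo params,
                                  QueryEnvironment *queryEnv,
                                  DestReceiver *dest,
                                  QueryCompletion *completionTag);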
@@ -241,4 +241,4 @@ s/ERROR: ROLLBACK is not allowed in an SQL function/ERROR: ROLLBACK is not all
 /Parent-Relationship/d
 s/function array_cat_agg\(anyarray\) anyarray/function array_cat_agg\(anycompatiblearray\) anycompatiblearray/g
 s/TRIM\(BOTH FROM value\)/btrim\(value\)/g
-s/function array_cat_agg\(anycompatiblearray\)/function array_cat_agg\(anyarray\)/g
+s/function array_cat_agg\(anycompatiblearray\)/function array_cat_agg\(anyarray\)/g
@@ -139,7 +139,7 @@ SELECT * FROM multi_extension.print_extension_changes();
  | function alter_role_if_exists(text,text) boolean
  | function any_value(anyelement) anyelement
  | function any_value_agg(anyelement,anyelement) anyelement
- | function array_cat_agg(anycompatiblearray) anycompatiblearray
+ | function array_cat_agg(anyarray) anycompatiblearray
  | function assign_distributed_transaction_id(integer,bigint,timestamp with time zone) void
  | function authinfo_valid(text) boolean
  | function broadcast_intermediate_result(text,text) bigint
@@ -26,7 +26,7 @@ ORDER BY 1;
  function alter_table_set_access_method(regclass,text)
  function any_value(anyelement)
  function any_value_agg(anyelement,anyelement)
- function array_cat_agg(anycompatiblearray)
+ function array_cat_agg(anyarray)
  function assign_distributed_transaction_id(integer,bigint,timestamp with time zone)
  function authinfo_valid(text)
  function broadcast_intermediate_result(text,text)
@@ -23,7 +23,7 @@ ORDER BY 1;
  function alter_table_set_access_method(regclass,text)
  function any_value(anyelement)
  function any_value_agg(anyelement,anyelement)
- function array_cat_agg(anycompatiblearray)
+ function array_cat_agg(anyarray)
  function assign_distributed_transaction_id(integer,bigint,timestamp with time zone)
  function authinfo_valid(text)
  function broadcast_intermediate_result(text,text)
@@ -101,7 +101,7 @@ FROM
 -- DISTINCT w/wout distribution key
 -- there seems to be an issue with SELECT DISTINCT ROW with PG14
 -- so we add an alternative output that gives an error, this should
--- be removed after the issue is fixed on PG14.
+-- be removed after the issue is fixed on PG14.
 SELECT DISTINCT(col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col32, col33, col34, col35, col36, col37, col38)
 FROM
     data_types_table
@@ -60,4 +60,4 @@ SELECT success FROM run_command_on_workers('select pg_reload_conf()');
 
 RESET enable_partitionwise_join;
 
-DROP SCHEMA partition_wise_join CASCADE;
+DROP SCHEMA partition_wise_join CASCADE;
@@ -114,7 +114,7 @@ INSERT INTO test_wal VALUES(3,33),(4,44),(5,55) RETURNING *;
 -- this test has different output for pg14 and here we mostly test that
 -- we don't get an error, hence we use explain_has_distributed_subplan.
 SELECT public.explain_has_distributed_subplan(
-$$
+$$
 EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE)
 WITH cte_1 AS (INSERT INTO test_wal VALUES(6,66),(7,77),(8,88) RETURNING *)
 SELECT * FROM cte_1;
@@ -98,7 +98,7 @@ CREATE EXTENSION seg;
 
 -- show that the extension is created on existing worker
 SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'seg'$$);
-SELECT workers.result = pg_extension.extversion AS same_version
+SELECT workers.result = pg_extension.extversion AS same_version
 FROM run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$) workers, pg_extension WHERE extname = 'seg';
 
 -- now create the reference table
@@ -145,7 +145,7 @@ SELECT 1 from master_add_node('localhost', :worker_2_port);
 
 -- show that the extension is created on both existing and new node
 SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'seg'$$);
-SELECT workers.result = pg_extension.extversion AS same_version
+SELECT workers.result = pg_extension.extversion AS same_version
 FROM run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$) workers, pg_extension WHERE extname = 'seg';
 
 -- check for the unpackaged extension to be created correctly
@@ -212,7 +212,7 @@ ROLLBACK;
 
 -- show that the CREATE EXTENSION command propagated even if the transaction
 -- block is rollbacked, that's a shortcoming of dependency creation logic
-SELECT COUNT(DISTINCT workers.result)
+SELECT COUNT(DISTINCT workers.result)
 FROM run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$) workers;
 
 -- drop the schema and all the objects