Support columnar table index builds with CONCURRENTLY option (#5032)

With this commit, we add `CREATE INDEX CONCURRENTLY` and `REINDEX CONCURRENTLY` support for columnar tables.
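
For example, both of the following now work on a columnar table (the table
and index names here are simply the ones used by the new tests added below):

    CREATE INDEX CONCURRENTLY conc_s2_idx ON columnar_table (a);
    REINDEX INDEX CONCURRENTLY conc_s2_idx;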

For that, we implement the `columnar_index_validate_scan` callback.
The reasoning behind the implementation is as follows:

* Postgres function `validate_index` provides all the TIDs that are currently in
  the index to the `columnar_index_validate_scan` callback via a `tupleSort` object.

* We start scanning the table by using `columnar_getnextslot` as usual.
  Before moving forward, note that `columnar_getnextslot` guarantees
  to return tuples in the order of their TIDs.

* For the table scan, Postgres provides a snapshot guaranteeing that any
  tuple which is valid according to that snapshot but is not yet in the
  index must be added to the index.

* Then, for each tuple that we read from our table, we keep iterating the
  given `tupleSort` to find the first TID that is greater than or equal to
  our tuple's TID.

  If the two TIDs are equal, we skip the tuple since it is already indexed.

  If the TID that we read from the `tupleSort` is greater than our tuple's
  TID, we insert the tuple into the index, as sketched below.
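
In C-like pseudocode, the merge loop looks as follows. This is a condensed
view of the new ColumnarReadMissingRowsIntoIndex function in the diff below;
variable names are shortened, and progress reporting, partial-index
predicate checks and interrupt handling are omitted:

    while (columnar_getnextslot(scan, ForwardScanDirection, slot))
    {
        ItemPointer tableTid = &slot->tts_tid;

        /* skip sorted index TIDs until we find or pass the table TID */
        if (!indexTupleSortEmpty &&
            (!ItemPointerIsValid(&indexedTid) ||
             ItemPointerCompare(&indexedTid, tableTid) < 0))
        {
            indexedTid = TupleSortSkipSmallerItemPointers(state->tuplesort,
                                                          tableTid);
            indexTupleSortEmpty = !ItemPointerIsValid(&indexedTid);
        }

        if (!indexTupleSortEmpty &&
            ItemPointerCompare(&indexedTid, tableTid) == 0)
        {
            /* equal TIDs: tuple is already covered by the index */
            continue;
        }

        /* tuple is visible to the snapshot but missing from the index */
        FormIndexDatum(indexInfo, slot, estate, indexValues, indexNulls);
        index_insert(indexRelation, indexValues, indexNulls, tableTid,
                     columnarRelation, indexUniqueCheck, indexInfo);
    }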
Onur Tirtir 2021-07-09 13:44:58 +03:00 committed by GitHub
parent ea5fe022a4
commit f00c63c33d
6 changed files with 883 additions and 60 deletions

View File

@@ -126,6 +126,13 @@ static double ColumnarReadRowsIntoIndex(TableScanDesc scan,
                                         IndexBuildCallback indexCallback,
                                         void *indexCallbackState,
                                         EState *estate, ExprState *predicate);
+static void ColumnarReadMissingRowsIntoIndex(TableScanDesc scan, Relation indexRelation,
+                                             IndexInfo *indexInfo, EState *estate,
+                                             ExprState *predicate,
+                                             ValidateIndexState *state);
+static ItemPointerData TupleSortSkipSmallerItemPointers(Tuplesortstate *tupleSort,
+                                                        ItemPointer targetItemPointer);
+
 /* Custom tuple slot ops used for columnar. Initialized in columnar_tableam_init(). */
 static TupleTableSlotOps TTSOpsColumnar;
@@ -148,9 +155,6 @@ columnar_beginscan(Relation relation, Snapshot snapshot,
     /* attr_needed represents 0-indexed attribute numbers */
     Bitmapset *attr_needed = bms_add_range(NULL, 0, natts - 1);
 
-    /* the columnar access method does not use the flags, they are specific to heap */
-    flags = 0;
-
     TableScanDesc scandesc = columnar_beginscan_extended(relation, snapshot, nkeys, key,
                                                          parallel_scan,
                                                          flags, attr_needed, NULL);
@@ -241,6 +245,11 @@ columnar_endscan(TableScanDesc sscan)
         ColumnarEndRead(scan->cs_readState);
         scan->cs_readState = NULL;
     }
+
+    if (scan->cs_base.rs_flags & SO_TEMP_SNAPSHOT)
+    {
+        UnregisterSnapshot(scan->cs_base.rs_snapshot);
+    }
 }
@@ -1105,13 +1114,6 @@ columnar_index_build_range_scan(Relation columnarRelation,
         ereport(ERROR, (errmsg("BRIN indexes on columnar tables are not supported")));
     }
 
-    if (indexInfo->ii_Concurrent)
-    {
-        /* we already don't allow CONCURRENTLY syntax but be on the safe side */
-        ereport(ERROR, (errmsg("concurrent index builds are not supported "
-                               "for columnar tables")));
-    }
-
     if (scan)
     {
         /*
@@ -1124,6 +1126,7 @@ columnar_index_build_range_scan(Relation columnarRelation,
             ereport(DEBUG4, (errmsg("ignoring parallel worker when building "
                                     "index since parallel scan on columnar "
                                     "tables is not supported")));
+            table_endscan(scan);
             return 0;
         }
@@ -1138,11 +1141,6 @@ columnar_index_build_range_scan(Relation columnarRelation,
      * and index whatever's live according to that.
      */
     TransactionId OldestXmin = InvalidTransactionId;
-
-    /*
-     * We already don't allow concurrent index builds so ii_Concurrent
-     * will always be false, but let's keep the code close to heapAM.
-     */
     if (!IsBootstrapProcessingMode() && !indexInfo->ii_Concurrent)
     {
         /* ignore lazy VACUUM's */
@@ -1371,21 +1369,177 @@ ColumnarReadRowsIntoIndex(TableScanDesc scan, Relation indexRelation,
 static void
-columnar_index_validate_scan(Relation heapRelation,
+columnar_index_validate_scan(Relation columnarRelation,
                              Relation indexRelation,
                              IndexInfo *indexInfo,
                              Snapshot snapshot,
-                             ValidateIndexState *state)
+                             ValidateIndexState *
+                             validateIndexState)
 {
-    /*
-     * This is only called for concurrent index builds,
-     * see table_index_validate_scan.
-     * Note that we already error out for concurrent index
-     * builds in utility hook but be on the safe side.
-     */
-    ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                    errmsg("concurrent index builds are not supported for "
-                           "columnar tables")));
+    ColumnarReportTotalVirtualBlocks(columnarRelation, snapshot,
+                                     PROGRESS_SCAN_BLOCKS_TOTAL);
+
+    /*
+     * Set up execution state for predicate, if any.
+     * Note that this is only useful for partial indexes.
+     */
+    EState *estate = CreateExecutorState();
+    ExprContext *econtext = GetPerTupleExprContext(estate);
+    econtext->ecxt_scantuple = table_slot_create(columnarRelation, NULL);
+    ExprState *predicate = ExecPrepareQual(indexInfo->ii_Predicate, estate);
+
+    int nkeys = 0;
+    ScanKeyData *scanKey = NULL;
+    bool allowAccessStrategy = true;
+    bool allowSyncScan = false;
+    TableScanDesc scan = table_beginscan_strat(columnarRelation, snapshot, nkeys, scanKey,
+                                               allowAccessStrategy, allowSyncScan);
+
+    ColumnarReadMissingRowsIntoIndex(scan, indexRelation, indexInfo, estate,
+                                     predicate, validateIndexState);
+
+    table_endscan(scan);
+
+    /* report the last "virtual" block as "done" */
+    ColumnarReportTotalVirtualBlocks(columnarRelation, snapshot,
+                                     PROGRESS_SCAN_BLOCKS_DONE);
+
+    ExecDropSingleTupleTableSlot(econtext->ecxt_scantuple);
+    FreeExecutorState(estate);
+    indexInfo->ii_ExpressionsState = NIL;
+    indexInfo->ii_PredicateState = NULL;
+}
+
+
+/*
+ * ColumnarReadMissingRowsIntoIndex inserts the tuples that are not in
+ * the index yet by reading the actual relation based on given "scan".
+ */
+static void
+ColumnarReadMissingRowsIntoIndex(TableScanDesc scan, Relation indexRelation,
+                                 IndexInfo *indexInfo, EState *estate,
+                                 ExprState *predicate,
+                                 ValidateIndexState *validateIndexState)
+{
+    BlockNumber lastReportedBlockNumber = InvalidBlockNumber;
+
+    bool indexTupleSortEmpty = false;
+    ItemPointerData indexedItemPointerData;
+    ItemPointerSetInvalid(&indexedItemPointerData);
+
+    ExprContext *econtext = GetPerTupleExprContext(estate);
+    TupleTableSlot *slot = econtext->ecxt_scantuple;
+    while (columnar_getnextslot(scan, ForwardScanDirection, slot))
+    {
+        CHECK_FOR_INTERRUPTS();
+
+        ItemPointer columnarItemPointer = &slot->tts_tid;
+        BlockNumber currentBlockNumber = ItemPointerGetBlockNumber(columnarItemPointer);
+        if (lastReportedBlockNumber != currentBlockNumber)
+        {
+            /*
+             * columnar_getnextslot guarantees that returned tuple will
+             * always have a greater ItemPointer than the ones we fetched
+             * before, so we directly use BlockNumber to report our progress.
+             */
+            Assert(lastReportedBlockNumber == InvalidBlockNumber ||
+                   currentBlockNumber >= lastReportedBlockNumber);
+            pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_DONE,
+                                         currentBlockNumber);
+            lastReportedBlockNumber = currentBlockNumber;
+        }
+
+        validateIndexState->htups += 1;
+
+        if (!indexTupleSortEmpty &&
+            (!ItemPointerIsValid(&indexedItemPointerData) ||
+             ItemPointerCompare(&indexedItemPointerData, columnarItemPointer) < 0))
+        {
+            /*
+             * Skip indexed item pointers until we find or pass the current
+             * columnar relation item pointer.
+             */
+            indexedItemPointerData =
+                TupleSortSkipSmallerItemPointers(validateIndexState->tuplesort,
+                                                 columnarItemPointer);
+            indexTupleSortEmpty = !ItemPointerIsValid(&indexedItemPointerData);
+        }
+
+        if (!indexTupleSortEmpty &&
+            ItemPointerCompare(&indexedItemPointerData, columnarItemPointer) == 0)
+        {
+            /* tuple is already covered by the index, skip */
+            continue;
+        }
+        Assert(indexTupleSortEmpty ||
+               ItemPointerCompare(&indexedItemPointerData, columnarItemPointer) > 0);
+
+        MemoryContextReset(econtext->ecxt_per_tuple_memory);
+
+        if (predicate != NULL && !ExecQual(predicate, econtext))
+        {
+            /* for partial indexes, discard tuples that don't satisfy the predicate */
+            continue;
+        }
+
+        Datum indexValues[INDEX_MAX_KEYS];
+        bool indexNulls[INDEX_MAX_KEYS];
+        FormIndexDatum(indexInfo, slot, estate, indexValues, indexNulls);
+
+        Relation columnarRelation = scan->rs_rd;
+        IndexUniqueCheck indexUniqueCheck =
+            indexInfo->ii_Unique ? UNIQUE_CHECK_YES : UNIQUE_CHECK_NO;
+        index_insert(indexRelation, indexValues, indexNulls, columnarItemPointer,
+                     columnarRelation, indexUniqueCheck, indexInfo);
+
+        validateIndexState->tups_inserted += 1;
+    }
+}
+
+
+/*
+ * TupleSortSkipSmallerItemPointers iterates given tupleSort until finding an
+ * ItemPointer that is greater than or equal to given targetItemPointer and
+ * returns that ItemPointer.
+ * If such an ItemPointer does not exist, then returns invalid ItemPointer.
+ *
+ * Note that this function assumes given tupleSort doesn't have any NULL
+ * Datum's.
+ */
+static ItemPointerData
+TupleSortSkipSmallerItemPointers(Tuplesortstate *tupleSort, ItemPointer targetItemPointer)
+{
+    ItemPointerData tsItemPointerData;
+    ItemPointerSetInvalid(&tsItemPointerData);
+
+    while (!ItemPointerIsValid(&tsItemPointerData) ||
+           ItemPointerCompare(&tsItemPointerData, targetItemPointer) < 0)
+    {
+        bool forwardDirection = true;
+        Datum *abbrev = NULL;
+        Datum tsDatum;
+        bool tsDatumIsNull;
+        if (!tuplesort_getdatum(tupleSort, forwardDirection, &tsDatum,
+                                &tsDatumIsNull, abbrev))
+        {
+            ItemPointerSetInvalid(&tsItemPointerData);
+            break;
+        }
+
+        Assert(!tsDatumIsNull);
+        itemptr_decode(&tsItemPointerData, DatumGetInt64(tsDatum));
+
+#ifndef USE_FLOAT8_BYVAL
+
+        /*
+         * If int8 is pass-by-ref, we need to free Datum memory.
+         * See tuplesort_getdatum function's comment.
+         */
+        pfree(DatumGetPointer(tsDatum));
+#endif
+    }
+
+    return tsItemPointerData;
 }
@@ -1709,19 +1863,6 @@ ColumnarProcessUtility(PlannedStmt *pstmt,
                                          GetCreateIndexRelationLockMode(indexStmt));
 
         if (rel->rd_tableam == GetColumnarTableAmRoutine())
         {
-            /*
-             * We should reject CREATE INDEX CONCURRENTLY before DefineIndex() is
-             * called. Erroring in callbacks called from DefineIndex() will create
-             * the index and mark it as INVALID, which will cause segfault during
-             * inserts.
-             */
-            if (indexStmt->concurrent)
-            {
-                ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                errmsg("concurrent index commands are not "
-                                       "supported for columnar tables")));
-            }
-
             /* for now, we don't support index access methods other than btree & hash */
             if (strncmp(indexStmt->accessMethod, "btree", NAMEDATALEN) != 0 &&
                 strncmp(indexStmt->accessMethod, "hash", NAMEDATALEN) != 0)

View File

@@ -1,3 +1,4 @@
 test: columnar_write_concurrency
 test: columnar_vacuum_vs_insert
 test: columnar_temp_tables
+test: columnar_index_concurrency

View File

@@ -0,0 +1,426 @@
Parsed test spec with 5 sessions
starting permutation: s1-begin s1-insert s2-create-index s3-insert s1-commit s1-force-index-scan s1-check-test-1-2 s1-reset-table
step s1-begin:
BEGIN;
step s1-insert:
INSERT INTO columnar_table SELECT i, 1000*i FROM generate_series(1, 10) i;
step s2-create-index:
CREATE INDEX idx_s2 ON columnar_table (a);
<waiting ...>
step s3-insert:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(21, 30) i;
<waiting ...>
step s1-commit:
COMMIT;
step s2-create-index: <... completed>
step s3-insert: <... completed>
step s1-force-index-scan:
SET enable_seqscan TO OFF;
SET columnar.enable_custom_scan TO OFF;
SET enable_indexscan TO ON;
step s1-check-test-1-2:
SELECT SUM(a)=30 FROM columnar_table WHERE a=5 OR a=25;
?column?
t
step s1-reset-table:
DROP INDEX IF EXISTS idx_s2, conc_s2_idx, conc_unique_s2_idx, unique_idx_s2, conc_partial_s2_idx;
TRUNCATE columnar_table;
starting permutation: s1-begin s1-insert s2-create-index-concurrently s3-insert s1-commit s1-force-index-scan s1-check-test-1-2 s1-reset-table
step s1-begin:
BEGIN;
step s1-insert:
INSERT INTO columnar_table SELECT i, 1000*i FROM generate_series(1, 10) i;
step s2-create-index-concurrently:
CREATE INDEX CONCURRENTLY conc_s2_idx ON columnar_table(a);
<waiting ...>
step s3-insert:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(21, 30) i;
step s1-commit:
COMMIT;
step s2-create-index-concurrently: <... completed>
step s1-force-index-scan:
SET enable_seqscan TO OFF;
SET columnar.enable_custom_scan TO OFF;
SET enable_indexscan TO ON;
step s1-check-test-1-2:
SELECT SUM(a)=30 FROM columnar_table WHERE a=5 OR a=25;
?column?
t
step s1-reset-table:
DROP INDEX IF EXISTS idx_s2, conc_s2_idx, conc_unique_s2_idx, unique_idx_s2, conc_partial_s2_idx;
TRUNCATE columnar_table;
starting permutation: s1-begin s1-insert s2-create-index-concurrently s5-begin s5-insert s3-begin s3-insert s1-commit s4-insert-1 s5-commit s3-rollback s1-force-index-scan s1-check-test-3 s1-reset-table
step s1-begin:
BEGIN;
step s1-insert:
INSERT INTO columnar_table SELECT i, 1000*i FROM generate_series(1, 10) i;
step s2-create-index-concurrently:
CREATE INDEX CONCURRENTLY conc_s2_idx ON columnar_table(a);
<waiting ...>
step s5-begin:
BEGIN;
step s5-insert:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(11, 20) i;
step s3-begin:
BEGIN;
step s3-insert:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(21, 30) i;
step s1-commit:
COMMIT;
step s4-insert-1:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(31, 40) i;
step s5-commit:
COMMIT;
step s3-rollback:
ROLLBACK;
step s2-create-index-concurrently: <... completed>
step s1-force-index-scan:
SET enable_seqscan TO OFF;
SET columnar.enable_custom_scan TO OFF;
SET enable_indexscan TO ON;
step s1-check-test-3:
SELECT COUNT(a)=0 FROM columnar_table WHERE a=25;
SELECT SUM(a)=55 FROM columnar_table WHERE a=5 OR a=15 OR a=35;
?column?
t
?column?
t
step s1-reset-table:
DROP INDEX IF EXISTS idx_s2, conc_s2_idx, conc_unique_s2_idx, unique_idx_s2, conc_partial_s2_idx;
TRUNCATE columnar_table;
starting permutation: s4-insert-4 s1-begin s1-insert s4-insert-2 s2-create-index-concurrently s4-insert-3 s5-begin s5-insert s3-begin s3-insert s1-rollback s4-insert-1 s5-rollback s3-commit s1-force-index-scan s1-check-test-4 s1-reset-table
step s4-insert-4:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(61, 70) i;
step s1-begin:
BEGIN;
step s1-insert:
INSERT INTO columnar_table SELECT i, 1000*i FROM generate_series(1, 10) i;
step s4-insert-2:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(41, 50) i;
step s2-create-index-concurrently:
CREATE INDEX CONCURRENTLY conc_s2_idx ON columnar_table(a);
<waiting ...>
step s4-insert-3:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(51, 60) i;
step s5-begin:
BEGIN;
step s5-insert:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(11, 20) i;
step s3-begin:
BEGIN;
step s3-insert:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(21, 30) i;
step s1-rollback:
ROLLBACK;
step s4-insert-1:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(31, 40) i;
step s5-rollback:
ROLLBACK;
step s3-commit:
COMMIT;
step s2-create-index-concurrently: <... completed>
step s1-force-index-scan:
SET enable_seqscan TO OFF;
SET columnar.enable_custom_scan TO OFF;
SET enable_indexscan TO ON;
step s1-check-test-4:
SELECT COUNT(a)=0 FROM columnar_table WHERE a=5 OR a=15;
SELECT SUM(a)=225 FROM columnar_table WHERE a=25 OR a=35 OR a=45 OR a=55 OR a=65;
?column?
t
?column?
t
step s1-reset-table:
DROP INDEX IF EXISTS idx_s2, conc_s2_idx, conc_unique_s2_idx, unique_idx_s2, conc_partial_s2_idx;
TRUNCATE columnar_table;
starting permutation: s4-insert-4 s1-begin s1-insert s2-create-partial-concurrently s4-insert-1 s1-rollback s1-reset-table
step s4-insert-4:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(61, 70) i;
step s1-begin:
BEGIN;
step s1-insert:
INSERT INTO columnar_table SELECT i, 1000*i FROM generate_series(1, 10) i;
step s2-create-partial-concurrently:
CREATE INDEX CONCURRENTLY conc_partial_s2_idx ON columnar_table(a)
WHERE a > 50 AND a <= 80;
<waiting ...>
step s4-insert-1:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(31, 40) i;
step s1-rollback:
ROLLBACK;
step s2-create-partial-concurrently: <... completed>
step s1-reset-table:
DROP INDEX IF EXISTS idx_s2, conc_s2_idx, conc_unique_s2_idx, unique_idx_s2, conc_partial_s2_idx;
TRUNCATE columnar_table;
starting permutation: s2-create-index s1-begin s1-insert s2-reindex-concurrently s3-insert s1-commit s1-force-index-scan s1-check-test-1-2 s1-reset-table
step s2-create-index:
CREATE INDEX idx_s2 ON columnar_table (a);
step s1-begin:
BEGIN;
step s1-insert:
INSERT INTO columnar_table SELECT i, 1000*i FROM generate_series(1, 10) i;
step s2-reindex-concurrently:
REINDEX INDEX CONCURRENTLY idx_s2;
<waiting ...>
step s3-insert:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(21, 30) i;
step s1-commit:
COMMIT;
step s2-reindex-concurrently: <... completed>
step s1-force-index-scan:
SET enable_seqscan TO OFF;
SET columnar.enable_custom_scan TO OFF;
SET enable_indexscan TO ON;
step s1-check-test-1-2:
SELECT SUM(a)=30 FROM columnar_table WHERE a=5 OR a=25;
?column?
t
step s1-reset-table:
DROP INDEX IF EXISTS idx_s2, conc_s2_idx, conc_unique_s2_idx, unique_idx_s2, conc_partial_s2_idx;
TRUNCATE columnar_table;
starting permutation: s2-create-index s1-begin s1-insert s2-reindex-concurrently s5-begin s5-insert s3-begin s3-insert s1-commit s4-insert-1 s5-commit s3-rollback s1-force-index-scan s1-check-test-3 s1-reset-table
step s2-create-index:
CREATE INDEX idx_s2 ON columnar_table (a);
step s1-begin:
BEGIN;
step s1-insert:
INSERT INTO columnar_table SELECT i, 1000*i FROM generate_series(1, 10) i;
step s2-reindex-concurrently:
REINDEX INDEX CONCURRENTLY idx_s2;
<waiting ...>
step s5-begin:
BEGIN;
step s5-insert:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(11, 20) i;
step s3-begin:
BEGIN;
step s3-insert:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(21, 30) i;
step s1-commit:
COMMIT;
step s4-insert-1:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(31, 40) i;
step s5-commit:
COMMIT;
step s3-rollback:
ROLLBACK;
step s2-reindex-concurrently: <... completed>
step s1-force-index-scan:
SET enable_seqscan TO OFF;
SET columnar.enable_custom_scan TO OFF;
SET enable_indexscan TO ON;
step s1-check-test-3:
SELECT COUNT(a)=0 FROM columnar_table WHERE a=25;
SELECT SUM(a)=55 FROM columnar_table WHERE a=5 OR a=15 OR a=35;
?column?
t
?column?
t
step s1-reset-table:
DROP INDEX IF EXISTS idx_s2, conc_s2_idx, conc_unique_s2_idx, unique_idx_s2, conc_partial_s2_idx;
TRUNCATE columnar_table;
starting permutation: s2-create-index s4-insert-4 s1-begin s1-insert s4-insert-2 s2-reindex-concurrently s4-insert-3 s5-begin s5-insert s3-begin s3-insert s1-rollback s4-insert-1 s5-rollback s3-commit s1-force-index-scan s1-check-test-4 s1-reset-table
step s2-create-index:
CREATE INDEX idx_s2 ON columnar_table (a);
step s4-insert-4:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(61, 70) i;
step s1-begin:
BEGIN;
step s1-insert:
INSERT INTO columnar_table SELECT i, 1000*i FROM generate_series(1, 10) i;
step s4-insert-2:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(41, 50) i;
step s2-reindex-concurrently:
REINDEX INDEX CONCURRENTLY idx_s2;
<waiting ...>
step s4-insert-3:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(51, 60) i;
step s5-begin:
BEGIN;
step s5-insert:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(11, 20) i;
step s3-begin:
BEGIN;
step s3-insert:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(21, 30) i;
step s1-rollback:
ROLLBACK;
step s4-insert-1:
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(31, 40) i;
step s5-rollback:
ROLLBACK;
step s3-commit:
COMMIT;
step s2-reindex-concurrently: <... completed>
step s1-force-index-scan:
SET enable_seqscan TO OFF;
SET columnar.enable_custom_scan TO OFF;
SET enable_indexscan TO ON;
step s1-check-test-4:
SELECT COUNT(a)=0 FROM columnar_table WHERE a=5 OR a=15;
SELECT SUM(a)=225 FROM columnar_table WHERE a=25 OR a=35 OR a=45 OR a=55 OR a=65;
?column?
t
?column?
t
step s1-reset-table:
DROP INDEX IF EXISTS idx_s2, conc_s2_idx, conc_unique_s2_idx, unique_idx_s2, conc_partial_s2_idx;
TRUNCATE columnar_table;
starting permutation: s4-insert-5 s1-begin s1-insert s2-create-unique-index-concurrently s1-commit s1-reset-table
step s4-insert-5:
-- Insert values conflicting with "s1-insert" so that concurrent index
-- build leaves an invalid index behind.
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(1, 10) i;
step s1-begin:
BEGIN;
step s1-insert:
INSERT INTO columnar_table SELECT i, 1000*i FROM generate_series(1, 10) i;
step s2-create-unique-index-concurrently:
CREATE UNIQUE INDEX CONCURRENTLY conc_unique_s2_idx ON columnar_table(a);
<waiting ...>
step s1-commit:
COMMIT;
step s2-create-unique-index-concurrently: <... completed>
error in steps s1-commit s2-create-unique-index-concurrently: ERROR: could not create unique index "conc_unique_s2_idx"
step s1-reset-table:
DROP INDEX IF EXISTS idx_s2, conc_s2_idx, conc_unique_s2_idx, unique_idx_s2, conc_partial_s2_idx;
TRUNCATE columnar_table;
starting permutation: s2-create-unique-index s4-insert-5 s1-begin s1-insert s2-reindex-unique-concurrently s1-commit s1-reset-table
step s2-create-unique-index:
CREATE UNIQUE INDEX unique_idx_s2 ON columnar_table (a);
step s4-insert-5:
-- Insert values conflicting with "s1-insert" so that concurrent index
-- build leaves an invalid index behind.
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(1, 10) i;
step s1-begin:
BEGIN;
step s1-insert:
INSERT INTO columnar_table SELECT i, 1000*i FROM generate_series(1, 10) i;
ERROR: duplicate key value violates unique constraint "unique_idx_s2"
step s2-reindex-unique-concurrently:
REINDEX INDEX CONCURRENTLY unique_idx_s2;
step s1-commit:
COMMIT;
step s1-reset-table:
DROP INDEX IF EXISTS idx_s2, conc_s2_idx, conc_unique_s2_idx, unique_idx_s2, conc_partial_s2_idx;
TRUNCATE columnar_table;

View File

@@ -10,13 +10,15 @@ SET search_path tO columnar_indexes, public;
 --
 create table t(a int, b int) using columnar;
 create index CONCURRENTLY t_idx on t(a, b);
-ERROR: concurrent index commands are not supported for columnar tables
+REINDEX INDEX CONCURRENTLY t_idx;
 \d t
                 Table "columnar_indexes.t"
  Column |  Type   | Collation | Nullable | Default
 ---------------------------------------------------------------------
  a      | integer |           |          |
  b      | integer |           |          |
+Indexes:
+    "t_idx" btree (a, b)
 explain insert into t values (1, 2);
                      QUERY PLAN
@@ -32,16 +34,6 @@ SELECT * FROM t;
  1 | 2
 (1 row)
 
-create index t_idx on t(a, b);
-\d t
-                Table "columnar_indexes.t"
- Column |  Type   | Collation | Nullable | Default
----------------------------------------------------------------------
- a      | integer |           |          |
- b      | integer |           |          |
-Indexes:
-    "t_idx" btree (a, b)
 explain insert into t values (1, 2);
                      QUERY PLAN
 ---------------------------------------------------------------------
@@ -61,6 +53,38 @@ SELECT * FROM t;
 set columnar.enable_custom_scan to 'off';
 set enable_seqscan to off;
 CREATE table columnar_table (a INT, b int) USING columnar;
+INSERT INTO columnar_table (a) VALUES (1), (1);
+CREATE UNIQUE INDEX CONCURRENTLY ON columnar_table (a);
+ERROR: could not create unique index "columnar_table_a_idx"
+DETAIL: Key (a)=(1) is duplicated.
+-- CONCURRENTLY should leave an invalid index behind
+SELECT COUNT(*)=1 FROM pg_index WHERE indrelid = 'columnar_table'::regclass AND indisvalid = 'false';
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+INSERT INTO columnar_table (a) VALUES (1), (1);
+REINDEX TABLE columnar_table;
+ERROR: could not create unique index "columnar_table_a_idx"
+DETAIL: Key (a)=(1) is duplicated.
+-- index is still invalid since REINDEX error'ed out
+SELECT COUNT(*)=1 FROM pg_index WHERE indrelid = 'columnar_table'::regclass AND indisvalid = 'false';
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+TRUNCATE columnar_table;
+REINDEX TABLE columnar_table;
+-- now it should be valid
+SELECT COUNT(*)=0 FROM pg_index WHERE indrelid = 'columnar_table'::regclass AND indisvalid = 'false';
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+DROP INDEX columnar_table_a_idx;
 INSERT INTO columnar_table (a, b) SELECT i,i*2 FROM generate_series(0, 16000) i;
 -- unique --
 BEGIN;
@@ -70,7 +94,7 @@ BEGIN;
 CREATE UNIQUE INDEX ON columnar_table (a);
 ERROR: cannot read from table when there is unflushed data in upper transactions
 ROLLBACK;
-CREATE UNIQUE INDEX ON columnar_table (a);
+CREATE UNIQUE INDEX CONCURRENTLY ON columnar_table (a);
 BEGIN;
 INSERT INTO columnar_table VALUES (16050);
 SAVEPOINT s1;
@@ -131,7 +155,7 @@ INSERT INTO partial_unique_idx_test VALUES (4, 700);
 ERROR: duplicate key value violates unique constraint "partial_unique_idx_test_a_idx"
 DETAIL: Key (a)=(4) already exists.
 -- btree --
-CREATE INDEX ON columnar_table (a);
+CREATE INDEX CONCURRENTLY ON columnar_table (a);
 SELECT (SELECT SUM(b) FROM columnar_table WHERE a>700 and a<965)=439560;
  ?column?
 ---------------------------------------------------------------------
@@ -166,7 +190,7 @@ EXPLAIN (COSTS OFF) SELECT b FROM columnar_table WHERE b = 30001;
 -- some more rows
 INSERT INTO columnar_table (a, b) SELECT i,i*2 FROM generate_series(16000, 17000) i;
-DROP INDEX columnar_table_a_idx;
+DROP INDEX CONCURRENTLY columnar_table_a_idx;
 TRUNCATE columnar_table;
 -- pkey --
 INSERT INTO columnar_table (a, b) SELECT i,i*2 FROM generate_series(16000, 16499) i;
@@ -275,7 +299,7 @@ INSERT INTO exclusion_test SELECT x, 2, 3*x, BOX('4,4,4,4') FROM generate_series
 -- make sure that we respect INCLUDE syntax --
 CREATE TABLE include_test (a INT, b BIGINT, c BIGINT, d BIGINT) USING columnar;
 INSERT INTO include_test SELECT i, i, i, i FROM generate_series (1, 1000) i;
-CREATE UNIQUE INDEX unique_a ON include_test (a);
+CREATE UNIQUE INDEX CONCURRENTLY unique_a ON include_test (a);
 -- cannot use index only scan
 EXPLAIN (COSTS OFF) SELECT b FROM include_test WHERE a = 500;
                   QUERY PLAN
@@ -383,8 +407,15 @@ INSERT INTO gist_point_tbl (id, p) SELECT g, point(g*10, g*10) FROM generate_ser
 -- sp gist --
 CREATE TABLE box_temp (f1 box) USING columnar;
 INSERT INTO box_temp SELECT box(point(i, i), point(i * 2, i * 2)) FROM generate_series(1, 10) AS i;
-CREATE INDEX box_spgist ON box_temp USING spgist (f1);
+CREATE INDEX CONCURRENTLY box_spgist ON box_temp USING spgist (f1);
 ERROR: only btree and hash indexes are supported on columnar tables
+-- CONCURRENTLY should not leave an invalid index behind
+SELECT COUNT(*)=0 FROM pg_index WHERE indrelid = 'box_temp'::regclass AND indisvalid = 'false';
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
 INSERT INTO box_temp SELECT box(point(i, i), point(i * 2, i * 2)) FROM generate_series(1, 10) AS i;
 -- brin --
 CREATE TABLE brin_summarize (value int) USING columnar;
@@ -399,5 +430,10 @@ VACUUM FULL parallel_scan_test;
 NOTICE: falling back to serial index build since parallel scan on columnar tables is not supported
 REINDEX TABLE parallel_scan_test;
 NOTICE: falling back to serial index build since parallel scan on columnar tables is not supported
+CREATE INDEX CONCURRENTLY ON parallel_scan_test (a);
+NOTICE: falling back to serial index build since parallel scan on columnar tables is not supported
+REINDEX TABLE CONCURRENTLY parallel_scan_test;
+NOTICE: falling back to serial index build since parallel scan on columnar tables is not supported
+NOTICE: falling back to serial index build since parallel scan on columnar tables is not supported
 SET client_min_messages TO WARNING;
 DROP SCHEMA columnar_indexes CASCADE;

View File

@@ -0,0 +1,193 @@
setup
{
CREATE TABLE columnar_table (a INT, b INT) USING columnar;
}
teardown
{
DROP TABLE IF EXISTS columnar_table CASCADE;
}
session "s1"
step "s1-begin"
{
BEGIN;
}
step "s1-insert"
{
INSERT INTO columnar_table SELECT i, 1000*i FROM generate_series(1, 10) i;
}
step "s1-commit"
{
COMMIT;
}
step "s1-rollback"
{
ROLLBACK;
}
step "s1-reset-table"
{
DROP INDEX IF EXISTS idx_s2, conc_s2_idx, conc_unique_s2_idx, unique_idx_s2, conc_partial_s2_idx;
TRUNCATE columnar_table;
}
step "s1-force-index-scan"
{
SET enable_seqscan TO OFF;
SET columnar.enable_custom_scan TO OFF;
SET enable_indexscan TO ON;
}
step "s1-check-test-1-2"
{
SELECT SUM(a)=30 FROM columnar_table WHERE a=5 OR a=25;
}
step "s1-check-test-3"
{
SELECT COUNT(a)=0 FROM columnar_table WHERE a=25;
SELECT SUM(a)=55 FROM columnar_table WHERE a=5 OR a=15 OR a=35;
}
step "s1-check-test-4"
{
SELECT COUNT(a)=0 FROM columnar_table WHERE a=5 OR a=15;
SELECT SUM(a)=225 FROM columnar_table WHERE a=25 OR a=35 OR a=45 OR a=55 OR a=65;
}
session "s2"
step "s2-create-index"
{
CREATE INDEX idx_s2 ON columnar_table (a);
}
step "s2-create-unique-index"
{
CREATE UNIQUE INDEX unique_idx_s2 ON columnar_table (a);
}
step "s2-create-index-concurrently"
{
CREATE INDEX CONCURRENTLY conc_s2_idx ON columnar_table(a);
}
step "s2-create-unique-index-concurrently"
{
CREATE UNIQUE INDEX CONCURRENTLY conc_unique_s2_idx ON columnar_table(a);
}
step "s2-create-partial-concurrently"
{
CREATE INDEX CONCURRENTLY conc_partial_s2_idx ON columnar_table(a)
WHERE a > 50 AND a <= 80;
}
step "s2-reindex-unique-concurrently"
{
REINDEX INDEX CONCURRENTLY unique_idx_s2;
}
step "s2-reindex-concurrently"
{
REINDEX INDEX CONCURRENTLY idx_s2;
}
session "s3"
step "s3-begin"
{
BEGIN;
}
step "s3-rollback"
{
ROLLBACK;
}
step "s3-insert"
{
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(21, 30) i;
}
step "s3-commit"
{
COMMIT;
}
session "s4"
step "s4-insert-1"
{
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(31, 40) i;
}
step "s4-insert-2"
{
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(41, 50) i;
}
step "s4-insert-3"
{
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(51, 60) i;
}
step "s4-insert-4"
{
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(61, 70) i;
}
step "s4-insert-5"
{
-- Insert values conflicting with "s1-insert" so that concurrent index
-- build leaves an invalid index behind.
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(1, 10) i;
}
session "s5"
step "s5-insert"
{
INSERT INTO columnar_table SELECT i, i*1000 FROM generate_series(11, 20) i;
}
step "s5-begin"
{
BEGIN;
}
step "s5-commit"
{
COMMIT;
}
step "s5-rollback"
{
ROLLBACK;
}
# CREATE INDEX (without CONCURRENTLY)
permutation "s1-begin" "s1-insert" "s2-create-index" "s3-insert" "s1-commit" "s1-force-index-scan" "s1-check-test-1-2" "s1-reset-table"
# Start a session that executes INSERT in a transaction block before
# CREATE INDEX / REINDEX CONCURRENTLY so that the latter one blocks.
# CREATE INDEX CONCURRENTLY
permutation "s1-begin" "s1-insert" "s2-create-index-concurrently" "s3-insert" "s1-commit" "s1-force-index-scan" "s1-check-test-1-2" "s1-reset-table"
permutation "s1-begin" "s1-insert" "s2-create-index-concurrently" "s5-begin" "s5-insert" "s3-begin" "s3-insert" "s1-commit" "s4-insert-1" "s5-commit" "s3-rollback" "s1-force-index-scan" "s1-check-test-3" "s1-reset-table"
permutation "s4-insert-4" "s1-begin" "s1-insert" "s4-insert-2" "s2-create-index-concurrently" "s4-insert-3" "s5-begin" "s5-insert" "s3-begin" "s3-insert" "s1-rollback" "s4-insert-1" "s5-rollback" "s3-commit" "s1-force-index-scan" "s1-check-test-4" "s1-reset-table"
permutation "s4-insert-4" "s1-begin" "s1-insert" "s2-create-partial-concurrently" "s4-insert-1" "s1-rollback" "s1-reset-table"
# similar tests with REINDEX INDEX CONCURRENTLY
permutation "s2-create-index" "s1-begin" "s1-insert" "s2-reindex-concurrently" "s3-insert" "s1-commit" "s1-force-index-scan" "s1-check-test-1-2" "s1-reset-table"
permutation "s2-create-index" "s1-begin" "s1-insert" "s2-reindex-concurrently" "s5-begin" "s5-insert" "s3-begin" "s3-insert" "s1-commit" "s4-insert-1" "s5-commit" "s3-rollback" "s1-force-index-scan" "s1-check-test-3" "s1-reset-table"
permutation "s2-create-index" "s4-insert-4" "s1-begin" "s1-insert" "s4-insert-2" "s2-reindex-concurrently" "s4-insert-3" "s5-begin" "s5-insert" "s3-begin" "s3-insert" "s1-rollback" "s4-insert-1" "s5-rollback" "s3-commit" "s1-force-index-scan" "s1-check-test-4" "s1-reset-table"
# CREATE INDEX / REINDEX CONCURRENTLY fails due to duplicate values
permutation "s4-insert-5" "s1-begin" "s1-insert" "s2-create-unique-index-concurrently" "s1-commit" "s1-reset-table"
permutation "s2-create-unique-index" "s4-insert-5" "s1-begin" "s1-insert" "s2-reindex-unique-concurrently" "s1-commit" "s1-reset-table"

View File

@@ -12,13 +12,12 @@ SET search_path tO columnar_indexes, public;
 --
 create table t(a int, b int) using columnar;
 create index CONCURRENTLY t_idx on t(a, b);
+REINDEX INDEX CONCURRENTLY t_idx;
 \d t
 explain insert into t values (1, 2);
 insert into t values (1, 2);
 SELECT * FROM t;
 
-create index t_idx on t(a, b);
-\d t
 explain insert into t values (1, 2);
 insert into t values (3, 4);
 SELECT * FROM t;
@@ -28,6 +27,27 @@ set columnar.enable_custom_scan to 'off';
 set enable_seqscan to off;
 
 CREATE table columnar_table (a INT, b int) USING columnar;
+INSERT INTO columnar_table (a) VALUES (1), (1);
+CREATE UNIQUE INDEX CONCURRENTLY ON columnar_table (a);
+-- CONCURRENTLY should leave an invalid index behind
+SELECT COUNT(*)=1 FROM pg_index WHERE indrelid = 'columnar_table'::regclass AND indisvalid = 'false';
+
+INSERT INTO columnar_table (a) VALUES (1), (1);
+REINDEX TABLE columnar_table;
+-- index is still invalid since REINDEX error'ed out
+SELECT COUNT(*)=1 FROM pg_index WHERE indrelid = 'columnar_table'::regclass AND indisvalid = 'false';
+
+TRUNCATE columnar_table;
+REINDEX TABLE columnar_table;
+-- now it should be valid
+SELECT COUNT(*)=0 FROM pg_index WHERE indrelid = 'columnar_table'::regclass AND indisvalid = 'false';
+
+DROP INDEX columnar_table_a_idx;
+
 INSERT INTO columnar_table (a, b) SELECT i,i*2 FROM generate_series(0, 16000) i;
 
 -- unique --
@@ -38,7 +58,7 @@ BEGIN;
 CREATE UNIQUE INDEX ON columnar_table (a);
 ROLLBACK;
-CREATE UNIQUE INDEX ON columnar_table (a);
+CREATE UNIQUE INDEX CONCURRENTLY ON columnar_table (a);
 
 BEGIN;
 INSERT INTO columnar_table VALUES (16050);
@@ -80,7 +100,7 @@ INSERT INTO partial_unique_idx_test VALUES (4, 600);
 INSERT INTO partial_unique_idx_test VALUES (4, 700);
 
 -- btree --
-CREATE INDEX ON columnar_table (a);
+CREATE INDEX CONCURRENTLY ON columnar_table (a);
 SELECT (SELECT SUM(b) FROM columnar_table WHERE a>700 and a<965)=439560;
 
 CREATE INDEX ON columnar_table (b)
@@ -98,7 +118,7 @@ EXPLAIN (COSTS OFF) SELECT b FROM columnar_table WHERE b = 30001;
 -- some more rows
 INSERT INTO columnar_table (a, b) SELECT i,i*2 FROM generate_series(16000, 17000) i;
 
-DROP INDEX columnar_table_a_idx;
+DROP INDEX CONCURRENTLY columnar_table_a_idx;
 TRUNCATE columnar_table;
 
 -- pkey --
@@ -199,7 +219,7 @@ CREATE TABLE include_test (a INT, b BIGINT, c BIGINT, d BIGINT) USING columnar;
 INSERT INTO include_test SELECT i, i, i, i FROM generate_series (1, 1000) i;
-CREATE UNIQUE INDEX unique_a ON include_test (a);
+CREATE UNIQUE INDEX CONCURRENTLY unique_a ON include_test (a);
 
 -- cannot use index only scan
 EXPLAIN (COSTS OFF) SELECT b FROM include_test WHERE a = 500;
@@ -281,7 +301,11 @@ INSERT INTO gist_point_tbl (id, p) SELECT g, point(g*10, g*10) FROM generate_ser
 -- sp gist --
 CREATE TABLE box_temp (f1 box) USING columnar;
 INSERT INTO box_temp SELECT box(point(i, i), point(i * 2, i * 2)) FROM generate_series(1, 10) AS i;
-CREATE INDEX box_spgist ON box_temp USING spgist (f1);
+CREATE INDEX CONCURRENTLY box_spgist ON box_temp USING spgist (f1);
+-- CONCURRENTLY should not leave an invalid index behind
+SELECT COUNT(*)=0 FROM pg_index WHERE indrelid = 'box_temp'::regclass AND indisvalid = 'false';
+
 INSERT INTO box_temp SELECT box(point(i, i), point(i * 2, i * 2)) FROM generate_series(1, 10) AS i;
 
 -- brin --
@@ -294,6 +318,8 @@ INSERT INTO parallel_scan_test SELECT i FROM generate_series(1,10) i;
 CREATE INDEX ON parallel_scan_test (a);
 VACUUM FULL parallel_scan_test;
 REINDEX TABLE parallel_scan_test;
+CREATE INDEX CONCURRENTLY ON parallel_scan_test (a);
+REINDEX TABLE CONCURRENTLY parallel_scan_test;
 
 SET client_min_messages TO WARNING;
 DROP SCHEMA columnar_indexes CASCADE;