Columnar: rename files and tests. (#4751)

* Columnar: rename files and tests.

* Columnar: Rename Table*State to Columnar*State.
pull/4759/head
jeff-davis 2021-03-01 08:34:24 -08:00 committed by GitHub
parent feee25dfbd
commit 9da9bd3dfd
76 changed files with 129 additions and 390 deletions
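For context, the sketch below shows how the renamed read/write API fits together after this commit. The type and function names come from the columnar.h declarations in the diff; the helper itself (CopyColumnarRows) is hypothetical, and error handling and memory-context management are omitted.

/*
 * CopyColumnarRows -- hypothetical helper, not part of this commit.
 * Copies every row from one columnar relation into another using the
 * renamed API, mirroring the pattern of columnar_relation_copy_for_cluster
 * in the diff below.
 */
static void
CopyColumnarRows(Relation source, Relation target, ColumnarOptions options)
{
    TupleDesc sourceDesc = RelationGetDescr(source);
    TupleDesc targetDesc = RelationGetDescr(target);

    /* these were TableWriteState / TableReadState before this commit */
    ColumnarWriteState *writeState = ColumnarBeginWrite(target->rd_node,
                                                        options, targetDesc);
    ColumnarReadState *readState = ColumnarBeginRead(source, sourceDesc,
                                                     RelationColumnList(sourceDesc),
                                                     NULL);

    Datum *values = palloc0(sourceDesc->natts * sizeof(Datum));
    bool *nulls = palloc0(sourceDesc->natts * sizeof(bool));

    /* pull rows from the read handle and push them into the write handle */
    while (ColumnarReadNextRow(readState, values, nulls))
    {
        ColumnarWriteRow(writeState, values, nulls);
    }

    /* ColumnarEndWrite flushes the last pending stripe before returning */
    ColumnarEndRead(readState);
    ColumnarEndWrite(writeState);
}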


@@ -32,7 +32,7 @@
 #include "columnar/columnar.h"
 #include "columnar/columnar_version_compat.h"
-struct TableWriteState
+struct ColumnarWriteState
 {
     TupleDesc tupleDescriptor;
     FmgrInfo **comparisonFunctionArray;
@@ -62,12 +62,12 @@ static StripeBuffers * CreateEmptyStripeBuffers(uint32 stripeMaxRowCount,
 static StripeSkipList * CreateEmptyStripeSkipList(uint32 stripeMaxRowCount,
     uint32 chunkRowCount,
     uint32 columnCount);
-static void FlushStripe(TableWriteState *writeState);
+static void FlushStripe(ColumnarWriteState *writeState);
 static StringInfo SerializeBoolArray(bool *boolArray, uint32 boolArrayLength);
 static void SerializeSingleDatum(StringInfo datumBuffer, Datum datum,
     bool datumTypeByValue, int datumTypeLength,
     char datumTypeAlign);
-static void SerializeChunkData(TableWriteState *writeState, uint32 chunkIndex,
+static void SerializeChunkData(ColumnarWriteState *writeState, uint32 chunkIndex,
     uint32 rowCount);
 static void UpdateChunkSkipNodeMinMax(ColumnChunkSkipNode *chunkSkipNode,
     Datum columnValue, bool columnTypeByValue,
@@ -81,7 +81,7 @@ static StringInfo CopyStringInfo(StringInfo sourceString);
  * handle. This handle should be used for adding the row values and finishing the
  * data load operation.
  */
-TableWriteState *
+ColumnarWriteState *
 ColumnarBeginWrite(RelFileNode relfilenode,
     ColumnarOptions options,
     TupleDesc tupleDescriptor)
@@ -121,7 +121,7 @@ ColumnarBeginWrite(RelFileNode relfilenode,
     ChunkData *chunkData = CreateEmptyChunkData(columnCount, columnMaskArray,
         options.chunkRowCount);
-    TableWriteState *writeState = palloc0(sizeof(TableWriteState));
+    ColumnarWriteState *writeState = palloc0(sizeof(ColumnarWriteState));
     writeState->relfilenode = relfilenode;
     writeState->options = options;
     writeState->tupleDescriptor = CreateTupleDescCopy(tupleDescriptor);
@@ -148,7 +148,7 @@ ColumnarBeginWrite(RelFileNode relfilenode,
  * the stripe, and add its metadata to the table footer.
  */
 void
-ColumnarWriteRow(TableWriteState *writeState, Datum *columnValues, bool *columnNulls)
+ColumnarWriteRow(ColumnarWriteState *writeState, Datum *columnValues, bool *columnNulls)
 {
     uint32 columnIndex = 0;
     StripeBuffers *stripeBuffers = writeState->stripeBuffers;
@@ -240,7 +240,7 @@ ColumnarWriteRow(TableWriteState *writeState, Datum *columnValues, bool *columnN
  * stripe, we flush it.
  */
 void
-ColumnarEndWrite(TableWriteState *writeState)
+ColumnarEndWrite(ColumnarWriteState *writeState)
 {
     ColumnarFlushPendingWrites(writeState);
@@ -252,7 +252,7 @@ ColumnarEndWrite(TableWriteState *writeState)
 void
-ColumnarFlushPendingWrites(TableWriteState *writeState)
+ColumnarFlushPendingWrites(ColumnarWriteState *writeState)
 {
     StripeBuffers *stripeBuffers = writeState->stripeBuffers;
     if (stripeBuffers != NULL)
@@ -277,7 +277,7 @@ ColumnarFlushPendingWrites(TableWriteState *writeState)
  * Return per-tuple context for columnar write operation.
  */
 MemoryContext
-ColumnarWritePerTupleContext(TableWriteState *state)
+ColumnarWritePerTupleContext(ColumnarWriteState *state)
 {
     return state->perTupleContext;
 }
@@ -432,7 +432,7 @@ WriteToSmgr(Relation rel, uint64 logicalOffset, char *data, uint32 dataLength)
  * flushes the skip list, data, and footer buffers to the file.
  */
 static void
-FlushStripe(TableWriteState *writeState)
+FlushStripe(ColumnarWriteState *writeState)
 {
     StripeMetadata stripeMetadata = { 0 };
     uint32 columnIndex = 0;
@@ -630,7 +630,7 @@ SerializeSingleDatum(StringInfo datumBuffer, Datum datum, bool datumTypeByValue,
  * compression type for every column.
  */
 static void
-SerializeChunkData(TableWriteState *writeState, uint32 chunkIndex, uint32 rowCount)
+SerializeChunkData(ColumnarWriteState *writeState, uint32 chunkIndex, uint32 rowCount)
 {
     uint32 columnIndex = 0;
     StripeBuffers *stripeBuffers = writeState->stripeBuffers;
@@ -804,7 +804,7 @@ CopyStringInfo(StringInfo sourceString)
 bool
-ContainsPendingWrites(TableWriteState *state)
+ContainsPendingWrites(ColumnarWriteState *state)
 {
     return state->stripeBuffers != NULL && state->stripeBuffers->rowCount != 0;
 }


@@ -64,7 +64,7 @@ typedef struct StripeReadState
     ChunkGroupReadState *chunkGroupReadState; /* owned */
 } StripeReadState;
-struct TableReadState
+struct ColumnarReadState
 {
     List *stripeList;
     TupleDesc tupleDescriptor;
@@ -148,7 +148,7 @@ static Datum ColumnDefaultValue(TupleConstr *tupleConstraints,
  * ColumnarBeginRead initializes a columnar read operation. This function returns a
  * read handle that's used during reading rows and finishing the read operation.
  */
-TableReadState *
+ColumnarReadState *
 ColumnarBeginRead(Relation relation, TupleDesc tupleDescriptor,
     List *projectedColumnList, List *whereClauseList)
 {
@@ -170,7 +170,7 @@ ColumnarBeginRead(Relation relation, TupleDesc tupleDescriptor,
     "Stripe Read Memory Context",
     ALLOCSET_DEFAULT_SIZES);
-    TableReadState *readState = palloc0(sizeof(TableReadState));
+    ColumnarReadState *readState = palloc0(sizeof(ColumnarReadState));
     readState->relation = relation;
     readState->stripeList = stripeList;
     readState->projectedColumnList = projectedColumnList;
@@ -190,7 +190,7 @@ ColumnarBeginRead(Relation relation, TupleDesc tupleDescriptor,
  * the function returns false.
  */
 bool
-ColumnarReadNextRow(TableReadState *readState, Datum *columnValues, bool *columnNulls)
+ColumnarReadNextRow(ColumnarReadState *readState, Datum *columnValues, bool *columnNulls)
 {
     while (true)
     {
@@ -238,7 +238,7 @@ ColumnarReadNextRow(TableReadState *readState, Datum *columnValues, bool *column
  * the beginning again
  */
 void
-ColumnarRescan(TableReadState *readState)
+ColumnarRescan(ColumnarReadState *readState)
 {
     readState->stripeReadState = NULL;
     readState->currentStripe = 0;
@@ -250,7 +250,7 @@ ColumnarRescan(TableReadState *readState)
  * Finishes a columnar read operation.
  */
 void
-ColumnarEndRead(TableReadState *readState)
+ColumnarEndRead(ColumnarReadState *readState)
 {
     MemoryContextDelete(readState->stripeReadContext);
     list_free_deep(readState->stripeList);
@@ -484,7 +484,7 @@ ReadChunkGroupNextRow(ChunkGroupReadState *chunkGroupReadState, Datum *columnVal
  * Return the number of chunk groups filtered during this read operation.
  */
 int64
-ColumnarReadChunkGroupsFiltered(TableReadState *state)
+ColumnarReadChunkGroupsFiltered(ColumnarReadState *state)
 {
     return state->chunkGroupsFiltered;
 }


@@ -73,7 +73,7 @@
 typedef struct ColumnarScanDescData
 {
     TableScanDescData cs_base;
-    TableReadState *cs_readState;
+    ColumnarReadState *cs_readState;
     /*
      * We initialize cs_readState lazily in the first getnextslot() call. We
@@ -237,7 +237,7 @@ columnar_beginscan_extended(Relation relation, Snapshot snapshot,
  * init_columnar_read_state initializes a column store table read and returns the
  * state.
  */
-static TableReadState *
+static ColumnarReadState *
 init_columnar_read_state(Relation relation, TupleDesc tupdesc, Bitmapset *attr_needed,
     List *scanQual)
 {
@@ -256,7 +256,7 @@ init_columnar_read_state(Relation relation, TupleDesc tupdesc, Bitmapset *attr_n
     }
 }
-    TableReadState *readState = ColumnarBeginRead(relation, tupdesc, neededColumnList,
+    ColumnarReadState *readState = ColumnarBeginRead(relation, tupdesc, neededColumnList,
     scanQual);
     return readState;
@@ -440,7 +440,7 @@ columnar_tuple_insert(Relation relation, TupleTableSlot *slot, CommandId cid,
      * columnar_init_write_state allocates the write state in a longer
      * lasting context, so no need to worry about it.
      */
-    TableWriteState *writeState = columnar_init_write_state(relation,
+    ColumnarWriteState *writeState = columnar_init_write_state(relation,
     RelationGetDescr(relation),
     GetCurrentSubTransactionId());
     MemoryContext oldContext = MemoryContextSwitchTo(ColumnarWritePerTupleContext(
@@ -481,7 +481,7 @@ static void
 columnar_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
     CommandId cid, int options, BulkInsertState bistate)
 {
-    TableWriteState *writeState = columnar_init_write_state(relation,
+    ColumnarWriteState *writeState = columnar_init_write_state(relation,
     RelationGetDescr(relation),
     GetCurrentSubTransactionId());
@@ -645,12 +645,13 @@ columnar_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap,
     ColumnarOptions columnarOptions = { 0 };
     ReadColumnarOptions(OldHeap->rd_id, &columnarOptions);
-    TableWriteState *writeState = ColumnarBeginWrite(NewHeap->rd_node,
+    ColumnarWriteState *writeState = ColumnarBeginWrite(NewHeap->rd_node,
     columnarOptions,
     targetDesc);
-    TableReadState *readState = ColumnarBeginRead(OldHeap, sourceDesc,
-    RelationColumnList(sourceDesc), NULL);
+    ColumnarReadState *readState = ColumnarBeginRead(OldHeap, sourceDesc,
+    RelationColumnList(sourceDesc),
+    NULL);
     Datum *values = palloc0(sourceDesc->natts * sizeof(Datum));
     bool *nulls = palloc0(sourceDesc->natts * sizeof(bool));
@@ -1163,7 +1164,7 @@ int64
 ColumnarScanChunkGroupsFiltered(TableScanDesc scanDesc)
 {
     ColumnarScanDesc columnarScanDesc = (ColumnarScanDesc) scanDesc;
-    TableReadState *readState = columnarScanDesc->cs_readState;
+    ColumnarReadState *readState = columnarScanDesc->cs_readState;
     /* readState is initialized lazily */
     if (readState != NULL)


@@ -69,7 +69,7 @@ static MemoryContext WriteStateContext = NULL;
 typedef struct SubXidWriteState
 {
     SubTransactionId subXid;
-    TableWriteState *writeState;
+    ColumnarWriteState *writeState;
     struct SubXidWriteState *next;
 } SubXidWriteState;
@@ -115,7 +115,7 @@ CleanupWriteStateMap(void *arg)
 }
-TableWriteState *
+ColumnarWriteState *
 columnar_init_write_state(Relation relation, TupleDesc tupdesc,
     SubTransactionId currentSubXid)
 {


@@ -212,14 +212,14 @@ typedef struct StripeBuffers
 } StripeBuffers;
-/* TableReadState represents state of a columnar scan. */
-struct TableReadState;
-typedef struct TableReadState TableReadState;
-/* TableWriteState represents state of a columnar write operation. */
-struct TableWriteState;
-typedef struct TableWriteState TableWriteState;
+/* ColumnarReadState represents state of a columnar scan. */
+struct ColumnarReadState;
+typedef struct ColumnarReadState ColumnarReadState;
+/* ColumnarWriteState represents state of a columnar write operation. */
+struct ColumnarWriteState;
+typedef struct ColumnarWriteState ColumnarWriteState;
 extern int columnar_compression;
 extern int columnar_stripe_row_limit;
@@ -231,26 +231,26 @@ extern void columnar_init_gucs(void);
 extern CompressionType ParseCompressionType(const char *compressionTypeString);
 /* Function declarations for writing to a columnar table */
-extern TableWriteState * ColumnarBeginWrite(RelFileNode relfilenode,
+extern ColumnarWriteState * ColumnarBeginWrite(RelFileNode relfilenode,
     ColumnarOptions options,
     TupleDesc tupleDescriptor);
-extern void ColumnarWriteRow(TableWriteState *state, Datum *columnValues,
+extern void ColumnarWriteRow(ColumnarWriteState *state, Datum *columnValues,
     bool *columnNulls);
-extern void ColumnarFlushPendingWrites(TableWriteState *state);
-extern void ColumnarEndWrite(TableWriteState *state);
-extern bool ContainsPendingWrites(TableWriteState *state);
-extern MemoryContext ColumnarWritePerTupleContext(TableWriteState *state);
+extern void ColumnarFlushPendingWrites(ColumnarWriteState *state);
+extern void ColumnarEndWrite(ColumnarWriteState *state);
+extern bool ContainsPendingWrites(ColumnarWriteState *state);
+extern MemoryContext ColumnarWritePerTupleContext(ColumnarWriteState *state);
 /* Function declarations for reading from columnar table */
-extern TableReadState * ColumnarBeginRead(Relation relation,
+extern ColumnarReadState * ColumnarBeginRead(Relation relation,
     TupleDesc tupleDescriptor,
    List *projectedColumnList,
    List *qualConditions);
-extern bool ColumnarReadNextRow(TableReadState *state, Datum *columnValues,
+extern bool ColumnarReadNextRow(ColumnarReadState *state, Datum *columnValues,
     bool *columnNulls);
-extern void ColumnarRescan(TableReadState *readState);
-extern void ColumnarEndRead(TableReadState *state);
-extern int64 ColumnarReadChunkGroupsFiltered(TableReadState *state);
+extern void ColumnarRescan(ColumnarReadState *readState);
+extern void ColumnarEndRead(ColumnarReadState *state);
+extern int64 ColumnarReadChunkGroupsFiltered(ColumnarReadState *state);
 /* Function declarations for common functions */
 extern FmgrInfo * GetFunctionInfoOrNull(Oid typeId, Oid accessMethodId,
@@ -296,7 +296,7 @@ extern Datum columnar_relation_storageid(PG_FUNCTION_ARGS);
 /* write_state_management.c */
-extern TableWriteState * columnar_init_write_state(Relation relation, TupleDesc
+extern ColumnarWriteState * columnar_init_write_state(Relation relation, TupleDesc
     tupdesc,
     SubTransactionId currentSubXid);
 extern void FlushWriteStateForRelfilenode(Oid relfilenode, SubTransactionId


@@ -167,11 +167,11 @@ check-operations: all
 check-columnar:
 	$(pg_regress_multi_check) --load-extension=citus \
-	-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/columnar_am_schedule $(EXTRA_TESTS)
+	-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/columnar_schedule $(EXTRA_TESTS)
 check-columnar-isolation: all $(isolation_test_files)
 	$(pg_regress_multi_check) --load-extension=citus --isolationtester \
-	-- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build --schedule=$(citus_abs_srcdir)/columnar_am_isolation_schedule $(EXTRA_TESTS)
+	-- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build --schedule=$(citus_abs_srcdir)/columnar_isolation_schedule $(EXTRA_TESTS)
 check-failure: all
 	$(pg_regress_multi_check) --load-extension=citus --mitmproxy \


@@ -1,2 +0,0 @@
test: am_write_concurrency
test: am_vacuum_vs_insert


@@ -1,36 +0,0 @@
test: multi_cluster_management
test: multi_test_helpers multi_test_helpers_superuser
test: multi_test_catalog_views
test: am_create
test: am_load
test: am_query
test: am_analyze
test: am_data_types
test: am_drop
test: am_indexes
test: columnar_fallback_scan
test: columnar_partitioning
test: columnar_permissions
test: am_empty
test: am_insert
test: am_update_delete
test: columnar_cursor
test: am_copyto
test: am_alter
test: am_alter_set_type
test: am_lz4 am_zstd
test: am_rollback
test: am_truncate
test: am_vacuum
test: am_clean
test: columnar_types_without_comparison
test: am_chunk_filtering
test: am_join
test: am_trigger
test: am_tableoptions
test: am_recursive
test: am_transactions
test: am_matview
test: am_memory
test: columnar_citus_integration


@@ -0,0 +1,2 @@
test: columnar_write_concurrency
test: columnar_vacuum_vs_insert


@@ -0,0 +1,36 @@
test: multi_cluster_management
test: multi_test_helpers multi_test_helpers_superuser
test: multi_test_catalog_views
test: columnar_create
test: columnar_load
test: columnar_query
test: columnar_analyze
test: columnar_data_types
test: columnar_drop
test: columnar_indexes
test: columnar_fallback_scan
test: columnar_partitioning
test: columnar_permissions
test: columnar_empty
test: columnar_insert
test: columnar_update_delete
test: columnar_cursor
test: columnar_copyto
test: columnar_alter
test: columnar_alter_set_type
test: columnar_lz4 columnar_zstd
test: columnar_rollback
test: columnar_truncate
test: columnar_vacuum
test: columnar_clean
test: columnar_types_without_comparison
test: columnar_chunk_filtering
test: columnar_join
test: columnar_trigger
test: columnar_tableoptions
test: columnar_recursive
test: columnar_transactions
test: columnar_matview
test: columnar_memory
test: columnar_citus_integration


@@ -1,256 +0,0 @@
--
-- Test the TRUNCATE TABLE command for cstore_fdw tables.
--
-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
---------------------------------------------------------------------
f
(1 row)
-- Check that files for the automatically managed table exist in the
-- cstore_fdw/{databaseoid} directory.
SELECT count(*) FROM (
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
) AS q1) AS q2;
count
---------------------------------------------------------------------
0
(1 row)
-- CREATE a cstore_fdw table, fill with some data --
CREATE FOREIGN TABLE cstore_truncate_test (a int, b int) SERVER cstore_server;
CREATE FOREIGN TABLE cstore_truncate_test_second (a int, b int) SERVER cstore_server;
CREATE FOREIGN TABLE cstore_truncate_test_compressed (a int, b int) SERVER cstore_server OPTIONS (compression 'pglz');
CREATE TABLE cstore_truncate_test_regular (a int, b int);
INSERT INTO cstore_truncate_test select a, a from generate_series(1, 10) a;
INSERT INTO cstore_truncate_test_compressed select a, a from generate_series(1, 10) a;
INSERT INTO cstore_truncate_test_compressed select a, a from generate_series(1, 10) a;
-- query rows
SELECT * FROM cstore_truncate_test;
a | b
---------------------------------------------------------------------
1 | 1
2 | 2
3 | 3
4 | 4
5 | 5
6 | 6
7 | 7
8 | 8
9 | 9
10 | 10
(10 rows)
TRUNCATE TABLE cstore_truncate_test;
SELECT * FROM cstore_truncate_test;
a | b
---------------------------------------------------------------------
(0 rows)
SELECT COUNT(*) from cstore_truncate_test;
count
---------------------------------------------------------------------
0
(1 row)
SELECT count(*) FROM cstore_truncate_test_compressed;
count
---------------------------------------------------------------------
20
(1 row)
TRUNCATE TABLE cstore_truncate_test_compressed;
SELECT count(*) FROM cstore_truncate_test_compressed;
count
---------------------------------------------------------------------
0
(1 row)
-- make sure data files still present
SELECT count(*) FROM (
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
) AS q1) AS q2;
count
---------------------------------------------------------------------
6
(1 row)
INSERT INTO cstore_truncate_test select a, a from generate_series(1, 10) a;
INSERT INTO cstore_truncate_test_regular select a, a from generate_series(10, 20) a;
INSERT INTO cstore_truncate_test_second select a, a from generate_series(20, 30) a;
SELECT * from cstore_truncate_test;
a | b
---------------------------------------------------------------------
1 | 1
2 | 2
3 | 3
4 | 4
5 | 5
6 | 6
7 | 7
8 | 8
9 | 9
10 | 10
(10 rows)
SELECT * from cstore_truncate_test_second;
a | b
---------------------------------------------------------------------
20 | 20
21 | 21
22 | 22
23 | 23
24 | 24
25 | 25
26 | 26
27 | 27
28 | 28
29 | 29
30 | 30
(11 rows)
SELECT * from cstore_truncate_test_regular;
a | b
---------------------------------------------------------------------
10 | 10
11 | 11
12 | 12
13 | 13
14 | 14
15 | 15
16 | 16
17 | 17
18 | 18
19 | 19
20 | 20
(11 rows)
-- make sure multi truncate works
-- notice that the same table might be repeated
TRUNCATE TABLE cstore_truncate_test,
cstore_truncate_test_regular,
cstore_truncate_test_second,
cstore_truncate_test;
SELECT * from cstore_truncate_test;
a | b
---------------------------------------------------------------------
(0 rows)
SELECT * from cstore_truncate_test_second;
a | b
---------------------------------------------------------------------
(0 rows)
SELECT * from cstore_truncate_test_regular;
a | b
---------------------------------------------------------------------
(0 rows)
-- test if truncate on empty table works
TRUNCATE TABLE cstore_truncate_test;
SELECT * from cstore_truncate_test;
a | b
---------------------------------------------------------------------
(0 rows)
-- test if a cached truncate from a pl/pgsql function works
CREATE FUNCTION cstore_truncate_test_regular_func() RETURNS void AS $$
BEGIN
INSERT INTO cstore_truncate_test_regular select a, a from generate_series(1, 10) a;
TRUNCATE TABLE cstore_truncate_test_regular;
END;$$
LANGUAGE plpgsql;
SELECT cstore_truncate_test_regular_func();
cstore_truncate_test_regular_func
---------------------------------------------------------------------
(1 row)
-- the cached plans are used stating from the second call
SELECT cstore_truncate_test_regular_func();
cstore_truncate_test_regular_func
---------------------------------------------------------------------
(1 row)
DROP FUNCTION cstore_truncate_test_regular_func();
DROP FOREIGN TABLE cstore_truncate_test, cstore_truncate_test_second;
DROP TABLE cstore_truncate_test_regular;
DROP FOREIGN TABLE cstore_truncate_test_compressed;
-- test truncate with schema
CREATE SCHEMA truncate_schema;
CREATE FOREIGN TABLE truncate_schema.truncate_tbl (id int) SERVER cstore_server OPTIONS(compression 'pglz');
INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100);
SELECT COUNT(*) FROM truncate_schema.truncate_tbl;
count
---------------------------------------------------------------------
100
(1 row)
TRUNCATE TABLE truncate_schema.truncate_tbl;
SELECT COUNT(*) FROM truncate_schema.truncate_tbl;
count
---------------------------------------------------------------------
0
(1 row)
INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100);
-- create a user that can not truncate
CREATE USER truncate_user;
GRANT USAGE ON SCHEMA truncate_schema TO truncate_user;
GRANT SELECT ON TABLE truncate_schema.truncate_tbl TO truncate_user;
REVOKE TRUNCATE ON TABLE truncate_schema.truncate_tbl FROM truncate_user;
SELECT current_user \gset
\c - truncate_user
-- verify truncate command fails and check number of rows
SELECT count(*) FROM truncate_schema.truncate_tbl;
count
---------------------------------------------------------------------
100
(1 row)
TRUNCATE TABLE truncate_schema.truncate_tbl;
ERROR: permission denied for relation truncate_tbl
SELECT count(*) FROM truncate_schema.truncate_tbl;
count
---------------------------------------------------------------------
100
(1 row)
-- switch to super user, grant truncate to truncate_user
\c - :current_user
GRANT TRUNCATE ON TABLE truncate_schema.truncate_tbl TO truncate_user;
-- verify truncate_user can truncate now
\c - truncate_user
SELECT count(*) FROM truncate_schema.truncate_tbl;
count
---------------------------------------------------------------------
100
(1 row)
TRUNCATE TABLE truncate_schema.truncate_tbl;
SELECT count(*) FROM truncate_schema.truncate_tbl;
count
---------------------------------------------------------------------
0
(1 row)
\c - :current_user
-- cleanup
DROP SCHEMA truncate_schema CASCADE;
NOTICE: drop cascades to foreign table truncate_schema.truncate_tbl
DROP USER truncate_user;
-- verify files are removed
SELECT count(*) FROM (
SELECT pg_ls_dir('cstore_fdw/' || databaseoid ) FROM (
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database()
) AS q1) AS q2;
count
---------------------------------------------------------------------
0
(1 row)


@@ -1,5 +1,5 @@
 --
--- Test the ANALYZE command for cstore_fdw tables.
+-- Test the ANALYZE command for columnar tables.
 --
 -- ANALYZE uncompressed table
 ANALYZE contestant;


@@ -1,5 +1,5 @@
 --
--- Testing insert on cstore_fdw tables.
+-- Testing insert on columnar tables.
 --
 CREATE TABLE test_insert_command (a int) USING columnar;
 -- test single row inserts fail
@@ -63,10 +63,10 @@ GROUP BY a ORDER BY a;
 CREATE TABLE test_long_text_hash AS
 SELECT int_val, md5(text_val) AS hash
 FROM test_long_text;
-CREATE TABLE test_cstore_long_text(int_val int, text_val text)
+CREATE TABLE test_columnar_long_text(int_val int, text_val text)
 USING columnar;
--- store long text in cstore table
-INSERT INTO test_cstore_long_text SELECT * FROM test_long_text;
+-- store long text in columnar table
+INSERT INTO test_columnar_long_text SELECT * FROM test_long_text;
 SELECT * FROM chunk_group_consistency;
 consistent
 ---------------------------------------------------------------------
@@ -75,10 +75,10 @@ SELECT * FROM chunk_group_consistency;
 -- drop source table to remove original text from toast
 DROP TABLE test_long_text;
--- check if text data is still available in cstore table
+-- check if text data is still available in columnar table
 -- by comparing previously stored hash.
 SELECT a.int_val
-FROM test_long_text_hash a, test_cstore_long_text c
+FROM test_long_text_hash a, test_columnar_long_text c
 WHERE a.int_val = c.int_val AND a.hash = md5(c.text_val);
 int_val
 ---------------------------------------------------------------------
@@ -95,7 +95,7 @@ WHERE a.int_val = c.int_val AND a.hash = md5(c.text_val);
 (10 rows)
 DROP TABLE test_long_text_hash;
-DROP TABLE test_cstore_long_text;
+DROP TABLE test_columnar_long_text;
 CREATE TABLE test_logical_replication(i int) USING columnar;
 -- should succeed
 INSERT INTO test_logical_replication VALUES (1);


@@ -1,5 +1,5 @@
-CREATE SCHEMA am_cstore_join;
-SET search_path TO am_cstore_join;
+CREATE SCHEMA am_columnar_join;
+SET search_path TO am_columnar_join;
 CREATE TABLE users (id int, name text) USING columnar;
 INSERT INTO users SELECT a, 'name' || a FROM generate_series(0,30-1) AS a;
 CREATE TABLE things (id int, user_id int, name text) USING columnar;
@@ -34,4 +34,4 @@ WHERE things.id > 299990;
 (6 rows)
 SET client_min_messages TO warning;
-DROP SCHEMA am_cstore_join CASCADE;
+DROP SCHEMA am_columnar_join CASCADE;


@@ -155,9 +155,6 @@ SELECT * FROM test_tr ORDER BY i;
 (1 row)
 drop table test_tr;
---
--- https://github.com/citusdata/cstore2/issues/32
---
 create table events(
 user_id bigint,
 event_id bigint,


@@ -1,5 +1,5 @@
 --
--- Test copying data from cstore_fdw tables.
+-- Test copying data from columnar tables.
 --
 CREATE TABLE test_contestant(handle TEXT, birthdate DATE, rating INT,
 percentile FLOAT, country CHAR(3), achievements TEXT[])


@@ -1,5 +1,5 @@
 --
--- Test loading and reading different data types to/from cstore_fdw foreign tables.
+-- Test loading and reading different data types to/from columnar foreign tables.
 --


@@ -1,5 +1,5 @@
 --
--- Test copying data from cstore_fdw tables.
+-- Test copying data from columnar tables.
 --
 CREATE TABLE test_contestant(handle TEXT, birthdate DATE, rating INT,
 percentile FLOAT, country CHAR(3), achievements TEXT[])


@@ -1,5 +1,5 @@
 --
--- Test loading and reading different data types to/from cstore_fdw foreign tables.
+-- Test loading and reading different data types to/from columnar foreign tables.
 --
 -- Settings to make the result deterministic
 SET datestyle = "ISO, YMD";


@@ -1,5 +1,5 @@
 --
--- Test the ANALYZE command for cstore_fdw tables.
+-- Test the ANALYZE command for columnar tables.
 --
 -- ANALYZE uncompressed table


@@ -1,5 +1,5 @@
 --
--- Testing insert on cstore_fdw tables.
+-- Testing insert on columnar tables.
 --
 CREATE TABLE test_insert_command (a int) USING columnar;
@@ -39,25 +39,25 @@ CREATE TABLE test_long_text_hash AS
 SELECT int_val, md5(text_val) AS hash
 FROM test_long_text;
-CREATE TABLE test_cstore_long_text(int_val int, text_val text)
+CREATE TABLE test_columnar_long_text(int_val int, text_val text)
 USING columnar;
--- store long text in cstore table
-INSERT INTO test_cstore_long_text SELECT * FROM test_long_text;
+-- store long text in columnar table
+INSERT INTO test_columnar_long_text SELECT * FROM test_long_text;
 SELECT * FROM chunk_group_consistency;
 -- drop source table to remove original text from toast
 DROP TABLE test_long_text;
--- check if text data is still available in cstore table
+-- check if text data is still available in columnar table
 -- by comparing previously stored hash.
 SELECT a.int_val
-FROM test_long_text_hash a, test_cstore_long_text c
+FROM test_long_text_hash a, test_columnar_long_text c
 WHERE a.int_val = c.int_val AND a.hash = md5(c.text_val);
 DROP TABLE test_long_text_hash;
-DROP TABLE test_cstore_long_text;
+DROP TABLE test_columnar_long_text;
 CREATE TABLE test_logical_replication(i int) USING columnar;
 -- should succeed


@@ -1,5 +1,5 @@
-CREATE SCHEMA am_cstore_join;
-SET search_path TO am_cstore_join;
+CREATE SCHEMA am_columnar_join;
+SET search_path TO am_columnar_join;
 CREATE TABLE users (id int, name text) USING columnar;
 INSERT INTO users SELECT a, 'name' || a FROM generate_series(0,30-1) AS a;
@@ -25,4 +25,4 @@ JOIN things ON (users.id = things.user_id)
 WHERE things.id > 299990;
 SET client_min_messages TO warning;
-DROP SCHEMA am_cstore_join CASCADE;
+DROP SCHEMA am_columnar_join CASCADE;


@@ -125,9 +125,6 @@ SELECT * FROM test_tr ORDER BY i;
 drop table test_tr;
---
--- https://github.com/citusdata/cstore2/issues/32
---
 create table events(
 user_id bigint,
 event_id bigint,