Merge remote-tracking branch 'origin/master' into feature/drop_shards_on_drop_table

pull/326/head
Marco Slot 2016-02-17 22:52:58 +01:00
commit 75a141a7c6
197 changed files with 2316 additions and 2150 deletions

.gitattributes

@ -20,3 +20,23 @@ src/test/regress/output/*.source -whitespace
# These files are maintained or generated elsewhere. We take them as is.
configure -whitespace
# all C files (implementation and header) use our style...
*.[ch] citus-style
# except these exceptions...
src/backend/distributed/utils/citus_outfuncs.c -citus-style
src/backend/distributed/utils/citus_read.c -citus-style
src/backend/distributed/utils/citus_readfuncs_94.c -citus-style
src/backend/distributed/utils/citus_readfuncs_95.c -citus-style
src/backend/distributed/utils/ruleutils_94.c -citus-style
src/backend/distributed/utils/ruleutils_95.c -citus-style
src/include/distributed/citus_nodes.h -citus-style
src/include/dumputils.h -citus-style
# all csql files use PostgreSQL style...
src/bin/csql/*.[ch] -citus-style
# except these exceptions
src/bin/csql/copy_options.c citus-style
src/bin/csql/stage.[ch] citus-style

Makefile

@ -1,11 +1,11 @@
# CitusDB toplevel Makefile
# Citus toplevel Makefile
citusdb_subdir = .
citusdb_top_builddir = .
citus_subdir = .
citus_top_builddir = .
# Hint that configure should be run first
ifeq (,$(wildcard Makefile.global))
$(error ./configure needs to be run before compiling CitusDB)
$(error ./configure needs to be run before compiling Citus)
endif
include Makefile.global
@ -20,9 +20,9 @@ install-extension:
install-headers:
$(MKDIR_P) '$(includedir_server)/distributed/'
# generated headers are located in the build directory
$(INSTALL_DATA) src/include/citusdb_config.h '$(includedir_server)/'
$(INSTALL_DATA) src/include/citus_config.h '$(includedir_server)/'
# the rest in the source tree
$(INSTALL_DATA) $(citusdb_abs_srcdir)/src/include/distributed/*.h '$(includedir_server)/distributed/'
$(INSTALL_DATA) $(citus_abs_srcdir)/src/include/distributed/*.h '$(includedir_server)/distributed/'
clean-extension:
$(MAKE) -C src/backend/distributed/ clean
.PHONY: extension install-extension clean-extension
@ -42,6 +42,13 @@ clean-csql:
install: install-csql
clean: clean-csql
# apply or check style
reindent:
cd ${citus_abs_top_srcdir} && citus_indent --quiet
check-style:
cd ${citus_abs_top_srcdir} && citus_indent --quiet --check
.PHONY: reindent check-style
# depend on install for now
check: all install
$(MAKE) -C src/test/regress check-full

Makefile.global.in

@ -9,40 +9,40 @@
# makefiles, particularly central handling of compilation flags and
# rules.
citusdb_abs_srcdir:=@abs_top_srcdir@/${citusdb_subdir}
citusdb_abs_top_srcdir:=@abs_top_srcdir@
citus_abs_srcdir:=@abs_top_srcdir@/${citus_subdir}
citus_abs_top_srcdir:=@abs_top_srcdir@
PG_CONFIG:=@PG_CONFIG@
PGXS:=$(shell $(PG_CONFIG) --pgxs)
# Support for VPATH builds (i.e. builds from outside the source tree)
vpath_build=@vpath_build@
ifeq ($(vpath_build),yes)
VPATH:=$(citusdb_abs_srcdir)
VPATH:=$(citus_abs_srcdir)
USE_VPATH:=$(VPATH)
endif
# CitusDB is built using PostgreSQL's pgxs
# Citus is built using PostgreSQL's pgxs
USE_PGXS=1
include $(PGXS)
# Remake Makefile.global from Makefile.global.in if the latter
# changed. In order to trigger this rule, the including file must
# write `include $(citusdb_top_builddir)/Makefile.global', not some
# write `include $(citus_top_builddir)/Makefile.global', not some
# shortcut thereof. This makes it less likely to accidentally run
# with some outdated Makefile.global.
# Make internally restarts whenever included Makefiles are
# regenerated.
$(citusdb_top_builddir)/Makefile.global: $(citusdb_top_builddir)/Makefile.global.in @top_srcdir@/configure $(citusdb_top_builddir)/config.status
$(citus_top_builddir)/Makefile.global: $(citus_top_builddir)/Makefile.global.in @top_srcdir@/configure $(citus_top_builddir)/config.status
cd @abs_top_builddir@ && ./config.status Makefile.global
# Ensure configuration is generated by the most recent configure,
# useful for longer existing build directories.
$(citusdb_top_builddir)/config.status: @top_srcdir@/configure
$(citus_top_builddir)/config.status: @top_srcdir@/configure
cd @abs_top_builddir@ && ./config.status --recheck
# Regenerate configure if configure.in changed
@top_srcdir@/configure: $(citusdb_abs_srcdir)/configure.in
cd ${citusdb_abs_srcdir} && ./autogen.sh
@top_srcdir@/configure: $(citus_abs_srcdir)/configure.in
cd ${citus_abs_srcdir} && ./autogen.sh
# If specified via configure, replace the default compiler. Normally
# we'll build with the one postgres was built with. But it's useful to
@ -54,8 +54,8 @@ endif
# Add options passed to configure or computed therein, to CFLAGS/CPPFLAGS/...
override CFLAGS += @CFLAGS@ @CITUS_CFLAGS@
override CPPFLAGS := @CPPFLAGS@ -I '${citusdb_abs_top_srcdir}/src/include' $(CPPFLAGS)
override CPPFLAGS := @CPPFLAGS@ -I '${citus_abs_top_srcdir}/src/include' $(CPPFLAGS)
override LDFLAGS += @LDFLAGS@
# optional file with user defined, additional, rules
-include ${citusdb_abs_srcdir}/src/Makefile.custom
-include ${citus_abs_srcdir}/src/Makefile.custom

autogen.sh

@ -1,7 +1,7 @@
#!/bin/bash
#
# autogen.sh converts configure.in to configure and creates
# citusdb_config.h.in. The resulting files are checked into
# citus_config.h.in. The resulting files are checked into
# the SCM, to avoid everyone needing autoconf installed.
autoreconf -f

configure

@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for CitusDB 5.0.
# Generated by GNU Autoconf 2.69 for Citus 5.0.
#
#
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@ -577,10 +577,10 @@ MFLAGS=
MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='CitusDB'
PACKAGE_TARNAME='citusdb'
PACKAGE_NAME='Citus'
PACKAGE_TARNAME='citus'
PACKAGE_VERSION='5.0'
PACKAGE_STRING='CitusDB 5.0'
PACKAGE_STRING='Citus 5.0'
PACKAGE_BUGREPORT=''
PACKAGE_URL=''
@ -1190,7 +1190,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
\`configure' configures CitusDB 5.0 to adapt to many kinds of systems.
\`configure' configures Citus 5.0 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@ -1238,7 +1238,7 @@ Fine tuning of the installation directories:
--infodir=DIR info documentation [DATAROOTDIR/info]
--localedir=DIR locale-dependent data [DATAROOTDIR/locale]
--mandir=DIR man documentation [DATAROOTDIR/man]
--docdir=DIR documentation root [DATAROOTDIR/doc/citusdb]
--docdir=DIR documentation root [DATAROOTDIR/doc/citus]
--htmldir=DIR html documentation [DOCDIR]
--dvidir=DIR dvi documentation [DOCDIR]
--pdfdir=DIR pdf documentation [DOCDIR]
@ -1251,7 +1251,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
short | recursive ) echo "Configuration of CitusDB 5.0:";;
short | recursive ) echo "Configuration of Citus 5.0:";;
esac
cat <<\_ACEOF
@ -1333,7 +1333,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
CitusDB configure 5.0
Citus configure 5.0
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
@ -1390,7 +1390,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
It was created by CitusDB $as_me 5.0, which was
It was created by Citus $as_me 5.0, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@ -1871,7 +1871,7 @@ if test -z "$version_num"; then
fi
if test "$version_num" != '9.4' -a "$version_num" != '9.5'; then
as_fn_error $? "CitusDB is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5
as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5
$as_echo "$as_me: building against PostgreSQL $version_num" >&6;}
@ -2893,7 +2893,7 @@ CITUS_CFLAGS="$CITUS_CFLAGS"
ac_config_files="$ac_config_files Makefile.global"
ac_config_headers="$ac_config_headers src/include/citusdb_config.h"
ac_config_headers="$ac_config_headers src/include/citus_config.h"
cat >confcache <<\_ACEOF
@ -3402,7 +3402,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by CitusDB $as_me 5.0, which was
This file was extended by Citus $as_me 5.0, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@ -3464,7 +3464,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
CitusDB config.status 5.0
Citus config.status 5.0
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"
@ -3586,7 +3586,7 @@ for ac_config_target in $ac_config_targets
do
case $ac_config_target in
"Makefile.global") CONFIG_FILES="$CONFIG_FILES Makefile.global" ;;
"src/include/citusdb_config.h") CONFIG_HEADERS="$CONFIG_HEADERS src/include/citusdb_config.h" ;;
"src/include/citus_config.h") CONFIG_HEADERS="$CONFIG_HEADERS src/include/citus_config.h" ;;
*) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
esac

configure.in

@ -1,11 +1,11 @@
# CitusDB autoconf input script.
# Citus autoconf input script.
#
# Converted into an actual configure script by autogen.sh. This
# conversion only has to be done when configure.in changes. To avoid
# everyone needing autoconf installed, the resulting files are checked
# into the SCM.
AC_INIT([CitusDB], [5.0], [], [citusdb], [])
AC_INIT([Citus], [5.0], [], [citus], [])
AC_COPYRIGHT([Copyright (c) 2012-2015, Citus Data, Inc.])
AC_PROG_SED
@ -32,7 +32,7 @@ if test -z "$version_num"; then
fi
if test "$version_num" != '9.4' -a "$version_num" != '9.5'; then
AC_MSG_ERROR([CitusDB is not compatible with the detected PostgreSQL version ${version_num}.])
AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.])
else
AC_MSG_NOTICE([building against PostgreSQL $version_num])
fi;
@ -96,11 +96,11 @@ CITUSAC_PROG_CC_CFLAGS_OPT([-Wmissing-prototypes])
AC_SUBST(CITUS_CFLAGS, "$CITUS_CFLAGS")
AC_CONFIG_FILES([Makefile.global])
AC_CONFIG_HEADERS([src/include/citusdb_config.h])
AC_CONFIG_HEADERS([src/include/citus_config.h])
AH_TOP([
/*
* citusdb_config.h.in is generated by autoconf/autoheader and
* converted into citusdb_config.h by configure. Include when code needs to
* citus_config.h.in is generated by autoconf/autoheader and
* converted into citus_config.h by configure. Include when code needs to
* depend on determinations made by configure.
*
* Do not manually edit!

prep_buildtree

@ -1,8 +1,8 @@
#! /bin/sh
#
# CitusDB copy of PostgreSQL's config/prep_buildtree
# Citus copy of PostgreSQL's config/prep_buildtree
#
# This script prepares a CitusDB build tree for an out-of-tree/VPATH
# This script prepares a Citus build tree for an out-of-tree/VPATH
# build. It is intended to be run by the configure script.
me=`basename $0`

.gitignore

@ -10,4 +10,4 @@
/tmp_check*
# ignore latest install file
citusdb--5.0.sql
citus--5.0.sql

src/backend/distributed/Makefile

@ -1,13 +1,13 @@
# Makefile for the CitusDB extension
# Makefile for the Citus extension
citusdb_subdir = src/backend/distributed
citusdb_top_builddir = ../../..
citus_subdir = src/backend/distributed
citus_top_builddir = ../../..
MODULE_big = citusdb
EXTENSION = citusdb
MODULE_big = citus
EXTENSION = citus
EXTVERSION = 5.0
DATA_built = $(EXTENSION)--$(EXTVERSION).sql
SCRIPTS = $(wildcard $(citusdb_top_builddir)/src/bin/scripts/*)
SCRIPTS = $(wildcard $(citus_top_builddir)/src/bin/scripts/*)
# directories with source files
SUBDIRS = . commands executor master planner relay test utils worker
@ -15,7 +15,7 @@ SUBDIRS = . commands executor master planner relay test utils worker
# That patsubst rule searches all directories listed in SUBDIRS for .c
# files, and adds the corresponding .o files to OBJS
OBJS += \
$(patsubst $(citusdb_abs_srcdir)/%.c,%.o,$(foreach dir,$(SUBDIRS), $(wildcard $(citusdb_abs_srcdir)/$(dir)/*.c)))
$(patsubst $(citus_abs_srcdir)/%.c,%.o,$(foreach dir,$(SUBDIRS), $(wildcard $(citus_abs_srcdir)/$(dir)/*.c)))
# define build process for latest install file
$(EXTENSION)--$(EXTVERSION).sql: $(EXTENSION).sql
@ -28,6 +28,6 @@ NO_PGXS = 1
SHLIB_LINK = $(libpq)
include $(citusdb_top_builddir)/Makefile.global
include $(citus_top_builddir)/Makefile.global
override CPPFLAGS += -I$(libpq_srcdir)

citus.control

@ -0,0 +1,6 @@
# Citus extension
comment = 'Citus distributed database'
default_version = '5.0'
module_pathname = '$libdir/citus'
relocatable = false
schema = pg_catalog
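The control file above is what CREATE EXTENSION reads; a quick sanity check after installing the renamed extension (a sketch, assuming the shared library is already in place) is:

CREATE EXTENSION citus;

-- should report extversion 5.0, matching default_version above
SELECT extname, extversion FROM pg_extension WHERE extname = 'citus';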

citus.sql

@ -1,24 +1,24 @@
/* citusdb.sql */
/* citus.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION citusdb" to load this file. \quit
\echo Use "CREATE EXTENSION citus" to load this file. \quit
CREATE SCHEMA citusdb;
CREATE SCHEMA citus;
-- Ensure CREATE EXTENSION is not run against an old citusdb data
-- Ensure CREATE EXTENSION is not run against an old citus data
-- directory, we're not compatible (due to the builtin functions/tables)
DO $$
BEGIN
IF EXISTS(SELECT * FROM pg_proc WHERE proname = 'worker_apply_shard_ddl_command') THEN
RAISE 'cannot install citusdb extension in CitusDB 4 data directory';
RAISE 'cannot install citus extension in Citus 4 data directory';
END IF;
END;
$$;
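The DO block above refuses to install into an old CitusDB 4 data directory by probing for a builtin that only existed there; the check it automates is essentially:

-- returns true in a CitusDB 4 data directory, where CREATE EXTENSION citus must fail
SELECT EXISTS (SELECT 1 FROM pg_proc WHERE proname = 'worker_apply_shard_ddl_command');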
/*****************************************************************************
* CitusDB data types
* Citus data types
*****************************************************************************/
CREATE TYPE citusdb.distribution_type AS ENUM (
CREATE TYPE citus.distribution_type AS ENUM (
'hash',
'range',
'append'
@ -26,18 +26,18 @@ CREATE TYPE citusdb.distribution_type AS ENUM (
/*****************************************************************************
* CitusDB tables & corresponding indexes
* Citus tables & corresponding indexes
*****************************************************************************/
CREATE TABLE citusdb.pg_dist_partition(
CREATE TABLE citus.pg_dist_partition(
logicalrelid Oid NOT NULL,
partmethod "char" NOT NULL,
partkey text NOT NULL
);
CREATE UNIQUE INDEX pg_dist_partition_logical_relid_index
ON citusdb.pg_dist_partition using btree(logicalrelid);
ALTER TABLE citusdb.pg_dist_partition SET SCHEMA pg_catalog;
ON citus.pg_dist_partition using btree(logicalrelid);
ALTER TABLE citus.pg_dist_partition SET SCHEMA pg_catalog;
CREATE TABLE citusdb.pg_dist_shard(
CREATE TABLE citus.pg_dist_shard(
logicalrelid oid NOT NULL,
shardid int8 NOT NULL,
shardstorage "char" NOT NULL,
@ -46,12 +46,12 @@ CREATE TABLE citusdb.pg_dist_shard(
shardmaxvalue text
);
CREATE UNIQUE INDEX pg_dist_shard_shardid_index
ON citusdb.pg_dist_shard using btree(shardid);
ON citus.pg_dist_shard using btree(shardid);
CREATE INDEX pg_dist_shard_logical_relid_index
ON citusdb.pg_dist_shard using btree(logicalrelid);
ALTER TABLE citusdb.pg_dist_shard SET SCHEMA pg_catalog;
ON citus.pg_dist_shard using btree(logicalrelid);
ALTER TABLE citus.pg_dist_shard SET SCHEMA pg_catalog;
CREATE TABLE citusdb.pg_dist_shard_placement(
CREATE TABLE citus.pg_dist_shard_placement(
shardid int8 NOT NULL,
shardstate int4 NOT NULL,
shardlength int8 NOT NULL,
@ -59,40 +59,40 @@ CREATE TABLE citusdb.pg_dist_shard_placement(
nodeport int8 NOT NULL
) WITH oids;
CREATE UNIQUE INDEX pg_dist_shard_placement_oid_index
ON citusdb.pg_dist_shard_placement using btree(oid);
ON citus.pg_dist_shard_placement using btree(oid);
CREATE INDEX pg_dist_shard_placement_shardid_index
ON citusdb.pg_dist_shard_placement using btree(shardid);
ON citus.pg_dist_shard_placement using btree(shardid);
CREATE INDEX pg_dist_shard_placement_nodeid_index
ON citusdb.pg_dist_shard_placement using btree(nodename, nodeport);
ALTER TABLE citusdb.pg_dist_shard_placement SET SCHEMA pg_catalog;
ON citus.pg_dist_shard_placement using btree(nodename, nodeport);
ALTER TABLE citus.pg_dist_shard_placement SET SCHEMA pg_catalog;
/*****************************************************************************
* CitusDB sequences
* Citus sequences
*****************************************************************************/
/*
* Internal sequence to generate 64-bit shard ids. These identifiers are then
* used to identify shards in the distributed database.
*/
CREATE SEQUENCE citusdb.pg_dist_shardid_seq
CREATE SEQUENCE citus.pg_dist_shardid_seq
MINVALUE 102008
NO CYCLE;
ALTER SEQUENCE citusdb.pg_dist_shardid_seq SET SCHEMA pg_catalog;
ALTER SEQUENCE citus.pg_dist_shardid_seq SET SCHEMA pg_catalog;
/*
* internal sequence to generate 32-bit jobIds. These identifiers are then
* used to identify jobs in the distributed database; and they wrap at 32-bits
* to allow for slave nodes to independently execute their distributed jobs.
*/
CREATE SEQUENCE citusdb.pg_dist_jobid_seq
CREATE SEQUENCE citus.pg_dist_jobid_seq
MINVALUE 2 /* first jobId reserved for clean up jobs */
MAXVALUE 4294967296;
ALTER SEQUENCE citusdb.pg_dist_jobid_seq SET SCHEMA pg_catalog;
ALTER SEQUENCE citus.pg_dist_jobid_seq SET SCHEMA pg_catalog;
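After the SET SCHEMA calls, both sequences live in pg_catalog; shard ids start at 102008 and jobId 1 stays reserved for cleanup jobs. For illustration only, an id can be drawn by hand:

-- next 64-bit shard id; the first value returned is 102008 (MINVALUE)
SELECT nextval('pg_catalog.pg_dist_shardid_seq');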
/*****************************************************************************
* CitusDB functions
* Citus functions
*****************************************************************************/
/* For backward compatibility and ease of use create functions et al. in pg_catalog */
@ -182,13 +182,13 @@ COMMENT ON FUNCTION master_get_round_robin_candidate_nodes(shard_id bigint)
CREATE FUNCTION master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citusdb.distribution_type)
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$master_create_distributed_table$$;
COMMENT ON FUNCTION master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citusdb.distribution_type)
distribution_method citus.distribution_type)
IS 'define the table distribution functions';
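Apart from the renamed enum type, the UDF is invoked as before; a typical call (table and column names are illustrative) looks like:

-- distribute github_events by hashing its repo_id column
SELECT master_create_distributed_table('github_events', 'repo_id', 'hash');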
-- define shard creation function for hash-partitioned tables
@ -323,7 +323,7 @@ COMMENT ON FUNCTION worker_append_table_to_shard(text, text, text, integer)
/* trigger functions */
CREATE OR REPLACE FUNCTION citusdb_drop_trigger()
CREATE OR REPLACE FUNCTION citus_drop_trigger()
RETURNS event_trigger
LANGUAGE plpgsql
SET search_path = pg_catalog
@ -349,7 +349,7 @@ BEGIN
END LOOP;
END;
$cdbdt$;
COMMENT ON FUNCTION citusdb_drop_trigger()
COMMENT ON FUNCTION citus_drop_trigger()
IS 'perform checks and actions at the end of DROP actions';
CREATE FUNCTION master_dist_partition_cache_invalidate()
@ -369,21 +369,21 @@ COMMENT ON FUNCTION master_dist_shard_cache_invalidate()
/* internal functions, not user accessible */
CREATE FUNCTION citusdb_extradata_container(INTERNAL)
CREATE FUNCTION citus_extradata_container(INTERNAL)
RETURNS void
LANGUAGE C
AS 'MODULE_PATHNAME', $$citusdb_extradata_container$$;
COMMENT ON FUNCTION pg_catalog.citusdb_extradata_container(INTERNAL)
AS 'MODULE_PATHNAME', $$citus_extradata_container$$;
COMMENT ON FUNCTION pg_catalog.citus_extradata_container(INTERNAL)
IS 'placeholder function to store additional data in postgres node trees';
/*****************************************************************************
* CitusDB triggers
* Citus triggers
*****************************************************************************/
CREATE EVENT TRIGGER citusdb_cascade_to_partition
CREATE EVENT TRIGGER citus_cascade_to_partition
ON SQL_DROP
EXECUTE PROCEDURE citusdb_drop_trigger();
EXECUTE PROCEDURE citus_drop_trigger();
CREATE TRIGGER dist_partition_cache_invalidate
AFTER INSERT OR UPDATE OR DELETE
@ -397,7 +397,7 @@ CREATE TRIGGER dist_shard_cache_invalidate
/*****************************************************************************
* CitusDB aggregates
* Citus aggregates
*****************************************************************************/
CREATE AGGREGATE array_cat_agg(anyarray) (SFUNC = array_cat, STYPE = anyarray);
COMMENT ON AGGREGATE array_cat_agg(anyarray)
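array_cat_agg simply folds array_cat over its input, concatenating arrays in aggregation order; a minimal illustration:

-- returns {1,2,3,4}
SELECT array_cat_agg(a) FROM (VALUES (ARRAY[1,2]), (ARRAY[3,4])) t(a);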

citusdb.control

@ -1,6 +0,0 @@
# CitusDB extension
comment = 'CitusDB distributed database'
default_version = '5.0'
module_pathname = '$libdir/citusdb'
relocatable = false
schema = pg_catalog


@ -165,7 +165,7 @@ master_create_distributed_table(PG_FUNCTION_ARGS)
*
* Similarly, do not allow UNIQUE constraint and/or PRIMARY KEY if it does not
* include partition column. This check is important for two reasons. First,
* currently CitusDB does not enforce uniqueness constraint on multiple shards.
* currently Citus does not enforce uniqueness constraint on multiple shards.
* Second, INSERT INTO .. ON CONFLICT (i.e., UPSERT) queries can be executed with no
* further check for constraints.
*/
@ -191,7 +191,7 @@ master_create_distributed_table(PG_FUNCTION_ARGS)
}
/*
* CitusDB cannot enforce uniqueness constraints with overlapping shards. Thus,
* Citus cannot enforce uniqueness constraints with overlapping shards. Thus,
* emit a warning for unique indexes on append partitioned tables.
*/
if (distributionMethod == DISTRIBUTE_BY_APPEND)
@ -262,7 +262,7 @@ master_create_distributed_table(PG_FUNCTION_ARGS)
* necessary for a distributed relation in addition to the preexisting ones
* for a normal relation.
*
* We create one dependency from the (now distributed) relation to the citusdb
* We create one dependency from the (now distributed) relation to the citus
* extension to prevent the extension from being dropped while distributed
* tables exist. Furthermore a dependency from pg_dist_partition's
* distribution clause to the underlying columns is created, but it's marked
@ -281,7 +281,7 @@ RecordDistributedRelationDependencies(Oid distributedRelationId, Node *distribut
relationAddr.objectSubId = 0;
citusExtensionAddr.classId = ExtensionRelationId;
citusExtensionAddr.objectId = get_extension_oid("citusdb", false);
citusExtensionAddr.objectId = get_extension_oid("citus", false);
citusExtensionAddr.objectSubId = 0;
/* dependency from table entry to extension */
@ -294,10 +294,10 @@ RecordDistributedRelationDependencies(Oid distributedRelationId, Node *distribut
/*
* LookupDistributionMethod maps the oids of citusdb.distribution_type enum
* LookupDistributionMethod maps the oids of citus.distribution_type enum
* values to pg_dist_partition.partmethod values.
*
* The passed in oid has to belong to a value of citusdb.distribution_type.
* The passed in oid has to belong to a value of citus.distribution_type.
*/
static char
LookupDistributionMethod(Oid distributionMethodOid)


@ -271,17 +271,27 @@ ReceiveCopyData(StringInfo copyData)
switch (messageType)
{
case 'd': /* CopyData */
{
copyDone = false;
break;
}
case 'c': /* CopyDone */
{
copyDone = true;
break;
}
case 'f': /* CopyFail */
{
ereport(ERROR, (errcode(ERRCODE_QUERY_CANCELED),
errmsg("COPY data failed: %s", pq_getmsgstring(copyData))));
break;
}
case 'H': /* Flush */
case 'S': /* Sync */
{
/*
* Ignore Flush/Sync for the convenience of client libraries (such
* as libpq) that may send those without noticing that the command
@ -289,12 +299,16 @@ ReceiveCopyData(StringInfo copyData)
*/
copyDone = false;
break;
}
default:
{
ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("unexpected message type 0x%02X during COPY data",
messageType)));
break;
}
}
return copyDone;
}


@ -706,7 +706,7 @@ ClientConnectionReady(PGconn *connection, PostgresPollingStatusType pollingStatu
fd_set readFileDescriptorSet;
fd_set writeFileDescriptorSet;
fd_set exceptionFileDescriptorSet;
struct timeval immediateTimeout = {0, 0};
struct timeval immediateTimeout = { 0, 0 };
int connectionFileDescriptor = PQsocket(connection);
FD_ZERO(&readFileDescriptorSet);


@ -157,7 +157,6 @@ multi_ExecutorStart(QueryDesc *queryDesc, int eflags)
queryDesc->plannedstmt = masterSelectPlan;
eflags |= EXEC_FLAG_CITUS_MASTER_SELECT;
}
}
/* if the execution is not done for router executor, drop into standard executor */
@ -253,7 +252,7 @@ multi_ExecutorEnd(QueryDesc *queryDesc)
RangeTblEntry *rangeTableEntry = linitial(planStatement->rtable);
Oid masterTableRelid = rangeTableEntry->relid;
ObjectAddress masterTableObject = {InvalidOid, InvalidOid, 0};
ObjectAddress masterTableObject = { InvalidOid, InvalidOid, 0 };
masterTableObject.classId = RelationRelationId;
masterTableObject.objectId = masterTableRelid;


@ -89,7 +89,7 @@ MultiRealTimeExecute(Job *job)
}
/* loop around until all tasks complete, one task fails, or user cancels */
while ( !(allTasksCompleted || taskFailed || QueryCancelPending) )
while (!(allTasksCompleted || taskFailed || QueryCancelPending))
{
uint32 taskCount = list_length(taskList);
uint32 completedTaskCount = 0;
@ -287,8 +287,9 @@ ManageTaskExecution(Task *task, TaskExecution *taskExecution)
uint32 currentCount = taskExecution->connectPollCount;
if (currentCount >= maxCount)
{
ereport(WARNING, (errmsg("could not establish asynchronous connection "
"after %u ms", REMOTE_NODE_CONNECT_TIMEOUT)));
ereport(WARNING, (errmsg("could not establish asynchronous "
"connection after %u ms",
REMOTE_NODE_CONNECT_TIMEOUT)));
taskStatusArray[currentIndex] = EXEC_TASK_FAILED;
}
@ -342,7 +343,8 @@ ManageTaskExecution(Task *task, TaskExecution *taskExecution)
{
List *dataFetchTaskList = task->dependedTaskList;
int32 dataFetchTaskIndex = taskExecution->dataFetchTaskIndex;
Task *dataFetchTask = (Task *) list_nth(dataFetchTaskList, dataFetchTaskIndex);
Task *dataFetchTask = (Task *) list_nth(dataFetchTaskList,
dataFetchTaskIndex);
char *dataFetchQuery = dataFetchTask->queryString;
int32 connectionId = connectionIdArray[currentIndex];
@ -411,11 +413,13 @@ ManageTaskExecution(Task *task, TaskExecution *taskExecution)
StringInfo computeTaskQuery = makeStringInfo();
if (BinaryMasterCopyFormat)
{
appendStringInfo(computeTaskQuery, COPY_QUERY_TO_STDOUT_BINARY, queryString);
appendStringInfo(computeTaskQuery, COPY_QUERY_TO_STDOUT_BINARY,
queryString);
}
else
{
appendStringInfo(computeTaskQuery, COPY_QUERY_TO_STDOUT_TEXT, queryString);
appendStringInfo(computeTaskQuery, COPY_QUERY_TO_STDOUT_TEXT,
queryString);
}
querySent = MultiClientSendQuery(connectionId, computeTaskQuery->data);
@ -475,7 +479,8 @@ ManageTaskExecution(Task *task, TaskExecution *taskExecution)
else
{
ereport(WARNING, (errcode_for_file_access(),
errmsg("could not open file \"%s\": %m", filename)));
errmsg("could not open file \"%s\": %m",
filename)));
taskStatusArray[currentIndex] = EXEC_TASK_FAILED;
}


@ -80,6 +80,7 @@ RouterExecutorStart(QueryDesc *queryDesc, int eflags, Task *task)
queryDesc->estate = executorState;
#if (PG_VERSION_NUM < 90500)
/* make sure that upsertQuery is false for versions that UPSERT is not available */
Assert(task->upsertQuery == false);
#endif
@ -153,7 +154,7 @@ CommutativityRuleToLockMode(CmdType commandType, bool upsertQuery)
static void
AcquireExecutorShardLock(Task *task, LOCKMODE lockMode)
{
int64 shardId = task->shardId;
int64 shardId = task->anchorShardId;
LockShardResource(shardId, lockMode);
}
@ -219,9 +220,9 @@ RouterExecutorRun(QueryDesc *queryDesc, ScanDirection direction, long count, Tas
}
MemoryContextSwitchTo(oldcontext);
}
/*
* ExecuteDistributedModify is the main entry point for modifying distributed
* tables. A distributed modification is successful if any placement of the
@ -250,7 +251,7 @@ ExecuteDistributedModify(Task *task)
Assert(taskPlacement->shardState == FILE_FINALIZED);
connection = GetConnection(nodeName, nodePort);
connection = GetOrEstablishConnection(nodeName, nodePort);
if (connection == NULL)
{
failedPlacementList = lappend(failedPlacementList, taskPlacement);
@ -383,7 +384,7 @@ ExecuteTaskAndStoreResults(Task *task, TupleDesc tupleDescriptor,
bool queryOK = false;
bool storedOK = false;
PGconn *connection = GetConnection(nodeName, nodePort);
PGconn *connection = GetOrEstablishConnection(nodeName, nodePort);
if (connection == NULL)
{
continue;
@ -532,9 +533,10 @@ StoreQueryResult(PGconn *connection, TupleDesc tupleDescriptor,
return true;
}
/*
* RouterExecutorFinish cleans up after a distributed execution.
*/
* RouterExecutorFinish cleans up after a distributed execution.
*/
void
RouterExecutorFinish(QueryDesc *queryDesc)
{


@ -73,7 +73,7 @@ JobExecutorType(MultiPlan *multiPlan)
ereport(WARNING, (errmsg("this query uses more connections than the "
"configured max_connections limit"),
errhint("Consider increasing max_connections or setting "
"citusdb.task_executor_type to "
"citus.task_executor_type to "
"\"task-tracker\".")));
}
@ -88,7 +88,7 @@ JobExecutorType(MultiPlan *multiPlan)
ereport(WARNING, (errmsg("this query uses more file descriptors than the "
"configured max_files_per_process limit"),
errhint("Consider increasing max_files_per_process or "
"setting citusdb.task_executor_type to "
"setting citus.task_executor_type to "
"\"task-tracker\".")));
}
@ -96,7 +96,7 @@ JobExecutorType(MultiPlan *multiPlan)
if (dependedJobCount > 0)
{
ereport(ERROR, (errmsg("cannot use real time executor with repartition jobs"),
errhint("Set citusdb.task_executor_type to "
errhint("Set citus.task_executor_type to "
"\"task-tracker\".")));
}
}
@ -119,7 +119,7 @@ JobExecutorType(MultiPlan *multiPlan)
if (dependedJobCount > 0)
{
ereport(ERROR, (errmsg("cannot use router executor with repartition jobs"),
errhint("Set citusdb.task_executor_type to "
errhint("Set citus.task_executor_type to "
"\"task-tracker\".")));
}
@ -128,7 +128,7 @@ JobExecutorType(MultiPlan *multiPlan)
{
ereport(ERROR, (errmsg("cannot use router executor with queries that "
"hit multiple shards"),
errhint("Set citusdb.task_executor_type to \"real-time\" or "
errhint("Set citus.task_executor_type to \"real-time\" or "
"\"task-tracker\".")));
}
@ -138,7 +138,7 @@ JobExecutorType(MultiPlan *multiPlan)
if (list_length(workerDependentTaskList) > 0)
{
ereport(ERROR, (errmsg("cannot use router executor with JOINs"),
errhint("Set citusdb.task_executor_type to \"real-time\" or "
errhint("Set citus.task_executor_type to \"real-time\" or "
"\"task-tracker\".")));
}
@ -146,7 +146,7 @@ JobExecutorType(MultiPlan *multiPlan)
if (masterQuery != NULL && list_length(masterQuery->sortClause) > 0)
{
ereport(ERROR, (errmsg("cannot use router executor with ORDER BY clauses"),
errhint("Set citusdb.task_executor_type to \"real-time\" or "
errhint("Set citus.task_executor_type to \"real-time\" or "
"\"task-tracker\".")));
}
@ -158,7 +158,7 @@ JobExecutorType(MultiPlan *multiPlan)
if (masterQueryHasAggregates)
{
ereport(ERROR, (errmsg("cannot use router executor with aggregates"),
errhint("Set citusdb.task_executor_type to \"real-time\" or "
errhint("Set citus.task_executor_type to \"real-time\" or "
"\"task-tracker\".")));
}
}
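Every hint in this file now names the renamed GUC; following one of them is a plain SET, e.g.:

SET citus.task_executor_type TO 'task-tracker';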
@ -173,7 +173,7 @@ JobExecutorType(MultiPlan *multiPlan)
* Every task requires 2 FDs, one file and one connection. Some FDs are taken by
* the VFD pool and there is currently no way to reclaim these before opening a
* connection. We therefore assume some FDs to be reserved for VFDs, based on
* observing a typical size of the pool on a CitusDB master.
* observing a typical size of the pool on a Citus master.
*/
int
MaxMasterConnectionCount(void)


@ -41,7 +41,6 @@ typedef struct TaskMapKey
TaskType taskType;
uint64 jobId;
uint32 taskId;
} TaskMapKey;
@ -53,7 +52,6 @@ typedef struct TaskMapEntry
{
TaskMapKey key;
Task *task;
} TaskMapEntry;
@ -83,7 +81,8 @@ static TaskTracker * TrackerHashLookup(HTAB *trackerHash, const char *nodeName,
static TaskExecStatus ManageTaskExecution(TaskTracker *taskTracker,
TaskTracker *sourceTaskTracker,
Task *task, TaskExecution *taskExecution);
static TransmitExecStatus ManageTransmitExecution(TaskTracker *transmitTracker, Task *task,
static TransmitExecStatus ManageTransmitExecution(TaskTracker *transmitTracker,
Task *task,
TaskExecution *taskExecution);
static bool TaskExecutionsCompleted(List *taskList);
static StringInfo MapFetchTaskQueryString(Task *mapFetchTask, Task *mapTask);
@ -194,8 +193,8 @@ MultiTaskTrackerExecute(Job *job)
TrackerHashConnect(transmitTrackerHash);
/* loop around until all tasks complete, one task fails, or user cancels */
while ( !(allTasksCompleted || taskFailed || taskTransmitFailed ||
clusterFailed || QueryCancelPending) )
while (!(allTasksCompleted || taskFailed || taskTransmitFailed ||
clusterFailed || QueryCancelPending))
{
TaskTracker *taskTracker = NULL;
TaskTracker *transmitTracker = NULL;
@ -826,7 +825,8 @@ TrackerConnectPoll(TaskTracker *taskTracker)
uint32 nodePort = taskTracker->workerPort;
char *nodeDatabase = get_database_name(MyDatabaseId);
int32 connectionId = MultiClientConnectStart(nodeName, nodePort, nodeDatabase);
int32 connectionId = MultiClientConnectStart(nodeName, nodePort,
nodeDatabase);
if (connectionId != INVALID_CONNECTION_ID)
{
taskTracker->connectionId = connectionId;
@ -869,8 +869,9 @@ TrackerConnectPoll(TaskTracker *taskTracker)
uint32 currentCount = taskTracker->connectPollCount;
if (currentCount >= maxCount)
{
ereport(WARNING, (errmsg("could not establish asynchronous connection "
"after %u ms", REMOTE_NODE_CONNECT_TIMEOUT)));
ereport(WARNING, (errmsg("could not establish asynchronous "
"connection after %u ms",
REMOTE_NODE_CONNECT_TIMEOUT)));
taskTracker->trackerStatus = TRACKER_CONNECTION_FAILED;
@ -1212,7 +1213,8 @@ ManageTaskExecution(TaskTracker *taskTracker, TaskTracker *sourceTaskTracker,
default:
{
/* we fatal here to avoid leaking client-side resources */
ereport(FATAL, (errmsg("invalid execution status: %d", currentExecutionStatus)));
ereport(FATAL, (errmsg("invalid execution status: %d",
currentExecutionStatus)));
break;
}
}
@ -1327,7 +1329,8 @@ ManageTransmitExecution(TaskTracker *transmitTracker,
else
{
ereport(WARNING, (errcode_for_file_access(),
errmsg("could not open file \"%s\": %m", filename)));
errmsg("could not open file \"%s\": %m",
filename)));
nextTransmitStatus = EXEC_TRANSMIT_TRACKER_RETRY;
}
@ -1463,7 +1466,8 @@ ManageTransmitExecution(TaskTracker *transmitTracker,
default:
{
/* we fatal here to avoid leaking client-side resources */
ereport(FATAL, (errmsg("invalid transmit status: %d", currentTransmitStatus)));
ereport(FATAL, (errmsg("invalid transmit status: %d",
currentTransmitStatus)));
break;
}
}
@ -2317,7 +2321,7 @@ AssignQueuedTasks(TaskTracker *taskTracker)
{
StringInfo taskAssignmentQuery = taskState->taskAssignmentQuery;
if(taskAssignmentCount > 0)
if (taskAssignmentCount > 0)
{
appendStringInfo(multiAssignQuery, ";");
}
@ -2336,7 +2340,7 @@ AssignQueuedTasks(TaskTracker *taskTracker)
taskState = (TrackerTaskState *) hash_seq_search(&status);
}
if(taskAssignmentCount > 0)
if (taskAssignmentCount > 0)
{
void *queryResult = NULL;
int rowCount = 0;
@ -2833,7 +2837,8 @@ TrackerHashCleanupJob(HTAB *taskTrackerHash, Task *jobCleanupTask)
if (queryStatus == CLIENT_QUERY_DONE)
{
ereport(DEBUG4, (errmsg("completed cleanup query for job " UINT64_FORMAT
" on node \"%s:%u\"", jobId, nodeName, nodePort)));
" on node \"%s:%u\"", jobId, nodeName,
nodePort)));
/* clear connection for future cleanup queries */
taskTracker->connectionBusy = false;

multi_utility.c

@ -1,6 +1,6 @@
/*-------------------------------------------------------------------------
* multi_utility.c
* CitusDB utility hook and related functionality.
* Citus utility hook and related functionality.
*
* Copyright (c) 2012-2015, Citus Data, Inc.
*-------------------------------------------------------------------------
@ -76,7 +76,7 @@ static void RangeVarCallbackForDropIndex(const RangeVar *rel, Oid relOid, Oid ol
/*
* Utility for handling citusdb specific concerns around utility statements.
* Utility for handling citus specific concerns around utility statements.
*
* There's two basic types of concerns here:
* 1) Intercept utility statements that run after distributed query
@ -168,29 +168,29 @@ multi_ProcessUtility(Node *parsetree,
/*
* Inform the user about potential caveats.
*
* To prevent failures in aborted transactions, CitusDBHasBeenLoaded() needs
* To prevent failures in aborted transactions, CitusHasBeenLoaded() needs
* to be the second condition. See RelationIdGetRelation() which is called
* by CitusDBHasBeenLoaded().
* by CitusHasBeenLoaded().
*/
if (IsA(parsetree, CreatedbStmt) && CitusDBHasBeenLoaded())
if (IsA(parsetree, CreatedbStmt) && CitusHasBeenLoaded())
{
ereport(NOTICE, (errmsg("CitusDB partially supports CREATE DATABASE for "
ereport(NOTICE, (errmsg("Citus partially supports CREATE DATABASE for "
"distributed databases"),
errdetail("CitusDB does not propagate CREATE DATABASE "
errdetail("Citus does not propagate CREATE DATABASE "
"command to workers"),
errhint("You can manually create a database and its "
"extensions on workers.")));
}
else if (IsA(parsetree, CreateSchemaStmt) && CitusDBHasBeenLoaded())
else if (IsA(parsetree, CreateSchemaStmt) && CitusHasBeenLoaded())
{
ereport(NOTICE, (errmsg("CitusDB partially supports CREATE SCHEMA "
ereport(NOTICE, (errmsg("Citus partially supports CREATE SCHEMA "
"for distributed databases"),
errdetail("schema usage in joins and in some UDFs "
"provided by CitusDB are not supported yet")));
"provided by Citus are not supported yet")));
}
else if (IsA(parsetree, CreateRoleStmt) && CitusDBHasBeenLoaded())
else if (IsA(parsetree, CreateRoleStmt) && CitusHasBeenLoaded())
{
ereport(NOTICE, (errmsg("CitusDB does not support CREATE ROLE/USER "
ereport(NOTICE, (errmsg("Citus does not support CREATE ROLE/USER "
"for distributed databases"),
errdetail("Multiple roles are currently supported "
"only for local tables")));
@ -204,7 +204,7 @@ multi_ProcessUtility(Node *parsetree,
/*
* WarnIfDropCitusExtension prints a WARNING if dropStatement includes dropping
* citusdb extension.
* citus extension.
*/
static void
WarnIfDropCitusExtension(DropStmt *dropStatement)
@ -218,8 +218,8 @@ WarnIfDropCitusExtension(DropStmt *dropStatement)
List *objectNameList = lfirst(dropStatementObject);
char *objectName = NameListToString(objectNameList);
/* we're only concerned with the citusdb extension */
if (strncmp("citusdb", objectName, NAMEDATALEN) == 0)
/* we're only concerned with the citus extension */
if (strncmp("citus", objectName, NAMEDATALEN) == 0)
{
/*
* Warn the user about the possibility of invalid cache. Also, see
@ -296,7 +296,7 @@ VerifyTransmitStmt(CopyStmt *copyStatement)
/*
* ProcessCopyStmt handles CitusDB specific concerns for COPY like supporting
* ProcessCopyStmt handles Citus specific concerns for COPY like supporting
* COPYing from distributed tables and preventing unsupported actions.
*/
static Node *
@ -757,7 +757,7 @@ IsAlterTableRenameStmt(RenameStmt *renameStmt)
isAlterTableRenameStmt = true;
}
#if (PG_VERSION_NUM >=90500)
#if (PG_VERSION_NUM >= 90500)
else if (renameStmt->renameType == OBJECT_TABCONSTRAINT)
{
isAlterTableRenameStmt = true;
@ -905,8 +905,9 @@ ExecuteCommandOnWorkerShards(Oid relationId, const char *commandString,
}
else
{
ereport(DEBUG2, (errmsg("applied command on shard " UINT64_FORMAT " on "
"node %s:%d", shardId, workerName, workerPort)));
ereport(DEBUG2, (errmsg("applied command on shard " UINT64_FORMAT
" on node %s:%d", shardId, workerName,
workerPort)));
}
isFirstPlacement = false;
@ -988,6 +989,7 @@ AllFinalizedPlacementsAccessible(Oid relationId)
static void
RangeVarCallbackForDropIndex(const RangeVar *rel, Oid relOid, Oid oldRelOid, void *arg)
{
/* *INDENT-OFF* */
HeapTuple tuple;
struct DropRelationCallbackState *state;
char relkind;
@ -1022,10 +1024,8 @@ RangeVarCallbackForDropIndex(const RangeVar *rel, Oid relOid, Oid oldRelOid, voi
classform = (Form_pg_class) GETSTRUCT(tuple);
if (classform->relkind != relkind)
{
ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is not an index", rel->relname)));
}
/* Allow DROP to either table owner or schema owner */
if (!pg_class_ownercheck(relOid, GetUserId()) &&
@ -1054,4 +1054,5 @@ RangeVarCallbackForDropIndex(const RangeVar *rel, Oid relOid, Oid oldRelOid, voi
if (OidIsValid(state->heapOid))
LockRelationOid(state->heapOid, heap_lockmode);
}
/* *INDENT-ON* */
}


@ -222,7 +222,7 @@ DropShards(Oid relationId, char *schemaName, char *relationName,
{
List *shardPlacementList = NIL;
List *droppedPlacementList = NIL;
List *lingeringPlacementList= NIL;
List *lingeringPlacementList = NIL;
ListCell *shardPlacementCell = NULL;
ListCell *droppedPlacementCell = NULL;
ListCell *lingeringPlacementCell = NULL;
@ -251,7 +251,8 @@ DropShards(Oid relationId, char *schemaName, char *relationName,
shardPlacementList = ShardPlacementList(shardId);
foreach(shardPlacementCell, shardPlacementList)
{
ShardPlacement *shardPlacement = (ShardPlacement *) lfirst(shardPlacementCell);
ShardPlacement *shardPlacement =
(ShardPlacement *) lfirst(shardPlacementCell);
char *workerName = shardPlacement->nodeName;
uint32 workerPort = shardPlacement->nodePort;
bool dropSuccessful = false;
@ -260,15 +261,18 @@ DropShards(Oid relationId, char *schemaName, char *relationName,
char storageType = shardInterval->storageType;
if (storageType == SHARD_STORAGE_TABLE)
{
appendStringInfo(workerDropQuery, DROP_REGULAR_TABLE_COMMAND, quotedShardName);
appendStringInfo(workerDropQuery, DROP_REGULAR_TABLE_COMMAND,
quotedShardName);
}
else if (storageType == SHARD_STORAGE_COLUMNAR ||
storageType == SHARD_STORAGE_FOREIGN)
{
appendStringInfo(workerDropQuery, DROP_FOREIGN_TABLE_COMMAND, quotedShardName);
appendStringInfo(workerDropQuery, DROP_FOREIGN_TABLE_COMMAND,
quotedShardName);
}
dropSuccessful = ExecuteRemoteCommand(workerName, workerPort, workerDropQuery);
dropSuccessful = ExecuteRemoteCommand(workerName, workerPort,
workerDropQuery);
if (dropSuccessful)
{
droppedPlacementList = lappend(droppedPlacementList, shardPlacement);
@ -312,7 +316,8 @@ DropShards(Oid relationId, char *schemaName, char *relationName,
if (QueryCancelPending)
{
ereport(WARNING, (errmsg("cancel requests are ignored during shard deletion")));
ereport(WARNING, (errmsg("cancel requests are ignored during shard "
"deletion")));
QueryCancelPending = false;
}
@ -384,7 +389,7 @@ CheckDeleteCriteria(Node *deleteCriteria)
}
/*
/*
* CheckPartitionColumn checks that the given where clause is based only on the
* partition key of the given relation id.
*/

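DropShards is reached from master_apply_delete_command (and, per this branch's goal, from the DROP TABLE path); an illustrative call that prunes old shards of an append-partitioned table:

-- shard boundaries must fall entirely within the delete criterion
SELECT master_apply_delete_command('DELETE FROM github_events WHERE created_at < ''2014-01-01''');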

@ -709,11 +709,16 @@ hostname_client_addr(void)
#ifdef HAVE_IPV6
case AF_INET6:
#endif
{
break;
}
default:
{
ereport(ERROR, (errmsg("invalid address family in connection")));
break;
}
}
remoteHost = palloc0(remoteHostLen);


@ -45,7 +45,8 @@ static bool WorkerCreateShard(char *nodeName, uint32 nodePort,
static bool WorkerShardStats(char *nodeName, uint32 nodePort, Oid relationId,
char *shardName, uint64 *shardLength,
text **shardMinValue, text **shardMaxValue);
static uint64 WorkerTableSize(char *nodeName, uint32 nodePort, char *tableName);
static uint64 WorkerTableSize(char *nodeName, uint32 nodePort, Oid relationId,
char *tableName);
static StringInfo WorkerPartitionValue(char *nodeName, uint32 nodePort, Oid relationId,
char *shardName, char *selectQuery);
@ -77,16 +78,15 @@ master_create_empty_shard(PG_FUNCTION_ARGS)
List *candidateNodeList = NIL;
text *nullMinValue = NULL;
text *nullMaxValue = NULL;
char tableType = 0;
char partitionMethod = 0;
char storageType = SHARD_STORAGE_TABLE;
Oid relationId = ResolveRelationId(relationNameText);
CheckDistributedTable(relationId);
tableType = get_rel_relkind(relationId);
if (tableType != RELKIND_RELATION)
if (CStoreTable(relationId))
{
ereport(ERROR, (errmsg("relation \"%s\" is not a regular table", relationName)));
storageType = SHARD_STORAGE_COLUMNAR;
}
partitionMethod = PartitionMethod(relationId);
@ -130,7 +130,7 @@ master_create_empty_shard(PG_FUNCTION_ARGS)
CreateShardPlacements(shardId, ddlEventList, candidateNodeList, 0,
ShardReplicationFactor);
InsertShardRow(relationId, shardId, SHARD_STORAGE_TABLE, nullMinValue, nullMaxValue);
InsertShardRow(relationId, shardId, storageType, nullMinValue, nullMaxValue);
PG_RETURN_INT64(shardId);
}
@ -171,9 +171,10 @@ master_append_table_to_shard(PG_FUNCTION_ARGS)
ShardInterval *shardInterval = LoadShardInterval(shardId);
Oid relationId = shardInterval->relationId;
bool cstoreTable = CStoreTable(relationId);
char storageType = shardInterval->storageType;
if (storageType != SHARD_STORAGE_TABLE)
if (storageType != SHARD_STORAGE_TABLE && !cstoreTable)
{
ereport(ERROR, (errmsg("cannot append to shardId " UINT64_FORMAT, shardId),
errdetail("The underlying shard is not a regular table")));
@ -457,7 +458,7 @@ WorkerShardStats(char *nodeName, uint32 nodePort, Oid relationId, char *shardNam
PG_TRY();
{
uint64 tableSize = WorkerTableSize(nodeName, nodePort, shardName);
uint64 tableSize = WorkerTableSize(nodeName, nodePort, relationId, shardName);
StringInfo minValue = WorkerPartitionValue(nodeName, nodePort, relationId,
shardName, SHARD_MIN_VALUE_QUERY);
StringInfo maxValue = WorkerPartitionValue(nodeName, nodePort, relationId,
@ -479,18 +480,27 @@ WorkerShardStats(char *nodeName, uint32 nodePort, Oid relationId, char *shardNam
/*
* WorkerTableSize queries the worker node to extract the disk space used by the
* given relation. The function assumes the relation represents a regular table.
* given relation. The function assumes the relation represents a regular table or
* a cstore_fdw table.
*/
static uint64
WorkerTableSize(char *nodeName, uint32 nodePort, char *tableName)
WorkerTableSize(char *nodeName, uint32 nodePort, Oid relationId, char *tableName)
{
uint64 tableSize = 0;
List *queryResultList = NIL;
StringInfo tableSizeString = NULL;
char *tableSizeStringEnd = NULL;
bool cstoreTable = CStoreTable(relationId);
StringInfo tableSizeQuery = makeStringInfo();
if (cstoreTable)
{
appendStringInfo(tableSizeQuery, SHARD_CSTORE_TABLE_SIZE_QUERY, tableName);
}
else
{
appendStringInfo(tableSizeQuery, SHARD_TABLE_SIZE_QUERY, tableName);
}
queryResultList = ExecuteRemoteQuery(nodeName, nodePort, tableSizeQuery);
if (queryResultList == NIL)

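With CStoreTable consulted here, empty-shard creation and appends also work for cstore_fdw tables, which are sized via the cstore-specific query; usage is unchanged (names illustrative):

SELECT master_create_empty_shard('github_events');
-- append a local staging table's rows to shard 102008
SELECT master_append_table_to_shard(102008, 'github_events_staging', 'localhost', 5432);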

@ -121,16 +121,6 @@ ErrorIfQueryNotSupported(Query *queryTree)
Assert(commandType == CMD_INSERT || commandType == CMD_UPDATE ||
commandType == CMD_DELETE);
if (!(partitionMethod == DISTRIBUTE_BY_HASH ||
partitionMethod == DISTRIBUTE_BY_RANGE))
{
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot perform distributed planning for the given"
" modification"),
errdetail("Only hash- or range-partitioned tables may be the "
"target of distributed modifications")));
}
/*
* Reject subqueries which are in SELECT or WHERE clause.
* Queries which include subqueries in FROM clauses are rejected below.
@ -403,6 +393,7 @@ DistributedModifyTask(Query *query)
query->onConflict = RebuildOnConflict(relationId, query->onConflict);
}
#else
/* always set to false for PG_VERSION_NUM < 90500 */
upsertQuery = false;
#endif
@ -424,6 +415,7 @@ DistributedModifyTask(Query *query)
#if (PG_VERSION_NUM >= 90500)
/*
* RebuildOnConflict rebuilds OnConflictExpr for correct deparsing. The function
* makes WHERE clause elements explicit and filters dropped columns
@ -458,7 +450,7 @@ RebuildOnConflict(Oid relationId, OnConflictExpr *originalOnConflict)
foreach(targetEntryCell, onConflictSet)
{
TargetEntry *targetEntry = (TargetEntry *) lfirst(targetEntryCell);
FormData_pg_attribute *tableAttribute = tableAttributes[targetEntry->resno -1];
FormData_pg_attribute *tableAttribute = tableAttributes[targetEntry->resno - 1];
/* skip dropped columns */
if (tableAttribute->attisdropped)
@ -478,6 +470,8 @@ RebuildOnConflict(Oid relationId, OnConflictExpr *originalOnConflict)
return updatedOnConflict;
}
#endif
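RebuildOnConflict only compiles on 9.5+, where ON CONFLICT exists; the kind of statement it deparses for the workers is, sketched against an illustrative table:

INSERT INTO github_events (repo_id, event_count) VALUES (1, 1)
ON CONFLICT (repo_id) DO UPDATE SET event_count = github_events.event_count + 1;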

multi_explain.c

@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* multi_explain.c
* CitusDB explain support.
* Citus explain support.
*
* Copyright (c) 2012-2015, Citus Data, Inc.
*-------------------------------------------------------------------------


@ -37,14 +37,14 @@ int LargeTableShardCount = 4; /* shard counts for a large table */
bool LogMultiJoinOrder = false; /* print join order as a debugging aid */
/* Function pointer type definition for join rule evaluation functions */
typedef JoinOrderNode * (*RuleEvalFunction) (JoinOrderNode *currentJoinNode,
typedef JoinOrderNode *(*RuleEvalFunction) (JoinOrderNode *currentJoinNode,
TableEntry *candidateTable,
List *candidateShardList,
List *applicableJoinClauses,
JoinType joinType);
static char * RuleNameArray[JOIN_RULE_LAST] = {0}; /* ordered join rule names */
static RuleEvalFunction RuleEvalFunctionArray[JOIN_RULE_LAST] = {0}; /* join rules */
static char *RuleNameArray[JOIN_RULE_LAST] = { 0 }; /* ordered join rule names */
static RuleEvalFunction RuleEvalFunctionArray[JOIN_RULE_LAST] = { 0 }; /* join rules */
/* Local functions forward declarations */
@ -54,7 +54,8 @@ static bool JoinExprListWalker(Node *node, List **joinList);
static bool ExtractLeftMostRangeTableIndex(Node *node, int *rangeTableIndex);
static List * MergeShardIntervals(List *leftShardIntervalList,
List *rightShardIntervalList, JoinType joinType);
static bool ShardIntervalsMatch(List *leftShardIntervalList, List *rightShardIntervalList);
static bool ShardIntervalsMatch(List *leftShardIntervalList,
List *rightShardIntervalList);
static List * LoadSortedShardIntervalList(Oid relationId);
static List * JoinOrderForTable(TableEntry *firstTable, List *tableEntryList,
List *joinClauseList);
@ -68,31 +69,41 @@ static List * TableEntryListDifference(List *lhsTableList, List *rhsTableList);
static TableEntry * FindTableEntry(List *tableEntryList, uint32 tableId);
/* Local functions forward declarations for join evaluations */
static JoinOrderNode * EvaluateJoinRules(List *joinedTableList, JoinOrderNode *currentJoinNode,
TableEntry *candidateTable, List *candidateShardList,
static JoinOrderNode * EvaluateJoinRules(List *joinedTableList,
JoinOrderNode *currentJoinNode,
TableEntry *candidateTable,
List *candidateShardList,
List *joinClauseList, JoinType joinType);
static List * RangeTableIdList(List *tableList);
static RuleEvalFunction JoinRuleEvalFunction(JoinRuleType ruleType);
static char * JoinRuleName(JoinRuleType ruleType);
static JoinOrderNode * BroadcastJoin(JoinOrderNode *joinNode, TableEntry *candidateTable,
List *candidateShardList, List *applicableJoinClauses,
List *candidateShardList,
List *applicableJoinClauses,
JoinType joinType);
static JoinOrderNode * LocalJoin(JoinOrderNode *joinNode, TableEntry *candidateTable,
List *candidateShardList, List *applicableJoinClauses,
JoinType joinType);
static bool JoinOnColumns(Var *currentPartitioncolumn, Var *candidatePartitionColumn,
List *joinClauseList);
static JoinOrderNode * SinglePartitionJoin(JoinOrderNode *joinNode, TableEntry *candidateTable,
List *candidateShardList, List *applicableJoinClauses,
static JoinOrderNode * SinglePartitionJoin(JoinOrderNode *joinNode,
TableEntry *candidateTable,
List *candidateShardList,
List *applicableJoinClauses,
JoinType joinType);
static JoinOrderNode * DualPartitionJoin(JoinOrderNode *joinNode, TableEntry *candidateTable,
List *candidateShardList, List *applicableJoinClauses,
static JoinOrderNode * DualPartitionJoin(JoinOrderNode *joinNode,
TableEntry *candidateTable,
List *candidateShardList,
List *applicableJoinClauses,
JoinType joinType);
static JoinOrderNode * CartesianProduct(JoinOrderNode *joinNode, TableEntry *candidateTable,
List *candidateShardList, List *applicableJoinClauses,
static JoinOrderNode * CartesianProduct(JoinOrderNode *joinNode,
TableEntry *candidateTable,
List *candidateShardList,
List *applicableJoinClauses,
JoinType joinType);
static JoinOrderNode * MakeJoinOrderNode(TableEntry *tableEntry, JoinRuleType joinRuleType,
Var *partitionColumn, char partitionMethod);
static JoinOrderNode * MakeJoinOrderNode(TableEntry *tableEntry, JoinRuleType
joinRuleType, Var *partitionColumn,
char partitionMethod);
/*
@ -106,7 +117,7 @@ List *
FixedJoinOrderList(FromExpr *fromExpr, List *tableEntryList)
{
List *joinList = NIL;
ListCell * joinCell = NULL;
ListCell *joinCell = NULL;
List *joinWhereClauseList = NIL;
List *joinOrderList = NIL;
List *joinedTableList = NIL;
@ -175,7 +186,7 @@ FixedJoinOrderList(FromExpr *fromExpr, List *tableEntryList)
"query"),
errdetail("Cannot perform outer joins with broadcast "
"joins of more than 1 shard"),
errhint("Set citusdb.large_table_shard_count to 1")));
errhint("Set citus.large_table_shard_count to 1")));
}
}
else if (nextJoinNode->joinRuleType == LOCAL_PARTITION_JOIN)
@ -199,7 +210,6 @@ FixedJoinOrderList(FromExpr *fromExpr, List *tableEntryList)
"query"),
errdetail("Shards of relations in outer join queries "
"must have 1-to-1 shard partitioning")));
}
}
else
@ -439,7 +449,7 @@ MergeShardIntervals(List *leftShardIntervalList, List *rightShardIntervalList,
bool nextMaxSmaller = comparisonResult > 0;
if ((shardUnion && nextMaxLarger) ||
(!shardUnion && nextMaxSmaller) )
(!shardUnion && nextMaxSmaller))
{
newShardInterval->maxValue = datumCopy(nextMax, typeByValue, typeLen);
}
@ -586,7 +596,8 @@ ShardIntervalsMatch(List *leftShardIntervalList, List *rightShardIntervalList)
nextRightIntervalCell = lnext(rightShardIntervalCell);
if (nextRightIntervalCell != NULL)
{
ShardInterval *nextRightInterval = (ShardInterval *) lfirst(nextRightIntervalCell);
ShardInterval *nextRightInterval =
(ShardInterval *) lfirst(nextRightIntervalCell);
shardIntervalsIntersect = ShardIntervalsOverlap(leftInterval,
nextRightInterval);
if (shardIntervalsIntersect)
@ -1028,7 +1039,7 @@ EvaluateJoinRules(List *joinedTableList, JoinOrderNode *currentJoinNode,
JoinRuleType ruleType = (JoinRuleType) ruleIndex;
RuleEvalFunction ruleEvalFunction = JoinRuleEvalFunction(ruleType);
nextJoinNode = (*ruleEvalFunction) (currentJoinNode,
nextJoinNode = (*ruleEvalFunction)(currentJoinNode,
candidateTable,
candidateShardList,
applicableJoinClauses,


@ -91,7 +91,8 @@ static void ParentSetNewChild(MultiNode *parentNode, MultiNode *oldChildNode,
/* Local functions forward declarations for aggregate expressions */
static void ApplyExtendedOpNodes(MultiExtendedOp *originalNode,
MultiExtendedOp *masterNode, MultiExtendedOp *workerNode);
MultiExtendedOp *masterNode,
MultiExtendedOp *workerNode);
static void TransformSubqueryNode(MultiTable *subqueryNode);
static MultiExtendedOp * MasterExtendedOpNode(MultiExtendedOp *originalOpNode);
static Node * MasterAggregateMutator(Node *originalNode, AttrNumber *columnId);
@ -117,7 +118,8 @@ static void ErrorIfUnsupportedArrayAggregate(Aggref *arrayAggregateExpression);
static void ErrorIfUnsupportedAggregateDistinct(Aggref *aggregateExpression,
MultiNode *logicalPlanNode);
static Var * AggregateDistinctColumn(Aggref *aggregateExpression);
static bool TablePartitioningSupportsDistinct(List *tableNodeList, MultiExtendedOp *opNode,
static bool TablePartitioningSupportsDistinct(List *tableNodeList,
MultiExtendedOp *opNode,
Var *distinctColumn);
static bool GroupedByColumn(List *groupClauseList, List *targetList, Var *column);
@ -257,6 +259,7 @@ MultiLogicalPlanOptimize(MultiTreeRoot *multiLogicalPlan)
MultiTable *tableNode = (MultiTable *) lfirst(tableNodeCell);
if (tableNode->relationId == SUBQUERY_RELATION_ID)
{
ErrorIfContainsUnsupportedAggregate((MultiNode *) tableNode);
TransformSubqueryNode(tableNode);
}
}
@ -1220,7 +1223,7 @@ MasterExtendedOpNode(MultiExtendedOp *originalOpNode)
bool hasAggregates = contain_agg_clause((Node *) originalExpression);
if (hasAggregates)
{
Node *newNode = MasterAggregateMutator((Node*) originalExpression,
Node *newNode = MasterAggregateMutator((Node *) originalExpression,
&columnId);
newExpression = (Expr *) newNode;
}
@ -1980,23 +1983,31 @@ CountDistinctHashFunctionName(Oid argumentType)
switch (argumentType)
{
case INT4OID:
{
hashFunctionName = pstrdup(HLL_HASH_INTEGER_FUNC_NAME);
break;
}
case INT8OID:
{
hashFunctionName = pstrdup(HLL_HASH_BIGINT_FUNC_NAME);
break;
}
case TEXTOID:
case BPCHAROID:
case VARCHAROID:
{
hashFunctionName = pstrdup(HLL_HASH_TEXT_FUNC_NAME);
break;
}
default:
{
hashFunctionName = pstrdup(HLL_HASH_ANY_FUNC_NAME);
break;
}
}
return hashFunctionName;
}
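The hll_hash_* names above come from the hll extension, which backs count(distinct) approximation; enabling and using it is sketched below (table name illustrative, and assumes hll is installed on the workers):

SET citus.count_distinct_error_rate TO 0.01;
SELECT count(DISTINCT repo_id) FROM github_events;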
@ -2135,8 +2146,9 @@ ErrorIfUnsupportedAggregateDistinct(Aggref *aggregateExpression,
bool distinctSupported = true;
List *repartitionNodeList = NIL;
Var *distinctColumn = NULL;
AggregateType aggregateType = GetAggregateType(aggregateExpression->aggfnoid);
List *multiTableNodeList = NIL;
ListCell *multiTableNodeCell = NULL;
AggregateType aggregateType = AGGREGATE_INVALID_FIRST;
/* check if logical plan includes a subquery */
List *subqueryMultiTableList = SubqueryMultiTableList(logicalPlanNode);
@ -2147,7 +2159,20 @@ ErrorIfUnsupportedAggregateDistinct(Aggref *aggregateExpression,
errdetail("distinct in the outermost query is unsupported")));
}
multiTableNodeList = FindNodesOfType(logicalPlanNode, T_MultiTable);
foreach(multiTableNodeCell, multiTableNodeList)
{
MultiTable *multiTable = (MultiTable *) lfirst(multiTableNodeCell);
if (multiTable->relationId == SUBQUERY_RELATION_ID)
{
ereport(ERROR, (errmsg("cannot compute count (distinct)"),
errdetail("Subqueries with aggregate (distinct) are "
"not supported yet")));
}
}
/* if we have a count(distinct), and distinct approximation is enabled */
aggregateType = GetAggregateType(aggregateExpression->aggfnoid);
if (aggregateType == AGGREGATE_COUNT &&
CountDistinctErrorRate != DISABLE_DISTINCT_APPROXIMATION)
{
@ -2563,7 +2588,7 @@ ErrorIfCannotPushdownSubquery(Query *subqueryTree, bool outerQueryHasLimit)
List *joinTreeTableIndexList = NIL;
uint32 joiningTableCount = 0;
ExtractRangeTableIndexWalker((Node*) subqueryTree->jointree,
ExtractRangeTableIndexWalker((Node *) subqueryTree->jointree,
&joinTreeTableIndexList);
joiningTableCount = list_length(joinTreeTableIndexList);
@ -2639,7 +2664,7 @@ ErrorIfUnsupportedTableCombination(Query *queryTree)
* Extract all range table indexes from the join tree. Note that sub-queries
* that get pulled up by PostgreSQL don't appear in this join tree.
*/
ExtractRangeTableIndexWalker((Node*) queryTree->jointree, &joinTreeTableIndexList);
ExtractRangeTableIndexWalker((Node *) queryTree->jointree, &joinTreeTableIndexList);
foreach(joinTreeTableIndexCell, joinTreeTableIndexList)
{
/*
@ -3019,7 +3044,7 @@ FullCompositeFieldList(List *compositeFieldList)
uint32 fieldIndex = 0;
ListCell *fieldSelectCell = NULL;
foreach (fieldSelectCell, compositeFieldList)
foreach(fieldSelectCell, compositeFieldList)
{
FieldSelect *fieldSelect = (FieldSelect *) lfirst(fieldSelectCell);
uint32 compositeFieldIndex = 0;
@ -3229,6 +3254,7 @@ SupportedLateralQuery(Query *parentQuery, Query *lateralQuery)
CompositeFieldRecursive(outerQueryExpression, parentQuery);
FieldSelect *localCompositeField =
CompositeFieldRecursive(localQueryExpression, lateralQuery);
/*
* If partition columns are composite fields, add them to the list to
* check later if all composite fields are used.
@ -3251,7 +3277,7 @@ SupportedLateralQuery(Query *parentQuery, Query *lateralQuery)
}
/* check composite fields */
if(!supportedLateralQuery)
if (!supportedLateralQuery)
{
bool outerFullCompositeFieldList =
FullCompositeFieldList(outerCompositeFieldList);
@ -3309,7 +3335,7 @@ JoinOnPartitionColumn(Query *query)
* If partition columns are composite fields, add them to the list to
* check later if all composite fields are used.
*/
if(leftCompositeField && rightCompositeField)
if (leftCompositeField && rightCompositeField)
{
leftCompositeFieldList = lappend(leftCompositeFieldList,
leftCompositeField);
@ -3318,7 +3344,7 @@ JoinOnPartitionColumn(Query *query)
}
/* if both sides are not composite fields, they are normal columns */
if(!(leftCompositeField && rightCompositeField))
if (!(leftCompositeField && rightCompositeField))
{
joinOnPartitionColumn = true;
break;
@ -3327,7 +3353,7 @@ JoinOnPartitionColumn(Query *query)
}
/* check composite fields */
if(!joinOnPartitionColumn)
if (!joinOnPartitionColumn)
{
bool leftFullCompositeFieldList =
FullCompositeFieldList(leftCompositeFieldList);
@ -3641,7 +3667,7 @@ LeafQuery(Query *queryTree)
* Extract all range table indexes from the join tree. Note that sub-queries
* that get pulled up by PostgreSQL don't appear in this join tree.
*/
ExtractRangeTableIndexWalker((Node*) queryTree->jointree, &joinTreeTableIndexList);
ExtractRangeTableIndexWalker((Node *) queryTree->jointree, &joinTreeTableIndexList);
foreach(joinTreeTableIndexCell, joinTreeTableIndexList)
{
/*

View File

@ -39,11 +39,11 @@ bool SubqueryPushdown = false; /* is subquery pushdown enabled */
/* Function pointer type definition for apply join rule functions */
typedef MultiNode * (*RuleApplyFunction) (MultiNode *leftNode, MultiNode *rightNode,
typedef MultiNode *(*RuleApplyFunction) (MultiNode *leftNode, MultiNode *rightNode,
Var *partitionColumn, JoinType joinType,
List *joinClauses);
static RuleApplyFunction RuleApplyFunctionArray[JOIN_RULE_LAST] = {0}; /* join rules */
static RuleApplyFunction RuleApplyFunctionArray[JOIN_RULE_LAST] = { 0 }; /* join rules */
/* Local functions forward declarations */
static MultiNode * MultiPlanTree(Query *queryTree);
@ -157,7 +157,7 @@ SubqueryEntryList(Query *queryTree)
* only walk over range table entries at this level and do not recurse into
* subqueries.
*/
ExtractRangeTableIndexWalker((Node*) queryTree->jointree, &joinTreeTableIndexList);
ExtractRangeTableIndexWalker((Node *) queryTree->jointree, &joinTreeTableIndexList);
foreach(joinTreeTableIndexCell, joinTreeTableIndexList)
{
/*
@ -285,6 +285,7 @@ MultiPlanTree(Query *queryTree)
else
{
bool hasOuterJoin = false;
/*
* We calculate the join order using the list of tables in the query and
* the join clauses between them. Note that this function owns the table
@ -465,6 +466,7 @@ ErrorIfQueryNotSupported(Query *queryTree)
#if (PG_VERSION_NUM >= 90500)
/* HasTablesample returns true if the query contains tablesample */
static bool
HasTablesample(Query *queryTree)
@ -485,6 +487,8 @@ HasTablesample(Query *queryTree)
return hasTablesample;
}
#endif
@ -529,7 +533,8 @@ HasUnsupportedJoinWalker(Node *node, void *context)
* ErrorIfSubqueryNotSupported checks that we can perform distributed planning for
* the given subquery.
*/
static void ErrorIfSubqueryNotSupported(Query *subqueryTree)
static void
ErrorIfSubqueryNotSupported(Query *subqueryTree)
{
char *errorDetail = NULL;
bool preconditionsSatisfied = true;
@ -587,7 +592,6 @@ HasOuterJoin(Query *queryTree)
static bool
HasOuterJoinWalker(Node *node, void *context)
{
bool hasOuterJoin = false;
if (node == NULL)
{
@ -657,7 +661,7 @@ HasComplexRangeTableType(Query *queryTree)
* Extract all range table indexes from the join tree. Note that sub-queries
* that get pulled up by PostgreSQL don't appear in this join tree.
*/
ExtractRangeTableIndexWalker((Node*) queryTree->jointree, &joinTreeTableIndexList);
ExtractRangeTableIndexWalker((Node *) queryTree->jointree, &joinTreeTableIndexList);
foreach(joinTreeTableIndexCell, joinTreeTableIndexList)
{
/*
@ -1178,8 +1182,8 @@ IsSelectClause(Node *clause)
/* we currently consider the following nodes as select clauses */
NodeTag nodeTag = nodeTag(clause);
if ( !(nodeTag == T_OpExpr || nodeTag == T_ScalarArrayOpExpr ||
nodeTag == T_NullTest || nodeTag == T_BooleanTest) )
if (!(nodeTag == T_OpExpr || nodeTag == T_ScalarArrayOpExpr ||
nodeTag == T_NullTest || nodeTag == T_BooleanTest))
{
return false;
}
@ -1567,7 +1571,7 @@ ApplyJoinRule(MultiNode *leftNode, MultiNode *rightNode, JoinRuleType ruleType,
/* call the join rule application function to create the new join node */
ruleApplyFunction = JoinRuleApplyFunction(ruleType);
multiNode = (*ruleApplyFunction) (leftNode, rightNode, partitionColumn,
multiNode = (*ruleApplyFunction)(leftNode, rightNode, partitionColumn,
joinType, applicableJoinClauses);
if (joinType != JOIN_INNER && CitusIsA(multiNode, MultiJoin))
@ -1918,7 +1922,7 @@ ErrorIfSubqueryJoin(Query *queryTree)
* Extract all range table indexes from the join tree. Note that sub-queries
* that get pulled up by PostgreSQL don't appear in this join tree.
*/
ExtractRangeTableIndexWalker((Node*) queryTree->jointree, &joinTreeTableIndexList);
ExtractRangeTableIndexWalker((Node *) queryTree->jointree, &joinTreeTableIndexList);
joiningRangeTableCount = list_length(joinTreeTableIndexList);
if (joiningRangeTableCount > 1)

View File

@ -138,7 +138,7 @@ static OpExpr * MakeOpExpressionWithZeroConst(void);
static List * BuildRestrictInfoList(List *qualList);
static List * FragmentCombinationList(List *rangeTableFragmentsList, Query *jobQuery,
List *dependedJobList);
static JoinSequenceNode * JoinSequenceArray(List * rangeTableFragmentsList,
static JoinSequenceNode * JoinSequenceArray(List *rangeTableFragmentsList,
Query *jobQuery, List *dependedJobList);
static bool PartitionedOnColumn(Var *column, List *rangeTableList, List *dependedJobList);
static void CheckJoinBetweenColumns(OpExpr *joinClause);
@ -155,7 +155,8 @@ static StringInfo DatumArrayString(Datum *datumArray, uint32 datumCount, Oid dat
static Task * CreateBasicTask(uint64 jobId, uint32 taskId, TaskType taskType,
char *queryString);
static void UpdateRangeTableAlias(List *rangeTableList, List *fragmentList);
static Alias * FragmentAlias(RangeTblEntry *rangeTableEntry, RangeTableFragment *fragment);
static Alias * FragmentAlias(RangeTblEntry *rangeTableEntry,
RangeTableFragment *fragment);
static uint64 AnchorShardId(List *fragmentList, uint32 anchorRangeTableId);
static List * PruneSqlTaskDependencies(List *sqlTaskList);
static List * AssignTaskList(List *sqlTaskList);
@ -167,7 +168,7 @@ static Task * GreedyAssignTask(WorkerNode *workerNode, List *taskList,
static List * RoundRobinAssignTaskList(List *taskList);
static List * RoundRobinReorder(Task *task, List *placementList);
static List * ReorderAndAssignTaskList(List *taskList,
List * (*reorderFunction) (Task *, List *));
List * (*reorderFunction)(Task *, List *));
static int CompareTasksByShardId(const void *leftElement, const void *rightElement);
static List * ActiveShardPlacementLists(List *taskList);
static List * ActivePlacementList(List *placementList);
@ -309,6 +310,7 @@ BuildJobTree(MultiTreeRoot *multiTree)
partitionKey, partitionType,
baseRelationId,
JOIN_MAP_MERGE_JOB);
/* reset depended job list */
loopDependedJobList = NIL;
loopDependedJobList = list_make1(mapMergeJob);
@ -538,7 +540,7 @@ BuildJobQuery(MultiNode *multiNode, List *dependedJobList)
* If we are building this query on a repartitioned subquery job then we
* don't need to update column attributes.
*/
if(dependedJobList != NIL)
if (dependedJobList != NIL)
{
Job *job = (Job *) linitial(dependedJobList);
if (CitusIsA(job, MapMergeJob))
@ -870,7 +872,7 @@ TargetEntryList(List *expressionList)
Expr *expression = (Expr *) lfirst(expressionCell);
TargetEntry *targetEntry = makeTargetEntry(expression,
list_length(targetEntryList)+1,
list_length(targetEntryList) + 1,
NULL, false);
targetEntryList = lappend(targetEntryList, targetEntry);
}
@ -1044,7 +1046,7 @@ QueryJoinTree(MultiNode *multiNode, List *dependedJobList, List **rangeTableList
/* fix the column attributes in ON (...) clauses */
columnList = pull_var_clause_default((Node *) joinNode->joinClauseList);
foreach (columnCell, columnList)
foreach(columnCell, columnList)
{
Var *column = (Var *) lfirst(columnCell);
UpdateColumnAttributes(column, *rangeTableList, dependedJobList);
@ -1093,7 +1095,8 @@ QueryJoinTree(MultiNode *multiNode, List *dependedJobList, List **rangeTableList
uint32 columnCount = (uint32) list_length(dependedTargetList);
List *columnNameList = DerivedColumnNameList(columnCount, dependedJob->jobId);
RangeTblEntry *rangeTableEntry = DerivedRangeTableEntry(multiNode, columnNameList,
RangeTblEntry *rangeTableEntry = DerivedRangeTableEntry(multiNode,
columnNameList,
tableIdList);
RangeTblRef *rangeTableRef = makeNode(RangeTblRef);
@ -1864,6 +1867,7 @@ SplitPointObject(ShardInterval **shardIntervalArray, uint32 shardIntervalCount)
return splitPointObject;
}
/* ------------------------------------------------------------
* Functions that relate to building and assigning tasks follow
* ------------------------------------------------------------
@ -2808,6 +2812,10 @@ SimpleOpExpression(Expr *clause)
return false; /* not a binary opclause */
}
/* strip coercions before doing check */
leftOperand = strip_implicit_coercions(leftOperand);
rightOperand = strip_implicit_coercions(rightOperand);
if (IsA(rightOperand, Const) && IsA(leftOperand, Var))
{
constantClause = (Const *) rightOperand;
@ -2896,7 +2904,7 @@ HashableClauseMutator(Node *originalNode, Var *partitionColumn)
* If this node is not hashable, continue walking down the expression tree
* to find and hash clauses which are eligible.
*/
if(newNode == NULL)
if (newNode == NULL)
{
newNode = expression_tree_mutator(originalNode, HashableClauseMutator,
(void *) partitionColumn);
@ -2919,6 +2927,10 @@ OpExpressionContainsColumn(OpExpr *operatorExpression, Var *partitionColumn)
Node *rightOperand = get_rightop((Expr *) operatorExpression);
Var *column = NULL;
/* strip coercions before doing check */
leftOperand = strip_implicit_coercions(leftOperand);
rightOperand = strip_implicit_coercions(rightOperand);
if (IsA(leftOperand, Var))
{
column = (Var *) leftOperand;
@ -3265,7 +3277,7 @@ JoinSequenceArray(List *rangeTableFragmentsList, Query *jobQuery, List *depended
joinSequenceArray[joinedTableCount].joiningRangeTableId = NON_PRUNABLE_JOIN;
joinedTableCount++;
foreach (joinExprCell, joinExprList)
foreach(joinExprCell, joinExprList)
{
JoinExpr *joinExpr = (JoinExpr *) lfirst(joinExprCell);
JoinType joinType = joinExpr->jointype;
@ -3339,7 +3351,7 @@ JoinSequenceArray(List *rangeTableFragmentsList, Query *jobQuery, List *depended
if (IS_OUTER_JOIN(joinType))
{
int innerRangeTableId = 0;
List * tableFragments = NIL;
List *tableFragments = NIL;
int fragmentCount = 0;
if (joinType == JOIN_RIGHT)
@ -3492,7 +3504,7 @@ FindRangeTableFragmentsList(List *rangeTableFragmentsList, int tableId)
if (tableFragments != NIL)
{
RangeTableFragment *tableFragment =
(RangeTableFragment*) linitial(tableFragments);
(RangeTableFragment *) linitial(tableFragments);
if (tableFragment->rangeTableId == tableId)
{
foundTableFragments = tableFragments;
@ -4038,6 +4050,7 @@ FragmentAlias(RangeTblEntry *rangeTableEntry, RangeTableFragment *fragment)
return alias;
}
/*
* AnchorShardId walks over each fragment in the given fragment list, finds the
* fragment that corresponds to the given anchor range tableId, and returns this
@ -4352,7 +4365,7 @@ MergeTaskList(MapMergeJob *mapMergeJob, List *mapTaskList, uint32 taskIdIndex)
StringInfo intermediateTableQueryString =
IntermediateTableQueryString(jobId, taskIdIndex, reduceQuery);
StringInfo mergeAndRunQueryString= makeStringInfo();
StringInfo mergeAndRunQueryString = makeStringInfo();
appendStringInfo(mergeAndRunQueryString, MERGE_FILES_AND_RUN_QUERY_COMMAND,
jobId, taskIdIndex, mergeTableQueryString->data,
intermediateTableQueryString->data);
@ -4952,7 +4965,7 @@ List *
FirstReplicaAssignTaskList(List *taskList)
{
/* No additional reordering need take place for this algorithm */
List * (*reorderFunction)(Task *, List *) = NULL;
List *(*reorderFunction)(Task *, List *) = NULL;
taskList = ReorderAndAssignTaskList(taskList, reorderFunction);
@ -4976,6 +4989,7 @@ RoundRobinAssignTaskList(List *taskList)
return taskList;
}
/*
* RoundRobinReorder implements the core of the round-robin assignment policy.
* It takes a task and placement list and rotates a copy of the placement list
@ -5108,7 +5122,8 @@ ActiveShardPlacementLists(List *taskList)
List *activeShardPlacementList = ActivePlacementList(shardPlacementList);
/* sort shard placements by their insertion time */
activeShardPlacementList = SortList(activeShardPlacementList, CompareShardPlacements);
activeShardPlacementList = SortList(activeShardPlacementList,
CompareShardPlacements);
shardPlacementLists = lappend(shardPlacementLists, activeShardPlacementList);
}
@ -5249,7 +5264,8 @@ AssignDualHashTaskList(List *taskList)
uint32 replicaIndex = 0;
for (replicaIndex = 0; replicaIndex < ShardReplicationFactor; replicaIndex++)
{
uint32 assignmentOffset = beginningNodeIndex + assignedTaskIndex + replicaIndex;
uint32 assignmentOffset = beginningNodeIndex + assignedTaskIndex +
replicaIndex;
uint32 assignmentIndex = assignmentOffset % workerNodeCount;
WorkerNode *workerNode = list_nth(workerNodeList, assignmentIndex);
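
The dual-hash assignment above walks each task's replicas around the worker list with modular arithmetic. A minimal standalone sketch of that placement (the counts and starting offset are illustrative, not taken from the diff):

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
    uint32_t workerNodeCount = 4;      /* hypothetical cluster size */
    uint32_t beginningNodeIndex = 2;   /* hypothetical hash-derived start */
    uint32_t replicationFactor = 2;
    uint32_t taskIndex;

    for (taskIndex = 0; taskIndex < 3; taskIndex++)
    {
        uint32_t replicaIndex;

        for (replicaIndex = 0; replicaIndex < replicationFactor; replicaIndex++)
        {
            /* wrap the offset around the worker list, as in the hunk above */
            uint32_t assignmentOffset = beginningNodeIndex + taskIndex + replicaIndex;

            printf("task %" PRIu32 " replica %" PRIu32 " -> worker %" PRIu32 "\n",
                   taskIndex, replicaIndex, assignmentOffset % workerNodeCount);
        }
    }

    return 0;
}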

View File

@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* multi_planner.c
* General CitusDB planner code.
* General Citus planner code.
*
* Copyright (c) 2012-2015, Citus Data, Inc.
*-------------------------------------------------------------------------
@ -45,26 +45,17 @@ multi_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
PlannedStmt *result = NULL;
/*
* First call into standard planner. This is required because the CitusDB
* First call into standard planner. This is required because the Citus
* planner relies on parse tree transformations made by postgres' planner.
*/
result = standard_planner(parse, cursorOptions, boundParams);
if (NeedsDistributedPlanning(parse))
{
MemoryContext oldcontext = NULL;
MultiPlan *physicalPlan = NULL;
/* Switch to top level message context */
oldcontext = MemoryContextSwitchTo(MessageContext);
physicalPlan = CreatePhysicalPlan(parse);
MultiPlan *physicalPlan = CreatePhysicalPlan(parse);
/* store required data into the planned statement */
result = MultiQueryContainerNode(result, physicalPlan);
/* Now switch back to original context */
MemoryContextSwitchTo(oldcontext);
}
return result;
@ -99,7 +90,7 @@ CreatePhysicalPlan(Query *parse)
/*
* This check is here to make it likely that all node types used in
* CitusDB are dumpable. Explain can dump logical and physical plans
* Citus are dumpable. Explain can dump logical and physical plans
* using the extended outfuncs infrastructure, but it's infeasible to
* test most plans. MultiQueryContainerNode always serializes the
* physical plan, so there's no need to check that separately.
@ -141,7 +132,7 @@ HasCitusToplevelNode(PlannedStmt *result)
* yet. Directly return false, since part of the required infrastructure for
* further checks might not be present.
*/
if (!CitusDBHasBeenLoaded())
if (!CitusHasBeenLoaded())
{
return false;
}
@ -268,7 +259,7 @@ GetMultiPlanString(PlannedStmt *result)
if (list_length(fauxFuncExpr->args) != 1)
{
ereport(ERROR, (errmsg("unexpected number of function arguments to "
"citusdb_extradata_container")));
"citus_extradata_container")));
}
multiPlanData = (Const *) linitial(fauxFuncExpr->args);
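
Because removed and added lines run together in the hunk above, the post-change shape of the planner entry point is easier to read assembled from the new lines; a sketch (the distributed/multi_planner.h include is an assumed location for the Citus declarations):

#include "postgres.h"
#include "optimizer/planner.h"
#include "distributed/multi_planner.h"   /* assumed header for Citus declarations */

static PlannedStmt *
multi_planner_sketch(Query *parse, int cursorOptions, ParamListInfo boundParams)
{
    /* postgres' planner runs first; Citus relies on its parse tree transformations */
    PlannedStmt *result = standard_planner(parse, cursorOptions, boundParams);

    if (NeedsDistributedPlanning(parse))
    {
        MultiPlan *physicalPlan = CreatePhysicalPlan(parse);

        /* store required data into the planned statement */
        result = MultiQueryContainerNode(result, physicalPlan);
    }

    return result;
}

Note the change also drops the explicit MemoryContextSwitchTo(MessageContext) around plan creation, so the sketch allocates in whatever context the caller provides.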

View File

@ -205,20 +205,31 @@ RelayEventExtendNames(Node *parseTree, uint64 shardId)
switch (relationNameListLength)
{
case 1:
{
relationNameValue = linitial(relationNameList);
break;
}
case 2:
{
relationNameValue = lsecond(relationNameList);
break;
}
case 3:
{
relationNameValue = lthird(relationNameList);
break;
}
default:
{
ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
errmsg("improper relation name: \"%s\"",
NameListToString(relationNameList))));
break;
}
}
relationName = &(relationNameValue->val.str);
AppendShardIdToName(relationName, shardId);
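
AppendShardIdToName itself is not shown in this hunk; a self-contained sketch of the suffixing convention it implements (the exact format string in the relay layer is an assumption here):

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

/* sketch: extend a relation name with its shard ID, e.g. "lineitem_102008" */
static void
AppendShardIdToNameSketch(char *name, size_t nameSize, uint64_t shardId)
{
    size_t usedLength = strlen(name);

    snprintf(name + usedLength, nameSize - usedLength, "_%" PRIu64, shardId);
}

int
main(void)
{
    char shardName[64] = "lineitem";

    AppendShardIdToNameSketch(shardName, sizeof(shardName), 102008);
    puts(shardName);   /* prints lineitem_102008 */

    return 0;
}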

View File

@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* shared_library_init.c
* Initialize CitusDB extension
* Initialize Citus extension
*
* Copyright (c) 2012-2015, Citus Data, Inc.
*-------------------------------------------------------------------------
@ -48,23 +48,23 @@ static void NormalizeWorkerListPath(void);
/* GUC enum definitions */
static const struct config_enum_entry task_assignment_policy_options[] = {
{"greedy", TASK_ASSIGNMENT_GREEDY, false},
{"first-replica", TASK_ASSIGNMENT_FIRST_REPLICA, false},
{"round-robin", TASK_ASSIGNMENT_ROUND_ROBIN, false},
{NULL, 0, false}
{ "greedy", TASK_ASSIGNMENT_GREEDY, false },
{ "first-replica", TASK_ASSIGNMENT_FIRST_REPLICA, false },
{ "round-robin", TASK_ASSIGNMENT_ROUND_ROBIN, false },
{ NULL, 0, false }
};
static const struct config_enum_entry task_executor_type_options[] = {
{"real-time", MULTI_EXECUTOR_REAL_TIME, false},
{"task-tracker", MULTI_EXECUTOR_TASK_TRACKER, false},
{"router", MULTI_EXECUTOR_ROUTER, false},
{NULL, 0, false}
{ "real-time", MULTI_EXECUTOR_REAL_TIME, false },
{ "task-tracker", MULTI_EXECUTOR_TASK_TRACKER, false },
{ "router", MULTI_EXECUTOR_ROUTER, false },
{ NULL, 0, false }
};
static const struct config_enum_entry shard_placement_policy_options[] = {
{"local-node-first", SHARD_PLACEMENT_LOCAL_NODE_FIRST, false},
{"round-robin", SHARD_PLACEMENT_ROUND_ROBIN, false},
{NULL, 0, false}
{ "local-node-first", SHARD_PLACEMENT_LOCAL_NODE_FIRST, false },
{ "round-robin", SHARD_PLACEMENT_ROUND_ROBIN, false },
{ NULL, 0, false }
};
@ -74,8 +74,8 @@ _PG_init(void)
{
if (!process_shared_preload_libraries_in_progress)
{
ereport(ERROR, (errmsg("CitusDB can only be loaded via shared_preload_libraries"),
errhint("Add citusdb to shared_preload_libraries.")));
ereport(ERROR, (errmsg("Citus can only be loaded via shared_preload_libraries"),
errhint("Add citus to shared_preload_libraries.")));
}
/*
@ -95,8 +95,8 @@ _PG_init(void)
ExecutorEnd_hook != NULL ||
ProcessUtility_hook != NULL)
{
ereport(ERROR, (errmsg("CitusDB has to be loaded first"),
errhint("Place citusdb at the beginning of "
ereport(ERROR, (errmsg("Citus has to be loaded first"),
errhint("Place citus at the beginning of "
"shared_preload_libraries.")));
}
@ -107,7 +107,7 @@ _PG_init(void)
CreateRequiredDirectories();
/*
* Register CitusDB configuration variables. Do so before intercepting
* Register Citus configuration variables. Do so before intercepting
* hooks or calling initialization functions, in case we want to do the
* latter in a configuration dependent manner.
*/
@ -137,7 +137,7 @@ _PG_init(void)
/*
* CreateRequiredDirectories - Create directories required for CitusDB to
* CreateRequiredDirectories - Create directories required for Citus to
* function.
*
* These used to be created by initdb, but that's not possible anymore.
@ -166,12 +166,12 @@ CreateRequiredDirectories(void)
}
/* Register CitusDB configuration variables. */
/* Register Citus configuration variables. */
static void
RegisterCitusConfigVariables(void)
{
DefineCustomStringVariable(
"citusdb.worker_list_file",
"citus.worker_list_file",
gettext_noop("Sets the server's \"worker_list\" configuration file."),
NULL,
&WorkerListFileName,
@ -182,7 +182,7 @@ RegisterCitusConfigVariables(void)
NormalizeWorkerListPath();
DefineCustomBoolVariable(
"citusdb.binary_master_copy_format",
"citus.binary_master_copy_format",
gettext_noop("Use the binary master copy format."),
gettext_noop("When enabled, data is copied from workers to the master "
"in PostgreSQL's binary serialization format."),
@ -193,7 +193,7 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomBoolVariable(
"citusdb.binary_worker_copy_format",
"citus.binary_worker_copy_format",
gettext_noop("Use the binary worker copy format."),
gettext_noop("When enabled, data is copied from workers to workers "
"in PostgreSQL's binary serialization format when "
@ -205,10 +205,11 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomBoolVariable(
"citusdb.expire_cached_shards",
gettext_noop("Enables shard cache expiration if a shard's size on disk has changed. "),
gettext_noop("When appending to an existing shard, old data may still be cached on "
"other workers. This configuration entry activates automatic "
"citus.expire_cached_shards",
gettext_noop("Enables shard cache expiration if a shard's size on disk has "
"changed."),
gettext_noop("When appending to an existing shard, old data may still be cached "
"on other workers. This configuration entry activates automatic "
"expiration, but should not be used with manual updates to shards."),
&ExpireCachedShards,
false,
@ -217,7 +218,7 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomBoolVariable(
"citusdb.subquery_pushdown",
"citus.subquery_pushdown",
gettext_noop("Enables supported subquery pushdown to workers."),
NULL,
&SubqueryPushdown,
@ -227,7 +228,7 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomBoolVariable(
"citusdb.log_multi_join_order",
"citus.log_multi_join_order",
gettext_noop("Logs the distributed join order to the server log."),
gettext_noop("We use this private configuration entry as a debugging aid. "
"If enabled, we print the distributed join order."),
@ -238,7 +239,7 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomBoolVariable(
"citusdb.explain_multi_logical_plan",
"citus.explain_multi_logical_plan",
gettext_noop("Enables Explain to print out distributed logical plans."),
gettext_noop("We use this private configuration entry as a debugging aid. "
"If enabled, the Explain command prints out the optimized "
@ -250,7 +251,7 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomBoolVariable(
"citusdb.explain_multi_physical_plan",
"citus.explain_multi_physical_plan",
gettext_noop("Enables Explain to print out distributed physical plans."),
gettext_noop("We use this private configuration entry as a debugging aid. "
"If enabled, the Explain command prints out the physical "
@ -262,7 +263,7 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomBoolVariable(
"citusdb.all_modifications_commutative",
"citus.all_modifications_commutative",
gettext_noop("Bypasses commutativity checks when enabled"),
NULL,
&AllModificationsCommutative,
@ -272,7 +273,7 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomIntVariable(
"citusdb.shard_replication_factor",
"citus.shard_replication_factor",
gettext_noop("Sets the replication factor for shards."),
gettext_noop("Shards are replicated across nodes according to this "
"replication factor. Note that shards read this "
@ -285,7 +286,7 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomIntVariable(
"citusdb.shard_max_size",
"citus.shard_max_size",
gettext_noop("Sets the maximum size a shard will grow before it gets split."),
gettext_noop("Shards store table and file data. When the source "
"file's size for one shard exceeds this configuration "
@ -300,7 +301,7 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomIntVariable(
"citusdb.max_worker_nodes_tracked",
"citus.max_worker_nodes_tracked",
gettext_noop("Sets the maximum number of worker nodes that are tracked."),
gettext_noop("Worker nodes' network locations, their membership and "
"health status are tracked in a shared hash table on "
@ -314,7 +315,7 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomIntVariable(
"citusdb.remote_task_check_interval",
"citus.remote_task_check_interval",
gettext_noop("Sets the frequency at which we check job statuses."),
gettext_noop("The master node assigns tasks to workers nodes, and "
"then regularly checks with them about each task's "
@ -327,7 +328,7 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomIntVariable(
"citusdb.task_tracker_delay",
"citus.task_tracker_delay",
gettext_noop("Task tracker sleep time between task management rounds."),
gettext_noop("The task tracker process wakes up regularly, walks over "
"all tasks assigned to it, and schedules and executes these "
@ -341,7 +342,7 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomIntVariable(
"citusdb.max_assign_task_batch_size",
"citus.max_assign_task_batch_size",
gettext_noop("Sets the maximum number of tasks to assign per round."),
gettext_noop("The master node synchronously assigns tasks to workers in "
"batches. Bigger batches allow for faster task assignment, "
@ -355,7 +356,7 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomIntVariable(
"citusdb.max_tracked_tasks_per_node",
"citus.max_tracked_tasks_per_node",
gettext_noop("Sets the maximum number of tracked tasks per node."),
gettext_noop("The task tracker processes keeps all assigned tasks in "
"a shared hash table, and schedules and executes these "
@ -369,7 +370,7 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomIntVariable(
"citusdb.max_running_tasks_per_node",
"citus.max_running_tasks_per_node",
gettext_noop("Sets the maximum number of tasks to run concurrently per node."),
gettext_noop("The task tracker process schedules and executes the tasks "
"assigned to it as appropriate. This configuration value "
@ -382,7 +383,7 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomIntVariable(
"citusdb.partition_buffer_size",
"citus.partition_buffer_size",
gettext_noop("Sets the buffer size to use for partition operations."),
gettext_noop("Worker nodes allow for table data to be repartitioned "
"into multiple text files, much like Hadoop's Map "
@ -396,7 +397,7 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomIntVariable(
"citusdb.large_table_shard_count",
"citus.large_table_shard_count",
gettext_noop("The shard count threshold over which a table is considered large."),
gettext_noop("A distributed table is considered to be large if it has "
"more shards than the value specified here. This largeness "
@ -409,7 +410,7 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomIntVariable(
"citusdb.limit_clause_row_fetch_count",
"citus.limit_clause_row_fetch_count",
gettext_noop("Number of rows to fetch per task for limit clause optimization."),
gettext_noop("Select queries get partitioned and executed as smaller "
"tasks. In some cases, select queries with limit clauses "
@ -424,7 +425,7 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomRealVariable(
"citusdb.count_distinct_error_rate",
"citus.count_distinct_error_rate",
gettext_noop("Desired error rate when calculating count(distinct) "
"approximates using the postgresql-hll extension. "
"0.0 disables approximations for count(distinct); 1.0 "
@ -437,7 +438,7 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomEnumVariable(
"citusdb.task_assignment_policy",
"citus.task_assignment_policy",
gettext_noop("Sets the policy to use when assigning tasks to worker nodes."),
gettext_noop("The master node assigns tasks to worker nodes based on shard "
"locations. This configuration value specifies the policy to "
@ -454,7 +455,7 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomEnumVariable(
"citusdb.task_executor_type",
"citus.task_executor_type",
gettext_noop("Sets the executor type to be used for distributed queries."),
gettext_noop("The master node chooses between three different executor types "
"when executing a distributed query. The router executor is "
@ -472,7 +473,7 @@ RegisterCitusConfigVariables(void)
NULL, NULL, NULL);
DefineCustomEnumVariable(
"citusdb.shard_placement_policy",
"citus.shard_placement_policy",
gettext_noop("Sets the policy to use when choosing nodes for shard placement."),
gettext_noop("The master node chooses which worker nodes to place new shards "
"on. This configuration value specifies the policy to use when "
@ -486,16 +487,14 @@ RegisterCitusConfigVariables(void)
0,
NULL, NULL, NULL);
/* warn about config items in the citusdb namespace that are not registered above */
EmitWarningsOnPlaceholders("citusdb");
/* Also warn about citus namespace, as that's a very likely misspelling */
/* warn about config items in the citus namespace that are not registered above */
EmitWarningsOnPlaceholders("citus");
}
/*
* NormalizeWorkerListPath converts the path configured via
* citusdb.worker_list_file into an absolute path, falling back to the default
* citus.worker_list_file into an absolute path, falling back to the default
* value if necessary. The previous value of the config variable is
* overwritten with the normalized value.
*
@ -515,8 +514,10 @@ NormalizeWorkerListPath(void)
{
absoluteFileName = malloc(strlen(DataDir) + strlen(WORKER_LIST_FILENAME) + 2);
if (absoluteFileName == NULL)
{
ereport(FATAL, (errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
}
sprintf(absoluteFileName, "%s/%s", DataDir, WORKER_LIST_FILENAME);
}
@ -525,11 +526,12 @@ NormalizeWorkerListPath(void)
ereport(FATAL, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("%s does not know where to find the \"worker_list_file\" "
"configuration file.\n"
"This can be specified as \"citusdb.worker_list_file\" in "
"This can be specified as \"citus.worker_list_file\" in "
"\"%s\", or by the -D invocation option, or by the PGDATA "
"environment variable.\n", progname, ConfigFileName)));
}
SetConfigOption("citusdb.worker_list_file", absoluteFileName, PGC_POSTMASTER, PGC_S_OVERRIDE);
SetConfigOption("citus.worker_list_file", absoluteFileName, PGC_POSTMASTER,
PGC_S_OVERRIDE);
free(absoluteFileName);
}
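
Every setting above moves from the citusdb GUC namespace to citus while keeping the same DefineCustom*Variable call shape. A minimal sketch of one such registration (the setting name is hypothetical):

#include "postgres.h"
#include "utils/guc.h"

static bool ExampleSetting = false;

static void
RegisterExampleVariable(void)
{
    DefineCustomBoolVariable(
        "citus.example_setting",   /* hypothetical name in the new namespace */
        gettext_noop("An illustrative boolean setting."),
        NULL,
        &ExampleSetting,
        false,
        PGC_USERSET,
        0,
        NULL, NULL, NULL);

    /* warn about citus.* placeholders that match no registered setting */
    EmitWarningsOnPlaceholders("citus");
}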

View File

@ -2,7 +2,7 @@
*
* test/src/connection_cache.c
*
* This file contains functions to exercise CitusDB's connection hash
* This file contains functions to exercise Citus's connection hash
* functionality for purposes of unit testing.
*
* Copyright (c) 2014-2015, Citus Data, Inc.
@ -48,7 +48,7 @@ initialize_remote_temp_table(PG_FUNCTION_ARGS)
int32 nodePort = PG_GETARG_INT32(1);
PGresult *result = NULL;
PGconn *connection = GetConnection(nodeName, nodePort);
PGconn *connection = GetOrEstablishConnection(nodeName, nodePort);
if (connection == NULL)
{
PG_RETURN_BOOL(false);
@ -79,7 +79,7 @@ count_remote_temp_table_rows(PG_FUNCTION_ARGS)
Datum count = Int32GetDatum(-1);
PGresult *result = NULL;
PGconn *connection = GetConnection(nodeName, nodePort);
PGconn *connection = GetOrEstablishConnection(nodeName, nodePort);
if (connection == NULL)
{
PG_RETURN_DATUM(count);
@ -114,7 +114,7 @@ get_and_purge_connection(PG_FUNCTION_ARGS)
char *nodeName = PG_GETARG_CSTRING(0);
int32 nodePort = PG_GETARG_INT32(1);
PGconn *connection = GetConnection(nodeName, nodePort);
PGconn *connection = GetOrEstablishConnection(nodeName, nodePort);
if (connection == NULL)
{
PG_RETURN_BOOL(false);
@ -136,7 +136,7 @@ set_connection_status_bad(PG_FUNCTION_ARGS)
char *nodeName = PG_GETARG_CSTRING(0);
int32 nodePort = PG_GETARG_INT32(1);
PGconn *connection = GetConnection(nodeName, nodePort);
PGconn *connection = GetOrEstablishConnection(nodeName, nodePort);
if (connection == NULL)
{
PG_RETURN_BOOL(false);

View File

@ -3,7 +3,7 @@
* test/src/create_shards.c
*
* This file contains functions to exercise shard creation functionality
* within CitusDB.
* within Citus.
*
* Copyright (c) 2014-2015, Citus Data, Inc.
*

View File

@ -3,7 +3,7 @@
* test/src/distribution_metadata.c
*
* This file contains functions to exercise distributed table metadata
* functionality within CitusDB.
* functionality within Citus.
*
* Copyright (c) 2014-2015, Citus Data, Inc.
*

View File

@ -116,9 +116,9 @@ FakeGetForeignPlan(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid,
ForeignPath *best_path, List *tlist, List *scan_clauses)
#else
static ForeignScan *
FakeGetForeignPlan(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid,
ForeignPath *best_path, List *tlist, List *scan_clauses,
Plan *outer_plan)
FakeGetForeignPlan(PlannerInfo * root, RelOptInfo * baserel, Oid foreigntableid,
ForeignPath * best_path, List * tlist, List * scan_clauses,
Plan * outer_plan)
#endif
{
Index scan_relid = baserel->relid;

View File

@ -3,7 +3,7 @@
* test/src/generate_ddl_commands.c
*
* This file contains functions to exercise DDL generation functionality
* within CitusDB.
* within Citus.
*
* Copyright (c) 2014-2015, Citus Data, Inc.
*

View File

@ -3,7 +3,7 @@
* test/src/create_shards.c
*
* This file contains functions to exercise shard creation functionality
* within CitusDB.
* within Citus.
*
* Copyright (c) 2014-2015, Citus Data, Inc.
*

View File

@ -2,7 +2,7 @@
*
* test/src/test_helper_functions.c
*
* This file contains helper functions used in many CitusDB tests.
* This file contains helper functions used in many Citus tests.
*
* Copyright (c) 2014-2015, Citus Data, Inc.
*

View File

@ -16,7 +16,7 @@
/* exports for SQL callable functions */
PG_FUNCTION_INFO_V1(citusdb_extradata_container);
PG_FUNCTION_INFO_V1(citus_extradata_container);
/*
@ -189,7 +189,7 @@ ExtractRangeTblExtraData(RangeTblEntry *rte, CitusRTEKind *rteKind,
if (list_length(fauxFuncExpr->args) != 4)
{
ereport(ERROR, (errmsg("unexpected number of function arguments to "
"citusdb_extradata_container")));
"citus_extradata_container")));
return;
}
@ -265,7 +265,7 @@ GetRangeTblKind(RangeTblEntry *rte)
{
CitusRTEKind rteKind = CITUS_RTE_RELATION /* invalid */;
switch(rte->rtekind)
switch (rte->rtekind)
{
/* directly rtekind if it's not possibly an extended RTE */
case RTE_RELATION:
@ -273,9 +273,13 @@ GetRangeTblKind(RangeTblEntry *rte)
case RTE_JOIN:
case RTE_VALUES:
case RTE_CTE:
{
rteKind = (CitusRTEKind) rte->rtekind;
break;
}
case RTE_FUNCTION:
{
/*
* Extract extra data - correct even if a plain RTE_FUNCTION, not
* an extended one, ExtractRangeTblExtraData handles that case
@ -284,19 +288,20 @@ GetRangeTblKind(RangeTblEntry *rte)
ExtractRangeTblExtraData(rte, &rteKind, NULL, NULL, NULL);
break;
}
}
return rteKind;
}
/*
* citusdb_extradata_container is a placeholder function to store information
* needed by CitusDB in plain postgres node trees. Executor and other hooks
* citus_extradata_container is a placeholder function to store information
* needed by Citus in plain postgres node trees. Executor and other hooks
* should always intercept statements containing calls to this function. It's
* not actually SQL callable by the user because of an INTERNAL argument.
*/
Datum
citusdb_extradata_container(PG_FUNCTION_ARGS)
citus_extradata_container(PG_FUNCTION_ARGS)
{
ereport(ERROR, (errmsg("not supposed to get here, did you cheat?")));

View File

@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* citus_outfuncs.c
* Output functions for CitusDB tree nodes.
* Output functions for Citus tree nodes.
*
* Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
@ -9,7 +9,7 @@
*
* NOTES
* This is a wrapper around postgres' nodeToString() that additionally
* supports CitusDB node types.
* supports Citus node types.
*
* Keep as closely aligned with the upstream version as possible.
*
@ -220,7 +220,7 @@ _outDatum(StringInfo str, Datum value, int typlen, bool typbyval)
/*****************************************************************************
* Output routines for CitusDB node types
* Output routines for Citus node types
*****************************************************************************/
static void

View File

@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* citus_readfuncs.c
* CitusDB adapted reader functions for Citus & Postgres tree nodes
* Citus adapted reader functions for Citus & Postgres tree nodes
*
* Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California

View File

@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* citus_readfuncs.c
* CitusDB adapted reader functions for Citus & Postgres tree nodes
* Citus adapted reader functions for Citus & Postgres tree nodes
*
* Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California

View File

@ -102,6 +102,7 @@ pg_get_extensiondef_string(Oid tableRelationId)
static Oid
get_extension_schema(Oid ext_oid)
{
/* *INDENT-OFF* */
Oid result;
Relation rel;
SysScanDesc scandesc;
@ -131,6 +132,7 @@ get_extension_schema(Oid ext_oid)
heap_close(rel, AccessShareLock);
return result;
/* *INDENT-ON* */
}
@ -186,7 +188,7 @@ AppendOptionListToString(StringInfo stringBuffer, List *optionList)
foreach(optionCell, optionList)
{
DefElem *option = (DefElem*) lfirst(optionCell);
DefElem *option = (DefElem *) lfirst(optionCell);
char *optionName = option->defname;
char *optionValue = defGetString(option);
@ -447,22 +449,36 @@ pg_get_tablecolumnoptionsdef_string(Oid tableRelationId)
switch (attributeForm->attstorage)
{
case 'p':
{
storageName = "PLAIN";
break;
}
case 'e':
{
storageName = "EXTERNAL";
break;
}
case 'm':
{
storageName = "MAIN";
break;
}
case 'x':
{
storageName = "EXTENDED";
break;
}
default:
{
ereport(ERROR, (errmsg("unrecognized storage type: %c",
attributeForm->attstorage)));
break;
}
}
appendStringInfo(&statement, "ALTER COLUMN %s ",
quote_identifier(attributeName));

View File

@ -32,7 +32,7 @@
/*
* NodeConnectionHash is the connection hash itself. It begins uninitialized.
* The first call to GetConnection triggers hash creation.
* The first call to GetOrEstablishConnection triggers hash creation.
*/
static HTAB *NodeConnectionHash = NULL;
@ -44,10 +44,10 @@ static char * ConnectionGetOptionValue(PGconn *connection, char *optionKeyword);
/*
* GetConnection returns a PGconn which can be used to execute queries on a
* remote PostgreSQL server. If no suitable connection to the specified node on
* the specified port yet exists, the function establishes a new connection and
* returns that.
* GetOrEstablishConnection returns a PGconn which can be used to execute
* queries on a remote PostgreSQL server. If no suitable connection to the
* specified node on the specified port yet exists, the function establishes
* a new connection and adds it to the connection cache before returning it.
*
* Returned connections are guaranteed to be in the CONNECTION_OK state. If the
* requested connection cannot be established, or if it was previously created
@ -56,7 +56,7 @@ static char * ConnectionGetOptionValue(PGconn *connection, char *optionKeyword);
* This function throws an error if a hostname over 255 characters is provided.
*/
PGconn *
GetConnection(char *nodeName, int32 nodePort)
GetOrEstablishConnection(char *nodeName, int32 nodePort)
{
PGconn *connection = NULL;
NodeConnectionKey nodeConnectionKey;
@ -249,7 +249,7 @@ CreateNodeConnectionHash(void)
info.hcxt = CacheMemoryContext;
hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
nodeConnectionHash = hash_create("citusdb connection cache", 32, &info, hashFlags);
nodeConnectionHash = hash_create("citus connection cache", 32, &info, hashFlags);
return nodeConnectionHash;
}
@ -257,7 +257,7 @@ CreateNodeConnectionHash(void)
/*
* ConnectToNode opens a connection to a remote PostgreSQL server. The function
* configures the connection's fallback application name to 'citusdb' and sets
* configures the connection's fallback application name to 'citus' and sets
* the remote encoding to match the local one. This function requires that the
* port be specified as a string for easier use with libpq functions.
*
@ -277,7 +277,7 @@ ConnectToNode(char *nodeName, char *nodePort)
"client_encoding", "connect_timeout", "dbname", NULL
};
const char *valueArray[] = {
nodeName, nodePort, "citusdb", clientEncoding,
nodeName, nodePort, "citus", clientEncoding,
CLIENT_CONNECT_TIMEOUT_SECONDS, dbname, NULL
};
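
Callers of the renamed GetOrEstablishConnection keep the NULL-on-failure pattern visible in the test changes earlier in this diff; a rough caller sketch (the header path is an assumption):

#include "postgres.h"
#include "libpq-fe.h"
#include "distributed/connection_cache.h"   /* assumed location of the declaration */

/* sketch: fetch a cached (or freshly established) connection, run one command */
static bool
RunRemoteCommandSketch(char *nodeName, int32 nodePort, const char *command)
{
    PGconn *connection = GetOrEstablishConnection(nodeName, nodePort);
    PGresult *result = NULL;
    bool commandOk = false;

    if (connection == NULL)
    {
        return false;   /* no CONNECTION_OK connection could be established */
    }

    result = PQexec(connection, command);
    commandOk = (PQresultStatus(result) == PGRES_COMMAND_OK);
    PQclear(result);

    return commandOk;
}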

View File

@ -77,7 +77,7 @@ IsDistributedTable(Oid relationId)
* yet. As we can't do lookups in nonexistent tables, directly return
* false.
*/
if (!CitusDBHasBeenLoaded())
if (!CitusHasBeenLoaded())
{
return false;
}
@ -87,6 +87,7 @@ IsDistributedTable(Oid relationId)
return cacheEntry->isDistributedTable;
}
/*
* LoadShardInterval reads shard metadata for given shardId from pg_dist_shard,
* and converts min/max values in these metadata to their properly typed datum
@ -139,6 +140,7 @@ LoadShardInterval(uint64 shardId)
return shardInterval;
}
/*
* DistributedTableCacheEntry looks up a pg_dist_partition entry for a
* relation.
@ -155,7 +157,7 @@ DistributedTableCacheEntry(Oid distributedRelationId)
* yet. As we can't do lookups in nonexistent tables, directly return NULL
* here.
*/
if (!CitusDBHasBeenLoaded())
if (!CitusHasBeenLoaded())
{
return NULL;
}
@ -292,7 +294,7 @@ LookupDistTableCacheEntry(Oid relationId)
/*
* CitusDBHasBeenLoaded returns true if the citusdb extension has been created
* CitusHasBeenLoaded returns true if the citus extension has been created
* in the current database and the extension script has been executed. Otherwise,
* it returns false. The result is cached as this is called very frequently.
*
@ -301,17 +303,17 @@ LookupDistTableCacheEntry(Oid relationId)
* acceptable.
*/
bool
CitusDBHasBeenLoaded(void)
CitusHasBeenLoaded(void)
{
static bool extensionLoaded = false;
/* recheck presence until citusdb has been loaded */
/* recheck presence until citus has been loaded */
if (!extensionLoaded)
{
bool extensionPresent = false;
bool extensionScriptExecuted = true;
Oid extensionOid = get_extension_oid("citusdb", true);
Oid extensionOid = get_extension_oid("citus", true);
if (extensionOid != InvalidOid)
{
extensionPresent = true;
@ -319,7 +321,7 @@ CitusDBHasBeenLoaded(void)
if (extensionPresent)
{
/* check if CitusDB extension objects are still being created */
/* check if Citus extension objects are still being created */
if (creating_extension && CurrentExtensionObject == extensionOid)
{
extensionScriptExecuted = false;
@ -428,7 +430,7 @@ CitusExtraDataContainerFuncId(void)
if (cachedOid == InvalidOid)
{
nameList = list_make2(makeString("pg_catalog"),
makeString("citusdb_extradata_container"));
makeString("citus_extradata_container"));
cachedOid = LookupFuncName(nameList, 1, paramOids, false);
}
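
The CitusHasBeenLoaded guard these hunks rename is the gate in front of every metadata lookup; a condensed sketch of the pattern (the header path is assumed):

#include "postgres.h"
#include "distributed/metadata_cache.h"   /* assumed header for these declarations */

/* sketch: refuse metadata lookups until the citus extension fully exists,
 * mirroring IsDistributedTable and DistributedTableCacheEntry above */
static bool
ExampleIsManagedRelation(Oid relationId)
{
    if (!CitusHasBeenLoaded())
    {
        return false;   /* catalogs may not exist yet; a lookup would error out */
    }

    return IsDistributedTable(relationId);
}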

View File

@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* multi_resowner.c
* CitusDB resource owner integration
* Citus resource owner integration
*
* An extension can't directly add members to ResourceOwnerData. Instead we
* have to use the resource owner callback mechanism. Right now it's
@ -22,7 +22,8 @@
#include "distributed/multi_resowner.h"
typedef struct JobDirectoryEntry {
typedef struct JobDirectoryEntry
{
ResourceOwner owner;
uint64 jobId;
} JobDirectoryEntry;
@ -91,15 +92,17 @@ ResourceOwnerEnlargeJobDirectories(ResourceOwner owner)
if (RegisteredJobDirectories == NULL)
{
newMax = 16;
RegisteredJobDirectories = (JobDirectoryEntry *)
MemoryContextAlloc(TopMemoryContext, newMax * sizeof(JobDirectoryEntry));
RegisteredJobDirectories =
(JobDirectoryEntry *) MemoryContextAlloc(TopMemoryContext,
newMax * sizeof(JobDirectoryEntry));
NumAllocatedJobDirectories = newMax;
}
else if (NumRegisteredJobDirectories + 1 > NumAllocatedJobDirectories)
{
newMax = NumAllocatedJobDirectories * 2;
RegisteredJobDirectories = (JobDirectoryEntry *)
repalloc(RegisteredJobDirectories, newMax * sizeof(JobDirectoryEntry));
RegisteredJobDirectories =
(JobDirectoryEntry *) repalloc(RegisteredJobDirectories,
newMax * sizeof(JobDirectoryEntry));
NumAllocatedJobDirectories = newMax;
}
}
@ -135,7 +138,8 @@ ResourceOwnerForgetJobDirectory(ResourceOwner owner, uint64 jobId)
/* move all later entries one up */
while (jobIndex < lastJobIndex)
{
RegisteredJobDirectories[jobIndex] = RegisteredJobDirectories[jobIndex + 1];
RegisteredJobDirectories[jobIndex] =
RegisteredJobDirectories[jobIndex + 1];
jobIndex++;
}
NumRegisteredJobDirectories = lastJobIndex;
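
As the file header explains, an extension cannot add members to ResourceOwnerData, so this array bookkeeping pairs with core's release-callback mechanism. A minimal sketch of registering such a callback (the callback body is illustrative):

#include "postgres.h"
#include "utils/resowner.h"

/* sketch: run at every resource owner release; this is where tracked job
 * directories would be cleaned up */
static void
ExampleResourceRelease(ResourceReleasePhase phase, bool isCommit,
                       bool isTopLevel, void *arg)
{
    if (phase != RESOURCE_RELEASE_AFTER_LOCKS)
    {
        return;
    }

    /* walk an array like RegisteredJobDirectories here and release entries */
}

static void
ExampleRegisterCallback(void)
{
    RegisterResourceReleaseCallback(ExampleResourceRelease, NULL);
}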

View File

@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* resource_lock.c
* Locking Infrastructure for CitusDB.
* Locking Infrastructure for Citus.
*
* To avoid introducing a new type of locktag - that then could not be
* displayed by core functionality - we reuse advisory locks. If we'd just
@ -14,9 +14,10 @@
*/
#include "postgres.h"
#include "c.h"
#include "miscadmin.h"
#include "distributed/relay_utility.h"
#include "distributed/resource_lock.h"
#include "storage/lmgr.h"
@ -68,6 +69,8 @@ LockShardResource(uint64 shardId, LOCKMODE lockmode)
const bool sessionLock = false;
const bool dontWait = false;
AssertArg(shardId != INVALID_SHARD_ID);
SET_LOCKTAG_SHARD_RESOURCE(tag, MyDatabaseId, shardId);
(void) LockAcquire(&tag, lockmode, sessionLock, dontWait);
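
SET_LOCKTAG_SHARD_RESOURCE is not defined in this hunk; given the header comment about reusing advisory locks, it plausibly expands along these lines (the field layout and class value are assumptions, not the actual definition):

#include "postgres.h"
#include "storage/lock.h"

/* assumed sketch: pack the 64-bit shard ID into the advisory lock fields so
 * pg_locks can still display the lock */
#define SKETCH_SET_LOCKTAG_SHARD_RESOURCE(tag, databaseId, shardId) \
    SET_LOCKTAG_ADVISORY(tag, \
                         databaseId, \
                         (uint32) ((shardId) >> 32), \
                         (uint32) (shardId), \
                         4)   /* hypothetical advisory class for shard locks */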

View File

@ -76,10 +76,10 @@ static void TrackerCleanupJobSchemas(void);
static void TrackerCleanupConnections(HTAB *WorkerTasksHash);
static void TrackerRegisterShutDown(HTAB *WorkerTasksHash);
static void TrackerDelayLoop(void);
static List *SchedulableTaskList(HTAB *WorkerTasksHash);
static List * SchedulableTaskList(HTAB *WorkerTasksHash);
static WorkerTask * SchedulableTaskPriorityQueue(HTAB *WorkerTasksHash);
static uint32 CountTasksMatchingCriteria(HTAB *WorkerTasksHash,
bool (*CriteriaFunction) (WorkerTask *));
bool (*CriteriaFunction)(WorkerTask *));
static bool RunningTask(WorkerTask *workerTask);
static bool SchedulableTask(WorkerTask *workerTask);
static int CompareTasksByTime(const void *first, const void *second);
@ -240,7 +240,7 @@ TaskTrackerMain(Datum main_arg)
/*
* Reload worker membership file. For now we do that in the task
* tracker because that's currently the only background worker in
* CitusDB. And only background workers allow us to safely
* Citus. And only background workers allow us to safely
* register a SIGHUP handler.
*/
LoadWorkerNodeList(WorkerListFileName);
@ -295,7 +295,7 @@ WorkerTasksHashEnter(uint64 jobId, uint32 taskId)
{
ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
errhint("Try increasing citusdb.max_tracked_tasks_per_node.")));
errhint("Try increasing citus.max_tracked_tasks_per_node.")));
}
/* check that we do not have the same task assigned twice to this node */
@ -494,6 +494,7 @@ TrackerDelayLoop(void)
}
}
/* ------------------------------------------------------------
* Signal handling and shared hash initialization functions follow
* ------------------------------------------------------------
@ -579,8 +580,8 @@ TaskTrackerShmemInit(void)
LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);
/* allocate struct containing task tracker related shared state */
WorkerTasksSharedState = (WorkerTasksSharedStateData *)
ShmemInitStruct("Worker Task Control",
WorkerTasksSharedState =
(WorkerTasksSharedStateData *) ShmemInitStruct("Worker Task Control",
sizeof(WorkerTasksSharedStateData),
&alreadyInitialized);
@ -607,6 +608,7 @@ TaskTrackerShmemInit(void)
}
}
/* ------------------------------------------------------------
* Task scheduling and management functions follow
* ------------------------------------------------------------
@ -719,7 +721,7 @@ SchedulableTaskPriorityQueue(HTAB *WorkerTasksHash)
/* Counts the number of tasks that match the given criteria function. */
static uint32
CountTasksMatchingCriteria(HTAB *WorkerTasksHash,
bool (*CriteriaFunction) (WorkerTask *))
bool (*CriteriaFunction)(WorkerTask *))
{
HASH_SEQ_STATUS status;
WorkerTask *currentTask = NULL;
@ -730,7 +732,7 @@ CountTasksMatchingCriteria(HTAB *WorkerTasksHash,
currentTask = (WorkerTask *) hash_seq_search(&status);
while (currentTask != NULL)
{
bool matchesCriteria = (*CriteriaFunction) (currentTask);
bool matchesCriteria = (*CriteriaFunction)(currentTask);
if (matchesCriteria)
{
taskCount++;

View File

@ -331,7 +331,7 @@ UpdateTask(WorkerTask *workerTask, char *taskCallString)
if (taskStatus == TASK_SUCCEEDED || taskStatus == TASK_CANCEL_REQUESTED ||
taskStatus == TASK_CANCELED)
{
; /* nothing to do */
/* nothing to do */
}
else if (taskStatus == TASK_PERMANENTLY_FAILED)
{

View File

@ -53,11 +53,14 @@ static void ReceiveResourceCleanup(int32 connectionId, const char *filename,
static void DeleteFile(const char *filename);
static void FetchTableCommon(text *tableName, uint64 remoteTableSize,
ArrayType *nodeNameObject, ArrayType *nodePortObject,
bool (*FetchTableFunction) (const char *, uint32, StringInfo));
bool (*FetchTableFunction)(const char *, uint32,
StringInfo));
static uint64 LocalTableSize(Oid relationId);
static uint64 ExtractShardId(StringInfo tableName);
static bool FetchRegularTable(const char *nodeName, uint32 nodePort, StringInfo tableName);
static bool FetchForeignTable(const char *nodeName, uint32 nodePort, StringInfo tableName);
static bool FetchRegularTable(const char *nodeName, uint32 nodePort,
StringInfo tableName);
static bool FetchForeignTable(const char *nodeName, uint32 nodePort,
StringInfo tableName);
static List * TableDDLCommandList(const char *nodeName, uint32 nodePort,
StringInfo tableName);
static StringInfo ForeignFilePath(const char *nodeName, uint32 nodePort,
@ -309,7 +312,7 @@ ReceiveRegularFile(const char *nodeName, uint32 nodePort,
}
else if (copyStatus == CLIENT_COPY_MORE)
{
; /* remote node will continue to send more data */
/* remote node will continue to send more data */
}
else
{
@ -468,7 +471,7 @@ worker_fetch_foreign_file(PG_FUNCTION_ARGS)
static void
FetchTableCommon(text *tableNameText, uint64 remoteTableSize,
ArrayType *nodeNameObject, ArrayType *nodePortObject,
bool (*FetchTableFunction) (const char *, uint32, StringInfo))
bool (*FetchTableFunction)(const char *, uint32, StringInfo))
{
StringInfo tableName = NULL;
char *tableNameCString = NULL;
@ -531,7 +534,7 @@ FetchTableCommon(text *tableNameText, uint64 remoteTableSize,
if (remoteTableSize > localTableSize)
{
/* table is not up to date, drop the table */
ObjectAddress tableObject = {InvalidOid, InvalidOid, 0};
ObjectAddress tableObject = { InvalidOid, InvalidOid, 0 };
tableObject.classId = RelationRelationId;
tableObject.objectId = relationId;
@ -554,7 +557,7 @@ FetchTableCommon(text *tableNameText, uint64 remoteTableSize,
char *nodeName = TextDatumGetCString(nodeNameDatum);
uint32 nodePort = DatumGetUInt32(nodePortDatum);
tableFetched = (*FetchTableFunction) (nodeName, nodePort, tableName);
tableFetched = (*FetchTableFunction)(nodeName, nodePort, tableName);
nodeIndex++;
}
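
The (*FetchTableFunction)(...) calls being reformatted here are plain C function-pointer dispatch; a tiny self-contained illustration of the pattern:

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

typedef bool (*FetchTableFunction)(const char *nodeName, uint32_t nodePort);

static bool
FetchStub(const char *nodeName, uint32_t nodePort)
{
    printf("would fetch from %s:%" PRIu32 "\n", nodeName, nodePort);
    return true;
}

int
main(void)
{
    FetchTableFunction fetchTableFunction = FetchStub;
    bool fetched = (*fetchTableFunction)("worker-1", 5432);   /* hypothetical node */

    return fetched ? 0 : 1;
}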
@ -994,11 +997,10 @@ worker_append_table_to_shard(PG_FUNCTION_ARGS)
StringInfo remoteCopyCommand = NULL;
CopyStmt *localCopyCommand = NULL;
RangeVar *localTable = NULL;
uint64 copiedRowCount = 0;
uint64 shardId = INVALID_SHARD_ID;
bool received = false;
char *quotedTableName = NULL;
const char *queryString = NULL;
StringInfo queryString = NULL;
const char *schemaName = NULL;
/* copy remote table's data to this node */
@ -1032,8 +1034,13 @@ worker_append_table_to_shard(PG_FUNCTION_ARGS)
localTable = makeRangeVar((char *) schemaName, shardNameString->data, -1);
localCopyCommand = CopyStatement(localTable, localFilePath->data);
DoCopy(localCopyCommand, queryString, &copiedRowCount);
(void) copiedRowCount;
quotedTableName = quote_qualified_identifier(schemaName, shardNameString->data);
queryString = makeStringInfo();
appendStringInfo(queryString, COPY_IN_COMMAND, quotedTableName, localFilePath->data);
ProcessUtility((Node *) localCopyCommand, queryString->data,
PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL);
/* finally delete the temporary file we created */
DeleteFile(localFilePath->data);

View File

@ -256,8 +256,8 @@ JobSchemaName(uint64 jobId)
*/
#ifdef HAVE_INTTYPES_H
StringInfo jobSchemaName = makeStringInfo();
appendStringInfo(jobSchemaName, "%s%0*"PRIu64,
JOB_SCHEMA_PREFIX, MIN_JOB_DIRNAME_WIDTH, jobId);
appendStringInfo(jobSchemaName, "%s%0*" PRIu64, JOB_SCHEMA_PREFIX,
MIN_JOB_DIRNAME_WIDTH, jobId);
#else
StringInfo jobSchemaName = makeStringInfo();
appendStringInfo(jobSchemaName, "%s%0*llu",

View File

@ -59,7 +59,7 @@ static void FileOutputStreamWrite(FileOutputStream file, StringInfo dataToWrite)
static void FileOutputStreamFlush(FileOutputStream file);
static void FilterAndPartitionTable(const char *filterQuery,
const char *columnName, Oid columnType,
uint32 (*PartitionIdFunction) (Datum, const void *),
uint32 (*PartitionIdFunction)(Datum, const void *),
const void *partitionIdContext,
FileOutputStream *partitionFileArray,
uint32 fileCount);
@ -463,7 +463,7 @@ JobDirectoryName(uint64 jobId)
*/
#ifdef HAVE_INTTYPES_H
StringInfo jobDirectoryName = makeStringInfo();
appendStringInfo(jobDirectoryName, "base/%s/%s%0*"PRIu64,
appendStringInfo(jobDirectoryName, "base/%s/%s%0*" PRIu64,
PG_JOB_CACHE_DIR, JOB_DIRECTORY_PREFIX,
MIN_JOB_DIRNAME_WIDTH, jobId);
#else
@ -726,7 +726,7 @@ FileOutputStreamFlush(FileOutputStream file)
static void
FilterAndPartitionTable(const char *filterQuery,
const char *partitionColumnName, Oid partitionColumnType,
uint32 (*PartitionIdFunction) (Datum, const void *),
uint32 (*PartitionIdFunction)(Datum, const void *),
const void *partitionIdContext,
FileOutputStream *partitionFileArray,
uint32 fileCount)
@ -808,7 +808,7 @@ FilterAndPartitionTable(const char *filterQuery,
*/
if (!partitionKeyNull)
{
partitionId = (*PartitionIdFunction) (partitionKey, partitionIdContext);
partitionId = (*PartitionIdFunction)(partitionKey, partitionIdContext);
}
else
{
@ -946,7 +946,7 @@ InitRowOutputState(void)
}
/* set up transcoding information and default text output characters */
if ( (fileEncoding != databaseEncoding) || (databaseEncodingMaxLength > 1) )
if ((fileEncoding != databaseEncoding) || (databaseEncodingMaxLength > 1))
{
rowOutputState->need_transcoding = true;
}
@ -1057,7 +1057,7 @@ OutputRow(HeapTuple row, TupleDesc rowDescriptor,
CopySendString(rowOutputState, rowOutputState->null_print_client);
}
lastColumn = ((columnIndex+1) == columnCount);
lastColumn = ((columnIndex + 1) == columnCount);
if (!lastColumn)
{
CopySendChar(rowOutputState, rowOutputState->delim[0]);
@ -1094,9 +1094,9 @@ OutputBinaryHeaders(FileOutputStream *partitionFileArray, uint32 fileCount)
{
/* Generate header for a binary copy */
const int32 zero = 0;
FileOutputStream partitionFile = {0, 0, 0};
FileOutputStream partitionFile = { 0, 0, 0 };
PartialCopyStateData headerOutputStateData;
PartialCopyState headerOutputState = (PartialCopyState) &headerOutputStateData;
PartialCopyState headerOutputState = (PartialCopyState) & headerOutputStateData;
memset(headerOutputState, 0, sizeof(PartialCopyStateData));
headerOutputState->fe_msgbuf = makeStringInfo();
@ -1128,9 +1128,9 @@ OutputBinaryFooters(FileOutputStream *partitionFileArray, uint32 fileCount)
{
/* Generate footer for a binary copy */
int16 negative = -1;
FileOutputStream partitionFile = {0, 0, 0};
FileOutputStream partitionFile = { 0, 0, 0 };
PartialCopyStateData footerOutputStateData;
PartialCopyState footerOutputState = (PartialCopyState) &footerOutputStateData;
PartialCopyState footerOutputState = (PartialCopyState) & footerOutputStateData;
memset(footerOutputState, 0, sizeof(PartialCopyStateData));
footerOutputState->fe_msgbuf = makeStringInfo();
@ -1143,6 +1143,7 @@ OutputBinaryFooters(FileOutputStream *partitionFileArray, uint32 fileCount)
}
/* *INDENT-OFF* */
/* Append data to the copy buffer in outputState */
static void
CopySendData(PartialCopyState outputState, const void *databuf, int datasize)
@ -1282,6 +1283,7 @@ CopyAttributeOutText(PartialCopyState cstate, char *string)
}
/* *INDENT-ON* */
/* Helper function to send pending copy output */
static inline void
CopyFlushOutput(PartialCopyState cstate, char *start, char *pointer)


@ -9,12 +9,12 @@
#
#-------------------------------------------------------------------------
citusdb_subdir = src/bin/csql
citusdb_top_builddir = ../../..
citus_subdir = src/bin/csql
citus_top_builddir = ../../..
PROGRAM = csql
PGFILEDESC = "csql - the CitusDB interactive terminal"
PGFILEDESC = "csql - the Citus interactive terminal"
PGAPPICON=win32
OBJS =command.o common.o help.o input.o stringutils.o mainloop.o copy.o \
@ -26,7 +26,7 @@ OBJS =command.o common.o help.o input.o stringutils.o mainloop.o copy.o \
PG_LIBS = $(libpq)
include $(citusdb_top_builddir)/Makefile.global
include $(citus_top_builddir)/Makefile.global
override CPPFLAGS += -I$(libpq_srcdir) -I$(top_srcdir)/src/bin/csql


@ -1,5 +1,5 @@
/*
* csql - the CitusDB interactive terminal
* csql - the Citus interactive terminal
* copy_options.c
* Routines for parsing copy and stage meta commands.
*
@ -16,7 +16,22 @@
#include "stringutils.h"
/* Concatenates "more" onto "var", and frees the original value of *var. */
/* *INDENT-OFF* */
void
free_copy_options(copy_options * ptr)
{
if (!ptr)
return;
free(ptr->before_tofrom);
free(ptr->after_tofrom);
free(ptr->file);
free(ptr->tableName);
free(ptr->columnList);
free(ptr);
}
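
The move above is purely positional; the function still frees every owned string unconditionally, which is safe because free(NULL) is defined to do nothing. The same pattern in a self-contained form, with a hypothetical two-member struct standing in for copy_options:

#include <stdlib.h>
#include <string.h>

typedef struct options
{
	char *file;
	char *tableName;
} options;

static void
free_options(options *ptr)
{
	if (!ptr)
		return;

	/* free(NULL) is a no-op, so unset members need no special casing */
	free(ptr->file);
	free(ptr->tableName);
	free(ptr);
}

int
main(void)
{
	options *opts = calloc(1, sizeof(options));	/* error check omitted */
	opts->file = strdup("/tmp/data.csv");

	free_options(opts);	/* tableName is still NULL; safe to free */
	return 0;
}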
/* concatenate "more" onto "var", freeing the original value of *var */
static void
xstrcat(char **var, const char *more)
{
@ -210,21 +225,9 @@ error:
return NULL;
}
/* *INDENT-ON* */
/* Frees copy options. */
void
free_copy_options(copy_options * ptr)
{
if (!ptr)
return;
free(ptr->before_tofrom);
free(ptr->after_tofrom);
free(ptr->file);
free(ptr->tableName);
free(ptr->columnList);
free(ptr);
}
/*
* ParseStageOptions takes the given copy options, parses the additional options


@ -1,5 +1,5 @@
/*
* csql - the CitusDB interactive terminal
* csql - the Citus interactive terminal
* copy_options.h
* Shared declarations for parsing copy and stage meta-commands. The stage
* meta-command borrows from copy's syntax, but does not yet support


@ -67,7 +67,7 @@ usage(unsigned short int pager)
output = PageOutput(59, pager ? &(pset.popt.topt) : NULL);
printf(_("csql is the CitusDB interactive terminal.\n\n"));
printf(_("csql is the Citus interactive terminal.\n\n"));
fprintf(output, _("Usage:\n"));
printf(_(" csql [OPTION]... [DBNAME [USERNAME]]\n\n"));


@ -200,7 +200,7 @@ MainLoop(FILE *source)
(line[4] == '\0' || line[4] == ';' || isspace((unsigned char) line[4])))
{
free(line);
puts(_("You are using csql, the command-line interface to CitusDB."));
puts(_("You are using csql, the command-line interface to Citus."));
printf(_("Type: \\copyright for distribution terms\n"
" \\h for help with SQL commands\n"
" \\? for help with csql commands\n"


@ -1,5 +1,5 @@
/*
* csql - the CitusDB interactive terminal
* csql - the Citus interactive terminal
* stage.c
* Helper routines to execute the csql meta-command \stage. These routines
* communicate with the master and worker nodes; and create new shards and
@ -26,7 +26,8 @@
static bool FileSize(char *filename, uint64 *fileSize);
static PGconn * ConnectToWorkerNode(const char *nodeName, uint32 nodePort,
const char *nodeDatabase);
static PGresult * ExecuteRemoteCommand(PGconn *remoteConnection, const char *remoteCommand,
static PGresult * ExecuteRemoteCommand(PGconn *remoteConnection,
const char *remoteCommand,
const char **parameterValues, int parameterCount);
static TableMetadata * InitTableMetadata(const char *tableName);
static ShardMetadata * InitShardMetadata(int shardPlacementPolicy);
@ -41,7 +42,8 @@ static uint64 GetValueUint64(const PGresult *result, int rowNumber, int columnNu
static bool MasterGetTableMetadata(const char *tableName, TableMetadata *tableMetadata);
static bool MasterGetTableDDLEvents(const char *tableName, TableMetadata *tableMetadata);
static bool MasterGetNewShardId(ShardMetadata *shardMetadata);
static bool MasterGetCandidateNodes(ShardMetadata *shardMetadata, int shardPlacementPolicy);
static bool MasterGetCandidateNodes(ShardMetadata *shardMetadata,
int shardPlacementPolicy);
static bool MasterInsertShardRow(uint32 logicalRelid, char storageType,
const ShardMetadata *shardMetadata);
static bool MasterInsertPlacementRows(const ShardMetadata *shardMetadata);
@ -62,7 +64,8 @@ static bool ApplyShardDDLCommand(PGconn *workerNode, uint64 shardId, const char
static bool TransmitTableData(PGconn *workerNode, uint64 shardId,
uint64 shardMaxSize, copy_options *stageOptions,
uint64 currentFileOffset, uint64 *nextFileOffset);
static bool TransmitFile(PGconn *workerNode, const char *localPath, const char *remotePath);
static bool TransmitFile(PGconn *workerNode, const char *localPath,
const char *remotePath);
static bool FileStreamOK(const copy_options *stageOptions);
static PQExpBuffer CreateCopyQueryString(const char *tableName, const char *columnList,
const char *afterToFrom);
@ -341,7 +344,6 @@ DoStageData(const char *stageCommand)
/* update current file offset */
currentFileOffset = nextFileOffset;
} /* while more file data left for sharding */
/*


@ -1,5 +1,5 @@
/*
* csql - the CitusDB interactive terminal
* csql - the Citus interactive terminal
* stage.h
* Declarations for the csql meta-command \stage. These declarations define a
* protocol for the client to communicate to the master and worker nodes.
@ -33,17 +33,19 @@
#define MASTER_GET_TABLE_METADATA "SELECT * FROM master_get_table_metadata($1::text)"
#define MASTER_GET_TABLE_DDL_EVENTS "SELECT * FROM master_get_table_ddl_events($1::text)"
#define MASTER_GET_NEW_SHARDID "SELECT * FROM master_get_new_shardid()"
#define MASTER_GET_LOCAL_FIRST_CANDIDATE_NODES "SELECT * FROM \
master_get_local_first_candidate_nodes()"
#define MASTER_GET_ROUND_ROBIN_CANDIDATE_NODES "SELECT * FROM \
master_get_round_robin_candidate_nodes($1::int8)"
#define MASTER_GET_LOCAL_FIRST_CANDIDATE_NODES \
"SELECT * FROM master_get_local_first_candidate_nodes()"
#define MASTER_GET_ROUND_ROBIN_CANDIDATE_NODES \
"SELECT * FROM master_get_round_robin_candidate_nodes($1::int8)"
#define MASTER_INSERT_SHARD_ROW "INSERT INTO pg_dist_shard \
(logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES \
($1::oid, $2::int8, $3::char, $4::text, $5::text)"
#define MASTER_INSERT_PLACEMENT_ROW "INSERT INTO pg_dist_shard_placement \
(shardid, shardstate, shardlength, nodename, nodeport) VALUES \
($1::int8, $2::int4, $3::int8, $4::text, $5::int4)"
#define MASTER_INSERT_SHARD_ROW \
"INSERT INTO pg_dist_shard " \
"(logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES " \
"($1::oid, $2::int8, $3::char, $4::text, $5::text)"
#define MASTER_INSERT_PLACEMENT_ROW \
"INSERT INTO pg_dist_shard_placement " \
"(shardid, shardstate, shardlength, nodename, nodeport) VALUES " \
"($1::int8, $2::int4, $3::int8, $4::text, $5::int4)"
/* Column names used to identify response fields as returned from the master. */
#define LOGICAL_RELID_FIELD "logical_relid"
@ -61,11 +63,11 @@
#define SHARD_MIN_MAX_COMMAND "SELECT min(%s), max(%s) FROM %s"
#define SHARD_TABLE_SIZE_COMMAND "SELECT pg_table_size('%s')"
#define SET_FOREIGN_TABLE_FILENAME "ALTER FOREIGN TABLE %s OPTIONS (SET filename '%s')"
#define GET_COLUMNAR_TABLE_FILENAME_OPTION "SELECT * FROM \
(SELECT (pg_options_to_table(ftoptions)).* FROM pg_foreign_table \
WHERE ftrelid = %u) AS Q WHERE option_name = 'filename';"
#define APPLY_SHARD_DDL_COMMAND "SELECT * FROM worker_apply_shard_ddl_command \
($1::int8, $2::text)"
#define GET_COLUMNAR_TABLE_FILENAME_OPTION \
"SELECT * FROM (SELECT (pg_options_to_table(ftoptions)).* FROM pg_foreign_table " \
"WHERE ftrelid = %u) AS Q WHERE option_name = 'filename';"
#define APPLY_SHARD_DDL_COMMAND \
"SELECT * FROM worker_apply_shard_ddl_command ($1::int8, $2::text)"
#define REMOTE_FILE_SIZE_COMMAND "SELECT size FROM pg_stat_file('%s')"
#define SHARD_COLUMNAR_TABLE_SIZE_COMMAND "SELECT cstore_table_size('%s')"
@ -100,7 +102,6 @@ typedef struct TableMetadata
char **ddlEventList; /* DDL statements used for creating new shard */
uint32 ddlEventCount; /* DDL statement count; statement list size */
} TableMetadata;
@ -122,7 +123,6 @@ typedef struct ShardMetadata
char *shardMinValue; /* partition key's minimum value in shard */
char *shardMaxValue; /* partition key's maximum value in shard */
uint64 shardSize; /* shard size; updated during staging */
} ShardMetadata;


@ -1,4 +1,4 @@
/stamp-h
/stamp-ext-h
/citusdb_config.h
/citusdb_config.h.in~
/citus_config.h
/citus_config.h.in~


@ -1,9 +1,9 @@
/* src/include/citusdb_config.h.in. Generated from configure.in by autoheader. */
/* src/include/citus_config.h.in. Generated from configure.in by autoheader. */
/*
* citusdb_config.h.in is generated by autoconf/autoheader and
* converted into citusdb_config.h by configure. Include when code needs to
* citus_config.h.in is generated by autoconf/autoheader and
* converted into citus_config.h by configure. Include when code needs to
* depend on determinations made by configure.
*
* Do not manually edit!


@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* citus_nodefuncs.h
* Node (de-)serialization support for CitusDB.
* Node (de-)serialization support for Citus.
*
* Copyright (c) 2012-2015, Citus Data, Inc.
*


@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* citus_nodes.h
* Additional node types, and related infrastructure, for CitusDB.
* Additional node types, and related infrastructure, for Citus.
*
* Copyright (c) 2012-2015, Citus Data, Inc.
*
@ -52,7 +52,7 @@ typedef enum CitusNodeTag
#define CitusIsA(nodeptr,_type_) (CitusNodeTag(nodeptr) == T_##_type_)
/* CitusDB variant of newNode(), don't use directly. */
/* Citus variant of newNode(), don't use directly. */
#define CitusNewNode(size, tag) \
({ Node *_result; \
AssertMacro((size) >= sizeof(Node)); /* need the tag, at least */ \
@ -63,7 +63,7 @@ typedef enum CitusNodeTag
/*
 * CitusMakeNode is the CitusDB variant of makeNode(). Use it to create nodes of
 * CitusMakeNode is the Citus variant of makeNode(). Use it to create nodes of
* the types listed in the CitusNodeTag enum and plain NodeTag. Initializes
* memory, besides the node tag, to 0.
*/
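
CitusNewNode follows the shape of PostgreSQL's newNode(): a GCC statement expression that allocates zeroed node memory, stamps the tag, and yields the pointer as the expression's value. A hedged sketch of that shape (names are illustrative; the real macro also asserts a minimum size):

/* sketch only: the newNode() idiom as a statement expression */
#define SketchNewNode(size, tag) \
	({ Node *_result = (Node *) palloc0(size);	/* zeroed allocation */ \
	   _result->type = (tag);					/* stamp the node tag */ \
	   _result; })								/* value of the expression */

With that in place, a call such as (MultiProject *) SketchNewNode(sizeof(MultiProject), T_MultiProject) would mirror what CitusMakeNode(MultiProject) expands to.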


@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* citus_ruleutils.h
* CitusDB ruleutils wrapper functions and exported PostgreSQL ruleutils
* Citus ruleutils wrapper functions and exported PostgreSQL ruleutils
* functions.
*
* Copyright (c) 2012-2015, Citus Data, Inc.
@ -15,17 +15,18 @@
#include "nodes/parsenodes.h"
/* Function declarations for version independent CitusDB ruleutils wrapper functions */
extern char *pg_get_extensiondef_string(Oid tableRelationId);
extern char *pg_get_serverdef_string(Oid tableRelationId);
extern char *pg_get_tableschemadef_string(Oid tableRelationId);
extern char *pg_get_tablecolumnoptionsdef_string(Oid tableRelationId);
extern char *pg_get_indexclusterdef_string(Oid indexRelationId);
/* Function declarations for version independent Citus ruleutils wrapper functions */
extern char * pg_get_extensiondef_string(Oid tableRelationId);
extern char * pg_get_serverdef_string(Oid tableRelationId);
extern char * pg_get_tableschemadef_string(Oid tableRelationId);
extern char * pg_get_tablecolumnoptionsdef_string(Oid tableRelationId);
extern char * pg_get_indexclusterdef_string(Oid indexRelationId);
/* Function declarations for version dependent PostgreSQL ruleutils functions */
extern void pg_get_query_def(Query *query, StringInfo buffer);
extern void deparse_shard_query(Query *query, Oid distrelid, int64 shardid, StringInfo buffer);
extern char *generate_relation_name(Oid relid, List *namespaces);
extern void deparse_shard_query(Query *query, Oid distrelid, int64 shardid, StringInfo
buffer);
extern char * generate_relation_name(Oid relid, List *namespaces);
#endif /* CITUS_RULEUTILS_H */


@ -51,7 +51,7 @@ typedef struct NodeConnectionEntry
/* function declarations for obtaining and using a connection */
extern PGconn * GetConnection(char *nodeName, int32 nodePort);
extern PGconn * GetOrEstablishConnection(char *nodeName, int32 nodePort);
extern void PurgeConnection(PGconn *connection);
extern void ReportRemoteError(PGconn *connection, PGresult *result);


@ -9,8 +9,8 @@
*-------------------------------------------------------------------------
*/
#ifndef CITUSDB_LISTUTILS_H
#define CITUSDB_LISTUTILS_H
#ifndef CITUS_LISTUTILS_H
#define CITUS_LISTUTILS_H
#include "postgres.h"
#include "c.h"
@ -23,4 +23,4 @@ extern List * SortList(List *pointerList,
int (*ComparisonFunction)(const void *, const void *));
#endif /* CITUSDB_LISTUTILS_H */
#endif /* CITUS_LISTUTILS_H */


@ -38,7 +38,6 @@ typedef struct ShardInterval
Datum minValue; /* a shard's typed min value datum */
Datum maxValue; /* a shard's typed max value datum */
uint64 shardId;
} ShardInterval;
@ -52,7 +51,6 @@ typedef struct ShardPlacement
RelayFileState shardState;
char *nodeName;
uint32 nodePort;
} ShardPlacement;


@ -49,13 +49,14 @@
#define SHARDID_SEQUENCE_NAME "pg_dist_shardid_seq"
/* Remote call definitions to help with data staging and deletion */
#define WORKER_APPLY_SHARD_DDL_COMMAND "SELECT worker_apply_shard_ddl_command \
("UINT64_FORMAT", %s)"
#define WORKER_APPEND_TABLE_TO_SHARD "SELECT worker_append_table_to_shard \
(%s, %s, %s, %u)"
#define WORKER_APPLY_SHARD_DDL_COMMAND \
"SELECT worker_apply_shard_ddl_command (" UINT64_FORMAT ", %s)"
#define WORKER_APPEND_TABLE_TO_SHARD \
"SELECT worker_append_table_to_shard (%s, %s, %s, %u)"
#define SHARD_MIN_VALUE_QUERY "SELECT min(%s) FROM %s"
#define SHARD_MAX_VALUE_QUERY "SELECT max(%s) FROM %s"
#define SHARD_TABLE_SIZE_QUERY "SELECT pg_table_size('%s')"
#define SHARD_CSTORE_TABLE_SIZE_QUERY "SELECT cstore_table_size('%s')"
#define DROP_REGULAR_TABLE_COMMAND "DROP TABLE IF EXISTS %s"
#define DROP_FOREIGN_TABLE_COMMAND "DROP FOREIGN TABLE IF EXISTS %s"
#define CREATE_SCHEMA_COMMAND "CREATE SCHEMA IF NOT EXISTS %s"
@ -67,7 +68,6 @@ typedef enum
SHARD_PLACEMENT_INVALID_FIRST = 0,
SHARD_PLACEMENT_LOCAL_NODE_FIRST = 1,
SHARD_PLACEMENT_ROUND_ROBIN = 2
} ShardPlacementPolicyType;


@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* metadata_cache.h
* Executor support for CitusDB.
* Executor support for Citus.
*
* Copyright (c) 2012-2015, Citus Data, Inc.
*
@ -47,7 +47,7 @@ extern ShardInterval * LoadShardInterval(uint64 shardId);
extern DistTableCacheEntry * DistributedTableCacheEntry(Oid distributedRelationId);
extern void CitusInvalidateRelcacheByRelid(Oid relationId);
extern bool CitusDBHasBeenLoaded(void);
extern bool CitusHasBeenLoaded(void);
/* relation oids */
extern Oid DistPartitionRelationId(void);


@ -24,6 +24,7 @@
#define INVALID_TASK_ID 0
#if (PG_VERSION_NUM >= 90500)
/* reserved alias name for UPSERTs */
#define UPSERT_ALIAS "citus_table_alias"
#endif


@ -29,7 +29,6 @@ typedef enum
CLIENT_CONNECTION_BAD = 1,
CLIENT_CONNECTION_BUSY = 2,
CLIENT_CONNECTION_READY = 3
} ConnectStatus;
@ -40,7 +39,6 @@ typedef enum
CLIENT_RESULT_UNAVAILABLE = 1,
CLIENT_RESULT_BUSY = 2,
CLIENT_RESULT_READY = 3
} ResultStatus;
@ -51,7 +49,6 @@ typedef enum
CLIENT_QUERY_FAILED = 1,
CLIENT_QUERY_DONE = 2,
CLIENT_QUERY_COPY = 3
} QueryStatus;
@ -62,7 +59,6 @@ typedef enum
CLIENT_COPY_MORE = 1,
CLIENT_COPY_FAILED = 2,
CLIENT_COPY_DONE = 3
} CopyStatus;
@ -73,7 +69,6 @@ typedef enum
CLIENT_BATCH_QUERY_FAILED = 1,
CLIENT_BATCH_QUERY_CONTINUE = 2,
CLIENT_BATCH_QUERY_DONE = 3
} BatchQueryStatus;


@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* multi_executor.h
* Executor support for CitusDB.
* Executor support for Citus.
*
* Copyright (c) 2012-2015, Citus Data, Inc.
*-------------------------------------------------------------------------


@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* multi_explain.h
* Explain support for CitusDB.
* Explain support for Citus.
*
* Copyright (c) 2012-2015, Citus Data, Inc.
*-------------------------------------------------------------------------


@ -40,7 +40,6 @@ typedef enum JoinRuleType
* RuleNameArray.
*/
JOIN_RULE_LAST
} JoinRuleType;
@ -53,7 +52,6 @@ typedef struct TableEntry
{
Oid relationId;
uint32 rangeTableId;
} TableEntry;
@ -72,7 +70,6 @@ typedef struct JoinOrderNode
char partitionMethod;
List *joinClauseList; /* not relevant for the first table */
List *shardIntervalList;
} JoinOrderNode;


@ -55,7 +55,6 @@ typedef enum
AGGREGATE_SUM = 4,
AGGREGATE_COUNT = 5,
AGGREGATE_ARRAY_AGG = 6
} AggregateType;
@ -69,7 +68,6 @@ typedef enum
PUSH_DOWN_VALID = 1,
PUSH_DOWN_NOT_VALID = 2,
PUSH_DOWN_SPECIAL_CONDITIONS = 3
} PushDownStatus;
@ -82,7 +80,6 @@ typedef enum
PULL_UP_INVALID_FIRST = 0,
PULL_UP_VALID = 1,
PULL_UP_NOT_VALID = 2
} PullUpStatus;
@ -97,8 +94,10 @@ typedef enum
* Please note that the order of elements in this array is tied to the order of
* values in the preceding AggregateType enum. This order needs to be preserved.
*/
static const char * const AggregateNames[] = { "invalid", "avg", "min", "max",
"sum", "count", "array_agg" };
static const char *const AggregateNames[] = {
"invalid", "avg", "min", "max", "sum",
"count", "array_agg"
};
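
Only the array layout changes; the invariant in the comment above still carries the weight: indexing AggregateNames by type is valid precisely because the array is ordered like the AggregateType enum. A hypothetical accessor relying on that invariant:

/* sketch: resolve an AggregateType to its SQL function name */
static const char *
AggregateTypeName(AggregateType aggregateType)
{
	/* valid only while the array order mirrors the enum order */
	Assert(aggregateType < lengthof(AggregateNames));
	return AggregateNames[aggregateType];
}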
/* Config variable managed via guc.c */


@ -40,8 +40,8 @@ typedef struct MultiNode
CitusNodeTag type;
struct MultiNode *parentNode;
/* child node(s) are defined in unary and binary nodes */
/* child node(s) are defined in unary and binary nodes */
} MultiNode;
@ -51,7 +51,6 @@ typedef struct MultiUnaryNode
MultiNode node;
struct MultiNode *childNode;
} MultiUnaryNode;
@ -62,7 +61,6 @@ typedef struct MultiBinaryNode
struct MultiNode *leftChildNode;
struct MultiNode *rightChildNode;
} MultiBinaryNode;
@ -73,7 +71,6 @@ typedef struct MultiBinaryNode
typedef struct MultiTreeRoot
{
MultiUnaryNode unaryNode;
} MultiTreeRoot;
@ -91,7 +88,6 @@ typedef struct MultiTable
Alias *alias;
Alias *referenceNames;
Query *subquery; /* this field is only valid for non-relation subquery types */
} MultiTable;
@ -100,7 +96,6 @@ typedef struct MultiProject
{
MultiUnaryNode unaryNode;
List *columnList;
} MultiProject;
@ -112,7 +107,6 @@ typedef struct MultiProject
typedef struct MultiCollect
{
MultiUnaryNode unaryNode;
} MultiCollect;
@ -125,7 +119,6 @@ typedef struct MultiSelect
{
MultiUnaryNode unaryNode;
List *selectClauseList;
} MultiSelect;
@ -140,7 +133,6 @@ typedef struct MultiJoin
List *joinClauseList;
JoinRuleType joinRuleType;
JoinType joinType;
} MultiJoin;
@ -150,7 +142,6 @@ typedef struct MultiPartition
MultiUnaryNode unaryNode;
Var *partitionColumn;
uint32 splitPointTableId;
} MultiPartition;
@ -158,7 +149,6 @@ typedef struct MultiPartition
typedef struct MultiCartesianProduct
{
MultiBinaryNode binaryNode;
} MultiCartesianProduct;
@ -183,7 +173,6 @@ typedef struct MultiExtendedOp
List *sortClauseList;
Node *limitCount;
Node *limitOffset;
} MultiExtendedOp;


@ -40,7 +40,8 @@
(" UINT64_FORMAT ", %d, %s, '%s', %d, %d)"
#define MERGE_FILES_INTO_TABLE_COMMAND "SELECT worker_merge_files_into_table \
(" UINT64_FORMAT ", %d, '%s', '%s')"
#define MERGE_FILES_AND_RUN_QUERY_COMMAND "SELECT worker_merge_files_and_run_query(" UINT64_FORMAT ", %d, '%s', '%s')"
#define MERGE_FILES_AND_RUN_QUERY_COMMAND \
"SELECT worker_merge_files_and_run_query(" UINT64_FORMAT ", %d, '%s', '%s')"
typedef enum CitusRTEKind
@ -62,7 +63,6 @@ typedef enum
PARTITION_INVALID_FIRST = 0,
RANGE_PARTITION_TYPE = 1,
HASH_PARTITION_TYPE = 2
} PartitionType;
@ -77,7 +77,6 @@ typedef enum
MAP_OUTPUT_FETCH_TASK = 5,
MERGE_FETCH_TASK = 6,
MODIFY_TASK = 7
} TaskType;
@ -88,7 +87,6 @@ typedef enum
TASK_ASSIGNMENT_GREEDY = 1,
TASK_ASSIGNMENT_ROUND_ROBIN = 2,
TASK_ASSIGNMENT_FIRST_REPLICA = 3
} TaskAssignmentPolicyType;
@ -99,7 +97,6 @@ typedef enum
JOIN_MAP_MERGE_JOB = 1,
SUBQUERY_MAP_MERGE_JOB = 2,
TOP_LEVEL_WORKER_JOB = 3
} BoundaryNodeJobType;
@ -133,7 +130,6 @@ typedef struct MapMergeJob
ShardInterval **sortedShardIntervalArray; /* only applies to range partitioning */
List *mapTaskList;
List *mergeTaskList;
} MapMergeJob;
@ -164,7 +160,6 @@ typedef struct Task
uint64 shardId; /* only applies to shard fetch tasks */
TaskExecution *taskExecution; /* used by task tracker executor */
bool upsertQuery; /* only applies to modify tasks */
} Task;
@ -177,7 +172,6 @@ typedef struct RangeTableFragment
CitusRTEKind fragmentType;
void *fragmentReference;
uint32 rangeTableId;
} RangeTableFragment;
@ -190,7 +184,6 @@ typedef struct JoinSequenceNode
{
uint32 rangeTableId;
int32 joiningRangeTableId;
} JoinSequenceNode;
@ -203,7 +196,6 @@ typedef struct MultiPlan
Job *workerJob;
Query *masterQuery;
char *masterTableName;
} MultiPlan;


@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* multi_planner.h
* General CitusDB planner code.
* General Citus planner code.
*
* Copyright (c) 2012-2015, Citus Data, Inc.
*-------------------------------------------------------------------------
@ -13,7 +13,7 @@
#include "nodes/plannodes.h"
#include "nodes/relation.h"
extern PlannedStmt *multi_planner(Query *parse, int cursorOptions,
extern PlannedStmt * multi_planner(Query *parse, int cursorOptions,
ParamListInfo boundParams);
extern bool HasCitusToplevelNode(PlannedStmt *planStatement);


@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* multi_resowner.h
* CitusDB resource owner integration.
* Citus resource owner integration.
*
* Copyright (c) 2012-2015, Citus Data, Inc.
*-------------------------------------------------------------------------


@ -32,9 +32,9 @@
/* Task tracker executor related defines */
#define TASK_ASSIGNMENT_QUERY "SELECT task_tracker_assign_task \
("UINT64_FORMAT", %u, %s)"
#define TASK_STATUS_QUERY "SELECT task_tracker_task_status("UINT64_FORMAT", %u)"
#define JOB_CLEANUP_QUERY "SELECT task_tracker_cleanup_job("UINT64_FORMAT")"
("UINT64_FORMAT ", %u, %s)"
#define TASK_STATUS_QUERY "SELECT task_tracker_task_status("UINT64_FORMAT ", %u)"
#define JOB_CLEANUP_QUERY "SELECT task_tracker_cleanup_job("UINT64_FORMAT ")"
#define JOB_CLEANUP_TASK_ID INT_MAX
@ -60,7 +60,6 @@ typedef enum
EXEC_TASK_TRACKER_FAILED = 14,
EXEC_SOURCE_TASK_TRACKER_RETRY = 15,
EXEC_SOURCE_TASK_TRACKER_FAILED = 16
} TaskExecStatus;
@ -74,7 +73,6 @@ typedef enum
EXEC_TRANSMIT_TRACKER_RETRY = 4,
EXEC_TRANSMIT_TRACKER_FAILED = 5,
EXEC_TRANSMIT_DONE = 6
} TransmitExecStatus;
@ -86,7 +84,6 @@ typedef enum
TRACKER_CONNECT_POLL = 2,
TRACKER_CONNECTED = 3,
TRACKER_CONNECTION_FAILED = 4
} TrackerStatus;
@ -97,7 +94,6 @@ typedef enum
MULTI_EXECUTOR_REAL_TIME = 1,
MULTI_EXECUTOR_TASK_TRACKER = 2,
MULTI_EXECUTOR_ROUTER = 3
} MultiExecutorType;
@ -107,7 +103,6 @@ typedef enum
CONNECT_ACTION_NONE = 0,
CONNECT_ACTION_OPENED = 1,
CONNECT_ACTION_CLOSED = 2
} ConnectAction;
@ -132,7 +127,6 @@ struct TaskExecution
uint32 querySourceNodeIndex; /* only applies to map fetch tasks */
int32 dataFetchTaskIndex;
uint32 failureCount;
};
@ -147,7 +141,6 @@ typedef struct TrackerTaskState
uint32 taskId;
TaskStatus status;
StringInfo taskAssignmentQuery;
} TrackerTaskState;
@ -171,7 +164,6 @@ typedef struct TaskTracker
int32 currentTaskIndex;
bool connectionBusy;
TrackerTaskState *connectionBusyOnTask;
} TaskTracker;
@ -184,7 +176,6 @@ typedef struct WorkerNodeState
uint32 workerPort;
char workerName[WORKER_LENGTH];
uint32 openConnectionCount;
} WorkerNodeState;


@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* multi_utility.h
* CitusDB utility hook and related functionality.
* Citus utility hook and related functionality.
*
* Copyright (c) 2012-2015, Citus Data, Inc.
*-------------------------------------------------------------------------


@ -36,7 +36,6 @@ typedef enum
FILE_CACHED = 2,
FILE_INACTIVE = 3,
FILE_TO_DELETE = 4
} RelayFileState;


@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* resource_lock.h
* Locking Infrastructure for CitusDB.
* Locking Infrastructure for Citus.
*
* Copyright (c) 2012-2015, Citus Data, Inc.
*-------------------------------------------------------------------------
@ -21,7 +21,7 @@
* advisory locks. Only 1 and 2 are used allowing us to define non-conflicting
* lock methods.
*
* In case postgres starts to use additional values, CitusDB's values
* In case postgres starts to use additional values, Citus's values
* will have to be changed. That just requires re-compiling and a restart.
*/
typedef enum AdvisoryLocktagClass
@ -29,7 +29,8 @@ typedef enum AdvisoryLocktagClass
/* values defined in postgres' lockfuncs.c */
ADV_LOCKTAG_CLASS_INT64 = 1,
ADV_LOCKTAG_CLASS_INT32 = 2,
/* CitusDB lock types */
/* Citus lock types */
ADV_LOCKTAG_CLASS_CITUS_SHARD_METADATA = 4,
ADV_LOCKTAG_CLASS_CITUS_SHARD = 5,
ADV_LOCKTAG_CLASS_CITUS_JOB = 6


@ -63,7 +63,6 @@ typedef enum
* TASK_STATUS_LAST, should never have their numbers changed.
*/
TASK_STATUS_LAST
} TaskStatus;
@ -85,7 +84,6 @@ typedef struct WorkerTask
char databaseName[NAMEDATALEN]; /* name to use for local backend connection */
int32 connectionId; /* connection id to local backend */
uint32 failureCount; /* number of task failures */
} WorkerTask;
@ -97,6 +95,7 @@ typedef struct WorkerTasksSharedStateData
{
/* Hash table shared by the task tracker and task tracker protocol functions */
HTAB *taskHash;
/* Lock protecting taskHash */
LWLock *taskHashLock;
} WorkerTasksSharedStateData;
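
The struct pairs the shared task hash with the lock that guards it; by convention readers take the lock in shared mode and writers in exclusive mode before touching taskHash. A hedged sketch of the lookup side using the stock dynahash and lwlock primitives (the key argument's layout is an assumption):

/* sketch: look up a task while holding the shared-state lock */
static WorkerTask *
FindWorkerTask(WorkerTasksSharedStateData *sharedState, const void *hashKey)
{
	WorkerTask *workerTask = NULL;
	bool handleFound = false;

	LWLockAcquire(sharedState->taskHashLock, LW_SHARED);
	workerTask = (WorkerTask *) hash_search(sharedState->taskHash, hashKey,
											HASH_FIND, &handleFound);
	LWLockRelease(sharedState->taskHashLock);

	return workerTask;	/* NULL when the task is not in the hash */
}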


@ -10,8 +10,8 @@
*-------------------------------------------------------------------------
*/
#ifndef CITUSDB_TEST_HELPER_FUNCTIONS_H
#define CITUSDB_TEST_HELPER_FUNCTIONS_H
#ifndef CITUS_TEST_HELPER_FUNCTIONS_H
#define CITUS_TEST_HELPER_FUNCTIONS_H
#include "postgres.h"
#include "c.h"
@ -70,4 +70,4 @@ extern Datum prune_using_both_values(PG_FUNCTION_ARGS);
extern Datum debug_equality_expression(PG_FUNCTION_ARGS);
#endif /* CITUSDB_TEST_HELPER_FUNCTIONS_H */
#endif /* CITUS_TEST_HELPER_FUNCTIONS_H */


@ -23,7 +23,7 @@
/* Maximum length of worker port number (represented as string) */
#define MAX_PORT_LENGTH 10
/* default filename for citusdb.worker_list_file */
/* default filename for citus.worker_list_file */
#define WORKER_LIST_FILENAME "pg_worker_list.conf"
/* Implementation specific definitions used in finding worker nodes */
@ -48,7 +48,6 @@ typedef struct WorkerNode
char workerRack[WORKER_LENGTH]; /* node's network location */
bool inWorkerFile; /* is node in current membership file? */
} WorkerNode;


@ -65,7 +65,6 @@ typedef struct RangePartitionContext
FmgrInfo *comparisonFunction;
Datum *splitPointArray;
int32 splitPointCount;
} RangePartitionContext;
@ -77,7 +76,6 @@ typedef struct HashPartitionContext
{
FmgrInfo *hashFunction;
uint32 partitionCount;
} HashPartitionContext;
@ -114,7 +112,6 @@ typedef struct FileOutputStream
File fileDescriptor;
StringInfo fileBuffer;
StringInfo filePath;
} FileOutputStream;


@ -1,9 +1,9 @@
# Makefile for tests of the CitusDB extension
# Makefile for tests of the Citus extension
citusdb_subdir = src/test/regress
citusdb_top_builddir = ../../..
citus_subdir = src/test/regress
citus_top_builddir = ../../..
include $(citusdb_top_builddir)/Makefile.global
include $(citus_top_builddir)/Makefile.global
# ensure MAJORVERSION is defined (missing in older versions)
ifndef MAJORVERSION
@ -11,11 +11,11 @@ MAJORVERSION := $(basename $(VERSION))
endif
##
## CitusDB regression support
## Citus regression support
##
MULTI_INSTALLDIR=$(CURDIR)/tmp_check/install
pg_regress_multi_check = $(PERL) $(citusdb_abs_srcdir)/pg_regress_multi.pl --pgxsdir="$(pgxsdir)" --bindir="$(bindir)" --libdir="$(libdir)" --majorversion="$(MAJORVERSION)"
MULTI_REGRESS_OPTS = --inputdir=$(citusdb_abs_srcdir) $(pg_regress_locale_flags)
pg_regress_multi_check = $(PERL) $(citus_abs_srcdir)/pg_regress_multi.pl --pgxsdir="$(pgxsdir)" --bindir="$(bindir)" --libdir="$(libdir)" --majorversion="$(MAJORVERSION)"
MULTI_REGRESS_OPTS = --inputdir=$(citus_abs_srcdir) $(pg_regress_locale_flags)
# XXX: Can't actually do useful testruns against install - $libdir
# etc will point to the directory configured during postgres'
@ -26,12 +26,12 @@ cleandir-main:
###
tempinstall-main: cleandir-main
#### mkdir -p $(MULTI_INSTALLDIR)
### $(MAKE) DESTDIR=$(MULTI_INSTALLDIR) -C $(citusdb_top_builddir) install > tmp_check/install.log 2>&1
### $(MAKE) DESTDIR=$(MULTI_INSTALLDIR) -C $(citus_top_builddir) install > tmp_check/install.log 2>&1
# Test input and expected files. These are created by pg_regress itself, so we
# don't have a rule to create them. We do need rules to clean them however.
input_files := $(patsubst $(citusdb_abs_srcdir)/input/%.source,sql/%.sql, $(wildcard $(citusdb_abs_srcdir)/input/*.source))
output_files := $(patsubst $(citusdb_abs_srcdir)/output/%.source,expected/%.out, $(wildcard $(citusdb_abs_srcdir)/output/*.source))
input_files := $(patsubst $(citus_abs_srcdir)/input/%.source,sql/%.sql, $(wildcard $(citus_abs_srcdir)/input/*.source))
output_files := $(patsubst $(citus_abs_srcdir)/output/%.source,expected/%.out, $(wildcard $(citus_abs_srcdir)/output/*.source))
# have make check actually run all tests, but keep check-full as an
# intermediate, for muscle memory backward compatibility.
@ -42,32 +42,32 @@ check-full: check-multi check-multi-task-tracker check-multi-binary check-worker
# using pg_regress_multi_check unnecessarily starts up multiple nodes, which isn't needed
# for check-worker. But that's harmless besides a few cycles.
check-worker: all
$(pg_regress_multi_check) --load-extension=citusdb \
-- $(MULTI_REGRESS_OPTS) --schedule=$(citusdb_abs_srcdir)/worker_schedule $(EXTRA_TESTS)
$(pg_regress_multi_check) --load-extension=citus \
-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/worker_schedule $(EXTRA_TESTS)
check-multi: all tempinstall-main
$(pg_regress_multi_check) --load-extension=citusdb \
-- $(MULTI_REGRESS_OPTS) --schedule=$(citusdb_abs_srcdir)/multi_schedule $(EXTRA_TESTS)
$(pg_regress_multi_check) --load-extension=citus \
-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_schedule $(EXTRA_TESTS)
check-multi-fdw: all tempinstall-main
$(pg_regress_multi_check) --load-extension=citusdb --load-extension=file_fdw -- \
$(MULTI_REGRESS_OPTS) --schedule=$(citusdb_abs_srcdir)/multi_fdw_schedule $(EXTRA_TESTS)
$(pg_regress_multi_check) --load-extension=citus --load-extension=file_fdw -- \
$(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_fdw_schedule $(EXTRA_TESTS)
check-multi-hll: all tempinstall-main
$(pg_regress_multi_check) --load-extension=citusdb --load-extension=hll -- \
$(pg_regress_multi_check) --load-extension=citus --load-extension=hll -- \
$(MULTI_REGRESS_OPTS) $(EXTRA_TESTS) multi_create_table multi_master_protocol multi_stage_data multi_agg_approximate_distinct
check-multi-task-tracker: all tempinstall-main
$(pg_regress_multi_check) --load-extension=citusdb \
--server-option=citusdb.task_executor_type=task-tracker \
--server-option=citusdb.task_tracker_delay=50ms \
--server-option=citusdb.large_table_shard_count=1 \
-- $(MULTI_REGRESS_OPTS) --schedule=$(citusdb_abs_srcdir)/multi_schedule $(EXTRA_TESTS)
$(pg_regress_multi_check) --load-extension=citus \
--server-option=citus.task_executor_type=task-tracker \
--server-option=citus.task_tracker_delay=50ms \
--server-option=citus.large_table_shard_count=1 \
-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_schedule $(EXTRA_TESTS)
check-multi-binary: all tempinstall-main
$(pg_regress_multi_check) --load-extension=citusdb \
--server-option=citusdb.binary_worker_copy_format=on \
-- $(MULTI_REGRESS_OPTS) --schedule=$(citusdb_abs_srcdir)/multi_schedule $(EXTRA_TESTS)
$(pg_regress_multi_check) --load-extension=citus \
--server-option=citus.binary_worker_copy_format=on \
-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_schedule $(EXTRA_TESTS)
clean distclean maintainer-clean:
rm -f $(output_files) $(input_files)


@ -7,14 +7,14 @@ ERROR: cannot compute aggregate (distinct)
DETAIL: table partitioning is unsuitable for aggregate (distinct)
HINT: You can load the hll extension from contrib packages and enable distinct approximations.
-- Check approximate count(distinct) at different precisions / error rates
SET citusdb.count_distinct_error_rate = 0.1;
SET citus.count_distinct_error_rate = 0.1;
SELECT count(distinct l_orderkey) FROM lineitem;
count
-------
2612
(1 row)
SET citusdb.count_distinct_error_rate = 0.01;
SET citus.count_distinct_error_rate = 0.01;
SELECT count(distinct l_orderkey) FROM lineitem;
count
-------
@ -102,7 +102,7 @@ SELECT count(DISTINCT l_orderkey) as distinct_order_count, l_quantity FROM linei
-- If we have an order by on count(distinct) that we intend to push down to
-- worker nodes, we need to error out. Otherwise, we are fine.
SET citusdb.limit_clause_row_fetch_count = 1000;
SET citus.limit_clause_row_fetch_count = 1000;
SELECT l_returnflag, count(DISTINCT l_shipdate) as count_distinct, count(*) as total
FROM lineitem
GROUP BY l_returnflag
@ -123,7 +123,7 @@ SELECT l_returnflag, count(DISTINCT l_shipdate) as count_distinct, count(*) as t
(3 rows)
-- Check that we can revert config and disable count(distinct) approximations
SET citusdb.count_distinct_error_rate = 0.0;
SET citus.count_distinct_error_rate = 0.0;
SELECT count(distinct l_orderkey) FROM lineitem;
ERROR: cannot compute aggregate (distinct)
DETAIL: table partitioning is unsuitable for aggregate (distinct)


@ -96,10 +96,10 @@ SELECT l_quantity, count(*), avg(l_extendedprice), array_agg(l_orderkey) FROM li
GROUP BY l_quantity ORDER BY l_quantity;
l_quantity | count | avg | array_agg
------------+-------+-----------------------+--------------------------------------------------------------------------------------------------
1.00 | 17 | 1477.1258823529411765 | {5543,5633,5634,5698,5766,5856,5857,5986,8997,9026,9158,9184,9220,9222,9348,9383,9476}
2.00 | 19 | 3078.4242105263157895 | {5506,5540,5573,5669,5703,5730,5798,5831,5893,5920,5923,9030,9058,9123,9124,9188,9344,9441,9476}
3.00 | 14 | 4714.0392857142857143 | {5509,5543,5605,5606,5827,9124,9157,9184,9223,9254,9349,9414,9475,9477}
4.00 | 19 | 5929.7136842105263158 | {5504,5507,5508,5511,5538,5764,5766,5826,5829,5862,5959,5985,9091,9120,9281,9347,9382,9440,9473}
1.00 | 17 | 1477.1258823529411765 | {8997,9026,9158,9184,9220,9222,9348,9383,9476,5543,5633,5634,5698,5766,5856,5857,5986}
2.00 | 19 | 3078.4242105263157895 | {9030,9058,9123,9124,9188,9344,9441,9476,5506,5540,5573,5669,5703,5730,5798,5831,5893,5920,5923}
3.00 | 14 | 4714.0392857142857143 | {9124,9157,9184,9223,9254,9349,9414,9475,9477,5509,5543,5605,5606,5827}
4.00 | 19 | 5929.7136842105263158 | {9091,9120,9281,9347,9382,9440,9473,5504,5507,5508,5511,5538,5764,5766,5826,5829,5862,5959,5985}
(4 rows)
SELECT l_quantity, array_agg(extract (month FROM o_orderdate)) AS my_month
@ -107,10 +107,10 @@ SELECT l_quantity, array_agg(extract (month FROM o_orderdate)) AS my_month
AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity;
l_quantity | my_month
------------+------------------------------------------------
1.00 | {9,5,7,5,9,11,11,4,7,7,4,7,4,2,6,3,5}
2.00 | {11,10,8,5,5,12,3,11,7,11,5,7,6,6,10,1,12,6,5}
3.00 | {4,9,8,11,7,10,6,7,8,5,8,9,11,3}
4.00 | {1,5,6,11,12,10,9,6,1,2,5,1,11,6,2,8,2,6,10}
1.00 | {7,7,4,7,4,2,6,3,5,9,5,7,5,9,11,11,4}
2.00 | {7,6,6,10,1,12,6,5,11,10,8,5,5,12,3,11,7,11,5}
3.00 | {10,6,7,8,5,8,9,11,3,4,9,8,11,7}
4.00 | {11,6,2,8,2,6,10,1,5,6,11,12,10,9,6,1,2,5,1}
(4 rows)
SELECT l_quantity, array_agg(l_orderkey * 2 + 1) FROM lineitem WHERE l_quantity < 5
@ -118,10 +118,10 @@ SELECT l_quantity, array_agg(l_orderkey * 2 + 1) FROM lineitem WHERE l_quantity
AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity;
l_quantity | array_agg
------------+---------------------------------------------
1.00 | {11269,11397,11713,11715,11973,18317,18445}
2.00 | {11847,18061,18247,18953}
1.00 | {18317,18445,11269,11397,11713,11715,11973}
2.00 | {18061,18247,18953,11847}
3.00 | {18249,18315,18699,18951,18955}
4.00 | {11653,11659,18241,18765}
4.00 | {18241,18765,11653,11659}
(4 rows)
-- Check that we can execute array_agg() with an expression containing NULL values


@ -2,8 +2,8 @@
-- MULTI_BINARY_MASTER_COPY
--
-- Try binary master copy for different executors
SET citusdb.binary_master_copy_format TO 'on';
SET citusdb.task_executor_type TO 'task-tracker';
SET citus.binary_master_copy_format TO 'on';
SET citus.task_executor_type TO 'task-tracker';
SELECT count(*) FROM lineitem;
count
-------
@ -17,7 +17,7 @@ SELECT l_shipmode FROM lineitem WHERE l_partkey = 67310 OR l_partkey = 155190;
MAIL
(2 rows)
SET citusdb.task_executor_type TO 'real-time';
SET citus.task_executor_type TO 'real-time';
SELECT count(*) FROM lineitem;
count
-------


@ -3,19 +3,19 @@
-- ===================================================================
CREATE FUNCTION initialize_remote_temp_table(cstring, integer)
RETURNS bool
AS 'citusdb'
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION count_remote_temp_table_rows(cstring, integer)
RETURNS integer
AS 'citusdb'
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION get_and_purge_connection(cstring, integer)
RETURNS bool
AS 'citusdb'
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION set_connection_status_bad(cstring, integer)
RETURNS bool
AS 'citusdb'
AS 'citus'
LANGUAGE C STRICT;
-- ===================================================================
-- test connection hash functionality


@ -4,7 +4,7 @@
-- create fake fdw for use in tests
CREATE FUNCTION fake_fdw_handler()
RETURNS fdw_handler
AS 'citusdb'
AS 'citus'
LANGUAGE C STRICT;
CREATE FOREIGN DATA WRAPPER fake_fdw HANDLER fake_fdw_handler;
CREATE SERVER fake_fdw_server FOREIGN DATA WRAPPER fake_fdw;


@ -9,8 +9,8 @@ CREATE SCHEMA "A$AP Mob"
id bigint PRIMARY KEY,
data text NOT NULL DEFAULT 'lorem ipsum'
);
NOTICE: CitusDB partially supports CREATE SCHEMA for distributed databases
DETAIL: schema usage in joins and in some UDFs provided by CitusDB are not supported yet
NOTICE: Citus partially supports CREATE SCHEMA for distributed databases
DETAIL: schema usage in joins and in some UDFs provided by Citus are not supported yet
\set insert_target '"A$AP Mob"."Dr. Bronner''s ""Magic"" Soaps"'
-- create proxy and save proxy table name
SELECT create_insert_proxy_for_table(:'insert_target') AS proxy_tablename


@ -3,7 +3,7 @@
-- ===================================================================
CREATE FUNCTION sort_names(cstring, cstring, cstring)
RETURNS cstring
AS 'citusdb'
AS 'citus'
LANGUAGE C STRICT;
-- create a custom type...
CREATE TYPE dummy_type AS (
@ -47,7 +47,7 @@ SELECT master_create_distributed_table('table_to_distribute', 'bad_column', 'has
ERROR: column "bad_column" of relation "table_to_distribute" does not exist
-- use unrecognized partition type
SELECT master_create_distributed_table('table_to_distribute', 'name', 'unrecognized');
ERROR: invalid input value for enum citusdb.distribution_type: "unrecognized"
ERROR: invalid input value for enum citus.distribution_type: "unrecognized"
LINE 1: ..._distributed_table('table_to_distribute', 'name', 'unrecogni...
^
-- use a partition column of a type lacking any default operator class
@ -209,7 +209,7 @@ SELECT master_create_worker_shards('weird_shard_count', 7, 1);
(1 row)
-- CitusDB ensures all shards are roughly the same size
-- Citus ensures all shards are roughly the same size
SELECT shardmaxvalue::integer - shardminvalue::integer AS shard_size
FROM pg_dist_shard
WHERE logicalrelid = 'weird_shard_count'::regclass


@ -109,7 +109,7 @@ SELECT master_create_distributed_table('supplier', 's_suppkey', 'append');
(1 row)
-- now test that CitusDB cannot distribute unique constraints that do not include
-- now test that Citus cannot distribute unique constraints that do not include
-- the partition column
CREATE TABLE primary_key_on_non_part_col
(
@ -127,7 +127,7 @@ CREATE TABLE unique_const_on_non_part_col
SELECT master_create_distributed_table('primary_key_on_non_part_col', 'partition_col', 'hash');
ERROR: cannot distribute relation: "primary_key_on_non_part_col"
DETAIL: Distributed relations cannot have UNIQUE constraints or PRIMARY KEYs that do not include the partition column.
-- now show that CitusDB can distribute unique constraints that include
-- now show that Citus can distribute unique constraints that include
-- the partition column
CREATE TABLE primary_key_on_part_col
(


@ -121,7 +121,7 @@ CREATE TABLE varchar_hash_partitioned_table
id int,
name varchar
);
SELECT master_create_distributed_table('varchar_hash_partitioned_table', 'id', 'hash');
SELECT master_create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash');
master_create_distributed_table
---------------------------------
@ -139,16 +139,16 @@ INSERT INTO varchar_hash_partitioned_table VALUES (2, 'Ozgun');
INSERT INTO varchar_hash_partitioned_table VALUES (3, 'Onder');
INSERT INTO varchar_hash_partitioned_table VALUES (4, 'Sumedh');
INSERT INTO varchar_hash_partitioned_table VALUES (5, 'Marco');
SELECT * FROM varchar_hash_partitioned_table WHERE name = 'Onder';
SELECT * FROM varchar_hash_partitioned_table WHERE id = 1;
id | name
----+-------
3 | Onder
1 | Jason
(1 row)
UPDATE varchar_hash_partitioned_table SET name = 'Samay' WHERE id = 5;
SELECT * FROM varchar_hash_partitioned_table WHERE name = 'Samay';
UPDATE varchar_hash_partitioned_table SET id = 6 WHERE name = 'Jason';
SELECT * FROM varchar_hash_partitioned_table WHERE id = 6;
id | name
----+-------
5 | Samay
6 | Jason
(1 row)


@ -3,55 +3,55 @@
-- ===================================================================
CREATE FUNCTION load_shard_id_array(regclass)
RETURNS bigint[]
AS 'citusdb'
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION load_shard_interval_array(bigint, anyelement)
RETURNS anyarray
AS 'citusdb'
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION load_shard_placement_array(bigint, bool)
RETURNS text[]
AS 'citusdb'
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION partition_column_id(regclass)
RETURNS smallint
AS 'citusdb'
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION partition_type(regclass)
RETURNS "char"
AS 'citusdb'
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION is_distributed_table(regclass)
RETURNS boolean
AS 'citusdb'
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION column_name_to_column_id(regclass, cstring)
RETURNS smallint
AS 'citusdb'
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION create_monolithic_shard_row(regclass)
RETURNS bigint
AS 'citusdb'
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION create_healthy_local_shard_placement_row(bigint)
RETURNS void
AS 'citusdb'
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION delete_shard_placement_row(bigint, text, bigint)
RETURNS bool
AS 'citusdb'
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION update_shard_placement_row_state(bigint, text, bigint, int)
RETURNS bool
AS 'citusdb'
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION acquire_shared_shard_lock(bigint)
RETURNS void
AS 'citusdb'
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION column_name_to_column(regclass, text)
RETURNS text
AS 'citusdb'
AS 'citus'
LANGUAGE C STRICT;
-- ===================================================================
-- test distribution metadata functionality

Some files were not shown because too many files have changed in this diff.