mirror of https://github.com/citusdata/citus.git
Merge branch 'main' into create_alter_database
commit bb76a9b4b9
@@ -68,7 +68,7 @@ USER citus
# build postgres versions separately for effective parrallelism and caching of already built versions when changing only certain versions
FROM base AS pg14
RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.9
RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.10
RUN rm .pgenv/src/*.tar*
RUN make -C .pgenv/src/postgresql-*/ clean
RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -80,7 +80,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
RUN rm .pgenv-staging/config/default.conf
FROM base AS pg15
RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.4
RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.5
RUN rm .pgenv/src/*.tar*
RUN make -C .pgenv/src/postgresql-*/ clean
RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -92,7 +92,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
RUN rm .pgenv-staging/config/default.conf
FROM base AS pg16
RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.0
RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.1
RUN rm .pgenv/src/*.tar*
RUN make -C .pgenv/src/postgresql-*/ clean
RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -210,7 +210,7 @@ COPY --chown=citus:citus .psqlrc .
RUN sudo chown --from=root:root citus:citus -R ~
# sets default pg version
RUN pgenv switch 16.0
RUN pgenv switch 16.1
# make connecting to the coordinator easy
ENV PGPORT=9700

@@ -31,11 +31,11 @@ jobs:
pgupgrade_image_name: "citus/pgupgradetester"
style_checker_image_name: "citus/stylechecker"
style_checker_tools_version: "0.8.18"
image_suffix: "-v9d71045"
pg14_version: '{ "major": "14", "full": "14.9" }'
pg15_version: '{ "major": "15", "full": "15.4" }'
pg16_version: '{ "major": "16", "full": "16.0" }'
upgrade_pg_versions: "14.9-15.4-16.0"
image_suffix: "-vbd8441d"
pg14_version: '{ "major": "14", "full": "14.10" }'
pg15_version: '{ "major": "15", "full": "15.5" }'
pg16_version: '{ "major": "16", "full": "16.1" }'
upgrade_pg_versions: "14.10-15.5-16.1"
steps:
# Since GHA jobs needs at least one step we use a noop step here.
- name: Set up parameters

@@ -135,20 +135,10 @@ DEPS = {
),
"alter_role_propagation": TestDeps("minimal_schedule"),
"background_rebalance": TestDeps(
None,
[
"multi_test_helpers",
"multi_cluster_management",
],
worker_count=3,
None, ["multi_test_helpers", "multi_cluster_management"], worker_count=3
),
"background_rebalance_parallel": TestDeps(
None,
[
"multi_test_helpers",
"multi_cluster_management",
],
worker_count=6,
None, ["multi_test_helpers", "multi_cluster_management"], worker_count=6
),
"function_propagation": TestDeps("minimal_schedule"),
"citus_shards": TestDeps("minimal_schedule"),
@@ -165,30 +155,17 @@ DEPS = {
),
"schema_based_sharding": TestDeps("minimal_schedule"),
"multi_sequence_default": TestDeps(
None,
[
"multi_test_helpers",
"multi_cluster_management",
"multi_table_ddl",
],
None, ["multi_test_helpers", "multi_cluster_management", "multi_table_ddl"]
),
"grant_on_schema_propagation": TestDeps("minimal_schedule"),
"propagate_extension_commands": TestDeps("minimal_schedule"),
"multi_size_queries": TestDeps("base_schedule", ["multi_copy"]),
"multi_mx_node_metadata": TestDeps(
None,
[
"multi_extension",
"multi_test_helpers",
"multi_test_helpers_superuser",
],
None, ["multi_extension", "multi_test_helpers", "multi_test_helpers_superuser"]
),
"multi_mx_function_table_reference": TestDeps(
None,
[
"multi_cluster_management",
"remove_coordinator_from_metadata",
],
["multi_cluster_management", "remove_coordinator_from_metadata"],
# because it queries node group id and it changes as we add / remove nodes
repeatable=False,
),
@@ -201,16 +178,27 @@ DEPS = {
],
),
"metadata_sync_helpers": TestDeps(
None,
[
"multi_mx_node_metadata",
"multi_cluster_management",
],
None, ["multi_mx_node_metadata", "multi_cluster_management"]
),
"multi_utilities": TestDeps(
"multi_utilities": TestDeps("minimal_schedule", ["multi_data_types"]),
"multi_tenant_isolation_nonblocking": TestDeps(
"minimal_schedule", ["multi_data_types", "remove_coordinator_from_metadata"]
),
"remove_non_default_nodes": TestDeps(
None, ["multi_mx_node_metadata", "multi_cluster_management"], repeatable=False
),
"citus_split_shard_columnar_partitioned": TestDeps(
"minimal_schedule", ["remove_coordinator_from_metadata"]
),
"add_coordinator": TestDeps(
"minimal_schedule", ["remove_coordinator_from_metadata"], repeatable=False
),
"multi_multiuser_auth": TestDeps(
"minimal_schedule",
["multi_data_types"],
["multi_create_table", "multi_create_users", "multi_multiuser_load_data"],
repeatable=False,
),
"multi_prepare_plsql": TestDeps("base_schedule"),
}

@@ -303,9 +291,13 @@ def run_schedule_with_multiregress(test_name, schedule, dependencies, args):
worker_count = needed_worker_count(test_name, dependencies)

# find suitable make recipe
if dependencies.schedule == "base_isolation_schedule" or "isolation" in test_name:
if dependencies.schedule == "base_isolation_schedule" or test_name.startswith(
"isolation"
):
make_recipe = "check-isolation-custom-schedule"
elif dependencies.schedule == "failure_base_schedule" or "failure" in test_name:
elif dependencies.schedule == "failure_base_schedule" or test_name.startswith(
"failure"
):
make_recipe = "check-failure-custom-schedule"
else:
make_recipe = "check-custom-schedule"
@@ -418,10 +410,7 @@ def test_dependencies(test_name, test_schedule, schedule_line, args):
if "upgrade_columnar_before" not in before_tests:
before_tests.append("upgrade_columnar_before")

return TestDeps(
default_base_schedule(test_schedule, args),
before_tests,
)
return TestDeps(default_base_schedule(test_schedule, args), before_tests)

# before_ tests leave stuff around on purpose for the after tests. So they
# are not repeatable by definition.

@@ -2,13 +2,6 @@
-- ADD_COORDINATOR
--
-- node trying to add itself without specifying groupid => 0 should error out
-- first remove the coordinator to for testing master_add_node for coordinator
SELECT master_remove_node('localhost', :master_port);
master_remove_node
---------------------------------------------------------------------

(1 row)

SELECT master_add_node('localhost', :master_port);
ERROR: Node cannot add itself as a worker.
HINT: Add the node as a coordinator by using: SELECT citus_set_coordinator_host('localhost', 57636);

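For readers of the error above: a node cannot register itself through master_add_node unless it is marked as the coordinator. The two forms the test suite relies on are sketched below, assuming the usual regression-test psql variables; this is an illustration, not part of the commit:

    -- what the HINT suggests: record this node as the coordinator
    SELECT citus_set_coordinator_host('localhost', :master_port);
    -- or the explicit-group form used later in this same diff
    SELECT master_add_node('localhost', :master_port, groupid => 0);
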
@@ -135,4 +135,10 @@ NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to table citus_split_shard_by_split_points_negative.range_paritioned_table_to_split
drop cascades to table citus_split_shard_by_split_points_negative.table_to_split
drop cascades to table citus_split_shard_by_split_points_negative.table_to_split_replication_factor_2
SELECT public.wait_for_resource_cleanup();
wait_for_resource_cleanup
---------------------------------------------------------------------

(1 row)

--END : Cleanup

@@ -12,19 +12,9 @@
\set bob_worker_1_pw triplex-royalty-warranty-stand-cheek
\set bob_worker_2_pw omnibus-plectrum-comet-sneezy-ensile
\set bob_fallback_pw :bob_worker_1_pw
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
worker_1_id
---------------------------------------------------------------------
17
(1 row)

SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
\gset
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
worker_2_id
---------------------------------------------------------------------
35
(1 row)

SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
\gset
-- alice is a superuser so she can update own password
CREATE USER alice PASSWORD :'alice_master_pw' SUPERUSER;

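For context on the \gset change above: \gset is a psql metacommand that sends the query buffer and stores each column of the single-row result into a psql variable named after the column, without printing a result set, which is why the worker_1_id / worker_2_id result blocks disappear from the expected output. A minimal standalone illustration (not part of the commit, runnable in any psql session):

    -- store the column "answer" into the psql variable :answer; nothing is printed
    SELECT 41 + 1 AS answer \gset
    -- the variable can then be interpolated into later statements
    \echo :answer
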
@@ -6,19 +6,9 @@
-- Test of ability to override host/port for a node
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 20000000;
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
worker_1_id
---------------------------------------------------------------------
17
(1 row)

SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
\gset
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
worker_2_id
---------------------------------------------------------------------
35
(1 row)

SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
\gset
CREATE TABLE lotsa_connections (id integer, name text);
SELECT create_distributed_table('lotsa_connections', 'id');

@@ -1317,11 +1317,11 @@ SELECT type_ddl_plpgsql();
(1 row)

-- find all renamed types to verify the schema name didn't leak, nor a crash happened
SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup';
SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup' ORDER BY 1;
nspname | typname
---------------------------------------------------------------------
public | prepare_ddl_type_backup
otherschema | prepare_ddl_type_backup
public | prepare_ddl_type_backup
(2 rows)

DROP TYPE prepare_ddl_type_backup;
@@ -1332,6 +1332,7 @@ DROP FUNCTION ddl_in_plpgsql();
DROP FUNCTION copy_in_plpgsql();
DROP TABLE prepare_ddl;
DROP TABLE local_ddl;
DROP TABLE plpgsql_table;
DROP SCHEMA otherschema;
-- clean-up functions
DROP FUNCTION plpgsql_test_1();

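A side note on the ORDER BY 1 added above (illustration only, not part of the commit): regression expected files are compared byte for byte, and without an ORDER BY the two matching rows (otherschema and public) may come back in either order. The same idea in a self-contained sketch, using a throwaway table name nsp_demo:

    -- without ORDER BY, the row order of a plain scan is not guaranteed;
    -- sorting on the first output column makes the output deterministic
    CREATE TEMP TABLE nsp_demo (nspname text);
    INSERT INTO nsp_demo VALUES ('public'), ('otherschema');
    SELECT nspname FROM nsp_demo ORDER BY 1;  -- always: otherschema, then public
    DROP TABLE nsp_demo;
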
@@ -1275,8 +1275,9 @@ SELECT count(*) FROM pg_catalog.pg_dist_partition WHERE colocationid > 0;
TRUNCATE TABLE pg_catalog.pg_dist_colocation;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100;
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;
SELECT citus_set_coordinator_host('localhost');
citus_set_coordinator_host
-- make sure we don't have any replication objects leftover on the nodes
SELECT public.wait_for_resource_cleanup();
wait_for_resource_cleanup
---------------------------------------------------------------------

(1 row)

@@ -0,0 +1,13 @@
-- The default nodes for the citus test suite are coordinator and 2 worker nodes
-- Which we identify with master_port, worker_1_port, worker_2_port.
-- When needed in some tests, GetLocalNodeId() does not behave correctly,
-- So we remove the non default nodes. This tests expects the non default nodes
-- to not have any active placements.
SELECT any_value(citus_remove_node('localhost', nodeport))
FROM pg_dist_node
WHERE nodeport NOT IN (:master_port, :worker_1_port, :worker_2_port);
any_value
---------------------------------------------------------------------

(1 row)

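A note on the pattern in the new remove_non_default_nodes test above (an aside, not part of the commit): wrapping citus_remove_node in the any_value aggregate lets one statement remove every matching node while still returning exactly one row, whether zero or many rows of pg_dist_node match, so the expected output stays stable no matter how many extra nodes earlier tests registered. any_value is a built-in aggregate from PostgreSQL 16 onward (older servers need it provided separately); the sketch below only illustrates the one-row property and assumes the regression harness's psql variables:

    -- read-only variant of the same shape: one output row regardless of match count
    SELECT any_value(nodeport)
    FROM pg_dist_node
    WHERE nodeport NOT IN (:master_port, :worker_1_port, :worker_2_port);
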
@@ -3,43 +3,6 @@ SET search_path TO worker_split_binary_copy_test;
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 81060000;
-- Remove extra nodes added, otherwise GetLocalNodeId() does not bahave correctly.
SELECT citus_remove_node('localhost', 8887);
citus_remove_node
---------------------------------------------------------------------

(1 row)

SELECT citus_remove_node('localhost', 9995);
citus_remove_node
---------------------------------------------------------------------

(1 row)

SELECT citus_remove_node('localhost', 9992);
citus_remove_node
---------------------------------------------------------------------

(1 row)

SELECT citus_remove_node('localhost', 9998);
citus_remove_node
---------------------------------------------------------------------

(1 row)

SELECT citus_remove_node('localhost', 9997);
citus_remove_node
---------------------------------------------------------------------

(1 row)

SELECT citus_remove_node('localhost', 8888);
citus_remove_node
---------------------------------------------------------------------

(1 row)

-- BEGIN: Create distributed table and insert data.
CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy (
l_orderkey bigint not null,

@@ -299,6 +299,7 @@ test: multi_foreign_key_relation_graph
# Replicating reference tables to coordinator. Add coordinator to pg_dist_node
# and rerun some of the tests.
# --------
test: remove_coordinator_from_metadata
test: add_coordinator
test: replicate_reference_tables_to_coordinator
test: citus_local_tables

@@ -83,7 +83,8 @@ test: forcedelegation_functions
# this should be run alone as it gets too many clients
test: join_pushdown
test: multi_subquery_union multi_subquery_in_where_clause multi_subquery_misc statement_cancel_error_message
test: multi_agg_distinct multi_limit_clause_approximate multi_outer_join_reference multi_single_relation_subquery multi_prepare_plsql set_role_in_transaction
test: multi_agg_distinct
test: multi_limit_clause_approximate multi_outer_join_reference multi_single_relation_subquery multi_prepare_plsql set_role_in_transaction
test: multi_reference_table multi_select_for_update relation_access_tracking pg13_with_ties
test: custom_aggregate_support aggregate_support tdigest_aggregate_support
test: multi_average_expression multi_working_columns multi_having_pushdown having_subquery

@@ -10,6 +10,7 @@ test: foreign_key_to_reference_table
# Split tests go here.
test: split_shard
test: worker_split_copy_test
test: remove_non_default_nodes
test: worker_split_binary_copy_test
test: worker_split_text_copy_test
test: citus_split_shard_by_split_points_negative

@@ -3,8 +3,6 @@
--

-- node trying to add itself without specifying groupid => 0 should error out
-- first remove the coordinator to for testing master_add_node for coordinator
SELECT master_remove_node('localhost', :master_port);
SELECT master_add_node('localhost', :master_port);

SELECT master_add_node('localhost', :master_port, groupid => 0) AS master_nodeid \gset

@@ -113,4 +113,5 @@ SELECT citus_split_shard_by_split_points(
--BEGIN : Cleanup
\c - postgres - :master_port
DROP SCHEMA "citus_split_shard_by_split_points_negative" CASCADE;
SELECT public.wait_for_resource_cleanup();
--END : Cleanup

@@ -16,9 +16,9 @@
\set bob_worker_2_pw omnibus-plectrum-comet-sneezy-ensile
\set bob_fallback_pw :bob_worker_1_pw

SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
\gset
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
\gset

-- alice is a superuser so she can update own password

@@ -7,9 +7,9 @@
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 20000000;

SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
\gset
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
\gset

CREATE TABLE lotsa_connections (id integer, name text);

@@ -624,7 +624,7 @@ CREATE TYPE prepare_ddl_type AS (x int, y int);
SELECT type_ddl_plpgsql();

-- find all renamed types to verify the schema name didn't leak, nor a crash happened
SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup';
SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup' ORDER BY 1;

DROP TYPE prepare_ddl_type_backup;
RESET search_path;

@@ -635,6 +635,7 @@ DROP FUNCTION ddl_in_plpgsql();
DROP FUNCTION copy_in_plpgsql();
DROP TABLE prepare_ddl;
DROP TABLE local_ddl;
DROP TABLE plpgsql_table;
DROP SCHEMA otherschema;

-- clean-up functions

@@ -608,5 +608,5 @@ ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100;

ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;

SELECT citus_set_coordinator_host('localhost');

-- make sure we don't have any replication objects leftover on the nodes
SELECT public.wait_for_resource_cleanup();

@@ -0,0 +1,8 @@
-- The default nodes for the citus test suite are coordinator and 2 worker nodes
-- Which we identify with master_port, worker_1_port, worker_2_port.
-- When needed in some tests, GetLocalNodeId() does not behave correctly,
-- So we remove the non default nodes. This tests expects the non default nodes
-- to not have any active placements.
SELECT any_value(citus_remove_node('localhost', nodeport))
FROM pg_dist_node
WHERE nodeport NOT IN (:master_port, :worker_1_port, :worker_2_port);

@@ -4,14 +4,6 @@ SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 81060000;

-- Remove extra nodes added, otherwise GetLocalNodeId() does not bahave correctly.
SELECT citus_remove_node('localhost', 8887);
SELECT citus_remove_node('localhost', 9995);
SELECT citus_remove_node('localhost', 9992);
SELECT citus_remove_node('localhost', 9998);
SELECT citus_remove_node('localhost', 9997);
SELECT citus_remove_node('localhost', 8888);

-- BEGIN: Create distributed table and insert data.
CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy (
l_orderkey bigint not null,