From 71d99b72ce065d192c83b518673187bfaa805eba Mon Sep 17 00:00:00 2001 From: Eren Basak Date: Fri, 14 Oct 2016 16:57:38 +0300 Subject: [PATCH 1/3] Add support for proper valgrind tests This change allows valgrind tests (`make check-multi-vg`) to be run seamlessly without test output errors and timeout problems. --- src/test/regress/Makefile | 2 +- src/test/regress/input/multi_load_data.source | 2 ++ .../regress/input/multi_load_more_data.source | 2 ++ .../regress/output/multi_load_data.source | 3 ++ .../output/multi_load_more_data.source | 3 ++ src/test/regress/pg_regress_multi.pl | 36 ++++++++++++------- 6 files changed, 34 insertions(+), 14 deletions(-) diff --git a/src/test/regress/Makefile b/src/test/regress/Makefile index 701afa168..e340e2ddc 100644 --- a/src/test/regress/Makefile +++ b/src/test/regress/Makefile @@ -51,6 +51,7 @@ check-multi: all tempinstall-main check-multi-vg: all tempinstall-main $(pg_regress_multi_check) --load-extension=citus --valgrind \ + --pg_ctl-timeout=360 --connection-timeout=500000 --valgrind-path=valgrind \ -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_schedule $(EXTRA_TESTS) check-isolation: all tempinstall-main @@ -70,7 +71,6 @@ check-multi-task-tracker-extra: all tempinstall-main --server-option=citus.large_table_shard_count=1 \ -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_task_tracker_extra_schedule $(EXTRA_TESTS) - check-multi-binary: all tempinstall-main $(pg_regress_multi_check) --load-extension=citus \ --server-option=citus.binary_worker_copy_format=on \ diff --git a/src/test/regress/input/multi_load_data.source b/src/test/regress/input/multi_load_data.source index a48c51e36..a4f6d064c 100644 --- a/src/test/regress/input/multi_load_data.source +++ b/src/test/regress/input/multi_load_data.source @@ -23,3 +23,5 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000; \copy lineitem_hash_part FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' \copy orders_hash_part FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' \copy orders_hash_part FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' + +VACUUM ANALYZE; diff --git a/src/test/regress/input/multi_load_more_data.source b/src/test/regress/input/multi_load_more_data.source index 1b7f1f117..10070b089 100644 --- a/src/test/regress/input/multi_load_more_data.source +++ b/src/test/regress/input/multi_load_more_data.source @@ -13,3 +13,5 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 280000; \copy customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' \copy customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' \copy part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|' + +VACUUM ANALYZE; diff --git a/src/test/regress/output/multi_load_data.source b/src/test/regress/output/multi_load_data.source index a54306e69..b78dc2fd8 100644 --- a/src/test/regress/output/multi_load_data.source +++ b/src/test/regress/output/multi_load_data.source @@ -19,3 +19,6 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000; \copy lineitem_hash_part FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' \copy orders_hash_part FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' \copy orders_hash_part FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' +VACUUM ANALYZE; +WARNING: not propagating VACUUM command to worker nodes +HINT: Provide a specific table in order to VACUUM distributed tables. 
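For orientation, a rough sketch of how the options added in this patch are exercised follows; it is illustrative only. The real invocation is driven by the pg_regress_multi_check macro in the Makefile above and passes additional arguments (bindir, pgxsdir, MULTI_REGRESS_OPTS, and so on) that are omitted here, and the paths shown are assumptions for the example.

	# From the src/test/regress directory: run the multi-node schedule under
	# valgrind. The new --pg_ctl-timeout and --connection-timeout settings keep
	# pg_ctl and coordinator-to-worker connections from timing out while
	# valgrind slows the servers down.
	make check-multi-vg

	# Roughly equivalent direct call to the harness with the options this patch
	# introduces (simplified; assumes valgrind is on PATH and that the schedule
	# file is resolved relative to the current directory):
	./pg_regress_multi.pl --load-extension=citus --valgrind \
		--valgrind-path=valgrind --pg_ctl-timeout=360 --connection-timeout=500000 \
		-- --schedule=./multi_schedule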
diff --git a/src/test/regress/output/multi_load_more_data.source b/src/test/regress/output/multi_load_more_data.source index b41f68dc8..bef2e29f9 100644 --- a/src/test/regress/output/multi_load_more_data.source +++ b/src/test/regress/output/multi_load_more_data.source @@ -8,3 +8,6 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 280000; \copy customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' \copy customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' \copy part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|' +VACUUM ANALYZE; +WARNING: not propagating VACUUM command to worker nodes +HINT: Provide a specific table in order to VACUUM distributed tables. diff --git a/src/test/regress/pg_regress_multi.pl b/src/test/regress/pg_regress_multi.pl index 4c10cc896..1454d9086 100755 --- a/src/test/regress/pg_regress_multi.pl +++ b/src/test/regress/pg_regress_multi.pl @@ -26,16 +26,19 @@ sub Usage() print " pg_regress_multi [MULTI OPTIONS] -- [PG REGRESS OPTS]\n"; print "\n"; print "Multi Options:\n"; - print " --isolationtester Run isolationtester tests instead of plain tests\n"; - print " --vanillatest Run postgres tests with citus loaded as shared preload library\n"; - print " --bindir Path to postgres binary directory\n"; - print " --libdir Path to postgres library directory\n"; - print " --postgres-builddir Path to postgres build directory\n"; - print " --postgres-srcdir Path to postgres build directory\n"; - print " --pgxsdir Path to the PGXS directory\n"; - print " --load-extension Extensions to install in all nodes\n"; - print " --server-option Config option to pass to the server\n"; - print " --valgrind Run server via valgrind\n"; + print " --isolationtester Run isolationtester tests instead of plain tests\n"; + print " --vanillatest Run postgres tests with citus loaded as shared preload library\n"; + print " --bindir Path to postgres binary directory\n"; + print " --libdir Path to postgres library directory\n"; + print " --postgres-builddir Path to postgres build directory\n"; + print " --postgres-srcdir Path to postgres build directory\n"; + print " --pgxsdir Path to the PGXS directory\n"; + print " --load-extension Extensions to install in all nodes\n"; + print " --server-option Config option to pass to the server\n"; + print " --valgrind Run server via valgrind\n"; + print " --valgrind-path Path to the valgrind executable\n"; + print " --pg_ctl-timeout Timeout for pg_ctl\n"; + print " --connection-timeout Timeout for connecting to worker nodes\n"; exit 1; } @@ -56,6 +59,9 @@ my %fdwServers = (); my %functions = (); my %operators = (); my $valgrind = 0; +my $valgrind_path = "valgrind"; +my $pg_ctl_timeout = undef; +my $connection_timeout = 5000; my $serversAreShutdown = "TRUE"; @@ -71,6 +77,9 @@ GetOptions( 'load-extension=s' => \@extensions, 'server-option=s' => \@userPgOptions, 'valgrind' => \$valgrind, + 'valgrind-path=s' => \$valgrind_path, + 'pg_ctl-timeout=s' => \$pg_ctl_timeout, + 'connection-timeout=s' => \$connection_timeout, 'help' => sub { Usage() }); # Update environment to include [DY]LD_LIBRARY_PATH/LIBDIR/etc - @@ -136,9 +145,9 @@ MESSAGE } # valgrind starts slow, need to increase timeout -if ($valgrind) +if (defined $pg_ctl_timeout) { - $ENV{PGCTLTIMEOUT} = '360'; + $ENV{PGCTLTIMEOUT} = "$pg_ctl_timeout"; } # We don't want valgrind to run pg_ctl itself, as that'd trigger a lot @@ -163,7 +172,7 @@ sub replace_postgres or die "Could not create postgres wrapper at $bindir/postgres"; print $fh <<"END"; #!/bin/bash -exec 
valgrind \\ +exec $valgrind_path \\ --quiet \\ --suppressions=${postgresSrcdir}/src/tools/valgrind.supp \\ --trace-children=yes --track-origins=yes --read-var-info=yes \\ @@ -220,6 +229,7 @@ push(@pgOptions, '-c', "citus.expire_cached_shards=on"); push(@pgOptions, '-c', "citus.task_tracker_delay=10ms"); push(@pgOptions, '-c', "citus.remote_task_check_interval=1ms"); push(@pgOptions, '-c', "citus.shard_replication_factor=2"); +push(@pgOptions, '-c', "citus.node_connection_timeout=${connection_timeout}"); # Add externally added options last, so they overwrite the default ones above for my $option (@userPgOptions) From 9312ef8bcf65ec8e3dd2dd3e192a61518ff5e294 Mon Sep 17 00:00:00 2001 From: Burak Yucesoy Date: Wed, 12 Apr 2017 11:47:13 +0300 Subject: [PATCH 2/3] Stabilize test outputs --- src/test/regress/expected/multi_explain.out | 46 ++++++++----------- src/test/regress/expected/multi_explain_0.out | 46 ++++++++----------- src/test/regress/input/multi_load_data.source | 2 - .../regress/input/multi_load_more_data.source | 2 - .../input/multi_outer_join_reference.source | 3 +- .../regress/output/multi_load_data.source | 3 -- .../output/multi_load_more_data.source | 3 -- .../output/multi_outer_join_reference.source | 23 +++++----- src/test/regress/sql/multi_explain.sql | 4 ++ 9 files changed, 58 insertions(+), 74 deletions(-) diff --git a/src/test/regress/expected/multi_explain.out b/src/test/regress/expected/multi_explain.out index 5fa8ea4a3..e3ae6bfd3 100644 --- a/src/test/regress/expected/multi_explain.out +++ b/src/test/regress/expected/multi_explain.out @@ -34,6 +34,9 @@ BEGIN RETURN result; END; $BODY$ LANGUAGE plpgsql; +-- VACUMM related tables to ensure test outputs are stable +VACUUM ANALYZE lineitem; +VACUUM ANALYZE orders; -- Test Text format EXPLAIN (COSTS FALSE, FORMAT TEXT) SELECT l_quantity, count(*) count_quantity FROM lineitem @@ -287,12 +290,13 @@ Limit -> Limit -> Sort Sort Key: lineitem.l_quantity - -> Hash Join - Hash Cond: (lineitem.l_orderkey = orders.o_orderkey) - -> Seq Scan on lineitem_290001 lineitem - Filter: (l_quantity < 5.0) - -> Hash - -> Seq Scan on orders_290008 orders + -> Merge Join + Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) + -> Index Scan using orders_pkey_290008 on orders_290008 orders + -> Sort + Sort Key: lineitem.l_orderkey + -> Seq Scan on lineitem_290001 lineitem + Filter: (l_quantity < 5.0) -- Test insert EXPLAIN (COSTS FALSE) INSERT INTO lineitem VALUES(1,0); @@ -314,11 +318,9 @@ Custom Scan (Citus Router) -> Task Node: host=localhost port=57638 dbname=regression -> Update on lineitem_290000 - -> Bitmap Heap Scan on lineitem_290000 - Recheck Cond: (l_orderkey = 1) + -> Index Scan using lineitem_pkey_290000 on lineitem_290000 + Index Cond: (l_orderkey = 1) Filter: (l_partkey = 0) - -> Bitmap Index Scan on lineitem_pkey_290000 - Index Cond: (l_orderkey = 1) -- Test delete EXPLAIN (COSTS FALSE) DELETE FROM lineitem @@ -329,11 +331,9 @@ Custom Scan (Citus Router) -> Task Node: host=localhost port=57638 dbname=regression -> Delete on lineitem_290000 - -> Bitmap Heap Scan on lineitem_290000 - Recheck Cond: (l_orderkey = 1) + -> Index Scan using lineitem_pkey_290000 on lineitem_290000 + Index Cond: (l_orderkey = 1) Filter: (l_partkey = 0) - -> Bitmap Index Scan on lineitem_pkey_290000 - Index Cond: (l_orderkey = 1) -- Test single-shard SELECT EXPLAIN (COSTS FALSE) SELECT l_quantity FROM lineitem WHERE l_orderkey = 5; @@ -342,10 +342,8 @@ Custom Scan (Citus Router) Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression - -> 
Bitmap Heap Scan on lineitem_290000 lineitem - Recheck Cond: (l_orderkey = 5) - -> Bitmap Index Scan on lineitem_pkey_290000 - Index Cond: (l_orderkey = 5) + -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem + Index Cond: (l_orderkey = 5) SELECT true AS valid FROM explain_xml($$ SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$); t @@ -646,10 +644,8 @@ Custom Scan (Citus Router) (cost=0.00..0.00 rows=0 width=0) Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression - -> Bitmap Heap Scan on lineitem_290000 lineitem (cost=4.30..13.44 rows=3 width=18) - Recheck Cond: (l_orderkey = 5) - -> Bitmap Index Scan on lineitem_pkey_290000 (cost=0.00..4.30 rows=3 width=0) - Index Cond: (l_orderkey = 5) + -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..11.83 rows=3 width=5) + Index Cond: (l_orderkey = 5) PREPARE real_time_executor_query AS SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; EXPLAIN (COSTS FALSE) EXECUTE real_time_executor_query; @@ -671,7 +667,5 @@ Custom Scan (Citus Router) (cost=0.00..0.00 rows=0 width=0) Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression - -> Bitmap Heap Scan on lineitem_290000 lineitem (cost=4.30..13.44 rows=3 width=18) - Recheck Cond: (l_orderkey = 5) - -> Bitmap Index Scan on lineitem_pkey_290000 (cost=0.00..4.30 rows=3 width=0) - Index Cond: (l_orderkey = 5) + -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..11.83 rows=3 width=5) + Index Cond: (l_orderkey = 5) diff --git a/src/test/regress/expected/multi_explain_0.out b/src/test/regress/expected/multi_explain_0.out index 69eb07d04..c7f7166a3 100644 --- a/src/test/regress/expected/multi_explain_0.out +++ b/src/test/regress/expected/multi_explain_0.out @@ -34,6 +34,9 @@ BEGIN RETURN result; END; $BODY$ LANGUAGE plpgsql; +-- VACUMM related tables to ensure test outputs are stable +VACUUM ANALYZE lineitem; +VACUUM ANALYZE orders; -- Test Text format EXPLAIN (COSTS FALSE, FORMAT TEXT) SELECT l_quantity, count(*) count_quantity FROM lineitem @@ -266,12 +269,13 @@ Limit -> Limit -> Sort Sort Key: lineitem.l_quantity - -> Hash Join - Hash Cond: (lineitem.l_orderkey = orders.o_orderkey) - -> Seq Scan on lineitem_290001 lineitem - Filter: (l_quantity < 5.0) - -> Hash - -> Seq Scan on orders_290008 orders + -> Merge Join + Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) + -> Index Scan using orders_pkey_290008 on orders_290008 orders + -> Sort + Sort Key: lineitem.l_orderkey + -> Seq Scan on lineitem_290001 lineitem + Filter: (l_quantity < 5.0) -- Test insert EXPLAIN (COSTS FALSE) INSERT INTO lineitem VALUES(1,0); @@ -293,11 +297,9 @@ Custom Scan (Citus Router) -> Task Node: host=localhost port=57638 dbname=regression -> Update on lineitem_290000 - -> Bitmap Heap Scan on lineitem_290000 - Recheck Cond: (l_orderkey = 1) + -> Index Scan using lineitem_pkey_290000 on lineitem_290000 + Index Cond: (l_orderkey = 1) Filter: (l_partkey = 0) - -> Bitmap Index Scan on lineitem_pkey_290000 - Index Cond: (l_orderkey = 1) -- Test delete EXPLAIN (COSTS FALSE) DELETE FROM lineitem @@ -308,11 +310,9 @@ Custom Scan (Citus Router) -> Task Node: host=localhost port=57638 dbname=regression -> Delete on lineitem_290000 - -> Bitmap Heap Scan on lineitem_290000 - Recheck Cond: (l_orderkey = 1) + -> Index Scan using lineitem_pkey_290000 on lineitem_290000 + Index Cond: (l_orderkey = 1) Filter: (l_partkey = 0) - -> Bitmap Index Scan on lineitem_pkey_290000 - Index Cond: (l_orderkey = 1) -- Test 
single-shard SELECT EXPLAIN (COSTS FALSE) SELECT l_quantity FROM lineitem WHERE l_orderkey = 5; @@ -321,10 +321,8 @@ Custom Scan (Citus Router) Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression - -> Bitmap Heap Scan on lineitem_290000 lineitem - Recheck Cond: (l_orderkey = 5) - -> Bitmap Index Scan on lineitem_pkey_290000 - Index Cond: (l_orderkey = 5) + -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem + Index Cond: (l_orderkey = 5) SELECT true AS valid FROM explain_xml($$ SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$); t @@ -617,10 +615,8 @@ Custom Scan (Citus Router) (cost=0.00..0.00 rows=0 width=0) Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression - -> Bitmap Heap Scan on lineitem_290000 lineitem (cost=4.30..13.44 rows=3 width=18) - Recheck Cond: (l_orderkey = 5) - -> Bitmap Index Scan on lineitem_pkey_290000 (cost=0.00..4.30 rows=3 width=0) - Index Cond: (l_orderkey = 5) + -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..11.83 rows=3 width=5) + Index Cond: (l_orderkey = 5) PREPARE real_time_executor_query AS SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; EXPLAIN (COSTS FALSE) EXECUTE real_time_executor_query; @@ -642,7 +638,5 @@ Custom Scan (Citus Router) (cost=0.00..0.00 rows=0 width=0) Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression - -> Bitmap Heap Scan on lineitem_290000 lineitem (cost=4.30..13.44 rows=3 width=18) - Recheck Cond: (l_orderkey = 5) - -> Bitmap Index Scan on lineitem_pkey_290000 (cost=0.00..4.30 rows=3 width=0) - Index Cond: (l_orderkey = 5) + -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..11.83 rows=3 width=5) + Index Cond: (l_orderkey = 5) diff --git a/src/test/regress/input/multi_load_data.source b/src/test/regress/input/multi_load_data.source index a4f6d064c..a48c51e36 100644 --- a/src/test/regress/input/multi_load_data.source +++ b/src/test/regress/input/multi_load_data.source @@ -23,5 +23,3 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000; \copy lineitem_hash_part FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' \copy orders_hash_part FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' \copy orders_hash_part FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' - -VACUUM ANALYZE; diff --git a/src/test/regress/input/multi_load_more_data.source b/src/test/regress/input/multi_load_more_data.source index 10070b089..1b7f1f117 100644 --- a/src/test/regress/input/multi_load_more_data.source +++ b/src/test/regress/input/multi_load_more_data.source @@ -13,5 +13,3 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 280000; \copy customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' \copy customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' \copy part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|' - -VACUUM ANALYZE; diff --git a/src/test/regress/input/multi_outer_join_reference.source b/src/test/regress/input/multi_outer_join_reference.source index 08261ee34..f1e5946c4 100644 --- a/src/test/regress/input/multi_outer_join_reference.source +++ b/src/test/regress/input/multi_outer_join_reference.source @@ -447,7 +447,8 @@ SELECT t_custkey, r_custkey FROM multi_outer_join_right_reference FULL JOIN - multi_outer_join_third_reference ON (t_custkey = r_custkey); + multi_outer_join_third_reference ON (t_custkey = r_custkey) +ORDER BY 1; -- DROP unused tables to clean up workspace DROP TABLE 
multi_outer_join_left_hash; diff --git a/src/test/regress/output/multi_load_data.source b/src/test/regress/output/multi_load_data.source index b78dc2fd8..a54306e69 100644 --- a/src/test/regress/output/multi_load_data.source +++ b/src/test/regress/output/multi_load_data.source @@ -19,6 +19,3 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000; \copy lineitem_hash_part FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' \copy orders_hash_part FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' \copy orders_hash_part FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' -VACUUM ANALYZE; -WARNING: not propagating VACUUM command to worker nodes -HINT: Provide a specific table in order to VACUUM distributed tables. diff --git a/src/test/regress/output/multi_load_more_data.source b/src/test/regress/output/multi_load_more_data.source index bef2e29f9..b41f68dc8 100644 --- a/src/test/regress/output/multi_load_more_data.source +++ b/src/test/regress/output/multi_load_more_data.source @@ -8,6 +8,3 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 280000; \copy customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' \copy customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' \copy part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|' -VACUUM ANALYZE; -WARNING: not propagating VACUUM command to worker nodes -HINT: Provide a specific table in order to VACUUM distributed tables. diff --git a/src/test/regress/output/multi_outer_join_reference.source b/src/test/regress/output/multi_outer_join_reference.source index ecaf1ae01..82d1b88ed 100644 --- a/src/test/regress/output/multi_outer_join_reference.source +++ b/src/test/regress/output/multi_outer_join_reference.source @@ -798,9 +798,20 @@ SELECT t_custkey, r_custkey FROM multi_outer_join_right_reference FULL JOIN - multi_outer_join_third_reference ON (t_custkey = r_custkey); + multi_outer_join_third_reference ON (t_custkey = r_custkey) +ORDER BY 1; t_custkey | r_custkey -----------+----------- + 1 | + 2 | + 3 | + 4 | + 5 | + 6 | + 7 | + 8 | + 9 | + 10 | 11 | 11 12 | 12 13 | 13 @@ -821,16 +832,6 @@ FROM 28 | 28 29 | 29 30 | 30 - 10 | - 2 | - 5 | - 8 | - 6 | - 4 | - 1 | - 3 | - 9 | - 7 | (30 rows) -- DROP unused tables to clean up workspace diff --git a/src/test/regress/sql/multi_explain.sql b/src/test/regress/sql/multi_explain.sql index 068dc45db..1d6e94db2 100644 --- a/src/test/regress/sql/multi_explain.sql +++ b/src/test/regress/sql/multi_explain.sql @@ -36,6 +36,10 @@ BEGIN END; $BODY$ LANGUAGE plpgsql; +-- VACUMM related tables to ensure test outputs are stable +VACUUM ANALYZE lineitem; +VACUUM ANALYZE orders; + -- Test Text format EXPLAIN (COSTS FALSE, FORMAT TEXT) SELECT l_quantity, count(*) count_quantity FROM lineitem From a35d0cd8aff82f486c0477aac9d9f943207869ed Mon Sep 17 00:00:00 2001 From: Burak Yucesoy Date: Wed, 12 Apr 2017 12:49:19 +0300 Subject: [PATCH 3/3] Configure valgrind command line arguments --- src/test/regress/Makefile | 2 +- src/test/regress/pg_regress_multi.pl | 31 ++++++++++++++++------------ 2 files changed, 19 insertions(+), 14 deletions(-) diff --git a/src/test/regress/Makefile b/src/test/regress/Makefile index e340e2ddc..3dded6576 100644 --- a/src/test/regress/Makefile +++ b/src/test/regress/Makefile @@ -51,7 +51,7 @@ check-multi: all tempinstall-main check-multi-vg: all tempinstall-main $(pg_regress_multi_check) --load-extension=citus --valgrind \ - --pg_ctl-timeout=360 --connection-timeout=500000 --valgrind-path=valgrind \ + --pg_ctl-timeout=360 
--connection-timeout=500000 --valgrind-path=valgrind --valgrind-log-file=$(VALGRIND_LOG_FILE) \ -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_schedule $(EXTRA_TESTS) check-isolation: all tempinstall-main diff --git a/src/test/regress/pg_regress_multi.pl b/src/test/regress/pg_regress_multi.pl index 1454d9086..1d6c5d005 100755 --- a/src/test/regress/pg_regress_multi.pl +++ b/src/test/regress/pg_regress_multi.pl @@ -37,6 +37,7 @@ sub Usage() print " --server-option Config option to pass to the server\n"; print " --valgrind Run server via valgrind\n"; print " --valgrind-path Path to the valgrind executable\n"; + print " --valgrind-log-file Path to the write valgrind logs\n"; print " --pg_ctl-timeout Timeout for pg_ctl\n"; print " --connection-timeout Timeout for connecting to worker nodes\n"; exit 1; @@ -59,9 +60,10 @@ my %fdwServers = (); my %functions = (); my %operators = (); my $valgrind = 0; -my $valgrind_path = "valgrind"; -my $pg_ctl_timeout = undef; -my $connection_timeout = 5000; +my $valgrindPath = "valgrind"; +my $valgrindLogFile = "valgrind_test_log.txt"; +my $pgCtlTimeout = undef; +my $connectionTimeout = 5000; my $serversAreShutdown = "TRUE"; @@ -77,9 +79,10 @@ GetOptions( 'load-extension=s' => \@extensions, 'server-option=s' => \@userPgOptions, 'valgrind' => \$valgrind, - 'valgrind-path=s' => \$valgrind_path, - 'pg_ctl-timeout=s' => \$pg_ctl_timeout, - 'connection-timeout=s' => \$connection_timeout, + 'valgrind-path=s' => \$valgrindPath, + 'valgrind-log-file=s' => \$valgrindLogFile, + 'pg_ctl-timeout=s' => \$pgCtlTimeout, + 'connection-timeout=s' => \$connectionTimeout, 'help' => sub { Usage() }); # Update environment to include [DY]LD_LIBRARY_PATH/LIBDIR/etc - @@ -144,10 +147,12 @@ are present. MESSAGE } -# valgrind starts slow, need to increase timeout -if (defined $pg_ctl_timeout) +# If pgCtlTimeout is defined, we will set related environment variable. +# This is generally used with valgrind because valgrind starts slow and we +# need to increase timeout. +if (defined $pgCtlTimeout) { - $ENV{PGCTLTIMEOUT} = "$pg_ctl_timeout"; + $ENV{PGCTLTIMEOUT} = "$pgCtlTimeout"; } # We don't want valgrind to run pg_ctl itself, as that'd trigger a lot @@ -172,13 +177,13 @@ sub replace_postgres or die "Could not create postgres wrapper at $bindir/postgres"; print $fh <<"END"; #!/bin/bash -exec $valgrind_path \\ +exec $valgrindPath \\ --quiet \\ --suppressions=${postgresSrcdir}/src/tools/valgrind.supp \\ - --trace-children=yes --track-origins=yes --read-var-info=yes \\ + --trace-children=yes --track-origins=yes --read-var-info=no \\ --leak-check=no \\ - --error-exitcode=128 \\ --error-markers=VALGRINDERROR-BEGIN,VALGRINDERROR-END \\ + --log-file=$valgrindLogFile \\ $bindir/postgres.orig \\ "\$@" END @@ -229,7 +234,7 @@ push(@pgOptions, '-c', "citus.expire_cached_shards=on"); push(@pgOptions, '-c', "citus.task_tracker_delay=10ms"); push(@pgOptions, '-c', "citus.remote_task_check_interval=1ms"); push(@pgOptions, '-c', "citus.shard_replication_factor=2"); -push(@pgOptions, '-c', "citus.node_connection_timeout=${connection_timeout}"); +push(@pgOptions, '-c', "citus.node_connection_timeout=${connectionTimeout}"); # Add externally added options last, so they overwrite the default ones above for my $option (@userPgOptions)