mirror of https://github.com/citusdata/citus.git
Normalizes Memory Usage, Buckets, Batches for PG15 explain diffs
We create a new function in multi_test_helpers, which is similar to the explain_merge function in PG15. This explain helper function normalizes Memory Usage, Buckets and Batches, and we use it in the tests that give different output for PG15.

Branch: naisila/failure_pg15
parent 8b141aef13
commit 3c9eea031e
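
For context, the whole normalization rests on a single regexp_replace over each line of EXPLAIN output. A minimal sketch of its effect, run against made-up plan lines rather than output taken from the test suite:

SELECT regexp_replace('Sort Method: quicksort  Memory: 27kB',
                      '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g');
-- Sort Method: quicksort  Memory: xxx

SELECT regexp_replace('Buckets: 1024  Batches: 1  Memory Usage: 40kB',
                      '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g');
-- Buckets: xxx  Batches: xxx  Memory Usage: xxx

The 'g' flag matters for the second case, where Buckets, Batches and Memory Usage all appear on one plan line.
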
@@ -301,12 +301,14 @@ Sort
 Group Key: l_quantity
 -> Seq Scan on lineitem_360000 lineitem
 -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
+SELECT public.plan_normalize_memory($Q$
 EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE)
 SELECT l_quantity, count(*) count_quantity FROM lineitem
 GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+$Q$);
 Sort (actual rows=50 loops=1)
 Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
-Sort Method: quicksort Memory: 27kB
+Sort Method: quicksort Memory: xxx
 -> HashAggregate (actual rows=50 loops=1)
 Group Key: remote_scan.l_quantity
 -> Custom Scan (Citus Adaptive) (actual rows=100 loops=1)
@@ -369,13 +371,15 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
 END;
 DROP TABLE t1, t2;
 -- Test query text output, with ANALYZE ON
+SELECT public.plan_normalize_memory($Q$
 EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE)
 SELECT l_quantity, count(*) count_quantity FROM lineitem
 GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+$Q$);
 Sort (actual rows=50 loops=1)
 Output: remote_scan.l_quantity, (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))
 Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
-Sort Method: quicksort Memory: 27kB
+Sort Method: quicksort Memory: xxx
 -> HashAggregate (actual rows=50 loops=1)
 Output: remote_scan.l_quantity, COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)
 Group Key: remote_scan.l_quantity
@@ -1046,12 +1050,14 @@ Custom Scan (Citus Adaptive)
 -> Delete on lineitem_hash_part_360044 lineitem_hash_part
 -> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part
 -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
+SELECT public.plan_normalize_memory($Q$
 EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE)
 SELECT l_quantity, count(*) count_quantity FROM lineitem
 GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+$Q$);
 Sort (actual rows=50 loops=1)
 Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
-Sort Method: quicksort Memory: 27kB
+Sort Method: quicksort Memory: xxx
 -> HashAggregate (actual rows=50 loops=1)
 Group Key: remote_scan.l_quantity
 -> Custom Scan (Citus Adaptive) (actual rows=100 loops=1)

@@ -72,6 +72,15 @@ BEGIN
 END LOOP;
 RETURN;
 END; $$ language plpgsql;
+-- Create a function to normalize Memory Usage, Buckets, Batches
+CREATE OR REPLACE FUNCTION plan_normalize_memory(explain_command text, out query_plan text)
+RETURNS SETOF TEXT AS $$
+BEGIN
+FOR query_plan IN execute explain_command LOOP
+query_plan := regexp_replace(query_plan, '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g');
+RETURN NEXT;
+END LOOP;
+END; $$ language plpgsql;
 -- helper function that returns true if output of given explain has "is not null" (case in-sensitive)
 CREATE OR REPLACE FUNCTION explain_has_is_not_null(explain_command text)
 RETURNS BOOLEAN AS $$

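As a usage sketch, the helper wraps any EXPLAIN command the same way the test changes below do; the table name some_dist_table here is hypothetical and not part of the regression suite:

SELECT public.plan_normalize_memory($Q$
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE)
SELECT count(*) FROM some_dist_table;
$Q$);

Each returned plan line passes through the regexp_replace above, so version-dependent figures such as 'Memory: 25kB' or 'Buckets: 1024' come back as 'Memory: xxx' and 'Buckets: xxx', keeping the expected output stable across PostgreSQL versions.
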
@@ -122,9 +122,11 @@ EXPLAIN (COSTS FALSE, FORMAT TEXT)
 GROUP BY l_quantity ORDER BY count_quantity, l_quantity;

 -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
+SELECT public.plan_normalize_memory($Q$
 EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE)
 SELECT l_quantity, count(*) count_quantity FROM lineitem
 GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+$Q$);

 -- EXPLAIN ANALYZE doesn't show worker tasks for repartition joins yet
 SET citus.shard_count TO 3;
@@ -142,9 +144,11 @@ END;
 DROP TABLE t1, t2;

 -- Test query text output, with ANALYZE ON
+SELECT public.plan_normalize_memory($Q$
 EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE)
 SELECT l_quantity, count(*) count_quantity FROM lineitem
 GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+$Q$);

 -- Test query text output, with ANALYZE OFF
 EXPLAIN (COSTS FALSE, ANALYZE FALSE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE)
@@ -488,9 +492,11 @@ EXPLAIN (COSTS FALSE)
 DELETE FROM lineitem_hash_part;

 -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
+SELECT public.plan_normalize_memory($Q$
 EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE)
 SELECT l_quantity, count(*) count_quantity FROM lineitem
 GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+$Q$);

 SET citus.explain_all_tasks TO off;

@@ -78,6 +78,16 @@ BEGIN
 RETURN;
 END; $$ language plpgsql;

+-- Create a function to normalize Memory Usage, Buckets, Batches
+CREATE OR REPLACE FUNCTION plan_normalize_memory(explain_command text, out query_plan text)
+RETURNS SETOF TEXT AS $$
+BEGIN
+FOR query_plan IN execute explain_command LOOP
+query_plan := regexp_replace(query_plan, '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g');
+RETURN NEXT;
+END LOOP;
+END; $$ language plpgsql;
+
 -- helper function that returns true if output of given explain has "is not null" (case in-sensitive)
 CREATE OR REPLACE FUNCTION explain_has_is_not_null(explain_command text)
 RETURNS BOOLEAN AS $$