Diffstat (limited to 'src/test')
 src/test/isolation/expected/drop-index-concurrently-1.out |    2 +-
 src/test/regress/expected/incremental_sort.out            | 1441 ++++++++++++++
 src/test/regress/expected/partition_aggregate.out         |    2 ++
 src/test/regress/expected/sysviews.out                    |    3 ++-
 src/test/regress/parallel_schedule                        |    2 +-
 src/test/regress/serial_schedule                          |    1 +
 src/test/regress/sql/incremental_sort.sql                 |  213 ++++
 src/test/regress/sql/partition_aggregate.sql              |    2 ++
 8 files changed, 1663 insertions(+), 3 deletions(-)
diff --git a/src/test/isolation/expected/drop-index-concurrently-1.out b/src/test/isolation/expected/drop-index-concurrently-1.out
index 75dff56bc46..8e6adb66bb1 100644
--- a/src/test/isolation/expected/drop-index-concurrently-1.out
+++ b/src/test/isolation/expected/drop-index-concurrently-1.out
@@ -21,7 +21,7 @@ QUERY PLAN
Sort
Sort Key: id, data
- -> Seq Scan on test_dc
+ -> Index Scan using test_dc_pkey on test_dc
Filter: ((data)::text = '34'::text)
step select2: SELECT * FROM test_dc WHERE data=34 ORDER BY id,data;
id data
diff --git a/src/test/regress/expected/incremental_sort.out b/src/test/regress/expected/incremental_sort.out
new file mode 100644
index 00000000000..f130c606c87
--- /dev/null
+++ b/src/test/regress/expected/incremental_sort.out
@@ -0,0 +1,1441 @@
+-- When we have to sort the entire table, incremental sort will
+-- be slower than plain sort, so it should not be used.
+explain (costs off)
+select * from (select * from tenk1 order by four) t order by four, ten;
+ QUERY PLAN
+-----------------------------------
+ Sort
+ Sort Key: tenk1.four, tenk1.ten
+ -> Sort
+ Sort Key: tenk1.four
+ -> Seq Scan on tenk1
+(5 rows)
+
+-- When there is a LIMIT clause, incremental sort is beneficial because
+-- it only has to sort some of the groups, and not the entire table.
+explain (costs off)
+select * from (select * from tenk1 order by four) t order by four, ten
+limit 1;
+ QUERY PLAN
+-----------------------------------------
+ Limit
+ -> Incremental Sort
+ Sort Key: tenk1.four, tenk1.ten
+ Presorted Key: tenk1.four
+ -> Sort
+ Sort Key: tenk1.four
+ -> Seq Scan on tenk1
+(7 rows)
+
+-- When work_mem is not enough to sort the entire table, incremental sort
+-- may be faster if individual groups still fit into work_mem.
+set work_mem to '2MB';
+explain (costs off)
+select * from (select * from tenk1 order by four) t order by four, ten;
+ QUERY PLAN
+-----------------------------------
+ Incremental Sort
+ Sort Key: tenk1.four, tenk1.ten
+ Presorted Key: tenk1.four
+ -> Sort
+ Sort Key: tenk1.four
+ -> Seq Scan on tenk1
+(6 rows)
+
+reset work_mem;
+create table t(a integer, b integer);
+create or replace function explain_analyze_without_memory(query text)
+returns table (out_line text) language plpgsql
+as
+$$
+declare
+ line text;
+begin
+ for line in
+ execute 'explain (analyze, costs off, summary off, timing off) ' || query
+ loop
+ out_line := regexp_replace(line, '\d+kB', 'NNkB', 'g');
+ return next;
+ end loop;
+end;
+$$;
+create or replace function explain_analyze_inc_sort_nodes(query text)
+returns jsonb language plpgsql
+as
+$$
+declare
+ elements jsonb;
+ element jsonb;
+ matching_nodes jsonb := '[]'::jsonb;
+begin
+ execute 'explain (analyze, costs off, summary off, timing off, format ''json'') ' || query into strict elements;
+ while jsonb_array_length(elements) > 0 loop
+ element := elements->0;
+ elements := elements - 0;
+ case jsonb_typeof(element)
+ when 'array' then
+ if jsonb_array_length(element) > 0 then
+ elements := elements || element;
+ end if;
+ when 'object' then
+ if element ? 'Plan' then
+ elements := elements || jsonb_build_array(element->'Plan');
+ element := element - 'Plan';
+ else
+ if element ? 'Plans' then
+ elements := elements || jsonb_build_array(element->'Plans');
+ element := element - 'Plans';
+ end if;
+ if (element->>'Node Type')::text = 'Incremental Sort' then
+ matching_nodes := matching_nodes || element;
+ end if;
+ end if;
+ end case;
+ end loop;
+ return matching_nodes;
+end;
+$$;
+create or replace function explain_analyze_inc_sort_nodes_without_memory(query text)
+returns jsonb language plpgsql
+as
+$$
+declare
+ nodes jsonb := '[]'::jsonb;
+ node jsonb;
+ group_key text;
+ space_key text;
+begin
+ for node in select * from jsonb_array_elements(explain_analyze_inc_sort_nodes(query)) t loop
+ for group_key in select unnest(array['Full-sort Groups', 'Presorted Groups']::text[]) t loop
+ for space_key in select unnest(array['Sort Space Memory', 'Sort Space Disk']::text[]) t loop
+ node := jsonb_set(node, array[group_key, space_key, 'Average Sort Space Used'], '"NN"', false);
+ node := jsonb_set(node, array[group_key, space_key, 'Maximum Sort Space Used'], '"NN"', false);
+ end loop;
+ end loop;
+ nodes := nodes || node;
+ end loop;
+ return nodes;
+end;
+$$;
+create or replace function explain_analyze_inc_sort_nodes_verify_invariants(query text)
+returns bool language plpgsql
+as
+$$
+declare
+ node jsonb;
+ group_stats jsonb;
+ group_key text;
+ space_key text;
+begin
+ for node in select * from jsonb_array_elements(explain_analyze_inc_sort_nodes(query)) t loop
+ for group_key in select unnest(array['Full-sort Groups', 'Presorted Groups']::text[]) t loop
+ group_stats := node->group_key;
+ for space_key in select unnest(array['Sort Space Memory', 'Sort Space Disk']::text[]) t loop
+        if (group_stats->space_key->'Maximum Sort Space Used')::bigint < (group_stats->space_key->'Average Sort Space Used')::bigint then
+ raise exception '% has invalid max space < average space', group_key;
+ end if;
+ end loop;
+ end loop;
+ end loop;
+ return true;
+end;
+$$;
+-- A single large group tested around each mode transition point.
+insert into t(a, b) select 1, i from generate_series(1, 100) n(i);
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 31;
+ QUERY PLAN
+---------------------------------
+ Limit
+ -> Incremental Sort
+ Sort Key: t.a, t.b
+ Presorted Key: t.a
+ -> Sort
+ Sort Key: t.a
+ -> Seq Scan on t
+(7 rows)
+
+select * from (select * from t order by a) s order by a, b limit 31;
+ a | b
+---+----
+ 1 | 1
+ 1 | 2
+ 1 | 3
+ 1 | 4
+ 1 | 5
+ 1 | 6
+ 1 | 7
+ 1 | 8
+ 1 | 9
+ 1 | 10
+ 1 | 11
+ 1 | 12
+ 1 | 13
+ 1 | 14
+ 1 | 15
+ 1 | 16
+ 1 | 17
+ 1 | 18
+ 1 | 19
+ 1 | 20
+ 1 | 21
+ 1 | 22
+ 1 | 23
+ 1 | 24
+ 1 | 25
+ 1 | 26
+ 1 | 27
+ 1 | 28
+ 1 | 29
+ 1 | 30
+ 1 | 31
+(31 rows)
+
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 32;
+ QUERY PLAN
+---------------------------------
+ Limit
+ -> Incremental Sort
+ Sort Key: t.a, t.b
+ Presorted Key: t.a
+ -> Sort
+ Sort Key: t.a
+ -> Seq Scan on t
+(7 rows)
+
+select * from (select * from t order by a) s order by a, b limit 32;
+ a | b
+---+----
+ 1 | 1
+ 1 | 2
+ 1 | 3
+ 1 | 4
+ 1 | 5
+ 1 | 6
+ 1 | 7
+ 1 | 8
+ 1 | 9
+ 1 | 10
+ 1 | 11
+ 1 | 12
+ 1 | 13
+ 1 | 14
+ 1 | 15
+ 1 | 16
+ 1 | 17
+ 1 | 18
+ 1 | 19
+ 1 | 20
+ 1 | 21
+ 1 | 22
+ 1 | 23
+ 1 | 24
+ 1 | 25
+ 1 | 26
+ 1 | 27
+ 1 | 28
+ 1 | 29
+ 1 | 30
+ 1 | 31
+ 1 | 32
+(32 rows)
+
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 33;
+ QUERY PLAN
+---------------------------------
+ Limit
+ -> Incremental Sort
+ Sort Key: t.a, t.b
+ Presorted Key: t.a
+ -> Sort
+ Sort Key: t.a
+ -> Seq Scan on t
+(7 rows)
+
+select * from (select * from t order by a) s order by a, b limit 33;
+ a | b
+---+----
+ 1 | 1
+ 1 | 2
+ 1 | 3
+ 1 | 4
+ 1 | 5
+ 1 | 6
+ 1 | 7
+ 1 | 8
+ 1 | 9
+ 1 | 10
+ 1 | 11
+ 1 | 12
+ 1 | 13
+ 1 | 14
+ 1 | 15
+ 1 | 16
+ 1 | 17
+ 1 | 18
+ 1 | 19
+ 1 | 20
+ 1 | 21
+ 1 | 22
+ 1 | 23
+ 1 | 24
+ 1 | 25
+ 1 | 26
+ 1 | 27
+ 1 | 28
+ 1 | 29
+ 1 | 30
+ 1 | 31
+ 1 | 32
+ 1 | 33
+(33 rows)
+
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 65;
+ QUERY PLAN
+---------------------------------
+ Limit
+ -> Incremental Sort
+ Sort Key: t.a, t.b
+ Presorted Key: t.a
+ -> Sort
+ Sort Key: t.a
+ -> Seq Scan on t
+(7 rows)
+
+select * from (select * from t order by a) s order by a, b limit 65;
+ a | b
+---+----
+ 1 | 1
+ 1 | 2
+ 1 | 3
+ 1 | 4
+ 1 | 5
+ 1 | 6
+ 1 | 7
+ 1 | 8
+ 1 | 9
+ 1 | 10
+ 1 | 11
+ 1 | 12
+ 1 | 13
+ 1 | 14
+ 1 | 15
+ 1 | 16
+ 1 | 17
+ 1 | 18
+ 1 | 19
+ 1 | 20
+ 1 | 21
+ 1 | 22
+ 1 | 23
+ 1 | 24
+ 1 | 25
+ 1 | 26
+ 1 | 27
+ 1 | 28
+ 1 | 29
+ 1 | 30
+ 1 | 31
+ 1 | 32
+ 1 | 33
+ 1 | 34
+ 1 | 35
+ 1 | 36
+ 1 | 37
+ 1 | 38
+ 1 | 39
+ 1 | 40
+ 1 | 41
+ 1 | 42
+ 1 | 43
+ 1 | 44
+ 1 | 45
+ 1 | 46
+ 1 | 47
+ 1 | 48
+ 1 | 49
+ 1 | 50
+ 1 | 51
+ 1 | 52
+ 1 | 53
+ 1 | 54
+ 1 | 55
+ 1 | 56
+ 1 | 57
+ 1 | 58
+ 1 | 59
+ 1 | 60
+ 1 | 61
+ 1 | 62
+ 1 | 63
+ 1 | 64
+ 1 | 65
+(65 rows)
+
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 66;
+ QUERY PLAN
+---------------------------------
+ Limit
+ -> Incremental Sort
+ Sort Key: t.a, t.b
+ Presorted Key: t.a
+ -> Sort
+ Sort Key: t.a
+ -> Seq Scan on t
+(7 rows)
+
+select * from (select * from t order by a) s order by a, b limit 66;
+ a | b
+---+----
+ 1 | 1
+ 1 | 2
+ 1 | 3
+ 1 | 4
+ 1 | 5
+ 1 | 6
+ 1 | 7
+ 1 | 8
+ 1 | 9
+ 1 | 10
+ 1 | 11
+ 1 | 12
+ 1 | 13
+ 1 | 14
+ 1 | 15
+ 1 | 16
+ 1 | 17
+ 1 | 18
+ 1 | 19
+ 1 | 20
+ 1 | 21
+ 1 | 22
+ 1 | 23
+ 1 | 24
+ 1 | 25
+ 1 | 26
+ 1 | 27
+ 1 | 28
+ 1 | 29
+ 1 | 30
+ 1 | 31
+ 1 | 32
+ 1 | 33
+ 1 | 34
+ 1 | 35
+ 1 | 36
+ 1 | 37
+ 1 | 38
+ 1 | 39
+ 1 | 40
+ 1 | 41
+ 1 | 42
+ 1 | 43
+ 1 | 44
+ 1 | 45
+ 1 | 46
+ 1 | 47
+ 1 | 48
+ 1 | 49
+ 1 | 50
+ 1 | 51
+ 1 | 52
+ 1 | 53
+ 1 | 54
+ 1 | 55
+ 1 | 56
+ 1 | 57
+ 1 | 58
+ 1 | 59
+ 1 | 60
+ 1 | 61
+ 1 | 62
+ 1 | 63
+ 1 | 64
+ 1 | 65
+ 1 | 66
+(66 rows)
+
+delete from t;
+-- An initial large group followed by a small group.
+insert into t(a, b) select (case when i < 50 then 1 else 2 end), i from generate_series(1, 100) n(i);
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 55;
+ QUERY PLAN
+---------------------------------
+ Limit
+ -> Incremental Sort
+ Sort Key: t.a, t.b
+ Presorted Key: t.a
+ -> Sort
+ Sort Key: t.a
+ -> Seq Scan on t
+(7 rows)
+
+select * from (select * from t order by a) s order by a, b limit 55;
+ a | b
+---+----
+ 1 | 1
+ 1 | 2
+ 1 | 3
+ 1 | 4
+ 1 | 5
+ 1 | 6
+ 1 | 7
+ 1 | 8
+ 1 | 9
+ 1 | 10
+ 1 | 11
+ 1 | 12
+ 1 | 13
+ 1 | 14
+ 1 | 15
+ 1 | 16
+ 1 | 17
+ 1 | 18
+ 1 | 19
+ 1 | 20
+ 1 | 21
+ 1 | 22
+ 1 | 23
+ 1 | 24
+ 1 | 25
+ 1 | 26
+ 1 | 27
+ 1 | 28
+ 1 | 29
+ 1 | 30
+ 1 | 31
+ 1 | 32
+ 1 | 33
+ 1 | 34
+ 1 | 35
+ 1 | 36
+ 1 | 37
+ 1 | 38
+ 1 | 39
+ 1 | 40
+ 1 | 41
+ 1 | 42
+ 1 | 43
+ 1 | 44
+ 1 | 45
+ 1 | 46
+ 1 | 47
+ 1 | 48
+ 1 | 49
+ 2 | 50
+ 2 | 51
+ 2 | 52
+ 2 | 53
+ 2 | 54
+ 2 | 55
+(55 rows)
+
+-- Test EXPLAIN ANALYZE with only a fullsort group.
+select explain_analyze_without_memory('select * from (select * from t order by a) s order by a, b limit 55');
+ explain_analyze_without_memory
+------------------------------------------------------------------------------------------------
+ Limit (actual rows=55 loops=1)
+ -> Incremental Sort (actual rows=55 loops=1)
+ Sort Key: t.a, t.b
+ Presorted Key: t.a
+ Full-sort Groups: 2 Sort Methods: top-N heapsort, quicksort Memory: avg=NNkB peak=NNkB
+ -> Sort (actual rows=100 loops=1)
+ Sort Key: t.a
+ Sort Method: quicksort Memory: NNkB
+ -> Seq Scan on t (actual rows=100 loops=1)
+(9 rows)
+
+select jsonb_pretty(explain_analyze_inc_sort_nodes_without_memory('select * from (select * from t order by a) s order by a, b limit 55'));
+ jsonb_pretty
+--------------------------------------------------
+ [ +
+ { +
+ "Sort Key": [ +
+ "t.a", +
+ "t.b" +
+ ], +
+ "Node Type": "Incremental Sort", +
+ "Actual Rows": 55, +
+ "Actual Loops": 1, +
+ "Presorted Key": [ +
+ "t.a" +
+ ], +
+ "Parallel Aware": false, +
+ "Full-sort Groups": { +
+ "Group Count": 2, +
+ "Sort Methods Used": [ +
+ "top-N heapsort", +
+ "quicksort" +
+ ], +
+ "Sort Space Memory": { +
+ "Average Sort Space Used": "NN",+
+ "Maximum Sort Space Used": "NN" +
+ } +
+ }, +
+ "Parent Relationship": "Outer" +
+ } +
+ ]
+(1 row)
+
+select explain_analyze_inc_sort_nodes_verify_invariants('select * from (select * from t order by a) s order by a, b limit 55');
+ explain_analyze_inc_sort_nodes_verify_invariants
+--------------------------------------------------
+ t
+(1 row)
+
+delete from t;
+-- An initial small group followed by a large group.
+insert into t(a, b) select (case when i < 5 then i else 9 end), i from generate_series(1, 100) n(i);
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 70;
+ QUERY PLAN
+---------------------------------
+ Limit
+ -> Incremental Sort
+ Sort Key: t.a, t.b
+ Presorted Key: t.a
+ -> Sort
+ Sort Key: t.a
+ -> Seq Scan on t
+(7 rows)
+
+select * from (select * from t order by a) s order by a, b limit 70;
+ a | b
+---+----
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+ 9 | 5
+ 9 | 6
+ 9 | 7
+ 9 | 8
+ 9 | 9
+ 9 | 10
+ 9 | 11
+ 9 | 12
+ 9 | 13
+ 9 | 14
+ 9 | 15
+ 9 | 16
+ 9 | 17
+ 9 | 18
+ 9 | 19
+ 9 | 20
+ 9 | 21
+ 9 | 22
+ 9 | 23
+ 9 | 24
+ 9 | 25
+ 9 | 26
+ 9 | 27
+ 9 | 28
+ 9 | 29
+ 9 | 30
+ 9 | 31
+ 9 | 32
+ 9 | 33
+ 9 | 34
+ 9 | 35
+ 9 | 36
+ 9 | 37
+ 9 | 38
+ 9 | 39
+ 9 | 40
+ 9 | 41
+ 9 | 42
+ 9 | 43
+ 9 | 44
+ 9 | 45
+ 9 | 46
+ 9 | 47
+ 9 | 48
+ 9 | 49
+ 9 | 50
+ 9 | 51
+ 9 | 52
+ 9 | 53
+ 9 | 54
+ 9 | 55
+ 9 | 56
+ 9 | 57
+ 9 | 58
+ 9 | 59
+ 9 | 60
+ 9 | 61
+ 9 | 62
+ 9 | 63
+ 9 | 64
+ 9 | 65
+ 9 | 66
+ 9 | 67
+ 9 | 68
+ 9 | 69
+ 9 | 70
+(70 rows)
+
+-- Test rescan.
+begin;
+-- We force the planner to choose a plan with incremental sort on the right side
+-- of a nested loop join node. That way we trigger the rescan code path.
+set local enable_hashjoin = off;
+set local enable_mergejoin = off;
+set local enable_material = off;
+set local enable_sort = off;
+explain (costs off) select * from t left join (select * from (select * from t order by a) v order by a, b) s on s.a = t.a where t.a in (1, 2);
+ QUERY PLAN
+------------------------------------------------
+ Nested Loop Left Join
+ Join Filter: (t_1.a = t.a)
+ -> Seq Scan on t
+ Filter: (a = ANY ('{1,2}'::integer[]))
+ -> Incremental Sort
+ Sort Key: t_1.a, t_1.b
+ Presorted Key: t_1.a
+ -> Sort
+ Sort Key: t_1.a
+ -> Seq Scan on t t_1
+(10 rows)
+
+select * from t left join (select * from (select * from t order by a) v order by a, b) s on s.a = t.a where t.a in (1, 2);
+ a | b | a | b
+---+---+---+---
+ 1 | 1 | 1 | 1
+ 2 | 2 | 2 | 2
+(2 rows)
+
+rollback;
+-- Test EXPLAIN ANALYZE with both fullsort and presorted groups.
+select explain_analyze_without_memory('select * from (select * from t order by a) s order by a, b limit 70');
+ explain_analyze_without_memory
+-----------------------------------------------------------------------------------------------------------------------------------------------------
+ Limit (actual rows=70 loops=1)
+ -> Incremental Sort (actual rows=70 loops=1)
+ Sort Key: t.a, t.b
+ Presorted Key: t.a
+ Full-sort Groups: 1 Sort Method: quicksort Memory: avg=NNkB peak=NNkB Presorted Groups: 5 Sort Method: quicksort Memory: avg=NNkB peak=NNkB
+ -> Sort (actual rows=100 loops=1)
+ Sort Key: t.a
+ Sort Method: quicksort Memory: NNkB
+ -> Seq Scan on t (actual rows=100 loops=1)
+(9 rows)
+
+select jsonb_pretty(explain_analyze_inc_sort_nodes_without_memory('select * from (select * from t order by a) s order by a, b limit 70'));
+ jsonb_pretty
+--------------------------------------------------
+ [ +
+ { +
+ "Sort Key": [ +
+ "t.a", +
+ "t.b" +
+ ], +
+ "Node Type": "Incremental Sort", +
+ "Actual Rows": 70, +
+ "Actual Loops": 1, +
+ "Presorted Key": [ +
+ "t.a" +
+ ], +
+ "Parallel Aware": false, +
+ "Full-sort Groups": { +
+ "Group Count": 1, +
+ "Sort Methods Used": [ +
+ "quicksort" +
+ ], +
+ "Sort Space Memory": { +
+ "Average Sort Space Used": "NN",+
+ "Maximum Sort Space Used": "NN" +
+ } +
+ }, +
+ "Presorted Groups": { +
+ "Group Count": 5, +
+ "Sort Methods Used": [ +
+ "quicksort" +
+ ], +
+ "Sort Space Memory": { +
+ "Average Sort Space Used": "NN",+
+ "Maximum Sort Space Used": "NN" +
+ } +
+ }, +
+ "Parent Relationship": "Outer" +
+ } +
+ ]
+(1 row)
+
+select explain_analyze_inc_sort_nodes_verify_invariants('select * from (select * from t order by a) s order by a, b limit 70');
+ explain_analyze_inc_sort_nodes_verify_invariants
+--------------------------------------------------
+ t
+(1 row)
+
+delete from t;
+-- Small groups of 10 tuples each tested around each mode transition point.
+insert into t(a, b) select i / 10, i from generate_series(1, 70) n(i);
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 31;
+ QUERY PLAN
+---------------------------------
+ Limit
+ -> Incremental Sort
+ Sort Key: t.a, t.b
+ Presorted Key: t.a
+ -> Sort
+ Sort Key: t.a
+ -> Seq Scan on t
+(7 rows)
+
+select * from (select * from t order by a) s order by a, b limit 31;
+ a | b
+---+----
+ 0 | 1
+ 0 | 2
+ 0 | 3
+ 0 | 4
+ 0 | 5
+ 0 | 6
+ 0 | 7
+ 0 | 8
+ 0 | 9
+ 1 | 10
+ 1 | 11
+ 1 | 12
+ 1 | 13
+ 1 | 14
+ 1 | 15
+ 1 | 16
+ 1 | 17
+ 1 | 18
+ 1 | 19
+ 2 | 20
+ 2 | 21
+ 2 | 22
+ 2 | 23
+ 2 | 24
+ 2 | 25
+ 2 | 26
+ 2 | 27
+ 2 | 28
+ 2 | 29
+ 3 | 30
+ 3 | 31
+(31 rows)
+
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 32;
+ QUERY PLAN
+---------------------------------
+ Limit
+ -> Incremental Sort
+ Sort Key: t.a, t.b
+ Presorted Key: t.a
+ -> Sort
+ Sort Key: t.a
+ -> Seq Scan on t
+(7 rows)
+
+select * from (select * from t order by a) s order by a, b limit 32;
+ a | b
+---+----
+ 0 | 1
+ 0 | 2
+ 0 | 3
+ 0 | 4
+ 0 | 5
+ 0 | 6
+ 0 | 7
+ 0 | 8
+ 0 | 9
+ 1 | 10
+ 1 | 11
+ 1 | 12
+ 1 | 13
+ 1 | 14
+ 1 | 15
+ 1 | 16
+ 1 | 17
+ 1 | 18
+ 1 | 19
+ 2 | 20
+ 2 | 21
+ 2 | 22
+ 2 | 23
+ 2 | 24
+ 2 | 25
+ 2 | 26
+ 2 | 27
+ 2 | 28
+ 2 | 29
+ 3 | 30
+ 3 | 31
+ 3 | 32
+(32 rows)
+
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 33;
+ QUERY PLAN
+---------------------------------
+ Limit
+ -> Incremental Sort
+ Sort Key: t.a, t.b
+ Presorted Key: t.a
+ -> Sort
+ Sort Key: t.a
+ -> Seq Scan on t
+(7 rows)
+
+select * from (select * from t order by a) s order by a, b limit 33;
+ a | b
+---+----
+ 0 | 1
+ 0 | 2
+ 0 | 3
+ 0 | 4
+ 0 | 5
+ 0 | 6
+ 0 | 7
+ 0 | 8
+ 0 | 9
+ 1 | 10
+ 1 | 11
+ 1 | 12
+ 1 | 13
+ 1 | 14
+ 1 | 15
+ 1 | 16
+ 1 | 17
+ 1 | 18
+ 1 | 19
+ 2 | 20
+ 2 | 21
+ 2 | 22
+ 2 | 23
+ 2 | 24
+ 2 | 25
+ 2 | 26
+ 2 | 27
+ 2 | 28
+ 2 | 29
+ 3 | 30
+ 3 | 31
+ 3 | 32
+ 3 | 33
+(33 rows)
+
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 65;
+ QUERY PLAN
+---------------------------------
+ Limit
+ -> Incremental Sort
+ Sort Key: t.a, t.b
+ Presorted Key: t.a
+ -> Sort
+ Sort Key: t.a
+ -> Seq Scan on t
+(7 rows)
+
+select * from (select * from t order by a) s order by a, b limit 65;
+ a | b
+---+----
+ 0 | 1
+ 0 | 2
+ 0 | 3
+ 0 | 4
+ 0 | 5
+ 0 | 6
+ 0 | 7
+ 0 | 8
+ 0 | 9
+ 1 | 10
+ 1 | 11
+ 1 | 12
+ 1 | 13
+ 1 | 14
+ 1 | 15
+ 1 | 16
+ 1 | 17
+ 1 | 18
+ 1 | 19
+ 2 | 20
+ 2 | 21
+ 2 | 22
+ 2 | 23
+ 2 | 24
+ 2 | 25
+ 2 | 26
+ 2 | 27
+ 2 | 28
+ 2 | 29
+ 3 | 30
+ 3 | 31
+ 3 | 32
+ 3 | 33
+ 3 | 34
+ 3 | 35
+ 3 | 36
+ 3 | 37
+ 3 | 38
+ 3 | 39
+ 4 | 40
+ 4 | 41
+ 4 | 42
+ 4 | 43
+ 4 | 44
+ 4 | 45
+ 4 | 46
+ 4 | 47
+ 4 | 48
+ 4 | 49
+ 5 | 50
+ 5 | 51
+ 5 | 52
+ 5 | 53
+ 5 | 54
+ 5 | 55
+ 5 | 56
+ 5 | 57
+ 5 | 58
+ 5 | 59
+ 6 | 60
+ 6 | 61
+ 6 | 62
+ 6 | 63
+ 6 | 64
+ 6 | 65
+(65 rows)
+
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 66;
+ QUERY PLAN
+---------------------------------
+ Limit
+ -> Incremental Sort
+ Sort Key: t.a, t.b
+ Presorted Key: t.a
+ -> Sort
+ Sort Key: t.a
+ -> Seq Scan on t
+(7 rows)
+
+select * from (select * from t order by a) s order by a, b limit 66;
+ a | b
+---+----
+ 0 | 1
+ 0 | 2
+ 0 | 3
+ 0 | 4
+ 0 | 5
+ 0 | 6
+ 0 | 7
+ 0 | 8
+ 0 | 9
+ 1 | 10
+ 1 | 11
+ 1 | 12
+ 1 | 13
+ 1 | 14
+ 1 | 15
+ 1 | 16
+ 1 | 17
+ 1 | 18
+ 1 | 19
+ 2 | 20
+ 2 | 21
+ 2 | 22
+ 2 | 23
+ 2 | 24
+ 2 | 25
+ 2 | 26
+ 2 | 27
+ 2 | 28
+ 2 | 29
+ 3 | 30
+ 3 | 31
+ 3 | 32
+ 3 | 33
+ 3 | 34
+ 3 | 35
+ 3 | 36
+ 3 | 37
+ 3 | 38
+ 3 | 39
+ 4 | 40
+ 4 | 41
+ 4 | 42
+ 4 | 43
+ 4 | 44
+ 4 | 45
+ 4 | 46
+ 4 | 47
+ 4 | 48
+ 4 | 49
+ 5 | 50
+ 5 | 51
+ 5 | 52
+ 5 | 53
+ 5 | 54
+ 5 | 55
+ 5 | 56
+ 5 | 57
+ 5 | 58
+ 5 | 59
+ 6 | 60
+ 6 | 61
+ 6 | 62
+ 6 | 63
+ 6 | 64
+ 6 | 65
+ 6 | 66
+(66 rows)
+
+delete from t;
+-- Small groups of only 1 tuple each tested around each mode transition point.
+insert into t(a, b) select i, i from generate_series(1, 70) n(i);
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 31;
+ QUERY PLAN
+---------------------------------
+ Limit
+ -> Incremental Sort
+ Sort Key: t.a, t.b
+ Presorted Key: t.a
+ -> Sort
+ Sort Key: t.a
+ -> Seq Scan on t
+(7 rows)
+
+select * from (select * from t order by a) s order by a, b limit 31;
+ a | b
+----+----
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+ 5 | 5
+ 6 | 6
+ 7 | 7
+ 8 | 8
+ 9 | 9
+ 10 | 10
+ 11 | 11
+ 12 | 12
+ 13 | 13
+ 14 | 14
+ 15 | 15
+ 16 | 16
+ 17 | 17
+ 18 | 18
+ 19 | 19
+ 20 | 20
+ 21 | 21
+ 22 | 22
+ 23 | 23
+ 24 | 24
+ 25 | 25
+ 26 | 26
+ 27 | 27
+ 28 | 28
+ 29 | 29
+ 30 | 30
+ 31 | 31
+(31 rows)
+
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 32;
+ QUERY PLAN
+---------------------------------
+ Limit
+ -> Incremental Sort
+ Sort Key: t.a, t.b
+ Presorted Key: t.a
+ -> Sort
+ Sort Key: t.a
+ -> Seq Scan on t
+(7 rows)
+
+select * from (select * from t order by a) s order by a, b limit 32;
+ a | b
+----+----
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+ 5 | 5
+ 6 | 6
+ 7 | 7
+ 8 | 8
+ 9 | 9
+ 10 | 10
+ 11 | 11
+ 12 | 12
+ 13 | 13
+ 14 | 14
+ 15 | 15
+ 16 | 16
+ 17 | 17
+ 18 | 18
+ 19 | 19
+ 20 | 20
+ 21 | 21
+ 22 | 22
+ 23 | 23
+ 24 | 24
+ 25 | 25
+ 26 | 26
+ 27 | 27
+ 28 | 28
+ 29 | 29
+ 30 | 30
+ 31 | 31
+ 32 | 32
+(32 rows)
+
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 33;
+ QUERY PLAN
+---------------------------------
+ Limit
+ -> Incremental Sort
+ Sort Key: t.a, t.b
+ Presorted Key: t.a
+ -> Sort
+ Sort Key: t.a
+ -> Seq Scan on t
+(7 rows)
+
+select * from (select * from t order by a) s order by a, b limit 33;
+ a | b
+----+----
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+ 5 | 5
+ 6 | 6
+ 7 | 7
+ 8 | 8
+ 9 | 9
+ 10 | 10
+ 11 | 11
+ 12 | 12
+ 13 | 13
+ 14 | 14
+ 15 | 15
+ 16 | 16
+ 17 | 17
+ 18 | 18
+ 19 | 19
+ 20 | 20
+ 21 | 21
+ 22 | 22
+ 23 | 23
+ 24 | 24
+ 25 | 25
+ 26 | 26
+ 27 | 27
+ 28 | 28
+ 29 | 29
+ 30 | 30
+ 31 | 31
+ 32 | 32
+ 33 | 33
+(33 rows)
+
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 65;
+ QUERY PLAN
+---------------------------------
+ Limit
+ -> Incremental Sort
+ Sort Key: t.a, t.b
+ Presorted Key: t.a
+ -> Sort
+ Sort Key: t.a
+ -> Seq Scan on t
+(7 rows)
+
+select * from (select * from t order by a) s order by a, b limit 65;
+ a | b
+----+----
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+ 5 | 5
+ 6 | 6
+ 7 | 7
+ 8 | 8
+ 9 | 9
+ 10 | 10
+ 11 | 11
+ 12 | 12
+ 13 | 13
+ 14 | 14
+ 15 | 15
+ 16 | 16
+ 17 | 17
+ 18 | 18
+ 19 | 19
+ 20 | 20
+ 21 | 21
+ 22 | 22
+ 23 | 23
+ 24 | 24
+ 25 | 25
+ 26 | 26
+ 27 | 27
+ 28 | 28
+ 29 | 29
+ 30 | 30
+ 31 | 31
+ 32 | 32
+ 33 | 33
+ 34 | 34
+ 35 | 35
+ 36 | 36
+ 37 | 37
+ 38 | 38
+ 39 | 39
+ 40 | 40
+ 41 | 41
+ 42 | 42
+ 43 | 43
+ 44 | 44
+ 45 | 45
+ 46 | 46
+ 47 | 47
+ 48 | 48
+ 49 | 49
+ 50 | 50
+ 51 | 51
+ 52 | 52
+ 53 | 53
+ 54 | 54
+ 55 | 55
+ 56 | 56
+ 57 | 57
+ 58 | 58
+ 59 | 59
+ 60 | 60
+ 61 | 61
+ 62 | 62
+ 63 | 63
+ 64 | 64
+ 65 | 65
+(65 rows)
+
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 66;
+ QUERY PLAN
+---------------------------------
+ Limit
+ -> Incremental Sort
+ Sort Key: t.a, t.b
+ Presorted Key: t.a
+ -> Sort
+ Sort Key: t.a
+ -> Seq Scan on t
+(7 rows)
+
+select * from (select * from t order by a) s order by a, b limit 66;
+ a | b
+----+----
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+ 5 | 5
+ 6 | 6
+ 7 | 7
+ 8 | 8
+ 9 | 9
+ 10 | 10
+ 11 | 11
+ 12 | 12
+ 13 | 13
+ 14 | 14
+ 15 | 15
+ 16 | 16
+ 17 | 17
+ 18 | 18
+ 19 | 19
+ 20 | 20
+ 21 | 21
+ 22 | 22
+ 23 | 23
+ 24 | 24
+ 25 | 25
+ 26 | 26
+ 27 | 27
+ 28 | 28
+ 29 | 29
+ 30 | 30
+ 31 | 31
+ 32 | 32
+ 33 | 33
+ 34 | 34
+ 35 | 35
+ 36 | 36
+ 37 | 37
+ 38 | 38
+ 39 | 39
+ 40 | 40
+ 41 | 41
+ 42 | 42
+ 43 | 43
+ 44 | 44
+ 45 | 45
+ 46 | 46
+ 47 | 47
+ 48 | 48
+ 49 | 49
+ 50 | 50
+ 51 | 51
+ 52 | 52
+ 53 | 53
+ 54 | 54
+ 55 | 55
+ 56 | 56
+ 57 | 57
+ 58 | 58
+ 59 | 59
+ 60 | 60
+ 61 | 61
+ 62 | 62
+ 63 | 63
+ 64 | 64
+ 65 | 65
+ 66 | 66
+(66 rows)
+
+delete from t;
+drop table t;
+-- Incremental sort vs. parallel queries
+set min_parallel_table_scan_size = '1kB';
+set min_parallel_index_scan_size = '1kB';
+set parallel_setup_cost = 0;
+set parallel_tuple_cost = 0;
+create table t (a int, b int, c int);
+insert into t select mod(i,10),mod(i,10),i from generate_series(1,10000) s(i);
+create index on t (a);
+analyze t;
+set enable_incrementalsort = off;
+explain (costs off) select a,b,sum(c) from t group by 1,2 order by 1,2,3 limit 1;
+ QUERY PLAN
+------------------------------------------------------
+ Limit
+ -> Sort
+ Sort Key: a, b, (sum(c))
+ -> Finalize HashAggregate
+ Group Key: a, b
+ -> Gather
+ Workers Planned: 2
+ -> Partial HashAggregate
+ Group Key: a, b
+ -> Parallel Seq Scan on t
+(10 rows)
+
+set enable_incrementalsort = on;
+explain (costs off) select a,b,sum(c) from t group by 1,2 order by 1,2,3 limit 1;
+ QUERY PLAN
+------------------------------------------------------
+ Limit
+ -> Sort
+ Sort Key: a, b, (sum(c))
+ -> Finalize HashAggregate
+ Group Key: a, b
+ -> Gather
+ Workers Planned: 2
+ -> Partial HashAggregate
+ Group Key: a, b
+ -> Parallel Seq Scan on t
+(10 rows)
+
+drop table t;
diff --git a/src/test/regress/expected/partition_aggregate.out b/src/test/regress/expected/partition_aggregate.out
index fb4b3422612..c36970575f5 100644
--- a/src/test/regress/expected/partition_aggregate.out
+++ b/src/test/regress/expected/partition_aggregate.out
@@ -11,6 +11,8 @@ SET enable_partitionwise_aggregate TO true;
SET enable_partitionwise_join TO true;
-- Disable parallel plans.
SET max_parallel_workers_per_gather TO 0;
+-- Disable incremental sort, which can influence selected plans due to the cost fuzz factor.
+SET enable_incrementalsort TO off;
--
-- Tests for list partitioned tables.
--
diff --git a/src/test/regress/expected/sysviews.out b/src/test/regress/expected/sysviews.out
index 715842b87af..a126f0ad613 100644
--- a/src/test/regress/expected/sysviews.out
+++ b/src/test/regress/expected/sysviews.out
@@ -78,6 +78,7 @@ select name, setting from pg_settings where name like 'enable%';
enable_hashagg | on
enable_hashagg_disk | on
enable_hashjoin | on
+ enable_incrementalsort | on
enable_indexonlyscan | on
enable_indexscan | on
enable_material | on
@@ -91,7 +92,7 @@ select name, setting from pg_settings where name like 'enable%';
enable_seqscan | on
enable_sort | on
enable_tidscan | on
-(19 rows)
+(20 rows)
-- Test that the pg_timezone_names and pg_timezone_abbrevs views are
-- more-or-less working. We can't test their contents in any great detail
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index a98dba7b2f3..a741e89616a 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -78,7 +78,7 @@ test: brin gin gist spgist privileges init_privs security_label collate matview
# ----------
# Another group of parallel tests
# ----------
-test: create_table_like alter_generic alter_operator misc async dbsize misc_functions sysviews tsrf tidscan collate.icu.utf8
+test: create_table_like alter_generic alter_operator misc async dbsize misc_functions sysviews tsrf tidscan collate.icu.utf8 incremental_sort
# rules cannot run concurrently with any test that creates
# a view or rule in the public schema
diff --git a/src/test/regress/serial_schedule b/src/test/regress/serial_schedule
index 3f66e0b859e..1a6821ca46b 100644
--- a/src/test/regress/serial_schedule
+++ b/src/test/regress/serial_schedule
@@ -89,6 +89,7 @@ test: select_distinct_on
test: select_implicit
test: select_having
test: subselect
+test: incremental_sort
test: union
test: case
test: join
diff --git a/src/test/regress/sql/incremental_sort.sql b/src/test/regress/sql/incremental_sort.sql
new file mode 100644
index 00000000000..3b359efa29a
--- /dev/null
+++ b/src/test/regress/sql/incremental_sort.sql
@@ -0,0 +1,213 @@
+-- When we have to sort the entire table, incremental sort will
+-- be slower than plain sort, so it should not be used.
+explain (costs off)
+select * from (select * from tenk1 order by four) t order by four, ten;
+
+-- When there is a LIMIT clause, incremental sort is beneficial because
+-- it only has to sort some of the groups, and not the entire table.
+explain (costs off)
+select * from (select * from tenk1 order by four) t order by four, ten
+limit 1;
+
+-- When work_mem is not enough to sort the entire table, incremental sort
+-- may be faster if individual groups still fit into work_mem.
+set work_mem to '2MB';
+explain (costs off)
+select * from (select * from tenk1 order by four) t order by four, ten;
+reset work_mem;
+
+create table t(a integer, b integer);
+
+create or replace function explain_analyze_without_memory(query text)
+returns table (out_line text) language plpgsql
+as
+$$
+declare
+ line text;
+begin
+ for line in
+ execute 'explain (analyze, costs off, summary off, timing off) ' || query
+ loop
+ out_line := regexp_replace(line, '\d+kB', 'NNkB', 'g');
+ return next;
+ end loop;
+end;
+$$;
+
+create or replace function explain_analyze_inc_sort_nodes(query text)
+returns jsonb language plpgsql
+as
+$$
+declare
+ elements jsonb;
+ element jsonb;
+ matching_nodes jsonb := '[]'::jsonb;
+begin
+ execute 'explain (analyze, costs off, summary off, timing off, format ''json'') ' || query into strict elements;
+ while jsonb_array_length(elements) > 0 loop
+ element := elements->0;
+ elements := elements - 0;
+ case jsonb_typeof(element)
+ when 'array' then
+ if jsonb_array_length(element) > 0 then
+ elements := elements || element;
+ end if;
+ when 'object' then
+ if element ? 'Plan' then
+ elements := elements || jsonb_build_array(element->'Plan');
+ element := element - 'Plan';
+ else
+ if element ? 'Plans' then
+ elements := elements || jsonb_build_array(element->'Plans');
+ element := element - 'Plans';
+ end if;
+ if (element->>'Node Type')::text = 'Incremental Sort' then
+ matching_nodes := matching_nodes || element;
+ end if;
+ end if;
+ end case;
+ end loop;
+ return matching_nodes;
+end;
+$$;
+
+create or replace function explain_analyze_inc_sort_nodes_without_memory(query text)
+returns jsonb language plpgsql
+as
+$$
+declare
+ nodes jsonb := '[]'::jsonb;
+ node jsonb;
+ group_key text;
+ space_key text;
+begin
+ for node in select * from jsonb_array_elements(explain_analyze_inc_sort_nodes(query)) t loop
+ for group_key in select unnest(array['Full-sort Groups', 'Presorted Groups']::text[]) t loop
+ for space_key in select unnest(array['Sort Space Memory', 'Sort Space Disk']::text[]) t loop
+ node := jsonb_set(node, array[group_key, space_key, 'Average Sort Space Used'], '"NN"', false);
+ node := jsonb_set(node, array[group_key, space_key, 'Maximum Sort Space Used'], '"NN"', false);
+ end loop;
+ end loop;
+ nodes := nodes || node;
+ end loop;
+ return nodes;
+end;
+$$;
+
+create or replace function explain_analyze_inc_sort_nodes_verify_invariants(query text)
+returns bool language plpgsql
+as
+$$
+declare
+ node jsonb;
+ group_stats jsonb;
+ group_key text;
+ space_key text;
+begin
+ for node in select * from jsonb_array_elements(explain_analyze_inc_sort_nodes(query)) t loop
+ for group_key in select unnest(array['Full-sort Groups', 'Presorted Groups']::text[]) t loop
+ group_stats := node->group_key;
+ for space_key in select unnest(array['Sort Space Memory', 'Sort Space Disk']::text[]) t loop
+        if (group_stats->space_key->'Maximum Sort Space Used')::bigint < (group_stats->space_key->'Average Sort Space Used')::bigint then
+ raise exception '% has invalid max space < average space', group_key;
+ end if;
+ end loop;
+ end loop;
+ end loop;
+ return true;
+end;
+$$;
+
+-- A single large group tested around each mode transition point.
+insert into t(a, b) select 1, i from generate_series(1, 100) n(i);
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 31;
+select * from (select * from t order by a) s order by a, b limit 31;
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 32;
+select * from (select * from t order by a) s order by a, b limit 32;
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 33;
+select * from (select * from t order by a) s order by a, b limit 33;
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 65;
+select * from (select * from t order by a) s order by a, b limit 65;
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 66;
+select * from (select * from t order by a) s order by a, b limit 66;
+delete from t;
+
+-- An initial large group followed by a small group.
+insert into t(a, b) select (case when i < 50 then 1 else 2 end), i from generate_series(1, 100) n(i);
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 55;
+select * from (select * from t order by a) s order by a, b limit 55;
+-- Test EXPLAIN ANALYZE with only a fullsort group.
+select explain_analyze_without_memory('select * from (select * from t order by a) s order by a, b limit 55');
+select jsonb_pretty(explain_analyze_inc_sort_nodes_without_memory('select * from (select * from t order by a) s order by a, b limit 55'));
+select explain_analyze_inc_sort_nodes_verify_invariants('select * from (select * from t order by a) s order by a, b limit 55');
+delete from t;
+
+-- An initial small group followed by a large group.
+insert into t(a, b) select (case when i < 5 then i else 9 end), i from generate_series(1, 100) n(i);
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 70;
+select * from (select * from t order by a) s order by a, b limit 70;
+-- Test rescan.
+begin;
+-- We force the planner to choose a plan with incremental sort on the right side
+-- of a nested loop join node. That way we trigger the rescan code path.
+set local enable_hashjoin = off;
+set local enable_mergejoin = off;
+set local enable_material = off;
+set local enable_sort = off;
+explain (costs off) select * from t left join (select * from (select * from t order by a) v order by a, b) s on s.a = t.a where t.a in (1, 2);
+select * from t left join (select * from (select * from t order by a) v order by a, b) s on s.a = t.a where t.a in (1, 2);
+rollback;
+-- Test EXPLAIN ANALYZE with both fullsort and presorted groups.
+select explain_analyze_without_memory('select * from (select * from t order by a) s order by a, b limit 70');
+select jsonb_pretty(explain_analyze_inc_sort_nodes_without_memory('select * from (select * from t order by a) s order by a, b limit 70'));
+select explain_analyze_inc_sort_nodes_verify_invariants('select * from (select * from t order by a) s order by a, b limit 70');
+delete from t;
+
+-- Small groups of 10 tuples each tested around each mode transition point.
+insert into t(a, b) select i / 10, i from generate_series(1, 70) n(i);
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 31;
+select * from (select * from t order by a) s order by a, b limit 31;
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 32;
+select * from (select * from t order by a) s order by a, b limit 32;
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 33;
+select * from (select * from t order by a) s order by a, b limit 33;
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 65;
+select * from (select * from t order by a) s order by a, b limit 65;
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 66;
+select * from (select * from t order by a) s order by a, b limit 66;
+delete from t;
+
+-- Small groups of only 1 tuple each tested around each mode transition point.
+insert into t(a, b) select i, i from generate_series(1, 70) n(i);
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 31;
+select * from (select * from t order by a) s order by a, b limit 31;
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 32;
+select * from (select * from t order by a) s order by a, b limit 32;
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 33;
+select * from (select * from t order by a) s order by a, b limit 33;
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 65;
+select * from (select * from t order by a) s order by a, b limit 65;
+explain (costs off) select * from (select * from t order by a) s order by a, b limit 66;
+select * from (select * from t order by a) s order by a, b limit 66;
+delete from t;
+
+drop table t;
+
+-- Incremental sort vs. parallel queries
+set min_parallel_table_scan_size = '1kB';
+set min_parallel_index_scan_size = '1kB';
+set parallel_setup_cost = 0;
+set parallel_tuple_cost = 0;
+
+create table t (a int, b int, c int);
+insert into t select mod(i,10),mod(i,10),i from generate_series(1,10000) s(i);
+create index on t (a);
+analyze t;
+
+set enable_incrementalsort = off;
+explain (costs off) select a,b,sum(c) from t group by 1,2 order by 1,2,3 limit 1;
+
+set enable_incrementalsort = on;
+explain (costs off) select a,b,sum(c) from t group by 1,2 order by 1,2,3 limit 1;
+
+drop table t;
diff --git a/src/test/regress/sql/partition_aggregate.sql b/src/test/regress/sql/partition_aggregate.sql
index ba4fed4d437..178f2079faa 100644
--- a/src/test/regress/sql/partition_aggregate.sql
+++ b/src/test/regress/sql/partition_aggregate.sql
@@ -12,6 +12,8 @@ SET enable_partitionwise_aggregate TO true;
SET enable_partitionwise_join TO true;
-- Disable parallel plans.
SET max_parallel_workers_per_gather TO 0;
+-- Disable incremental sort, which can influence selected plans due to the cost fuzz factor.
+SET enable_incrementalsort TO off;
--
-- Tests for list partitioned tables.