From 64fe6022790920ef6edee475bfa162a961f148d8 Mon Sep 17 00:00:00 2001
From: Jeff Davis
Date: Mon, 23 Mar 2020 13:56:28 -0700
Subject: [PATCH] Fixes for Disk-based Hash Aggregation.

Justin Pryzby raised a couple issues with commit 1f39bce0. Fixed.

Also, tweak the way the size of a hash entry is estimated and the
number of buckets is estimated when calling BuildTupleHashTableExt().

Discussion: https://www.postgresql.org/message-id/20200319064222.GR26184@telsasoft.com
---
 src/backend/commands/explain.c |  2 +-
 src/backend/executor/nodeAgg.c | 19 +++++++------------
 2 files changed, 8 insertions(+), 13 deletions(-)

diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 58141d8393..ff2f45cfb2 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -2778,7 +2778,7 @@ static void
 show_hashagg_info(AggState *aggstate, ExplainState *es)
 {
     Agg        *agg = (Agg *)aggstate->ss.ps.plan;
-    long        memPeakKb = (aggstate->hash_mem_peak + 1023) / 1024;
+    int64       memPeakKb = (aggstate->hash_mem_peak + 1023) / 1024;
 
     Assert(IsA(aggstate, AggState));
 
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 44c159ab2a..fbc0480fc6 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -1873,17 +1873,12 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
         aggstate->hash_disk_used = disk_used;
     }
 
-    /*
-     * Update hashentrysize estimate based on contents. Don't include meta_mem
-     * in the memory used, because empty buckets would inflate the per-entry
-     * cost. An underestimate of the per-entry size is better than an
-     * overestimate, because an overestimate could compound with each level of
-     * recursion.
-     */
+    /* update hashentrysize estimate based on contents */
     if (aggstate->hash_ngroups_current > 0)
     {
         aggstate->hashentrysize =
-            hash_mem / (double)aggstate->hash_ngroups_current;
+            sizeof(TupleHashEntryData) +
+            (hash_mem / (double)aggstate->hash_ngroups_current);
     }
 }
 
@@ -1899,10 +1894,10 @@ hash_choose_num_buckets(double hashentrysize, long ngroups, Size memory)
         max_nbuckets = memory / hashentrysize;
 
     /*
-     * Leave room for slop to avoid a case where the initial hash table size
-     * exceeds the memory limit (though that may still happen in edge cases).
+     * Underestimating is better than overestimating. Too many buckets crowd
+     * out space for group keys and transition state values.
      */
-    max_nbuckets *= 0.75;
+    max_nbuckets >>= 1;
 
     if (nbuckets > max_nbuckets)
         nbuckets = max_nbuckets;
@@ -3548,7 +3543,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
      * reasonable.
      */
     for (i = 0; i < aggstate->num_hashes; i++)
-        totalGroups = aggstate->perhash[i].aggnode->numGroups;
+        totalGroups += aggstate->perhash[i].aggnode->numGroups;
 
     hash_agg_set_limits(aggstate->hashentrysize, totalGroups, 0,
                         &aggstate->hash_mem_limit,
-- 
2.39.5
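
Notes on the changes above (illustration only, not part of the patch):

The hash_agg_update_metrics() hunk makes the per-entry size estimate
charge the fixed entry header explicitly, on top of the observed data
memory per group, while still excluding bucket-array (meta) memory so
empty buckets don't inflate the per-entry cost. A minimal standalone
sketch of that arithmetic follows; ENTRY_HEADER_SIZE_SKETCH and
estimate_entry_size_sketch() are hypothetical stand-ins (the real
header cost is sizeof(TupleHashEntryData), which is build-dependent).

#include <stdio.h>

/* hypothetical stand-in for sizeof(TupleHashEntryData) */
#define ENTRY_HEADER_SIZE_SKETCH 24.0

static double
estimate_entry_size_sketch(double hash_mem, double ngroups)
{
    /*
     * Per the patch: fixed per-entry header plus observed data memory
     * per group; bucket-array (meta) memory is not included.
     */
    return ENTRY_HEADER_SIZE_SKETCH + (hash_mem / ngroups);
}

int
main(void)
{
    /* hypothetical observation: 1MB of group data across 10000 groups */
    printf("estimated entry size: %.1f bytes\n",
           estimate_entry_size_sketch(1024.0 * 1024.0, 10000.0));
    return 0;
}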
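
The hash_choose_num_buckets() hunk replaces the "*= 0.75" slop factor
with a halving shift, preferring to underestimate the bucket count. A
sketch of the clamp under the same reasoning; choose_num_buckets_sketch()
is a hypothetical stand-in, and the floor of 1 is added here just so the
sketch never returns a zero-sized table.

#include <stdio.h>

static long
choose_num_buckets_sketch(double hashentrysize, long ngroups,
                          unsigned long memory)
{
    long    nbuckets = ngroups;
    long    max_nbuckets = (long) (memory / hashentrysize);

    /*
     * Per the patch: halve the cap so bucket-array overhead cannot
     * crowd out space for group keys and transition state values;
     * underestimating is the safer direction here.
     */
    max_nbuckets >>= 1;

    if (nbuckets > max_nbuckets)
        nbuckets = max_nbuckets;

    return nbuckets > 0 ? nbuckets : 1;     /* never zero-sized */
}

int
main(void)
{
    /* hypothetical: 120-byte entries, 1M estimated groups, 4MB budget */
    printf("nbuckets = %ld\n",
           choose_num_buckets_sketch(120.0, 1000000L, 4UL * 1024 * 1024));
    return 0;
}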
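
Finally, the ExecInitAgg() hunk fixes an accumulation bug: with "=",
totalGroups was overwritten on each loop iteration, so only the last
hash table's group estimate reached hash_agg_set_limits(); "+=" sums
them. A trivial sketch with hypothetical per-hash estimates:

#include <stdio.h>

int
main(void)
{
    /* hypothetical per-hash group estimates (aggnode->numGroups) */
    long    numGroups[] = {100, 200, 300};
    long    totalGroups = 0;
    int     i;

    for (i = 0; i < 3; i++)
        totalGroups += numGroups[i];    /* with "=", only 300 survives */

    printf("totalGroups = %ld\n", totalGroups);     /* prints 600 */
    return 0;
}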