author     Tom Lane    2015-10-04 18:16:59 +0000
committer  Tom Lane    2015-10-04 18:17:28 +0000
commit     2647b245d2360a5545f078a2943933eb50d57e66 (patch)
tree       9354458e2f1f449be2caf38bf7c9b06eecdb8545
parent     47ac95f378773a492b26ef2efe0115ac9a166e5a (diff)
Fix possible "invalid memory alloc request size" failure in nodeHash.c.
Limit the size of the hashtable pointer array to not more than MaxAllocSize.
We've seen reports of failures due to this in HEAD/9.5, and it seems possible
in older branches as well. The change in NTUP_PER_BUCKET in 9.5 may have made
the problem more likely, but surely it didn't introduce it.

Tomas Vondra, slightly modified by me
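For illustration, here is a minimal standalone sketch of the clamping arithmetic
the patch introduces. The Min() macro, the MaxAllocSize value, and the work_mem
figure are stand-ins mirroring PostgreSQL's definitions; this is not the actual
executor code:

    #include <stdio.h>
    #include <stddef.h>
    #include <limits.h>

    typedef void *HashJoinTuple;               /* stand-in for the real pointer type */
    #define MaxAllocSize ((size_t) 0x3fffffff) /* ~1 GB, as in PostgreSQL's memutils.h */
    #define Min(x, y) ((x) < (y) ? (x) : (y))

    int main(void)
    {
        long    work_mem = 4L * 1024 * 1024;   /* hypothetical: 4 GB expressed in kB */
        long    max_pointers;

        /* bucket-array entries are pointers, so divide the byte budget by their size */
        max_pointers = (work_mem * 1024L) / sizeof(HashJoinTuple);
        /* clamp so an allocation of the bucket array can never exceed MaxAllocSize */
        max_pointers = Min(max_pointers, (long) (MaxAllocSize / sizeof(HashJoinTuple)));
        /* guard against integer overflow in later nbatch/nbuckets arithmetic */
        max_pointers = Min(max_pointers, INT_MAX / 2);

        printf("max_pointers = %ld\n", max_pointers);
        return 0;
    }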
-rw-r--r--  src/backend/executor/nodeHash.c  6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 1597211ad89..492e6e0d332 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -459,10 +459,12 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
* Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
* memory is filled. Set nbatch to the smallest power of 2 that appears
* sufficient. The Min() steps limit the results so that the pointer
- * arrays we'll try to allocate do not exceed work_mem.
+ * arrays we'll try to allocate do not exceed work_mem nor MaxAllocSize.
*/
- max_pointers = (work_mem * 1024L) / sizeof(void *);
+ max_pointers = (work_mem * 1024L) / sizeof(HashJoinTuple);
+ max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
/* also ensure we avoid integer overflow in nbatch and nbuckets */
+ /* (this step is redundant given the current value of MaxAllocSize) */
max_pointers = Min(max_pointers, INT_MAX / 2);
if (inner_rel_bytes > hash_table_bytes)
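To see why the added clamp matters, assuming 8-byte pointers: with work_mem set
to 2 GB (2097152 kB), the old computation allows max_pointers =
(2097152 * 1024) / 8 = 268435456, so the bucket array could require
268435456 * 8 = 2147483648 bytes. That exceeds MaxAllocSize (0x3fffffff, just
under 1 GB), and the allocator rejects the request with "invalid memory alloc
request size". The new Min() step caps max_pointers at
MaxAllocSize / 8 = 134217727, so the pointer-array allocation can never exceed
the limit.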