author      Tom Lane    2000-02-26 05:25:55 +0000
committer   Tom Lane    2000-02-26 05:25:55 +0000
commit      08b1040374a598fb8e720ce8b9e1844cdcff33d3 (patch)
tree        4017fc96cf052a933fb772bb3aefdee719e3e125 /src/backend/utils
parent      c05abfb1a845e1bf5e66638ee2b83609468ebfd6 (diff)
Shared-memory hashtables have non-extensible directories, which means
it's a good idea to choose the directory size based on the expected
number of entries.  But ShmemInitHash was using a hard-wired constant.
Boo hiss.  This accounts for a recent report of postmaster failure
when asking for 64K or more buffers.
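The companion fix lives in ShmemInitHash, outside this diffstat's scope: feed
the expected table size through the new function before calling hash_create().
A minimal sketch, assuming the HASHCTL fields dsize/max_dsize and the
HASH_DIRSIZE flag from utils/hsearch.h; max_size stands in for the caller's
expected entry count:

	HASHCTL		info;			/* from utils/hsearch.h */
	int			hash_flags = 0;	/* caller's other HASH_xxx flags go here */

	/* size the directory for max_size entries instead of DEF_DIRSIZE */
	info.dsize = info.max_dsize = hash_select_dirsize(max_size);
	hash_flags |= HASH_DIRSIZE;	/* tell hash_create() to honor dsize */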
Diffstat (limited to 'src/backend/utils')
-rw-r--r--  src/backend/utils/hash/dynahash.c | 35
1 file changed, 30 insertions(+), 5 deletions(-)
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 0cb235b62bc..341e6027a12 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/hash/dynahash.c,v 1.28 2000/01/26 05:57:24 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/hash/dynahash.c,v 1.29 2000/02/26 05:25:54 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -328,10 +328,7 @@ init_htab(HTAB *hashp, int nelem)
 	{
 		*segp = seg_alloc(hashp);
 		if (*segp == (SEG_OFFSET) 0)
-		{
-			hash_destroy(hashp);
-			return 0;
-		}
+			return -1;
 	}
 
 #if HASH_DEBUG
@@ -392,6 +389,34 @@ hash_estimate_size(long num_entries, long keysize, long datasize)
 	return size;
 }
 
+/*
+ * Select an appropriate directory size for a hashtable with the given
+ * maximum number of entries.
+ * This is only needed for hashtables in shared memory, whose directories
+ * cannot be expanded dynamically.
+ * NB: assumes that all hash structure parameters have default values!
+ *
+ * XXX this had better agree with the behavior of init_htab()...
+ */
+long
+hash_select_dirsize(long num_entries)
+{
+	long		nBuckets,
+				nSegments,
+				nDirEntries;
+
+	/* estimate number of buckets wanted */
+	nBuckets = 1L << my_log2((num_entries - 1) / DEF_FFACTOR + 1);
+	/* # of segments needed for nBuckets */
+	nSegments = 1L << my_log2((nBuckets - 1) / DEF_SEGSIZE + 1);
+	/* directory entries */
+	nDirEntries = DEF_DIRSIZE;
+	while (nDirEntries < nSegments)
+		nDirEntries <<= 1;		/* dir_alloc doubles dsize at each call */
+
+	return nDirEntries;
+}
+
 
 /********************** DESTROY ROUTINES ************************/
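
As a check on the arithmetic in hash_select_dirsize(), here is a standalone
sketch that reimplements the same calculation.  The defaults (DEF_FFACTOR 1,
DEF_SEGSIZE 256, DEF_DIRSIZE 256) are assumed values restated locally so the
program compiles on its own, and my_log2() is reimplemented to return the
smallest power-of-two exponent covering its argument, mirroring dynahash:

#include <stdio.h>

#define DEF_FFACTOR 1		/* assumed default fill factor */
#define DEF_SEGSIZE 256		/* assumed default segment size */
#define DEF_DIRSIZE 256		/* assumed default directory size */

/* smallest i such that (1 << i) >= num, mirroring dynahash's my_log2 */
static int
my_log2(long num)
{
	int			i = 0;
	long		limit = 1;

	while (limit < num)
	{
		limit <<= 1;
		i++;
	}
	return i;
}

static long
hash_select_dirsize(long num_entries)
{
	/* round the wanted bucket count up to a power of 2 */
	long		nBuckets = 1L << my_log2((num_entries - 1) / DEF_FFACTOR + 1);
	/* segments needed to hold nBuckets, again a power of 2 */
	long		nSegments = 1L << my_log2((nBuckets - 1) / DEF_SEGSIZE + 1);
	long		nDirEntries = DEF_DIRSIZE;

	while (nDirEntries < nSegments)
		nDirEntries <<= 1;		/* mirrors dir_alloc's doubling */
	return nDirEntries;
}

int
main(void)
{
	long		sizes[] = {1000L, 65536L, 131072L, 1048576L};
	int			i;

	for (i = 0; i < 4; i++)
		printf("%7ld entries -> dirsize %ld\n",
			   sizes[i], hash_select_dirsize(sizes[i]));
	return 0;
}

With these assumed defaults the program prints 256 for both 1000 and 65536
entries, 512 for 131072, and 4096 for 1048576.  A directory stuck at the
default tops out at DEF_DIRSIZE * DEF_SEGSIZE = 64K buckets, which lines up
with the reported postmaster failures once 64K or more buffers are requested.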