sb_region.c/h -> aregion.c/h
author	Robert Haas <rhaas@postgresql.org>
Mon, 12 May 2014 18:27:11 +0000 (14:27 -0400)
committer	Robert Haas <rhaas@postgresql.org>
Mon, 12 May 2014 18:27:11 +0000 (14:27 -0400)
src/backend/utils/mmgr/Makefile
src/backend/utils/mmgr/aregion.c [moved from src/backend/utils/mmgr/sb_region.c with 74% similarity]
src/backend/utils/mmgr/freepage.c
src/backend/utils/mmgr/sb_alloc.c
src/include/utils/aregion.h [moved from src/include/utils/sb_region.h with 55% similarity]
src/include/utils/sb_alloc.h

index c4d61e4d312175d7f8b13576699c2d292b4d422a..e686a90ad7f3ab37b39cafbeae3be6730af31199 100644 (file)
@@ -12,7 +12,6 @@ subdir = src/backend/utils/mmgr
 top_builddir = ../../../..
 include $(top_builddir)/src/Makefile.global
 
-OBJS = aset.o freepage.o mcxt.o portalmem.o sb_alloc.o balloc_map.o \
-       sb_region.o
+OBJS = aregion.o aset.o freepage.o mcxt.o portalmem.o sb_alloc.o balloc_map.o
 
 include $(top_srcdir)/src/backend/common.mk
similarity index 74%
rename from src/backend/utils/mmgr/sb_region.c
rename to src/backend/utils/mmgr/aregion.c
index ed4cd8311fb85abb1a22291fc07a3b6b47a279af..2ef538e71ba5eb988fca19d86e88881c8ee1f162 100644 (file)
@@ -1,6 +1,6 @@
 /*-------------------------------------------------------------------------
  *
- * sb_region.c
+ * aregion.c
  *       Superblock allocator memory region manager.
  *
  * The superblock allocator operates on ranges of pages managed by a
  * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * src/backend/utils/mmgr/sb_region.c
+ * src/backend/utils/mmgr/aregion.c
  *
  *-------------------------------------------------------------------------
  */
 
 #include "postgres.h"
 
-#include "utils/sb_region.h"
+#include "utils/aregion.h"
 
 /*
  * On 64-bit systems, we use a two-level radix tree to find the data for
  * covers 2^44 bytes of address space (16TB), we expect overflows of the
  * four-entry cache to happen essentially never.
  */
-#define SB_LOOKUP_ROOT_BITS                    20
-#define SB_LOOKUP_ROOT_ENTRIES         (1 << SB_LOOKUP_ROOT_BITS)
-#define SB_LOOKUP_ROOT_CACHE_SIZE      4
-#define SB_LOOKUP_L2_BITS                      12
-#define SB_LOOKUP_L2_ENTRIES           (1 << SB_LOOKUP_L2_BITS)
+#define AREGION_LOOKUP_ROOT_BITS                       20
+#define AREGION_LOOKUP_ROOT_ENTRIES            (1 << AREGION_LOOKUP_ROOT_BITS)
+#define AREGION_LOOKUP_ROOT_CACHE_SIZE 4
+#define AREGION_LOOKUP_L2_BITS                 12
+#define AREGION_LOOKUP_L2_ENTRIES              (1 << AREGION_LOOKUP_L2_BITS)
 
 /* Lookup data for a 4GB range of address space. */
 typedef struct
 {
        int             nused;
        int             nallocated;
-       sb_region **region;
-} sb_lookup_leaf;
+       AllocatorRegion **region;
+} AllocatorRegionLookupLeaf;
 
 /* Lookup data for a 16TB range of address space, direct mapped. */
 typedef struct
 {
-       sb_lookup_leaf *leaf[SB_LOOKUP_L2_ENTRIES];
-} sb_lookup_l2;
+       AllocatorRegionLookupLeaf *leaf[AREGION_LOOKUP_L2_ENTRIES];
+} AllocatorRegionLookupL2;
 
 /* Lookup data for an entire 64-bit address space. */
 typedef struct
 {
-       uint32  cache_key[SB_LOOKUP_ROOT_CACHE_SIZE];
-       sb_lookup_l2 *cache_value[SB_LOOKUP_ROOT_CACHE_SIZE];
-       sb_lookup_l2 **l2;
-} sb_lookup_root;
+       uint32  cache_key[AREGION_LOOKUP_ROOT_CACHE_SIZE];
+       AllocatorRegionLookupL2 *cache_value[AREGION_LOOKUP_ROOT_CACHE_SIZE];
+       AllocatorRegionLookupL2 **l2;
+} AllocatorRegionLookupRoot;
 
 /* Toplevel address lookup structure. */
 #if SIZEOF_SIZE_T > 4
-static sb_lookup_root lookup_root;
+static AllocatorRegionLookupRoot lookup_root;
 #else
-static sb_lookup_leaf lookup_root_leaf;
+static AllocatorRegionLookupLeaf lookup_root_leaf;
 #endif
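
[Note: the lookup structure above decodes a 64-bit pointer in three steps: the low 32 bits address within a 4GB leaf range, the next 12 bits pick a leaf within a 16TB L2 node, and the top 20 bits pick the L2 node itself. A standalone sketch of that arithmetic — not part of the patch; it mirrors the index computations in AllocatorRegionFindLeaf and LookupAllocatorRegion below:

    #include <stdint.h>

    #define AREGION_LOOKUP_ROOT_BITS    20
    #define AREGION_LOOKUP_ROOT_ENTRIES (1 << AREGION_LOOKUP_ROOT_BITS)
    #define AREGION_LOOKUP_L2_BITS      12
    #define AREGION_LOOKUP_L2_ENTRIES   (1 << AREGION_LOOKUP_L2_BITS)

    /* Split a pointer into root index and L2 slot, as the radix tree does. */
    static void
    decode_address(uintptr_t p, uintptr_t *rootidx, uintptr_t *l2slot)
    {
        uintptr_t highbits = p >> 32;   /* which 4GB leaf range */

        /* Bits 44..63: which of the 2^20 L2 nodes (16TB each). */
        *rootidx = (highbits >> AREGION_LOOKUP_L2_BITS) &
            (AREGION_LOOKUP_ROOT_ENTRIES - 1);
        /* Bits 32..43: which of the 2^12 leaves within that node. */
        *l2slot = highbits & (AREGION_LOOKUP_L2_ENTRIES - 1);
    }
]
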
 
 /*
@@ -102,52 +102,54 @@ static dlist_head private_freelist[NUM_PRIVATE_FREELISTS];
  * we add a bit of space for bookkeeping.  These values are in units of
  * FPM_PAGE_SIZE.
  */
-#define SB_REGION_INITSIZE             (16 * SB_PAGES_PER_SUPERBLOCK)
-#define SB_REGION_MAXSIZE              ((64 * 1024 * 1024) / FPM_PAGE_SIZE)
+#define AREGION_INITSIZE               (16 * BLOCK_ALLOCATOR_PAGES_PER_CHUNK)
+#define AREGION_MAXSIZE                        ((64 * 1024 * 1024) / FPM_PAGE_SIZE)
 
-static Size sb_private_pages_allocated = 0;
-static Size sb_private_bytes_allocated = 0;
-static Size sb_peak_private_bytes_allocated = 0;
+static Size aregion_private_pages_allocated = 0;
+static Size aregion_private_bytes_allocated = 0;
+static Size aregion_peak_private_bytes_allocated = 0;
 
 /* Static functions. */
-static bool sb_adjust_lookup(sb_region *region, bool insert);
-static bool sb_adjust_lookup_leaf(sb_lookup_leaf *leaf, sb_region *region,
-                                         bool insert);
-static void sb_dump_regions_leaf(sb_region *last_region, sb_lookup_leaf *leaf);
+static bool AllocatorRegionAdjustLookup(AllocatorRegion *region, bool insert);
+static bool AllocatorRegionAdjustLookupLeaf(AllocatorRegionLookupLeaf *leaf,
+                                                               AllocatorRegion *region, bool insert);
+static void DumpAllocatorRegionsLeaf(AllocatorRegion *last_region,
+                                                AllocatorRegionLookupLeaf *leaf);
 #if SIZEOF_SIZE_T > 4
-static sb_lookup_leaf *sb_find_leaf(Size highbits, bool insert);
+static AllocatorRegionLookupLeaf *AllocatorRegionFindLeaf(Size highbits,
+                                               bool insert);
 #endif
 static void *system_calloc(Size count, Size s);
 static void system_free(void *p, Size s);
 static void *system_malloc(Size s);
 
 /*
- * Dump debugging information for sb_region objects.
+ * Dump debugging information for AllocatorRegion objects.
  */
 void
-sb_dump_regions(void)
+DumpAllocatorRegions(void)
 {
 #if SIZEOF_SIZE_T > 4
-       sb_region *last_region = NULL;
+       AllocatorRegion *last_region = NULL;
 
        if (lookup_root.l2 != NULL)
        {
                int i;
                int j;
 
-               for (i = 0; i < SB_LOOKUP_ROOT_ENTRIES; ++i)
+               for (i = 0; i < AREGION_LOOKUP_ROOT_ENTRIES; ++i)
                {
-                       sb_lookup_l2 *l2 = lookup_root.l2[i];
+                       AllocatorRegionLookupL2 *l2 = lookup_root.l2[i];
 
                        if (l2 == NULL)
                                continue;
-                       for (j = 0; j < SB_LOOKUP_L2_ENTRIES; ++j)
+                       for (j = 0; j < AREGION_LOOKUP_L2_ENTRIES; ++j)
                        {
-                               sb_lookup_leaf *leaf = l2->leaf[j];
+                               AllocatorRegionLookupLeaf *leaf = l2->leaf[j];
 
                                if (leaf != NULL)
                                {
-                                       sb_dump_regions_leaf(last_region, leaf);
+                                       DumpAllocatorRegionsLeaf(last_region, leaf);
                                        last_region = leaf->region[leaf->nused - 1];
                                }
                        }
@@ -163,10 +165,10 @@ sb_dump_regions(void)
                        int             i;
                        int             j;
                        int             n = -1;
-                       sb_lookup_l2 *l2;
+                       AllocatorRegionLookupL2 *l2;
 
                        /* Find next L2 entry to visit. */
-                       for (i = 0; i < SB_LOOKUP_ROOT_CACHE_SIZE; ++i)
+                       for (i = 0; i < AREGION_LOOKUP_ROOT_CACHE_SIZE; ++i)
                        {
                                if (lookup_root.cache_value[i] != NULL &&
                                        (first || lookup_root.cache_key[i] > highbits))
@@ -179,36 +181,37 @@ sb_dump_regions(void)
 
                        /* Dump this L2 entry. */
                        l2 = lookup_root.cache_value[n];
-                       for (j = 0; j < SB_LOOKUP_L2_ENTRIES; ++j)
+                       for (j = 0; j < AREGION_LOOKUP_L2_ENTRIES; ++j)
                        {
-                               sb_lookup_leaf *leaf = l2->leaf[j];
+                               AllocatorRegionLookupLeaf *leaf = l2->leaf[j];
 
                                if (leaf != NULL)
                                {
-                                       sb_dump_regions_leaf(last_region, leaf);
+                                       DumpAllocatorRegionsLeaf(last_region, leaf);
                                        last_region = leaf->region[leaf->nused - 1];
                                }
                        }
                }
        }
 #else
-       sb_dump_regions_leaf(NULL, lookup_root_leaf);
+       DumpAllocatorRegionsLeaf(NULL, lookup_root_leaf);
 #endif
 
        fprintf(stderr, "== overall statistics ==\n");
        fprintf(stderr, "private bytes now: %zu, peak %zu\n",
-               sb_private_bytes_allocated,
-               Max(sb_private_bytes_allocated, sb_peak_private_bytes_allocated));
+               aregion_private_bytes_allocated,
+               Max(aregion_private_bytes_allocated,
+                       aregion_peak_private_bytes_allocated));
 }
 
 /*
  * Find the region to which a pointer belongs.
  */
-sb_region *
-sb_lookup_region(void *ptr)
+AllocatorRegion *
+LookupAllocatorRegion(void *ptr)
 {
        Size p = (Size) ptr;
-       sb_lookup_leaf *leaf = NULL;
+       AllocatorRegionLookupLeaf *leaf = NULL;
        int             high, low;
 
        /*
@@ -220,14 +223,14 @@ sb_lookup_region(void *ptr)
        {
                Size    highbits = p >> 32;
                static Size last_highbits = 0;
-               static sb_lookup_leaf *last_leaf = NULL;
+               static AllocatorRegionLookupLeaf *last_leaf = NULL;
 
                /* Quick test to see if we're in same range as before. */
                if (last_highbits == highbits && last_leaf != NULL)
                        leaf = last_leaf;
                else
                {
-                       leaf = sb_find_leaf(highbits, false);
+                       leaf = AllocatorRegionFindLeaf(highbits, false);
 
                        /* No lookup table for this 4GB range?  OK, no matching region. */
                        if (leaf == NULL)
@@ -242,13 +245,13 @@ sb_lookup_region(void *ptr)
        leaf = &lookup_root_leaf;
 #endif
 
-       /* Now we use binary search on the sb_lookup_leaf. */
+       /* Now we use binary search on the AllocatorRegionLookupLeaf. */
        high = leaf->nused;
        low = 0;
        while (low < high)
        {
                int mid;
-               sb_region *region;
+               AllocatorRegion *region;
 
                mid = (high + low) / 2;
                region = leaf->region[mid];
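
[Note: the hunk above is cut off mid-loop by the diff display. For reference, a minimal sketch of the complete containment search — not part of the patch; the comparison arms are an assumption based on the surrounding code, which keeps leaf->region[] sorted by region_start:

    static AllocatorRegion *
    leaf_search(AllocatorRegionLookupLeaf *leaf, char *p)
    {
        int     high = leaf->nused;
        int     low = 0;

        while (low < high)
        {
            int     mid = (high + low) / 2;
            AllocatorRegion *region = leaf->region[mid];

            if (p < region->region_start)
                high = mid;         /* pointer sorts before this region */
            else if (p >= region->region_start + region->region_size)
                low = mid + 1;      /* pointer sorts after this region */
            else
                return region;      /* pointer falls inside this region */
        }

        return NULL;                /* no region contains p */
    }
]
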
@@ -268,15 +271,15 @@ sb_lookup_region(void *ptr)
  * function.  We search the existing backend-private regions for one capable
  * of satisfying the request; if none found, we must create a new region.
  */
-sb_region *
-sb_private_region_for_allocator(Size npages)
+AllocatorRegion *
+GetRegionForPrivateAllocation(Size npages)
 {
        int freelist = Min(fls(npages), NUM_PRIVATE_FREELISTS);
        Size    new_region_net_pages;
        Size    metadata_bytes;
        char   *region_start;
        Size    region_size;
-       sb_region *region;
+       AllocatorRegion *region;
 
        Assert(npages > 0);
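
[Note: fls() returns the 1-based index of the highest set bit, so requests — and, at the bottom of this function, newly created regions — are bucketed by power-of-two magnitude, capped at NUM_PRIVATE_FREELISTS (its value is defined earlier in the file, outside this diff). A portable sketch of the semantics assumed here, not part of the patch:

    /* fls(1) == 1, fls(3) == 2, fls(16) == 5, fls(0) == 0 */
    static int
    fls_sketch(unsigned long x)
    {
        int     bit = 0;

        while (x != 0)
        {
            ++bit;
            x >>= 1;
        }
        return bit;
    }
]
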
 
@@ -287,10 +290,10 @@ sb_private_region_for_allocator(Size npages)
 
                dlist_foreach_modify(iter, &private_freelist[freelist])
                {
-                       sb_region  *region;
+                       AllocatorRegion  *region;
                        Size    largest;
 
-                       region = dlist_container(sb_region, fl_node, iter.cur);
+                       region = dlist_container(AllocatorRegion, fl_node, iter.cur);
 
                        /*
                         * Quickly skip regions which appear to have enough space to
@@ -348,9 +351,9 @@ sb_private_region_for_allocator(Size npages)
         * to satisfy the request, so we'll need to create a new one.  First
         * step is to figure out how many pages we should try to obtain.
         */
-       for (new_region_net_pages = SB_REGION_INITSIZE;
-                new_region_net_pages < sb_private_pages_allocated &&
-                new_region_net_pages < SB_REGION_MAXSIZE; new_region_net_pages *= 2)
+       for (new_region_net_pages = AREGION_INITSIZE;
+                new_region_net_pages < aregion_private_pages_allocated &&
+                new_region_net_pages < AREGION_MAXSIZE; new_region_net_pages *= 2)
                ;
        if (new_region_net_pages < npages)
                new_region_net_pages = npages;
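
[Note: a worked example of the sizing loop above, assuming FPM_PAGE_SIZE is 4096 — its definition is outside this diff:

    /*
     * AREGION_INITSIZE = 16 * BLOCK_ALLOCATOR_PAGES_PER_CHUNK
     *                  = 16 * 16 = 256 pages   (1MB at 4KB pages)
     * AREGION_MAXSIZE  = (64 * 1024 * 1024) / 4096 = 16384 pages   (64MB)
     *
     * New regions are therefore sized 1MB, 2MB, 4MB, ... doubling until
     * the size reaches the number of private pages already allocated or
     * the 64MB cap, then bumped up to npages if the request is larger
     * still.
     */
]
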
@@ -362,7 +365,7 @@ sb_private_region_for_allocator(Size npages)
                 * Compute space required for metadata and determine raw allocation
                 * size.
                 */
-               metadata_bytes = MAXALIGN(sizeof(sb_region));
+               metadata_bytes = MAXALIGN(sizeof(AllocatorRegion));
                metadata_bytes += MAXALIGN(sizeof(FreePageManager));
                metadata_bytes +=
                        MAXALIGN(BlockAllocatorMapSize(NULL, new_region_net_pages));
@@ -390,15 +393,15 @@ sb_private_region_for_allocator(Size npages)
         * want to exit quickly and, in particular, without deallocating the
         * region.
         */
-       region = (sb_region *) region_start;
+       region = (AllocatorRegion *) region_start;
        region->region_start = region_start;
        region->region_size = region_size;
        region->usable_pages = new_region_net_pages;
-       sb_private_pages_allocated += region->usable_pages;
+       aregion_private_pages_allocated += region->usable_pages;
        region->seg = NULL;
        region->allocator = NULL;
        region->fpm = (FreePageManager *)
-               (region_start + MAXALIGN(sizeof(sb_region)));
+               (region_start + MAXALIGN(sizeof(AllocatorRegion)));
        region->pagemap = (BlockAllocatorMap *)
                (((char *) region->fpm) + MAXALIGN(sizeof(FreePageManager)));
        region->contiguous_pages = new_region_net_pages + 1;
@@ -413,7 +416,7 @@ sb_private_region_for_allocator(Size npages)
        region->contiguous_pages = new_region_net_pages; /* Now fix the value. */
        freelist = Min(fls(new_region_net_pages), NUM_PRIVATE_FREELISTS);
        dlist_push_head(&private_freelist[freelist], &region->fl_node);
-       sb_adjust_lookup(region, true);
+       AllocatorRegionAdjustLookup(region, true);
 
        /* Time to rock and roll. */
        return region;
@@ -426,7 +429,7 @@ sb_private_region_for_allocator(Size npages)
  * and otherwise to 
  */
 void
-sb_report_contiguous_freespace(sb_region *region, Size npages)
+ReportRegionContiguousFreespace(AllocatorRegion *region, Size npages)
 {
        int             old_freelist;
        int             new_freelist;
@@ -445,7 +448,7 @@ sb_report_contiguous_freespace(sb_region *region, Size npages)
                return;
 
        /*
-        * If the entire region is free, deallocate it.  The sb_region,
+        * If the entire region is free, deallocate it.  The AllocatorRegion,
         * FreePageManager, and BlockAllocatorMap for the region are stored
         * within it, so they all go away when we free the managed space.
         */
@@ -455,14 +458,14 @@ sb_report_contiguous_freespace(sb_region *region, Size npages)
                Size    region_size = region->region_size;
 
                /* Pull the region out of the lookup table. */
-               sb_adjust_lookup(region, false);
+               AllocatorRegionAdjustLookup(region, false);
 
                /* Remove the region object from the private freelist. */
                dlist_delete(&region->fl_node);
 
                /* Decrement count of private pages allocated. */
-               Assert(sb_private_pages_allocated >= region->usable_pages);
-               sb_private_pages_allocated -= region->usable_pages;
+               Assert(aregion_private_pages_allocated >= region->usable_pages);
+               aregion_private_pages_allocated -= region->usable_pages;
 
                /* Return the managed space to the operating system. */
                system_free(region_start, region_size);
@@ -488,7 +491,7 @@ sb_report_contiguous_freespace(sb_region *region, Size npages)
  * exhaustion; delete always succeeds.
  */
 static bool
-sb_adjust_lookup(sb_region *region, bool insert)
+AllocatorRegionAdjustLookup(AllocatorRegion *region, bool insert)
 {
        bool    ok = true;
 
@@ -507,7 +510,7 @@ sb_adjust_lookup(sb_region *region, bool insert)
 
        for (i = tabstart; i <= tabstop; ++i)
        {
-               sb_lookup_leaf *leaf = sb_find_leaf(i, insert);
+               AllocatorRegionLookupLeaf *leaf = AllocatorRegionFindLeaf(i, insert);
 
                /*
                 * Finding the leaf might fail if we're inserting and can't allocate
@@ -518,7 +521,7 @@ sb_adjust_lookup(sb_region *region, bool insert)
                if (leaf == NULL)
                        ok = false;
                else
-                       ok = sb_adjust_lookup_leaf(leaf, region, insert);
+                       ok = AllocatorRegionAdjustLookupLeaf(leaf, region, insert);
 
                if (!ok)
                {
@@ -526,24 +529,26 @@ sb_adjust_lookup(sb_region *region, bool insert)
                        ok = false;
                        tabstop = i - 1;
                        for (i = tabstart; i <= tabstop; ++i)
-                               sb_adjust_lookup_leaf(sb_find_leaf(i, false), region, false);
+                               AllocatorRegionAdjustLookupLeaf(AllocatorRegionFindLeaf(i,
+                                                                                               false), region, false);
                        break;
                }
        }
 #else
-       ok = sb_adjust_lookup_leaf(&lookup_root_leaf, region, insert);
+       ok = AllocatorRegionAdjustLookupLeaf(&lookup_root_leaf, region, insert);
 #endif
 
        return ok;
 }
 
 /*
- * Insert a region into, or remove a region from, a particular sb_lookup_leaf.
+ * Insert a region into, or remove a region from, a particular lookup leaf.
  * Returns true on success and false if we fail due to memory exhaustion;
  * delete always succeeds.
  */
 static bool
-sb_adjust_lookup_leaf(sb_lookup_leaf *leaf, sb_region *region, bool insert)
+AllocatorRegionAdjustLookupLeaf(AllocatorRegionLookupLeaf *leaf,
+                                                               AllocatorRegion *region, bool insert)
 {
        int             high, low;
 
@@ -551,27 +556,27 @@ sb_adjust_lookup_leaf(sb_lookup_leaf *leaf, sb_region *region, bool insert)
        if (insert && leaf->nused >= leaf->nallocated)
        {
                Size    newsize;
-               sb_region **newtab;
+               AllocatorRegion **newtab;
 
                newsize = leaf->nallocated == 0 ? 16 : leaf->nallocated * 2;
-               newtab = system_malloc(sizeof(sb_region *) * newsize);
+               newtab = system_malloc(sizeof(AllocatorRegion *) * newsize);
                if (newtab == NULL)
                        return false;
                if (leaf->nused > 0)
-                       memcpy(newtab, leaf->region, sizeof(sb_region *) * leaf->nused);
+                       memcpy(newtab, leaf->region, sizeof(AllocatorRegion *) * leaf->nused);
                if (leaf->region != NULL)
-                       system_free(leaf->region, sizeof(sb_region *) * leaf->nallocated);
+                       system_free(leaf->region, sizeof(AllocatorRegion *) * leaf->nallocated);
                leaf->nallocated = newsize;
                leaf->region = newtab;
        }
 
-       /* Use binary search on the sb_lookup_leaf. */
+       /* Use binary search on the AllocatorRegionLookupLeaf. */
        high = leaf->nused;
        low = 0;
        while (low < high)
        {
                int mid;
-               sb_region *candidate;
+               AllocatorRegion *candidate;
 
                mid = (high + low) / 2;
                candidate = leaf->region[mid];
@@ -590,7 +595,7 @@ sb_adjust_lookup_leaf(sb_lookup_leaf *leaf, sb_region *region, bool insert)
                                leaf->region[low]->region_start > region->region_start);
                if (low < leaf->nused)
                        memmove(&leaf->region[low + 1], &leaf->region[low],
-                                       sizeof(sb_region *) * (leaf->nused - low));
+                                       sizeof(AllocatorRegion *) * (leaf->nused - low));
                leaf->region[low] = region;
                ++leaf->nused;
        }
@@ -599,7 +604,7 @@ sb_adjust_lookup_leaf(sb_lookup_leaf *leaf, sb_region *region, bool insert)
                Assert(leaf->region[low] == region);
                if (low < leaf->nused - 1)
                        memmove(&leaf->region[low], &leaf->region[low + 1],
-                                       sizeof(sb_region *) * (leaf->nused - low - 1));
+                                       sizeof(AllocatorRegion *) * (leaf->nused - low - 1));
                --leaf->nused;          
        }
 
@@ -607,17 +612,18 @@ sb_adjust_lookup_leaf(sb_lookup_leaf *leaf, sb_region *region, bool insert)
 }
 
 /*
- * Dump debugging information for the regions covered by a single
- * sb_lookup_leaf.  Skip the first one if it's the same as last_region.
+ * Dump debugging information for the regions covered by a single lookup
+ * leaf.  Skip the first one if it's the same as last_region.
  */
 static void
-sb_dump_regions_leaf(sb_region *last_region, sb_lookup_leaf *leaf)
+DumpAllocatorRegionsLeaf(AllocatorRegion *last_region,
+                                                AllocatorRegionLookupLeaf *leaf)
 {
        int i;
 
        for (i = 0; i < leaf->nused; ++i)
        {
-               sb_region *region = leaf->region[i];
+               AllocatorRegion *region = leaf->region[i];
 
                if (i == 0 && region == last_region)
                        continue;
@@ -629,19 +635,20 @@ sb_dump_regions_leaf(sb_region *last_region, sb_lookup_leaf *leaf)
 }
 
 #if SIZEOF_SIZE_T > 4
-static sb_lookup_leaf *
-sb_find_leaf(Size highbits, bool insert)
+static AllocatorRegionLookupLeaf *
+AllocatorRegionFindLeaf(Size highbits, bool insert)
 {
        Size    rootbits;
-       sb_lookup_l2 *l2 = NULL;
-       sb_lookup_leaf **leafptr;
+       AllocatorRegionLookupL2 *l2 = NULL;
+       AllocatorRegionLookupLeaf **leafptr;
        int     i;
        int unused = -1;
 
-       rootbits = (highbits >> SB_LOOKUP_L2_BITS) & (SB_LOOKUP_ROOT_ENTRIES - 1);
+       rootbits = (highbits >> AREGION_LOOKUP_L2_BITS) &
+               (AREGION_LOOKUP_ROOT_ENTRIES - 1);
 
        /* Check for L2 entry in toplevel cache. */
-       for (i = 0; i < SB_LOOKUP_ROOT_CACHE_SIZE; ++i)
+       for (i = 0; i < AREGION_LOOKUP_ROOT_CACHE_SIZE; ++i)
        {
                if (lookup_root.cache_value[i] == NULL)
                        unused = i;
@@ -652,7 +659,7 @@ sb_find_leaf(Size highbits, bool insert)
 	/* If no hit, check the full L2 lookup table, if it's been initialized. */
        if (l2 == NULL && lookup_root.l2 != NULL)
        {
-               rootbits &= SB_LOOKUP_ROOT_ENTRIES - 1;
+               rootbits &= AREGION_LOOKUP_ROOT_ENTRIES - 1;
                l2 = lookup_root.l2[rootbits];
 
                /* Pull entry into cache. */
@@ -662,7 +669,7 @@ sb_find_leaf(Size highbits, bool insert)
                         * No need to be smart about replacement policy; we expect to
                         * arrive here virtually never.
                         */
-                       i = highbits % SB_LOOKUP_ROOT_CACHE_SIZE;
+                       i = highbits % AREGION_LOOKUP_ROOT_CACHE_SIZE;
                        lookup_root.cache_key[i] = highbits;
                        lookup_root.cache_value[i] = l2;
                }
@@ -673,7 +680,7 @@ sb_find_leaf(Size highbits, bool insert)
        {
                if (!insert)
                        return NULL;
-               l2 = system_calloc(1, sizeof(sb_lookup_l2));
+               l2 = system_calloc(1, sizeof(AllocatorRegionLookupL2));
                if (l2 == NULL)
                        return NULL;
                if (unused != -1)
@@ -685,23 +692,23 @@ sb_find_leaf(Size highbits, bool insert)
                        lookup_root.l2[rootbits] = l2;
                else
                {
-                       lookup_root.l2 = system_calloc(SB_LOOKUP_ROOT_ENTRIES,
-                                                                       sizeof(sb_lookup_l2 *));
+                       lookup_root.l2 = system_calloc(AREGION_LOOKUP_ROOT_ENTRIES,
+                                                                       sizeof(AllocatorRegionLookupL2 *));
                        if (lookup_root.l2 == NULL)
                        {
-                               system_free(l2, sizeof(sb_lookup_l2));
+                               system_free(l2, sizeof(AllocatorRegionLookupL2));
                                return NULL;
                        }
-                       for (i = 0; i < SB_LOOKUP_ROOT_CACHE_SIZE; ++i)
+                       for (i = 0; i < AREGION_LOOKUP_ROOT_CACHE_SIZE; ++i)
                                lookup_root.l2[lookup_root.cache_key[i]] =
                                        lookup_root.cache_value[i];
                }
        }
 
        /* Find slot for entry, and try to initialize it if needed. */
-       leafptr = &l2->leaf[highbits & (SB_LOOKUP_L2_ENTRIES - 1)];
+       leafptr = &l2->leaf[highbits & (AREGION_LOOKUP_L2_ENTRIES - 1)];
        if (insert && *leafptr == NULL)
-               *leafptr = system_calloc(1, sizeof(sb_lookup_leaf));
+               *leafptr = system_calloc(1, sizeof(AllocatorRegionLookupLeaf));
 
        return *leafptr;
 }
@@ -716,7 +723,7 @@ system_calloc(Size count, Size s)
        void *p = calloc(count, s);
 
        if (p != NULL)
-               sb_private_bytes_allocated += count * s;
+               aregion_private_bytes_allocated += count * s;
        return p;
 }
 
@@ -727,9 +734,9 @@ static void
 system_free(void *p, Size s)
 {
        free(p);
-       if (sb_private_bytes_allocated > sb_peak_private_bytes_allocated)
-               sb_peak_private_bytes_allocated = sb_private_bytes_allocated;
-       sb_private_bytes_allocated -= s;
+       if (aregion_private_bytes_allocated > aregion_peak_private_bytes_allocated)
+               aregion_peak_private_bytes_allocated = aregion_private_bytes_allocated;
+       aregion_private_bytes_allocated -= s;
 }
 
 /*
@@ -741,6 +748,6 @@ system_malloc(Size s)
        void *p = malloc(s);
 
        if (p != NULL)
-               sb_private_bytes_allocated += s;
+               aregion_private_bytes_allocated += s;
        return p;
 }
index 0fdd758752f7698eb3e9586a7191ffdaea8cd2f3..0005c3ea55163189c4dba8a2027d8ecc2a7f3d49 100644 (file)
@@ -15,7 +15,7 @@
 #include "postgres.h"
 #include "lib/stringinfo.h"
 #include "miscadmin.h"
-#include "utils/sb_region.h"
+#include "utils/aregion.h"
 
 /* Magic numbers to identify various page types */
 #define FREE_PAGE_SPAN_LEADER_MAGIC            0xea4020f0
@@ -197,11 +197,11 @@ FreePageManagerGet(FreePageManager *fpm, Size npages, Size *first_page)
        contiguous_pages = FreePageBtreeCleanup(fpm);
        if (lock == NULL && contiguous_pages > fpm->largest_reported_chunk)
        {
-               sb_region *region = sb_lookup_region(fpm);
+               AllocatorRegion *region = LookupAllocatorRegion(fpm);
 
                if (region != NULL && region->seg == NULL)
                {
-                       sb_report_contiguous_freespace(region, contiguous_pages);
+                       ReportRegionContiguousFreespace(region, contiguous_pages);
                        fpm->largest_reported_chunk = contiguous_pages;
                }
                else
@@ -307,12 +307,12 @@ FreePageManagerPut(FreePageManager *fpm, Size first_page, Size npages)
         */
        if (lock == NULL && contiguous_pages > fpm->largest_reported_chunk)
        {
-               sb_region *region = sb_lookup_region(fpm);
+               AllocatorRegion *region = LookupAllocatorRegion(fpm);
 
                if (region != NULL && region->seg == NULL)
                {
                        fpm->largest_reported_chunk = contiguous_pages;
-                       sb_report_contiguous_freespace(region, contiguous_pages);
+                       ReportRegionContiguousFreespace(region, contiguous_pages);
                }
                else
                {
index 4674f9709d647bd9983efec856d2075ed2584c17..83dcc15915eca711a6fffa13da64ec32ea864cc5 100644 (file)
@@ -14,7 +14,7 @@
 #include "postgres.h"
 
 #include "miscadmin.h"
-#include "utils/sb_region.h"
+#include "utils/aregion.h"
 
 typedef struct sb_heap sb_heap;
 typedef struct sb_span sb_span;
@@ -56,8 +56,8 @@ struct sb_span
        uint16          fclass;                 /* Current fullness class. */
 };
 
-#define SB_SPAN_NOTHING_FREE           ((uint16) -1)
-#define SB_SUPERBLOCK_SIZE                     (SB_PAGES_PER_SUPERBLOCK * FPM_PAGE_SIZE)
+#define SB_SPAN_NOTHING_FREE   ((uint16) -1)
+#define SB_SUPERBLOCK_SIZE     (BLOCK_ALLOCATOR_PAGES_PER_CHUNK * FPM_PAGE_SIZE)
 
 /*
  * Small allocations are handled by dividing a relatively large chunk of
@@ -155,9 +155,9 @@ struct sb_allocator
 };
 
 /* Helper functions. */
-static char *sb_alloc_guts(char *base, sb_region *region,
+static char *sb_alloc_guts(char *base, AllocatorRegion *region,
                          sb_allocator *a, int size_class);
-static bool sb_ensure_active_superblock(char *base, sb_region *region,
+static bool sb_ensure_active_superblock(char *base, AllocatorRegion *region,
                                                        sb_allocator *a, sb_heap *heap,
                                                        int size_class);
 static void sb_init_span(char *base, sb_span *span, sb_heap *heap,
@@ -206,7 +206,7 @@ sb_create_private_allocator(void)
 void *
 sb_alloc(sb_allocator *a, Size size, int flags)
 {
-       sb_region *region = NULL;
+       AllocatorRegion *region = NULL;
        char *base = NULL;
        uint16  size_class;
        char   *result;
@@ -222,9 +222,9 @@ sb_alloc(sb_allocator *a, Size size, int flags)
         */
        if (!a->private)
        {
-               region = sb_lookup_region(a);
+               region = LookupAllocatorRegion(a);
                if (region == NULL)
-                       elog(ERROR, "sb_region not found");
+                       elog(ERROR, "AllocatorRegion not found");
                base = region->region_start;
        }
 
@@ -250,7 +250,7 @@ sb_alloc(sb_allocator *a, Size size, int flags)
 
                /* Find a region from which to allocate. */
                if (region == NULL)
-                       region = sb_private_region_for_allocator(npages);
+                       region = GetRegionForPrivateAllocation(npages);
 
                /* Here's where we try to perform the actual allocation. */
                if (region == NULL ||
@@ -317,7 +317,7 @@ sb_alloc(sb_allocator *a, Size size, int flags)
 void
 sb_free(void *ptr)
 {
-       sb_region *region;
+       AllocatorRegion *region;
        char   *fpm_base;
        char   *base = NULL;
        sb_span *span;
@@ -328,7 +328,7 @@ sb_free(void *ptr)
        uint16  size_class;
 
        /* Locate the containing superblock. */
-       region = sb_lookup_region(ptr);
+       region = LookupAllocatorRegion(ptr);
        fpm_base = fpm_segment_base(region->fpm);
        pageno = fpm_pointer_to_page(fpm_base, ptr);
        span = BlockAllocatorMapGet(region->pagemap, pageno);
@@ -482,7 +482,7 @@ sb_alloc_space(Size size)
  * the only thing we can really reflect here is the fact that allocations
  * will be rounded up to the next larger size class (or, for large allocations,
  * to a full FPM page).  The storage overhead of the sb_span, BlockAllocatorMap,
- * sb_region, and FreePageManager structures is typically spread across
+ * AllocatorRegion, and FreePageManager structures is typically spread across
  * enough small allocations to make reflecting those costs here difficult.
  *
  * On the other hand, we also hope that the overhead in question is small
@@ -492,14 +492,14 @@ sb_alloc_space(Size size)
 Size
 sb_chunk_space(void *ptr)
 {
-       sb_region *region;
+       AllocatorRegion *region;
        char   *fpm_base;
        sb_span *span;
        Size    pageno;
        uint16  size_class;
 
        /* Locate the containing superblock. */
-       region = sb_lookup_region(ptr);
+       region = LookupAllocatorRegion(ptr);
        fpm_base = fpm_segment_base(region->fpm);
        pageno = fpm_pointer_to_page(fpm_base, ptr);
        span = BlockAllocatorMapGet(region->pagemap, pageno);
@@ -529,9 +529,9 @@ sb_reset_allocator(sb_allocator *a)
         */
        if (!a->private)
        {
-               sb_region *region = sb_lookup_region(a);
+               AllocatorRegion *region = LookupAllocatorRegion(a);
                if (region == NULL)
-                       elog(ERROR, "sb_region not found");
+                       elog(ERROR, "AllocatorRegion not found");
                base = region->region_start;
        }
 
@@ -546,7 +546,7 @@ sb_reset_allocator(sb_allocator *a)
 
                for (fclass = 0; fclass < SB_FULLNESS_CLASSES; ++fclass)
                {
-                       sb_region *region;
+                       AllocatorRegion *region;
                        char *superblock;
                        sb_span *span;
 
@@ -558,7 +558,7 @@ sb_reset_allocator(sb_allocator *a)
 
                                superblock = relptr_access(base, span->start);
                                nextspan = relptr_access(base, span->nextspan);
-                               region = sb_lookup_region(superblock);
+                               region = LookupAllocatorRegion(superblock);
                                Assert(region != NULL);
                                offset = superblock - fpm_segment_base(region->fpm);
                                Assert(offset % FPM_PAGE_SIZE == 0);
@@ -575,7 +575,7 @@ sb_reset_allocator(sb_allocator *a)
  * If necessary, steal or create another superblock.
  */
 static char *
-sb_alloc_guts(char *base, sb_region *region, sb_allocator *a, int size_class)
+sb_alloc_guts(char *base, AllocatorRegion *region, sb_allocator *a, int size_class)
 {
        sb_heap *heap = &a->heaps[size_class];
        LWLock *lock = relptr_access(base, heap->lock);
@@ -657,7 +657,7 @@ sb_alloc_guts(char *base, sb_region *region, sb_allocator *a, int size_class)
  * superblock that would otherwise become empty soon.
  */
 static bool
-sb_ensure_active_superblock(char *base, sb_region *region, sb_allocator *a,
+sb_ensure_active_superblock(char *base, AllocatorRegion *region, sb_allocator *a,
                                                        sb_heap *heap, int size_class)
 {
        Size    obsize = sb_size_classes[size_class];
@@ -755,20 +755,20 @@ sb_ensure_active_superblock(char *base, sb_region *region, sb_allocator *a,
         */
        if (size_class != SB_SCLASS_SPAN_OF_SPANS)
        {
-               sb_region *span_region = a->private ? NULL : region;
+               AllocatorRegion *span_region = a->private ? NULL : region;
 
                span = (sb_span *) sb_alloc_guts(base, span_region, a,
                                                                                 SB_SCLASS_SPAN_OF_SPANS);
                if (span == NULL)
                        return false;
-               npages = SB_PAGES_PER_SUPERBLOCK;
+               npages = BLOCK_ALLOCATOR_PAGES_PER_CHUNK;
        }
 
        /* Find a region from which to allocate the superblock. */
        if (region == NULL)
        {
                Assert(a->private);
-               region = sb_private_region_for_allocator(npages);
+               region = GetRegionForPrivateAllocation(npages);
        }
 
        /* Try to allocate the actual superblock. */
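
[Note: a minimal usage sketch of the backend-private path exercised above — not part of the patch; it assumes sb_create_private_allocator() returns an sb_allocator *, which this diff's context does not show:

    static void
    private_alloc_sketch(void)
    {
        sb_allocator *a = sb_create_private_allocator();
        char   *buf;

        /* SB_ALLOC_SOFT_FAIL: return NULL on exhaustion rather than ERROR. */
        buf = sb_alloc(a, 8192, SB_ALLOC_SOFT_FAIL);
        if (buf == NULL)
            return;

        /* Space actually consumed, after rounding up to a size class. */
        elog(DEBUG1, "allocated %zu bytes", sb_chunk_space(buf));

        sb_free(buf);
    }
]
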
similarity index 55%
rename from src/include/utils/sb_region.h
rename to src/include/utils/aregion.h
index 152056b2177aa35e6cc4384d6459411956a32d5a..7081e7ff245ab492d1709a1c10431af7a2db990a 100644 (file)
@@ -1,18 +1,18 @@
 /*-------------------------------------------------------------------------
  *
- * sb_region.h
- *       Superblock allocator memory region manager.
+ * aregion.h
+ *       Allocator region manager.
  *
  * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * src/include/utils/sb_region.h
+ * src/include/utils/aregion.h
  *
  *-------------------------------------------------------------------------
  */
 
-#ifndef SB_REGION_H
-#define SB_REGION_H
+#ifndef AREGION_H
+#define AREGION_H
 
 #include "lib/ilist.h"
 #include "storage/dsm.h"
 #include "utils/sb_alloc.h"
 #include "utils/balloc_map.h"
 
-/* Pages per superblock (in units of FPM_PAGE_SIZE). */
-#define SB_PAGES_PER_SUPERBLOCK                16
-
 /*
- * An sb_region is a backend-private object used to track allocatable regions
- * of memory, either backend-private or shared.
+ * An AllocatorRegion is a backend-private object used to track allocatable
+ * regions of memory, which can be either backend-private or shared.
  */
-typedef struct sb_region
+typedef struct AllocatorRegion
 {
        char *region_start;                     /* Address of region. */
        Size region_size;                       /* Number of bytes in region. */
@@ -39,33 +36,35 @@ typedef struct sb_region
        BlockAllocatorMap *pagemap;     /* Page map for region (if any). */
        Size contiguous_pages;          /* Last reported contiguous free pages. */
        dlist_node fl_node;                     /* Freelist links. */
-} sb_region;
+} AllocatorRegion;
 
 /*
- * An sb_shared_region is a shared-memory object containing the information
- * necessary to set up an sb_region object for an individual backend.
+ * An AllocatorSharedRegion is a shared-memory object containing the
+ * information necessary to set up AllocatorRegion and BlockAllocatorContext
+ * objects for an individual backend.  (If we ever have more than one allocator
+ * that can allocate from shared memory, this will need some revision.)
  */
-typedef struct sb_shared_region
+typedef struct AllocatorSharedRegion
 {
        relptr(FreePageManager) fpm;
        relptr(BlockAllocatorMap) pagemap;
        relptr(sb_allocator) allocator;
        int     lwlock_tranche_id;
        char lwlock_tranche_name[FLEXIBLE_ARRAY_MEMBER];
-} sb_shared_region;
+} AllocatorSharedRegion;
 
 /* Public API. */
-extern sb_shared_region *sb_create_shared_region(dsm_segment *seg,
+extern AllocatorSharedRegion *CreateAllocatorSharedRegion(dsm_segment *seg,
                                                shm_toc *toc, Size size,
                                                int lwlock_tranche_id,
                                                char *lwlock_tranche_name);
-extern sb_allocator *sb_attach_shared_region(dsm_segment *,
-                                               sb_shared_region *);
-extern void sb_dump_regions(void);
+extern sb_allocator *AttachAllocatorSharedRegion(dsm_segment *,
+                                               AllocatorSharedRegion *);
+extern void DumpAllocatorRegions(void);
 
 /* For internal use by cooperating modules. */
-extern sb_region *sb_lookup_region(void *);
-extern sb_region *sb_private_region_for_allocator(Size npages);
-extern void sb_report_contiguous_freespace(sb_region *, Size npages);
+extern AllocatorRegion *LookupAllocatorRegion(void *);
+extern AllocatorRegion *GetRegionForPrivateAllocation(Size npages);
+extern void ReportRegionContiguousFreespace(AllocatorRegion *, Size npages);
 
-#endif         /* SB_REGION_H */
+#endif         /* AREGION_H */
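
[Note: a sketch of the shared-memory path through this API — not part of the patch; the dsm_segment/shm_toc setup is elided, and the size, tranche id, and tranche name are placeholders:

    static sb_allocator *
    shared_region_sketch(dsm_segment *seg, shm_toc *toc)
    {
        AllocatorSharedRegion *shregion;

        /* Creating backend: carve an allocator region out of the segment. */
        shregion = CreateAllocatorSharedRegion(seg, toc, 1024 * 1024,
                                               42, "aregion_sketch");

        /* Any backend, including the creator, attaches to allocate from it. */
        return AttachAllocatorSharedRegion(seg, shregion);
    }
]
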
index 8fe92832a2206308fc911d108c13dd7834b656c4..1ce53340e52d1bf8895811f366f758ebef14d574 100644 (file)
@@ -19,6 +19,9 @@
 
 typedef struct sb_allocator sb_allocator;
 
+/* Number of pages (see FPM_PAGE_SIZE) per block-allocator chunk. */
+#define BLOCK_ALLOCATOR_PAGES_PER_CHUNK                16
+
 /* Allocation options. */
 #define SB_ALLOC_HUGE                          0x0001          /* allow >=1GB */
 #define SB_ALLOC_SOFT_FAIL                     0x0002          /* return NULL if no mem */
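
[Note: with the constant relocated here, the superblock size used by sb_alloc.c works out as follows, assuming FPM_PAGE_SIZE is 4096 (defined outside this diff):

    /*
     * SB_SUPERBLOCK_SIZE = BLOCK_ALLOCATOR_PAGES_PER_CHUNK * FPM_PAGE_SIZE
     *                    = 16 * 4096 = 64KB per superblock
     */
]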