/*-------------------------------------------------------------------------
*
- * sb_region.c
+ * aregion.c
* Superblock allocator memory region manager.
*
* The superblock allocator operates on ranges of pages managed by a
* Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * src/backend/utils/mmgr/sb_region.c
+ * src/backend/utils/mmgr/aregion.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
-#include "utils/sb_region.h"
+#include "utils/aregion.h"
/*
* On 64-bit systems, we use a two-level radix tree to find the data for
* covers 2^44 bytes of address space (16TB), we expect overflows of the
* four-entry cache to happen essentially never.
*/
-#define SB_LOOKUP_ROOT_BITS 20
-#define SB_LOOKUP_ROOT_ENTRIES (1 << SB_LOOKUP_ROOT_BITS)
-#define SB_LOOKUP_ROOT_CACHE_SIZE 4
-#define SB_LOOKUP_L2_BITS 12
-#define SB_LOOKUP_L2_ENTRIES (1 << SB_LOOKUP_L2_BITS)
+#define AREGION_LOOKUP_ROOT_BITS 20
+#define AREGION_LOOKUP_ROOT_ENTRIES (1 << AREGION_LOOKUP_ROOT_BITS)
+#define AREGION_LOOKUP_ROOT_CACHE_SIZE 4
+#define AREGION_LOOKUP_L2_BITS 12
+#define AREGION_LOOKUP_L2_ENTRIES (1 << AREGION_LOOKUP_L2_BITS)
/* Lookup data for a 4GB range of address space. */
typedef struct
{
int nused;
int nallocated;
- sb_region **region;
-} sb_lookup_leaf;
+ AllocatorRegion **region;
+} AllocatorRegionLookupLeaf;
/* Lookup data for a 16TB range of address space, direct mapped. */
typedef struct
{
- sb_lookup_leaf *leaf[SB_LOOKUP_L2_ENTRIES];
-} sb_lookup_l2;
+ AllocatorRegionLookupLeaf *leaf[AREGION_LOOKUP_L2_ENTRIES];
+} AllocatorRegionLookupL2;
/* Lookup data for an entire 64-bit address space. */
typedef struct
{
- uint32 cache_key[SB_LOOKUP_ROOT_CACHE_SIZE];
- sb_lookup_l2 *cache_value[SB_LOOKUP_ROOT_CACHE_SIZE];
- sb_lookup_l2 **l2;
-} sb_lookup_root;
+ uint32 cache_key[AREGION_LOOKUP_ROOT_CACHE_SIZE];
+ AllocatorRegionLookupL2 *cache_value[AREGION_LOOKUP_ROOT_CACHE_SIZE];
+ AllocatorRegionLookupL2 **l2;
+} AllocatorRegionLookupRoot;
/* Toplevel address lookup structure. */
#if SIZEOF_SIZE_T > 4
-static sb_lookup_root lookup_root;
+static AllocatorRegionLookupRoot lookup_root;
#else
-static sb_lookup_leaf lookup_root_leaf;
+static AllocatorRegionLookupLeaf lookup_root_leaf;
#endif
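+/*
+ * Worked example of the address decomposition, derived from the constants
+ * and structures above (illustration only): for a pointer p on a 64-bit
+ * system,
+ *
+ *   highbits   = (Size) p >> 32;              one leaf per 4GB range
+ *   root index = (highbits >> AREGION_LOOKUP_L2_BITS)
+ *                & (AREGION_LOOKUP_ROOT_ENTRIES - 1);
+ *   L2 index   = highbits & (AREGION_LOOKUP_L2_ENTRIES - 1);
+ *
+ * Each AllocatorRegionLookupLeaf therefore covers a single 4GB range, each
+ * AllocatorRegionLookupL2 covers 2^44 bytes (16TB), and the four-entry
+ * cache in AllocatorRegionLookupRoot is consulted before the full root
+ * table.
+ */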
/*
* we add a bit of space for bookkeeping. These values are in units of
* FPM_PAGE_SIZE.
*/
-#define SB_REGION_INITSIZE (16 * SB_PAGES_PER_SUPERBLOCK)
-#define SB_REGION_MAXSIZE ((64 * 1024 * 1024) / FPM_PAGE_SIZE)
+#define AREGION_INITSIZE (16 * BLOCK_ALLOCATOR_PAGES_PER_CHUNK)
+#define AREGION_MAXSIZE ((64 * 1024 * 1024) / FPM_PAGE_SIZE)
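+/*
+ * For a rough sense of scale (assuming the conventional 4kB FPM_PAGE_SIZE
+ * and 16 pages per chunk, both defined elsewhere): AREGION_INITSIZE then
+ * comes to 256 pages, or 1MB of usable space, while AREGION_MAXSIZE works
+ * out to 16384 pages (64MB).
+ */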
-static Size sb_private_pages_allocated = 0;
-static Size sb_private_bytes_allocated = 0;
-static Size sb_peak_private_bytes_allocated = 0;
+static Size aregion_private_pages_allocated = 0;
+static Size aregion_private_bytes_allocated = 0;
+static Size aregion_peak_private_bytes_allocated = 0;
/* Static functions. */
-static bool sb_adjust_lookup(sb_region *region, bool insert);
-static bool sb_adjust_lookup_leaf(sb_lookup_leaf *leaf, sb_region *region,
- bool insert);
-static void sb_dump_regions_leaf(sb_region *last_region, sb_lookup_leaf *leaf);
+static bool AllocatorRegionAdjustLookup(AllocatorRegion *region, bool insert);
+static bool AllocatorRegionAdjustLookupLeaf(AllocatorRegionLookupLeaf *leaf,
+ AllocatorRegion *region, bool insert);
+static void DumpAllocatorRegionsLeaf(AllocatorRegion *last_region,
+ AllocatorRegionLookupLeaf *leaf);
#if SIZEOF_SIZE_T > 4
-static sb_lookup_leaf *sb_find_leaf(Size highbits, bool insert);
+static AllocatorRegionLookupLeaf *AllocatorRegionFindLeaf(Size highbits,
+ bool insert);
#endif
static void *system_calloc(Size count, Size s);
static void system_free(void *p, Size s);
static void *system_malloc(Size s);
/*
- * Dump debugging information for sb_region objects.
+ * Dump debugging information for AllocatorRegion objects.
*/
void
-sb_dump_regions(void)
+DumpAllocatorRegions(void)
{
#if SIZEOF_SIZE_T > 4
- sb_region *last_region = NULL;
+ AllocatorRegion *last_region = NULL;
if (lookup_root.l2 != NULL)
{
int i;
int j;
- for (i = 0; i < SB_LOOKUP_ROOT_ENTRIES; ++i)
+ for (i = 0; i < AREGION_LOOKUP_ROOT_ENTRIES; ++i)
{
- sb_lookup_l2 *l2 = lookup_root.l2[i];
+ AllocatorRegionLookupL2 *l2 = lookup_root.l2[i];
if (l2 == NULL)
continue;
- for (j = 0; j < SB_LOOKUP_L2_ENTRIES; ++j)
+ for (j = 0; j < AREGION_LOOKUP_L2_ENTRIES; ++j)
{
- sb_lookup_leaf *leaf = l2->leaf[j];
+ AllocatorRegionLookupLeaf *leaf = l2->leaf[j];
if (leaf != NULL)
{
- sb_dump_regions_leaf(last_region, leaf);
+ DumpAllocatorRegionsLeaf(last_region, leaf);
last_region = leaf->region[leaf->nused - 1];
}
}
int i;
int j;
int n = -1;
- sb_lookup_l2 *l2;
+ AllocatorRegionLookupL2 *l2;
/* Find next L2 entry to visit. */
- for (i = 0; i < SB_LOOKUP_ROOT_CACHE_SIZE; ++i)
+ for (i = 0; i < AREGION_LOOKUP_ROOT_CACHE_SIZE; ++i)
{
if (lookup_root.cache_value[i] != NULL &&
(first || lookup_root.cache_key[i] > highbits))
/* Dump this L2 entry. */
l2 = lookup_root.cache_value[n];
- for (j = 0; j < SB_LOOKUP_L2_ENTRIES; ++j)
+ for (j = 0; j < AREGION_LOOKUP_L2_ENTRIES; ++j)
{
- sb_lookup_leaf *leaf = l2->leaf[j];
+ AllocatorRegionLookupLeaf *leaf = l2->leaf[j];
if (leaf != NULL)
{
- sb_dump_regions_leaf(last_region, leaf);
+ DumpAllocatorRegionsLeaf(last_region, leaf);
last_region = leaf->region[leaf->nused - 1];
}
}
}
}
#else
- sb_dump_regions_leaf(NULL, &lookup_root_leaf);
+ DumpAllocatorRegionsLeaf(NULL, &lookup_root_leaf);
#endif
fprintf(stderr, "== overall statistics ==\n");
fprintf(stderr, "private bytes now: %zu, peak %zu\n",
- sb_private_bytes_allocated,
- Max(sb_private_bytes_allocated, sb_peak_private_bytes_allocated));
+ aregion_private_bytes_allocated,
+ Max(aregion_private_bytes_allocated,
+ aregion_peak_private_bytes_allocated));
}
/*
* Find the region to which a pointer belongs.
*/
-sb_region *
-sb_lookup_region(void *ptr)
+AllocatorRegion *
+LookupAllocatorRegion(void *ptr)
{
Size p = (Size) ptr;
- sb_lookup_leaf *leaf = NULL;
+ AllocatorRegionLookupLeaf *leaf = NULL;
int high, low;
/*
{
Size highbits = p >> 32;
static Size last_highbits = 0;
- static sb_lookup_leaf *last_leaf = NULL;
+ static AllocatorRegionLookupLeaf *last_leaf = NULL;
/* Quick test to see if we're in same range as before. */
if (last_highbits == highbits && last_leaf != NULL)
leaf = last_leaf;
else
{
- leaf = sb_find_leaf(highbits, false);
+ leaf = AllocatorRegionFindLeaf(highbits, false);
/* No lookup table for this 4GB range? OK, no matching region. */
if (leaf == NULL)
leaf = &lookup_root_leaf;
#endif
- /* Now we use binary search on the sb_lookup_leaf. */
+ /* Now we use binary search on the AllocatorRegionLookupLeaf. */
high = leaf->nused;
low = 0;
while (low < high)
{
int mid;
- sb_region *region;
+ AllocatorRegion *region;
mid = (high + low) / 2;
region = leaf->region[mid];
* function. We search the existing backend-private regions for one capable
* of satisfying the request; if none found, we must create a new region.
*/
-sb_region *
-sb_private_region_for_allocator(Size npages)
+AllocatorRegion *
+GetRegionForPrivateAllocation(Size npages)
{
int freelist = Min(fls(npages), NUM_PRIVATE_FREELISTS);
Size new_region_net_pages;
Size metadata_bytes;
char *region_start;
Size region_size;
- sb_region *region;
+ AllocatorRegion *region;
Assert(npages > 0);
dlist_foreach_modify(iter, &private_freelist[freelist])
{
- sb_region *region;
+ AllocatorRegion *region;
Size largest;
- region = dlist_container(sb_region, fl_node, iter.cur);
+ region = dlist_container(AllocatorRegion, fl_node, iter.cur);
/*
* Quickly skip regions which appear to have enough space to
* to satisfy the request, so we'll need to create a new one. First
* step is to figure out how many pages we should try to obtain.
*/
- for (new_region_net_pages = SB_REGION_INITSIZE;
- new_region_net_pages < sb_private_pages_allocated &&
- new_region_net_pages < SB_REGION_MAXSIZE; new_region_net_pages *= 2)
+ for (new_region_net_pages = AREGION_INITSIZE;
+ new_region_net_pages < aregion_private_pages_allocated &&
+ new_region_net_pages < AREGION_MAXSIZE; new_region_net_pages *= 2)
;
if (new_region_net_pages < npages)
new_region_net_pages = npages;
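+ /*
+ * Illustration of the sizing loop above, assuming AREGION_INITSIZE is
+ * 256 pages (16 pages per chunk): with 3000 private pages already
+ * allocated, new_region_net_pages doubles 256 -> 512 -> 1024 -> 2048 ->
+ * 4096 and stops there, subject to the AREGION_MAXSIZE cap and to being
+ * raised to npages just above when the request itself is even larger.
+ */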
* Compute space required for metadata and determine raw allocation
* size.
*/
- metadata_bytes = MAXALIGN(sizeof(sb_region));
+ metadata_bytes = MAXALIGN(sizeof(AllocatorRegion));
metadata_bytes += MAXALIGN(sizeof(FreePageManager));
metadata_bytes +=
MAXALIGN(BlockAllocatorMapSize(NULL, new_region_net_pages));
* want to exit quickly and, in particular, without deallocating the
* region.
*/
- region = (sb_region *) region_start;
+ region = (AllocatorRegion *) region_start;
region->region_start = region_start;
region->region_size = region_size;
region->usable_pages = new_region_net_pages;
- sb_private_pages_allocated += region->usable_pages;
+ aregion_private_pages_allocated += region->usable_pages;
region->seg = NULL;
region->allocator = NULL;
region->fpm = (FreePageManager *)
- (region_start + MAXALIGN(sizeof(sb_region)));
+ (region_start + MAXALIGN(sizeof(AllocatorRegion)));
region->pagemap = (BlockAllocatorMap *)
(((char *) region->fpm) + MAXALIGN(sizeof(FreePageManager)));
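+ /*
+ * Resulting layout of a private region, as built just above (all offsets
+ * MAXALIGNed):
+ *
+ *   region_start: AllocatorRegion header
+ *                 FreePageManager
+ *                 BlockAllocatorMap
+ *                 new_region_net_pages usable pages managed by the FPM
+ */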
region->contiguous_pages = new_region_net_pages + 1;
region->contiguous_pages = new_region_net_pages; /* Now fix the value. */
freelist = Min(fls(new_region_net_pages), NUM_PRIVATE_FREELISTS);
dlist_push_head(&private_freelist[freelist], &region->fl_node);
- sb_adjust_lookup(region, true);
+ AllocatorRegionAdjustLookup(region, true);
/* Time to rock and roll. */
return region;
* and otherwise to
*/
void
-sb_report_contiguous_freespace(sb_region *region, Size npages)
+ReportRegionContiguousFreespace(AllocatorRegion *region, Size npages)
{
int old_freelist;
int new_freelist;
return;
/*
- * If the entire region is free, deallocate it. The sb_region,
+ * If the entire region is free, deallocate it. The AllocatorRegion,
* FreePageManager, and BlockAllocatorMap for the region are stored
* within it, so they all go away when we free the managed space.
*/
Size region_size = region->region_size;
/* Pull the region out of the lookup table. */
- sb_adjust_lookup(region, false);
+ AllocatorRegionAdjustLookup(region, false);
/* Remove the region object from the private freelist. */
dlist_delete(&region->fl_node);
/* Decrement count of private pages allocated. */
- Assert(sb_private_pages_allocated >= region->usable_pages);
- sb_private_pages_allocated -= region->usable_pages;
+ Assert(aregion_private_pages_allocated >= region->usable_pages);
+ aregion_private_pages_allocated -= region->usable_pages;
/* Return the managed space to the operating system. */
system_free(region_start, region_size);
* exhaustion; delete always succeeds.
*/
static bool
-sb_adjust_lookup(sb_region *region, bool insert)
+AllocatorRegionAdjustLookup(AllocatorRegion *region, bool insert)
{
bool ok = true;
for (i = tabstart; i <= tabstop; ++i)
{
- sb_lookup_leaf *leaf = sb_find_leaf(i, insert);
+ AllocatorRegionLookupLeaf *leaf = AllocatorRegionFindLeaf(i, insert);
/*
* Finding the leaf might fail if we're inserting and can't allocate
if (leaf == NULL)
ok = false;
else
- ok = sb_adjust_lookup_leaf(leaf, region, insert);
+ ok = AllocatorRegionAdjustLookupLeaf(leaf, region, insert);
if (!ok)
{
ok = false;
tabstop = i - 1;
for (i = tabstart; i <= tabstop; ++i)
- sb_adjust_lookup_leaf(sb_find_leaf(i, false), region, false);
+ AllocatorRegionAdjustLookupLeaf(AllocatorRegionFindLeaf(i,
+ false), region, false);
break;
}
}
#else
- ok = sb_adjust_lookup_leaf(&lookup_root_leaf, region, insert);
+ ok = AllocatorRegionAdjustLookupLeaf(&lookup_root_leaf, region, insert);
#endif
return ok;
}
/*
- * Insert a region into, or remove a region from, a particular sb_lookup_leaf.
+ * Insert a region into, or remove a region from, a particular lookup leaf.
* Returns true on success and false if we fail due to memory exhaustion;
* delete always succeeds.
*/
static bool
-sb_adjust_lookup_leaf(sb_lookup_leaf *leaf, sb_region *region, bool insert)
+AllocatorRegionAdjustLookupLeaf(AllocatorRegionLookupLeaf *leaf,
+ AllocatorRegion *region, bool insert)
{
int high, low;
if (insert && leaf->nused >= leaf->nallocated)
{
Size newsize;
- sb_region **newtab;
+ AllocatorRegion **newtab;
newsize = leaf->nallocated == 0 ? 16 : leaf->nallocated * 2;
- newtab = system_malloc(sizeof(sb_region *) * newsize);
+ newtab = system_malloc(sizeof(AllocatorRegion *) * newsize);
if (newtab == NULL)
return false;
if (leaf->nused > 0)
- memcpy(newtab, leaf->region, sizeof(sb_region *) * leaf->nused);
+ memcpy(newtab, leaf->region, sizeof(AllocatorRegion *) * leaf->nused);
if (leaf->region != NULL)
- system_free(leaf->region, sizeof(sb_region *) * leaf->nallocated);
+ system_free(leaf->region, sizeof(AllocatorRegion *) * leaf->nallocated);
leaf->nallocated = newsize;
leaf->region = newtab;
}
- /* Use binary search on the sb_lookup_leaf. */
+ /* Use binary search on the AllocatorRegionLookupLeaf. */
high = leaf->nused;
low = 0;
while (low < high)
{
int mid;
- sb_region *candidate;
+ AllocatorRegion *candidate;
mid = (high + low) / 2;
candidate = leaf->region[mid];
leaf->region[low]->region_start > region->region_start);
if (low < leaf->nused)
memmove(&leaf->region[low + 1], &leaf->region[low],
- sizeof(sb_region *) * (leaf->nused - low));
+ sizeof(AllocatorRegion *) * (leaf->nused - low));
leaf->region[low] = region;
++leaf->nused;
}
Assert(leaf->region[low] == region);
if (low < leaf->nused - 1)
memmove(&leaf->region[low], &leaf->region[low + 1],
- sizeof(sb_region *) * (leaf->nused - low - 1));
+ sizeof(AllocatorRegion *) * (leaf->nused - low - 1));
--leaf->nused;
}
}
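+/*
+ * For example, with leaf->region[] holding regions starting at addresses
+ * {A, C, D} (nused = 3), inserting a region starting at B lands on low = 1:
+ * the memmove above shifts C and D up one slot and B is stored at index 1.
+ * Deleting it performs the mirror-image memmove.  The array therefore stays
+ * sorted by region_start, which is what the binary searches here and in
+ * LookupAllocatorRegion depend on.
+ */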
/*
- * Dump debugging information for the regions covered by a single
- * sb_lookup_leaf. Skip the first one if it's the same as last_region.
+ * Dump debugging information for the regions covered by a single lookup
+ * leaf. Skip the first one if it's the same as last_region.
*/
static void
-sb_dump_regions_leaf(sb_region *last_region, sb_lookup_leaf *leaf)
+DumpAllocatorRegionsLeaf(AllocatorRegion *last_region,
+ AllocatorRegionLookupLeaf *leaf)
{
int i;
for (i = 0; i < leaf->nused; ++i)
{
- sb_region *region = leaf->region[i];
+ AllocatorRegion *region = leaf->region[i];
if (i == 0 && region == last_region)
continue;
}
#if SIZEOF_SIZE_T > 4
-static sb_lookup_leaf *
-sb_find_leaf(Size highbits, bool insert)
+static AllocatorRegionLookupLeaf *
+AllocatorRegionFindLeaf(Size highbits, bool insert)
{
Size rootbits;
- sb_lookup_l2 *l2 = NULL;
- sb_lookup_leaf **leafptr;
+ AllocatorRegionLookupL2 *l2 = NULL;
+ AllocatorRegionLookupLeaf **leafptr;
int i;
int unused = -1;
- rootbits = (highbits >> SB_LOOKUP_L2_BITS) & (SB_LOOKUP_ROOT_ENTRIES - 1);
+ rootbits = (highbits >> AREGION_LOOKUP_L2_BITS) &
+ (AREGION_LOOKUP_ROOT_ENTRIES - 1);
/* Check for L2 entry in toplevel cache. */
- for (i = 0; i < SB_LOOKUP_ROOT_CACHE_SIZE; ++i)
+ for (i = 0; i < AREGION_LOOKUP_ROOT_CACHE_SIZE; ++i)
{
if (lookup_root.cache_value[i] == NULL)
unused = i;
- /* If no hit, check the full L2 loookup table, if it's been initialized. */
+ /* If no hit, check the full L2 lookup table, if it's been initialized. */
if (l2 == NULL && lookup_root.l2 != NULL)
{
- rootbits &= SB_LOOKUP_ROOT_ENTRIES - 1;
+ rootbits &= AREGION_LOOKUP_ROOT_ENTRIES - 1;
l2 = lookup_root.l2[rootbits];
/* Pull entry into cache. */
* No need to be smart about replacement policy; we expect to
* arrive here virtually never.
*/
- i = highbits % SB_LOOKUP_ROOT_CACHE_SIZE;
+ i = highbits % AREGION_LOOKUP_ROOT_CACHE_SIZE;
lookup_root.cache_key[i] = highbits;
lookup_root.cache_value[i] = l2;
}
{
if (!insert)
return NULL;
- l2 = system_calloc(1, sizeof(sb_lookup_l2));
+ l2 = system_calloc(1, sizeof(AllocatorRegionLookupL2));
if (l2 == NULL)
return NULL;
if (unused != -1)
lookup_root.l2[rootbits] = l2;
else
{
- lookup_root.l2 = system_calloc(SB_LOOKUP_ROOT_ENTRIES,
- sizeof(sb_lookup_l2 *));
+ lookup_root.l2 = system_calloc(AREGION_LOOKUP_ROOT_ENTRIES,
+ sizeof(AllocatorRegionLookupL2 *));
if (lookup_root.l2 == NULL)
{
- system_free(l2, sizeof(sb_lookup_l2));
+ system_free(l2, sizeof(AllocatorRegionLookupL2));
return NULL;
}
- for (i = 0; i < SB_LOOKUP_ROOT_CACHE_SIZE; ++i)
+ for (i = 0; i < AREGION_LOOKUP_ROOT_CACHE_SIZE; ++i)
lookup_root.l2[lookup_root.cache_key[i]] =
lookup_root.cache_value[i];
}
}
/* Find slot for entry, and try to initialize it if needed. */
- leafptr = &l2->leaf[highbits & (SB_LOOKUP_L2_ENTRIES - 1)];
+ leafptr = &l2->leaf[highbits & (AREGION_LOOKUP_L2_ENTRIES - 1)];
if (insert && *leafptr == NULL)
- *leafptr = system_calloc(1, sizeof(sb_lookup_leaf));
+ *leafptr = system_calloc(1, sizeof(AllocatorRegionLookupLeaf));
return *leafptr;
}
void *p = calloc(count, s);
if (p != NULL)
- sb_private_bytes_allocated += count * s;
+ aregion_private_bytes_allocated += count * s;
return p;
}
system_free(void *p, Size s)
{
free(p);
- if (sb_private_bytes_allocated > sb_peak_private_bytes_allocated)
- sb_peak_private_bytes_allocated = sb_private_bytes_allocated;
- sb_private_bytes_allocated -= s;
+ if (aregion_private_bytes_allocated > aregion_peak_private_bytes_allocated)
+ aregion_peak_private_bytes_allocated = aregion_private_bytes_allocated;
+ aregion_private_bytes_allocated -= s;
}
/*
void *p = malloc(s);
if (p != NULL)
- sb_private_bytes_allocated += s;
+ aregion_private_bytes_allocated += s;
return p;
}
#include "postgres.h"
#include "miscadmin.h"
-#include "utils/sb_region.h"
+#include "utils/aregion.h"
typedef struct sb_heap sb_heap;
typedef struct sb_span sb_span;
uint16 fclass; /* Current fullness class. */
};
-#define SB_SPAN_NOTHING_FREE ((uint16) -1)
-#define SB_SUPERBLOCK_SIZE (SB_PAGES_PER_SUPERBLOCK * FPM_PAGE_SIZE)
+#define SB_SPAN_NOTHING_FREE ((uint16) -1)
+#define SB_SUPERBLOCK_SIZE (BLOCK_ALLOCATOR_PAGES_PER_CHUNK * FPM_PAGE_SIZE)
/*
* Small allocations are handled by dividing a relatively large chunk of
};
/* Helper functions. */
-static char *sb_alloc_guts(char *base, sb_region *region,
+static char *sb_alloc_guts(char *base, AllocatorRegion *region,
sb_allocator *a, int size_class);
-static bool sb_ensure_active_superblock(char *base, sb_region *region,
+static bool sb_ensure_active_superblock(char *base, AllocatorRegion *region,
sb_allocator *a, sb_heap *heap,
int size_class);
static void sb_init_span(char *base, sb_span *span, sb_heap *heap,
void *
sb_alloc(sb_allocator *a, Size size, int flags)
{
- sb_region *region = NULL;
+ AllocatorRegion *region = NULL;
char *base = NULL;
uint16 size_class;
char *result;
*/
if (!a->private)
{
- region = sb_lookup_region(a);
+ region = LookupAllocatorRegion(a);
if (region == NULL)
- elog(ERROR, "sb_region not found");
+ elog(ERROR, "AllocatorRegion not found");
base = region->region_start;
}
/* Find a region from which to allocate. */
if (region == NULL)
- region = sb_private_region_for_allocator(npages);
+ region = GetRegionForPrivateAllocation(npages);
/* Here's where we try to perform the actual allocation. */
if (region == NULL ||
void
sb_free(void *ptr)
{
- sb_region *region;
+ AllocatorRegion *region;
char *fpm_base;
char *base = NULL;
sb_span *span;
uint16 size_class;
/* Locate the containing superblock. */
- region = sb_lookup_region(ptr);
+ region = LookupAllocatorRegion(ptr);
fpm_base = fpm_segment_base(region->fpm);
pageno = fpm_pointer_to_page(fpm_base, ptr);
span = BlockAllocatorMapGet(region->pagemap, pageno);
* the only thing we can really reflect here is the fact that allocations
* will be rounded up to the next larger size class (or, for large allocations,
* to a full FPM page). The storage overhead of the sb_span, BlockAllocatorMap,
- * sb_region, and FreePageManager structures is typically spread across
+ * AllocatorRegion, and FreePageManager structures is typically spread across
* enough small allocations to make reflecting those costs here difficult.
*
* On the other hand, we also hope that the overhead in question is small
Size
sb_chunk_space(void *ptr)
{
- sb_region *region;
+ AllocatorRegion *region;
char *fpm_base;
sb_span *span;
Size pageno;
uint16 size_class;
/* Locate the containing superblock. */
- region = sb_lookup_region(ptr);
+ region = LookupAllocatorRegion(ptr);
fpm_base = fpm_segment_base(region->fpm);
pageno = fpm_pointer_to_page(fpm_base, ptr);
span = BlockAllocatorMapGet(region->pagemap, pageno);
*/
if (!a->private)
{
- sb_region *region = sb_lookup_region(a);
+ AllocatorRegion *region = LookupAllocatorRegion(a);
if (region == NULL)
- elog(ERROR, "sb_region not found");
+ elog(ERROR, "AllocatorRegion not found");
base = region->region_start;
}
for (fclass = 0; fclass < SB_FULLNESS_CLASSES; ++fclass)
{
- sb_region *region;
+ AllocatorRegion *region;
char *superblock;
sb_span *span;
superblock = relptr_access(base, span->start);
nextspan = relptr_access(base, span->nextspan);
- region = sb_lookup_region(superblock);
+ region = LookupAllocatorRegion(superblock);
Assert(region != NULL);
offset = superblock - fpm_segment_base(region->fpm);
Assert(offset % FPM_PAGE_SIZE == 0);
* If necessary, steal or create another superblock.
*/
static char *
-sb_alloc_guts(char *base, sb_region *region, sb_allocator *a, int size_class)
+sb_alloc_guts(char *base, AllocatorRegion *region, sb_allocator *a, int size_class)
{
sb_heap *heap = &a->heaps[size_class];
LWLock *lock = relptr_access(base, heap->lock);
* superblock that would otherwise become empty soon.
*/
static bool
-sb_ensure_active_superblock(char *base, sb_region *region, sb_allocator *a,
+sb_ensure_active_superblock(char *base, AllocatorRegion *region, sb_allocator *a,
sb_heap *heap, int size_class)
{
Size obsize = sb_size_classes[size_class];
*/
if (size_class != SB_SCLASS_SPAN_OF_SPANS)
{
- sb_region *span_region = a->private ? NULL : region;
+ AllocatorRegion *span_region = a->private ? NULL : region;
span = (sb_span *) sb_alloc_guts(base, span_region, a,
SB_SCLASS_SPAN_OF_SPANS);
if (span == NULL)
return false;
- npages = SB_PAGES_PER_SUPERBLOCK;
+ npages = BLOCK_ALLOCATOR_PAGES_PER_CHUNK;
}
/* Find a region from which to allocate the superblock. */
if (region == NULL)
{
Assert(a->private);
- region = sb_private_region_for_allocator(npages);
+ region = GetRegionForPrivateAllocation(npages);
}
/* Try to allocate the actual superblock. */