#include "fmgr.h"
#include "utils/memutils.h"
-#include "utils/sb_alloc.h"
-#include "utils/sb_region.h"
+#include "utils/aregion.h"
+#include "utils/balloc.h"
typedef struct llnode
{
int64 count = PG_GETARG_INT64(1);
int64 i;
int64 *p;
- sb_allocator *a;
+ BlockAllocatorContext *context;
- a = sb_create_private_allocator();
+ context = BlockAllocatorContextCreate();
for (i = 0; i < count; ++i)
{
- p = sb_alloc(a, size, 0);
+ p = BlockAllocatorAlloc(context, size, 0);
*p = i;
}
- sb_reset_allocator(a);
- sb_dump_regions();
+ BlockAllocatorReset(context);
+ DumpAllocatorRegions();
PG_RETURN_VOID();
}
int64 i;
llnode *h = NULL;
llnode *p;
- sb_allocator *a;
+ BlockAllocatorContext *context;
if (size < sizeof(llnode))
elog(ERROR, "size too small");
- a = sb_create_private_allocator();
+ context = BlockAllocatorContextCreate();
for (i = 0; i < count; ++i)
{
- p = sb_alloc(a, size, 0);
+ p = BlockAllocatorAlloc(context, size, 0);
p->next = h;
h = p;
}
while (h != NULL)
{
p = h->next;
- sb_free(h);
+ BlockAllocatorFree(h);
h = p;
}
- sb_dump_regions();
- sb_reset_allocator(a);
+ DumpAllocatorRegions();
+ BlockAllocatorReset(context);
PG_RETURN_VOID();
}
#include "storage/smgr.h"
#include "tcop/tcopprot.h"
#include "utils/rel.h"
-#include "utils/sb_alloc.h"
+#include "utils/balloc.h"
#include "utils/tuplesort.h"
{
_bt_buildadd(wstate, state, itup);
if (should_free)
- sb_free(itup);
+ BlockAllocatorFree(itup);
itup = tuplesort_getindextuple(btspool->sortstate,
true, &should_free);
}
{
_bt_buildadd(wstate, state, itup2);
if (should_free2)
- sb_free(itup2);
+ BlockAllocatorFree(itup2);
itup2 = tuplesort_getindextuple(btspool2->sortstate,
true, &should_free2);
}
_bt_buildadd(wstate, state, itup);
if (should_free)
- sb_free(itup);
+ BlockAllocatorFree(itup);
}
}
top_builddir = ../../../..
include $(top_builddir)/src/Makefile.global
-OBJS = aregion.o aset.o freepage.o mcxt.o portalmem.o sb_alloc.o balloc_map.o
+OBJS = aregion.o aset.o balloc.o balloc_map.o freepage.o mcxt.o portalmem.o
include $(top_srcdir)/src/backend/common.mk
/*-------------------------------------------------------------------------
*
* aregion.c
- * Superblock allocator memory region manager.
+ * Management of allocator memory regions.
*
- * The superblock allocator operates on ranges of pages managed by a
+ * The block allocator operates on ranges of pages managed by a
* FreePageManager and reverse-mapped by a BlockAllocatorMap. When it's
* asked to free an object, it just gets a pointer address; our job is to
* figure out which page range contains that object and locate the
- * FreePageManager, BlockAllocatorMap, and other metadata that the superblock
- * allocator will need to do its thing. Moreover, when allocating an
+ * FreePageManager, BlockAllocatorMap, and other metadata that the block
+ * allocator will need to do its thing. Moreover, when allocating an
- * object, the caller is only required to provide the superblock allocator
- * with a pointer to the sb_allocator object, which could be in either
+ * object, the caller is only required to provide the block allocator
+ * with a pointer to the BlockAllocatorContext object, which could be in either
* shared or backend-private memory; our job again is to know which it
* is and provide pointers to the appropriate supporting data structures.
* To do all this, we have to keep track of where all dynamic shared memory
}
/*
- * When a backend-private sb_allocator needs more memory, it calls this
- * function. We search the existing backend-private regions for one capable
- * of satisfying the request; if none found, we must create a new region.
+ * Provide a backend-private AllocatorRegion capable of satisfying a request
+ * for a given number of pages. If no existing region has enough contiguous
+ * freespace, we'll create a new one.
*/
AllocatorRegion *
GetRegionForPrivateAllocation(Size npages)
region->usable_pages = new_region_net_pages;
aregion_private_pages_allocated += region->usable_pages;
region->seg = NULL;
- region->allocator = NULL;
+ region->context = NULL;
region->fpm = (FreePageManager *)
(region_start + MAXALIGN(sizeof(AllocatorRegion)));
region->pagemap = (BlockAllocatorMap *)
/* This should only be called for private regions. */
Assert(region->seg == NULL);
- Assert(region->allocator == NULL);
+ Assert(region->context == NULL);
/*
* If there have been allocations from the region since the last report,
/*-------------------------------------------------------------------------
*
- * sb_alloc.c
+ * balloc.c
- *	  Superblock-based memory allocator.
+ *	  Block-based memory allocator.
*
* Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * src/backend/utils/mmgr/sb_alloc.c
+ * src/backend/utils/mmgr/balloc.c
*
*-------------------------------------------------------------------------
*/
* For large objects, we just stick all of the allocations in fullness class
* 0. Since we can just return the space directly to the free page manager,
* we don't really need them on a list at all, except that if someone wants
- * to bulk release everything allocated using this sb_allocator, we have no
- * other way of finding them.
+ * to bulk release everything allocated using this BlockAllocatorContext, we
+ * have no other way of finding them.
*/
#define SB_FULLNESS_CLASSES 4
};
/*
- * An sb_allocator is basically just a group of heaps, one per size class.
- * If locking is required, then we've also got an array of LWLocks, one per
- * heap.
+ * A BlockAllocatorContext is basically just a group of heaps, one per size
+ * class. If locking is required, then we've also got an array of LWLocks,
+ * one per heap.
*/
-struct sb_allocator
+struct BlockAllocatorContext
{
bool private;
relptr(LWLock) locks;
};
/* Helper functions. */
-static char *sb_alloc_guts(char *base, AllocatorRegion *region,
- sb_allocator *a, int size_class);
+static char *BlockAllocatorAllocGuts(char *base, AllocatorRegion *region,
+ BlockAllocatorContext *context, int size_class);
static bool sb_ensure_active_superblock(char *base, AllocatorRegion *region,
- sb_allocator *a, sb_heap *heap,
+ BlockAllocatorContext *context, sb_heap *heap,
int size_class);
static void sb_init_span(char *base, sb_span *span, sb_heap *heap,
char *ptr, Size npages, uint16 size_class);
-static void sb_out_of_memory_error(sb_allocator *a);
+static void sb_out_of_memory_error(BlockAllocatorContext *context);
static bool sb_transfer_first_span(char *base, sb_heap *heap,
int fromclass, int toclass);
static void sb_unlink_span(char *base, sb_heap *heap, sb_span *span);
/*
* Create a backend-private allocator.
*/
-sb_allocator *
-sb_create_private_allocator(void)
+BlockAllocatorContext *
+BlockAllocatorContextCreate(void)
{
Size allocator_size;
int heapno;
int fclass;
- sb_allocator *a;
+ BlockAllocatorContext *context;
char *base = NULL;
- allocator_size = offsetof(sb_allocator, heaps);
+ allocator_size = offsetof(BlockAllocatorContext, heaps);
allocator_size += sizeof(sb_heap) * SB_NUM_SIZE_CLASSES;
- a = malloc(allocator_size);
- if (a == NULL)
+ context = malloc(allocator_size);
+ if (context == NULL)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
- a->private = true;
+ context->private = true;
for (heapno = 0; heapno < SB_NUM_SIZE_CLASSES; ++heapno)
{
- sb_heap *heap = &a->heaps[heapno];
+ sb_heap *heap = &context->heaps[heapno];
relptr_store(base, heap->lock, (LWLock *) NULL);
for (fclass = 0; fclass < SB_FULLNESS_CLASSES; ++fclass)
relptr_store(base, heap->spans[fclass], (sb_span *) NULL);
}
- return a;
+ return context;
}
/*
* Allocate memory.
*/
void *
-sb_alloc(sb_allocator *a, Size size, int flags)
+BlockAllocatorAlloc(BlockAllocatorContext *context, Size size, int flags)
{
AllocatorRegion *region = NULL;
char *base = NULL;
* a region; we'll only need to grab a region if we can't allocate out of
* an existing superblock.
*/
- if (!a->private)
+ if (!context->private)
{
- region = LookupAllocatorRegion(a);
+ region = LookupAllocatorRegion(context);
if (region == NULL)
elog(ERROR, "AllocatorRegion not found");
base = region->region_start;
Size npages = fpm_size_to_pages(size);
Size first_page;
sb_span *span;
- sb_heap *heap = &a->heaps[SB_SCLASS_SPAN_LARGE];
+ sb_heap *heap = &context->heaps[SB_SCLASS_SPAN_LARGE];
LWLock *lock = relptr_access(base, heap->lock);
void *ptr;
/* Obtain a span object. */
- span = (sb_span *) sb_alloc_guts(base, region, a,
- SB_SCLASS_SPAN_OF_SPANS);
+ span = (sb_span *) BlockAllocatorAllocGuts(base, region, context,
+ SB_SCLASS_SPAN_OF_SPANS);
if (span == NULL)
{
if ((flags & SB_ALLOC_SOFT_FAIL) == 0)
- sb_out_of_memory_error(a);
+ sb_out_of_memory_error(context);
return NULL;
}
{
/* XXX. Free the span. */
if ((flags & SB_ALLOC_SOFT_FAIL) == 0)
- sb_out_of_memory_error(a);
+ sb_out_of_memory_error(context);
return NULL;
}
ptr = fpm_page_to_pointer(fpm_segment_base(region->fpm), first_page);
Assert(size_class == 0 || size > sb_size_classes[size_class - 1]);
/* Attempt the actual allocation. */
- result = sb_alloc_guts(base, region, a, size_class);
+ result = BlockAllocatorAllocGuts(base, region, context, size_class);
if (result == NULL && (flags & SB_ALLOC_SOFT_FAIL) == 0)
- sb_out_of_memory_error(a);
+ sb_out_of_memory_error(context);
return result;
}
- * Free memory allocated via sb_alloc.
+ * Free memory allocated via BlockAllocatorAlloc.
*/
void
-sb_free(void *ptr)
+BlockAllocatorFree(void *ptr)
{
AllocatorRegion *region;
char *fpm_base;
first_page = fpm_pointer_to_page(fpm_base,
relptr_access(base, span->start));
FreePageManagerPut(region->fpm, first_page, span->npages);
- sb_free(span);
+ BlockAllocatorFree(span);
/* We're done, but must release any lock first. */
if (lock != NULL)
* type, we need to separately free the span object also.
*/
if (size_class != SB_SCLASS_SPAN_OF_SPANS)
- sb_free(span);
+ BlockAllocatorFree(span);
}
/* If we locked the heap, release the lock. */
* allocation.
*/
Size
-sb_alloc_space(Size size)
+BlockAllocatorGetAllocSpace(Size size)
{
uint16 size_class;
* overhead of its own.
*/
Size
-sb_chunk_space(void *ptr)
+BlockAllocatorGetChunkSpace(void *ptr)
{
AllocatorRegion *region;
char *fpm_base;
* NB: It's not safe to do this while the allocator is in use!
*/
void
-sb_reset_allocator(sb_allocator *a)
+BlockAllocatorReset(BlockAllocatorContext *context)
{
char *base = NULL;
int heapno;
* For shared memory allocation, pointers are relative to the start of the
* region.
*/
- if (!a->private)
+ if (!context->private)
{
- AllocatorRegion *region = LookupAllocatorRegion(a);
+ AllocatorRegion *region = LookupAllocatorRegion(context);
if (region == NULL)
elog(ERROR, "AllocatorRegion not found");
base = region->region_start;
*/
for (heapno = SB_NUM_SIZE_CLASSES - 1; heapno >= 0; --heapno)
{
- sb_heap *heap = &a->heaps[heapno];
+ sb_heap *heap = &context->heaps[heapno];
int fclass;
for (fclass = 0; fclass < SB_FULLNESS_CLASSES; ++fclass)
* If necessary, steal or create another superblock.
*/
static char *
-sb_alloc_guts(char *base, AllocatorRegion *region, sb_allocator *a, int size_class)
+BlockAllocatorAllocGuts(char *base, AllocatorRegion *region,
+ BlockAllocatorContext *context, int size_class)
{
- sb_heap *heap = &a->heaps[size_class];
+ sb_heap *heap = &context->heaps[size_class];
LWLock *lock = relptr_access(base, heap->lock);
sb_span *active_sb;
char *superblock;
* fail the request.
*/
if (relptr_is_null(heap->spans[1])
- && !sb_ensure_active_superblock(base, region, a, heap, size_class))
+ && !sb_ensure_active_superblock(base, region, context,
+ heap, size_class))
{
if (lock != NULL)
LWLockRelease(lock);
* superblock that would otherwise become empty soon.
*/
static bool
-sb_ensure_active_superblock(char *base, AllocatorRegion *region, sb_allocator *a,
+sb_ensure_active_superblock(char *base, AllocatorRegion *region,
+ BlockAllocatorContext *context,
sb_heap *heap, int size_class)
{
Size obsize = sb_size_classes[size_class];
*/
if (size_class != SB_SCLASS_SPAN_OF_SPANS)
{
- AllocatorRegion *span_region = a->private ? NULL : region;
+ AllocatorRegion *span_region = context->private ? NULL : region;
- span = (sb_span *) sb_alloc_guts(base, span_region, a,
- SB_SCLASS_SPAN_OF_SPANS);
+ span = (sb_span *) BlockAllocatorAllocGuts(base, span_region, context,
+ SB_SCLASS_SPAN_OF_SPANS);
if (span == NULL)
return false;
npages = BLOCK_ALLOCATOR_PAGES_PER_CHUNK;
/* Find a region from which to allocate the superblock. */
if (region == NULL)
{
- Assert(a->private);
+ Assert(context->private);
region = GetRegionForPrivateAllocation(npages);
}
* Report an out-of-memory condition.
*/
static void
-sb_out_of_memory_error(sb_allocator *a)
+sb_out_of_memory_error(BlockAllocatorContext *context)
{
- if (a->private)
+ if (context->private)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/rel.h"
-#include "utils/sb_alloc.h"
+#include "utils/balloc.h"
#include "utils/sortsupport.h"
#include "utils/tuplesort.h"
int maxTapes; /* number of tapes (Knuth's T) */
int tapeRange; /* maxTapes-1 (Knuth's P) */
MemoryContext sortcontext; /* memory context holding all sort data */
- sb_allocator *sortallocator; /* superblock allocator holding sort data */
+ BlockAllocatorContext *sortallocator; /* block allocator for sort data */
LogicalTapeSet *tapeset; /* logtape.c object for tapes in a temp file */
/*
{
Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
MemoryContext oldcontext;
- state->sortallocator = sb_create_private_allocator();
+ state->sortallocator = BlockAllocatorContextCreate();
oldcontext = MemoryContextSwitchTo(state->sortcontext);
IndexTuple newtuple;
/* copy the tuple into sort storage */
- newtuple = (IndexTuple) sb_alloc(state->sortallocator, tuplen, 0);
+ newtuple = (IndexTuple) BlockAllocatorAlloc(state->sortallocator, tuplen, 0);
memcpy(newtuple, tuple, tuplen);
- USEMEM(state, sb_alloc_space(tuplen));
+ USEMEM(state, BlockAllocatorGetAllocSpace(tuplen));
stup->tuple = (void *) newtuple;
/* set up first-column key value */
stup->datum1 = index_getattr(newtuple,
LogicalTapeWrite(state->tapeset, tapenum,
(void *) &tuplen, sizeof(tuplen));
- FREEMEM(state, sb_chunk_space(tuple));
- sb_free(tuple);
+ FREEMEM(state, BlockAllocatorGetChunkSpace(tuple));
+ BlockAllocatorFree(tuple);
}
static void
int tapenum, unsigned int len)
{
unsigned int tuplen = len - sizeof(unsigned int);
- IndexTuple tuple = (IndexTuple) sb_alloc(state->sortallocator, tuplen, 0);
+ IndexTuple tuple = (IndexTuple) BlockAllocatorAlloc(state->sortallocator, tuplen, 0);
- USEMEM(state, sb_chunk_space(tuple));
+ USEMEM(state, BlockAllocatorGetChunkSpace(tuple));
LogicalTapeReadExact(state->tapeset, tapenum,
tuple, tuplen);
if (state->randomAccess) /* need trailing length word? */
#include "storage/dsm.h"
#include "storage/shm_toc.h"
#include "utils/freepage.h"
-#include "utils/sb_alloc.h"
+#include "utils/balloc.h"
#include "utils/balloc_map.h"
/*
char *region_start; /* Address of region. */
Size region_size; /* Number of bytes in region. */
Size usable_pages; /* Number of usable pages in region. */
- dsm_segment *seg; /* If not backend-private, DSM handle. */
- sb_allocator *allocator; /* If not backend-private, shared allocator. */
+ dsm_segment *seg; /* DSM handle (if not private). */
+ BlockAllocatorContext *context; /* Shared allocator (if not private). */
FreePageManager *fpm; /* Free page manager for region (if any). */
BlockAllocatorMap *pagemap; /* Page map for region (if any). */
Size contiguous_pages; /* Last reported contiguous free pages. */
{
relptr(FreePageManager) fpm;
relptr(BlockAllocatorMap) pagemap;
- relptr(sb_allocator) allocator;
+ relptr(BlockAllocatorContext) allocator;
int lwlock_tranche_id;
char lwlock_tranche_name[FLEXIBLE_ARRAY_MEMBER];
} AllocatorSharedRegion;
shm_toc *toc, Size size,
int lwlock_tranche_id,
char *lwlock_tranche_name);
-extern sb_allocator *AttachAllocatorSharedRegion(dsm_segment *,
+extern BlockAllocatorContext *AttachAllocatorSharedRegion(dsm_segment *,
AllocatorSharedRegion *);
extern void DumpAllocatorRegions(void);
/*-------------------------------------------------------------------------
*
- * sb_alloc.h
- * Superblock-based memory allocator.
+ * balloc.h
+ * Block-based memory allocator.
*
* Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * src/include/utils/sb_alloc.h
+ * src/include/utils/balloc.h
*
*-------------------------------------------------------------------------
*/
-#ifndef SB_ALLOC_H
-#define SB_ALLOC_H
+#ifndef BALLOC_H
+#define BALLOC_H
#include "storage/lwlock.h"
#include "utils/relptr.h"
-typedef struct sb_allocator sb_allocator;
+typedef struct BlockAllocatorContext BlockAllocatorContext;
/* Number of pages (see FPM_PAGE_SIZE) per block-allocator chunk. */
#define BLOCK_ALLOCATOR_PAGES_PER_CHUNK 16
#define SB_ALLOC_SOFT_FAIL 0x0002 /* return NULL if no mem */
/* Functions to manipulate allocators. */
-extern sb_allocator *sb_create_private_allocator(void);
-extern void sb_reset_allocator(sb_allocator *a);
-extern void sb_destroy_private_allocator(sb_allocator *);
+extern BlockAllocatorContext *BlockAllocatorContextCreate(void);
+extern void BlockAllocatorReset(BlockAllocatorContext *context);
+extern void BlockAllocatorDelete(BlockAllocatorContext *context);
/* Functions to allocate and free memory. */
-extern void *sb_alloc(sb_allocator *, Size, int flags);
-extern void sb_free(void *ptr);
+extern void *BlockAllocatorAlloc(BlockAllocatorContext *context, Size size,
+				 int flags);
+extern void BlockAllocatorFree(void *ptr);
/* Reporting functions. */
-extern Size sb_alloc_space(Size size);
-extern Size sb_chunk_space(void *ptr);
+extern Size BlockAllocatorGetAllocSpace(Size size);
+extern Size BlockAllocatorGetChunkSpace(void *ptr);
-#endif /* SB_ALLOC_H */
+#endif /* BALLOC_H */