first_page = fpm_pointer_to_page(fpm_base,
relptr_access(base, span->start));
FreePageManagerPut(aregion->fpm, first_page, span->npages);
- pfree(span);
- /* We're done, but must release any lock first. */
+ /* Release any lock. */
if (lock != NULL)
LWLockRelease(lock);
+
+ /* Free the span describing this object. */
+ pfree(span);
+
+ return;
}
/* Put the object on the span's freelist. */
LWLockRelease(lock);
}
+/*
+ * Resize an existing chunk, reallocating it if necessary.
+ *
+ * For small objects, if the old and new size classes are the same, we can
+ * just return the existing pointer; otherwise, we reallocate. Since
+ * different size classes are stored in different heaps, there's really no
+ * way to do better.
+ *
+ * For a large object, things are more complicated.  If the number of pages
+ * required stays the same, we need not do anything; otherwise, we currently
+ * always reallocate.  This could be improved: when shrinking, we could give
+ * the extra pages back to the FreePageManager, and when growing, we could
+ * check whether the requisite number of pages following the current
+ * allocation happen to be free, and if so grab them.
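+ *
+ * As with repalloc(), callers must use the returned pointer rather than the
+ * original one, since the object may have been copied to a new location,
+ * e.g.:
+ *
+ *		ptr = BlockAllocatorRealloc(aregion, ptr, newsize);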
+ */
+void *
+BlockAllocatorRealloc(AllocatorRegion *aregion, void *ptr, Size size)
+{
+ char *fpm_base;
+ char *base;
+ BlockAllocatorSpan *span;
+ void *newptr;
+ Size pageno;
+ Size obsize;
+ Size npages;
+ uint16 size_class;
+ uint16 new_size_class;
+ BlockAllocatorHeap *heap;
+ BlockAllocatorHeap *heap0;
+ BlockAllocatorContext *context;
+
+ /* Size 0 just means that we should free the existing chunk. */
+ if (size == 0)
+ {
+ pfree(ptr);
+ return NULL;
+ }
+
+ /* Locate the containing span. */
+ fpm_base = fpm_segment_base(aregion->fpm);
+ pageno = fpm_pointer_to_page(fpm_base, ptr);
+ span = BlockAllocatorMapGet(aregion->pagemap, pageno);
+
+ /*
+ * Extract relevant details from span. We can read this information
+ * without a lock, because neither of these things can be changed (nor
+ * can the span be deallocated) while there's an outstanding allocation
+ * in the span, something we already know to be the case. If the user is
+ * freeing in one process and reallocating the same chunk in another
+ * process at the same time, that's a bug in their code, not ours.
+ */
+ size_class = span->size_class;
+ npages = span->npages;
+
+ /*
+	 * If we don't need to reallocate, just return the existing pointer.
+	 * Otherwise, determine the size of the existing allocation, which bounds
+	 * the amount of data we'll need to copy.
+ */
+ if (size_class == BA_SCLASS_SPAN_LARGE)
+ {
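+		/*
+		 * If the new size still calls for a large object and the page count
+		 * is unchanged, the existing allocation can be reused as-is.
+		 */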
+ if (size > balloc_size_classes[lengthof(balloc_size_classes) - 1] &&
+ npages == fpm_size_to_pages(size))
+ return ptr;
+ obsize = npages * FPM_PAGE_SIZE;
+ }
+ else
+ {
+		/*
+		 * Map the new size to a size class.  If it is too large for any size
+		 * class, the object must be reallocated as a large allocation.
+		 */
+		if (size > balloc_size_classes[lengthof(balloc_size_classes) - 1])
+			new_size_class = BA_SCLASS_SPAN_LARGE;
+		else if (size < lengthof(balloc_size_class_map) * BA_SIZE_CLASS_MAP_QUANTUM)
+ {
+ int mapidx;
+
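+			/*
+			 * Round the size up to the next multiple of the quantum and
+			 * convert to a zero-based map index: entry i covers sizes in
+			 * the interval (i * quantum, (i + 1) * quantum].
+			 */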
+ mapidx = ((size + BA_SIZE_CLASS_MAP_QUANTUM - 1) /
+ BA_SIZE_CLASS_MAP_QUANTUM) - 1;
+ new_size_class = balloc_size_class_map[mapidx];
+ }
+ else
+ {
+ uint16 min;
+ uint16 max;
+
+ min = balloc_size_class_map[lengthof(balloc_size_class_map) - 1];
+ max = lengthof(balloc_size_classes) - 1;
+
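+			/*
+			 * Binary search, maintaining the invariant that the smallest
+			 * size class large enough for the request lies in [min, max].
+			 */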
+ while (min < max)
+ {
+ uint16 mid = (min + max) / 2;
+ uint16 class_size = balloc_size_classes[mid];
+
+ if (class_size < size)
+ min = mid + 1;
+ else
+ max = mid;
+ }
+
+ new_size_class = min;
+ }
+
+		if (new_size_class != BA_SCLASS_SPAN_LARGE)
+		{
+			Assert(size <= balloc_size_classes[new_size_class]);
+			Assert(size > balloc_size_classes[new_size_class - 1]);
+
+			/* Same size class, so the existing chunk can be reused. */
+			if (size_class == new_size_class)
+				return ptr;
+		}
+
+		/* The old chunk's size caps how much data we need to copy. */
+		obsize = balloc_size_classes[size_class];
+ }
+
+ /* Ugly code to find context; see BlockAllocatorGetChunkContext. */
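+	/*
+	 * The span's parent pointer leads to the heap for its size class; a
+	 * context keeps its heaps in an array indexed by size class, so backing
+	 * up size_class elements yields heaps[0], and offsetof() then recovers
+	 * the enclosing BlockAllocatorContext.
+	 */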
+ base = aregion->seg != NULL ? fpm_base : NULL;
+ heap = relptr_access(base, span->parent);
+ heap0 = heap - span->size_class;
+ context = (BlockAllocatorContext *)
+ (((char *) heap0) - offsetof(BlockAllocatorContext, heaps[0]));
+ Assert(IsA(context, BlockAllocatorContext));
+
+ /* Allocate new space. */
+ newptr = BlockAllocatorAlloc(&context->header, size);
+
+ /* Copy old data to new space. */
+ memcpy(newptr, ptr, Min(obsize, size));
+
+ /* Free old space. */
+ BlockAllocatorFree(aregion, ptr);
+
+ /* Return pointer to new space. */
+ return newptr;
+}
+
/*
* Return the size of the chunk that will be used to satisfy a given
* allocation.
BlockAllocatorGetChunkContext(AllocatorRegion *aregion, void *ptr)
{
char *fpm_base;
+ char *base;
BlockAllocatorSpan *span;
BlockAllocatorHeap *heap;
BlockAllocatorHeap *heap0;
span = BlockAllocatorMapGet(aregion->pagemap, pageno);
-	/* Follow the parent poiner to find the containing heap. */
-	heap = relptr_access(fpm_base, span->parent);
+	/* Follow the parent pointer to find the containing heap. */
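+	/*
+	 * For a region backed by a shared memory segment, span pointers are
+	 * relative to the segment base; for a backend-private region they store
+	 * raw addresses, so the relptr base is NULL.
+	 */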
+ base = aregion->seg != NULL ? fpm_base : NULL;
+ heap = relptr_access(base, span->parent);
/*
* Department of incredible ugliness: neither the heap nor the span
Assert(pointer != NULL);
Assert(pointer == (void *) MAXALIGN(pointer));
+ /*
+ * If allocator regions are in use, then this pointer might be within
+ * such a region, in which case it won't have a chunk header. So, we have
+ * to test for and handle that case first.
+ */
+ if (aregion_private_pages_allocated > 0)
+ {
+ AllocatorRegion *aregion;
+
+ aregion = LookupAllocatorRegion(pointer);
+ if (aregion != NULL)
+ return BlockAllocatorRealloc(aregion, pointer, size);
+ }
+
/*
* OK, it's probably safe to look at the chunk header.
*/
Assert(pointer != NULL);
Assert(pointer == (void *) MAXALIGN(pointer));
+ /*
+ * If allocator regions are in use, then this pointer might be within
+ * such a region, in which case it won't have a chunk header. So, we have
+ * to test for and handle that case first.
+ */
+ if (aregion_private_pages_allocated > 0)
+ {
+ AllocatorRegion *aregion;
+
+ aregion = LookupAllocatorRegion(pointer);
+ if (aregion != NULL)
+ return BlockAllocatorRealloc(aregion, pointer, size);
+ }
+
/*
* OK, it's probably safe to look at the chunk header.
*/
#include "executor/executor.h"
#include "miscadmin.h"
#include "pg_trace.h"
+#include "utils/balloc.h"
#include "utils/datum.h"
#include "utils/logtape.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/rel.h"
-#include "utils/balloc.h"
#include "utils/sortsupport.h"
#include "utils/tuplesort.h"
int maxTapes; /* number of tapes (Knuth's T) */
int tapeRange; /* maxTapes-1 (Knuth's P) */
MemoryContext sortcontext; /* memory context holding all sort data */
- MemoryContext sortallocator; /* block allocator for sort data */
LogicalTapeSet *tapeset; /* logtape.c object for tapes in a temp file */
/*
* Create a working memory context for this sort operation. All data
* needed by the sort will live inside this context.
*/
- sortcontext = AllocSetContextCreate(CurrentMemoryContext,
- "TupleSort",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ sortcontext = BlockAllocatorContextCreate(CurrentMemoryContext,
+ "TupleSort");
/*
* Make the Tuplesortstate within the per-sort context. This way, we
{
Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
MemoryContext oldcontext;
- state->sortallocator = BlockAllocatorContextCreate(CurrentMemoryContext,
- "sort allocator");
oldcontext = MemoryContextSwitchTo(state->sortcontext);
#ifdef TRACE_SORT
if (trace_sort)
- {
- int64 spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
- elog(LOG, "switching to external sort with %d tapes (%ld KB used): %s",
- maxTapes, spaceUsed, pg_rusage_show(&state->ru_start));
- }
+ elog(LOG, "switching to external sort with %d tapes: %s",
+ maxTapes, pg_rusage_show(&state->ru_start));
#endif
/*
IndexTuple newtuple;
/* copy the tuple into sort storage */
- newtuple = (IndexTuple) BlockAllocatorAlloc(state->sortallocator, tuplen);
+ newtuple = (IndexTuple) palloc(tuplen);
memcpy(newtuple, tuple, tuplen);
- USEMEM(state, BlockAllocatorGetAllocSpace(tuplen));
+ USEMEM(state, GetMemoryChunkSpace(newtuple));
stup->tuple = (void *) newtuple;
/* set up first-column key value */
stup->datum1 = index_getattr(newtuple,
int tapenum, unsigned int len)
{
unsigned int tuplen = len - sizeof(unsigned int);
-	IndexTuple tuple;
-	tuple = (IndexTuple) BlockAllocatorAlloc(state->sortallocator, tuplen);
+	IndexTuple tuple = (IndexTuple) palloc(tuplen);
USEMEM(state, GetMemoryChunkSpace(tuple));
LogicalTapeReadExact(state->tapeset, tapenum,
tuple, tuplen);