Add realloc support to the block allocator; finish re-plumbing tuplesort.
author     Robert Haas <rhaas@postgresql.org>
           Mon, 9 Jun 2014 17:12:45 +0000 (13:12 -0400)
committer  Robert Haas <rhaas@postgresql.org>
           Mon, 9 Jun 2014 17:12:45 +0000 (13:12 -0400)
src/backend/utils/mmgr/balloc.c
src/backend/utils/mmgr/mcxt.c
src/backend/utils/sort/tuplesort.c
src/include/utils/balloc.h

diff --git a/src/backend/utils/mmgr/balloc.c b/src/backend/utils/mmgr/balloc.c
index c4407634123890355ea8695c772b04a94f6ab94a..b9e454b9d486944a71659eeb4f0d02b578795e40 100644
--- a/src/backend/utils/mmgr/balloc.c
+++ b/src/backend/utils/mmgr/balloc.c
@@ -402,11 +402,15 @@ BlockAllocatorFree(AllocatorRegion *aregion, void *ptr)
                first_page = fpm_pointer_to_page(fpm_base,
                                                                                 relptr_access(base, span->start));
                FreePageManagerPut(aregion->fpm, first_page, span->npages);
-               pfree(span);
 
-               /* We're done, but must release any lock first. */
+               /* Release any lock. */
                if (lock != NULL)
                        LWLockRelease(lock);
+
+               /* Free the span describing this object. */
+               pfree(span);
+
+               return;
        }
 
        /* Put the object on the span's freelist. */
@@ -473,6 +477,133 @@ BlockAllocatorFree(AllocatorRegion *aregion, void *ptr)
                LWLockRelease(lock);
 }
 
+/*
+ * Resize an existing chunk, reallocating it if necessary.
+ *
+ * For small objects, if the old and new size classes are the same, we can
+ * just return the existing pointer; otherwise, we reallocate.  Since
+ * different size classes are stored in different heaps, there's really no
+ * way to do better.
+ *
+ * For a large object, things are more complicated.  If the number of pages
+ * required stays the same, we need not do anything; otherwise, we currently
+ * always reallocate.  This could be improved: when shrinking, we could give
+ * the extra pages back to the FreePageManager, and when growing, we could
+ * check whether the requisite number of pages following the current
+ * allocation happen to be free, and if so grab them.
+ */
+void *
+BlockAllocatorRealloc(AllocatorRegion *aregion, void *ptr, Size size)
+{
+       char   *fpm_base;
+       char   *base;
+       BlockAllocatorSpan *span;
+       void    *newptr;
+       Size    pageno;
+       Size    obsize;
+       Size    npages;
+       uint16  size_class;
+       uint16  new_size_class;
+       BlockAllocatorHeap *heap;
+       BlockAllocatorHeap *heap0;
+       BlockAllocatorContext *context;
+
+       /* Size 0 just means that we should free the existing chunk. */
+       if (size == 0)
+       {
+               pfree(ptr);
+               return NULL;
+       }
+
+       /* Locate the containing span. */
+       fpm_base = fpm_segment_base(aregion->fpm);
+       pageno = fpm_pointer_to_page(fpm_base, ptr);
+       span = BlockAllocatorMapGet(aregion->pagemap, pageno);
+
+       /*
+        * Extract relevant details from span.  We can read this information
+        * without a lock, because neither of these things can be changed (nor
+        * can the span be deallocated) while there's an outstanding allocation
+        * in the span, something we already know to be the case.  If the user is
+        * freeing in one process and reallocating the same chunk in another
+        * process at the same time, that's a bug in their code, not ours.
+        */
+       size_class = span->size_class;
+       npages = span->npages;
+
+       /*
+        * If we don't need to reallocate, just return the existing pointer.  And,
+        * if we do need to reallocate, figure out an upper bound on the amount of
+        * data we need to copy.
+        */
+       if (size_class == BA_SCLASS_SPAN_LARGE)
+       {
+               if (size > balloc_size_classes[lengthof(balloc_size_classes) - 1] &&
+                       npages == fpm_size_to_pages(size))
+                       return ptr;
+               obsize = npages * FPM_PAGE_SIZE;
+       }
+       else if (size > balloc_size_classes[lengthof(balloc_size_classes) - 1])
+       {
+               /*
+                * The new size is too big for any size class, so the chunk must
+                * move into large-object territory and we must reallocate.
+                */
+               obsize = balloc_size_classes[size_class];
+       }
+       else
+       {
+               /* Map allocation to a size class. */
+               if (size < lengthof(balloc_size_class_map) * BA_SIZE_CLASS_MAP_QUANTUM)
+               {
+                       int     mapidx;
+
+                       mapidx = ((size + BA_SIZE_CLASS_MAP_QUANTUM - 1) /
+                                               BA_SIZE_CLASS_MAP_QUANTUM) - 1;
+                       new_size_class = balloc_size_class_map[mapidx];
+               }
+               else
+               {
+                       uint16  min;
+                       uint16  max;
+
+                       min = balloc_size_class_map[lengthof(balloc_size_class_map) - 1];
+                       max = lengthof(balloc_size_classes) - 1;
+
+                       /* Binary search for the smallest size class >= size. */
+                       while (min < max)
+                       {
+                               uint16  mid = (min + max) / 2;
+                               uint16  class_size = balloc_size_classes[mid];
+
+                               if (class_size < size)
+                                       min = mid + 1;
+                               else
+                                       max = mid;
+                       }
+
+                       new_size_class = min;
+               }
+               Assert(size <= balloc_size_classes[new_size_class]);
+               Assert(size > balloc_size_classes[new_size_class - 1]);
+
+               /* Same size class: the existing chunk can satisfy the request. */
+               if (size_class == new_size_class)
+                       return ptr;
+
+               /*
+                * We must reallocate.  The upper bound on the data to copy is the
+                * old chunk's size, not the new one's.
+                */
+               obsize = balloc_size_classes[size_class];
+       }
+
+       /* Ugly code to find context; see BlockAllocatorGetChunkContext. */
+       base = aregion->seg != NULL ? fpm_base : NULL;
+       heap = relptr_access(base, span->parent);
+       heap0 = heap - span->size_class;
+       context = (BlockAllocatorContext *)
+               (((char *) heap0) - offsetof(BlockAllocatorContext, heaps[0]));
+       Assert(IsA(context, BlockAllocatorContext));
+
+       /* Allocate new space. */
+       newptr = BlockAllocatorAlloc(&context->header, size);
+
+       /* Copy old data to new space. */
+       memcpy(newptr, ptr, Min(obsize, size));
+
+       /* Free old space. */
+       BlockAllocatorFree(aregion, ptr);
+
+       /* Return pointer to new space. */
+       return newptr;
+}
+
 /*
  * Return the size of the chunk that will be used to satisfy a given
  * allocation.
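
The size-class search in BlockAllocatorRealloc is worth a closer look.  Below
is a minimal standalone sketch, using an invented class table (the real
balloc_size_classes values are not part of this patch), showing the loop
converging on the smallest class at least as large as the request:

#include <assert.h>
#include <stdio.h>

static const unsigned size_classes[] = {8, 16, 24, 32, 48, 64, 96, 128};
#define NCLASSES (sizeof(size_classes) / sizeof(size_classes[0]))

static unsigned
size_to_class(unsigned size)
{
	unsigned	min = 0;
	unsigned	max = NCLASSES - 1;

	assert(size > 0 && size <= size_classes[max]);
	while (min < max)
	{
		unsigned	mid = (min + max) / 2;

		if (size_classes[mid] < size)
			min = mid + 1;		/* class too small; look right */
		else
			max = mid;			/* class fits; a smaller one might too */
	}
	return min;
}

int
main(void)
{
	/* 33 bytes doesn't fit the 32-byte class, so we get the 48-byte one. */
	printf("size 33 -> class %u (%u bytes)\n",
		   size_to_class(33), size_classes[size_to_class(33)]);
	return 0;
}

The invariant is that every class below min is too small while max always
holds a class that fits, so the loop narrows to the smallest fitting class.
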
@@ -565,6 +696,7 @@ MemoryContext
 BlockAllocatorGetChunkContext(AllocatorRegion *aregion, void *ptr)
 {
        char   *fpm_base;
+       char   *base;
        BlockAllocatorSpan *span;
        BlockAllocatorHeap *heap;
        BlockAllocatorHeap *heap0;
@@ -577,7 +709,8 @@ BlockAllocatorGetChunkContext(AllocatorRegion *aregion, void *ptr)
        span = BlockAllocatorMapGet(aregion->pagemap, pageno);
 
        /* Follow the parent pointer to find the containing heap. */
-       heap = relptr_access(fpm_base, span->parent);
+       base = aregion->seg != NULL ? fpm_base : NULL;
+       heap = relptr_access(base, span->parent);
 
        /*
         * Department of incredible ugliness: neither the heap nor the span
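
Both the realloc path above and BlockAllocatorGetChunkContext recover the
owning context from a heap pointer by stepping back to heaps[0] and then
subtracting that member's offset within the struct.  A stripped-down sketch
of that pointer arithmetic, with invented stand-in types in place of
BlockAllocatorContext and BlockAllocatorHeap:

#include <stddef.h>
#include <stdio.h>

typedef struct Heap { int dummy; } Heap;
typedef struct Context
{
	const char *name;
	Heap		heaps[4];		/* one heap per size class */
} Context;

/* Recover the containing Context from a pointer to heaps[size_class]. */
static Context *
context_of(Heap *heap, int size_class)
{
	Heap	   *heap0 = heap - size_class;	/* step back to heaps[0] */

	return (Context *) (((char *) heap0) - offsetof(Context, heaps[0]));
}

int
main(void)
{
	Context		ctx = {.name = "demo"};
	Heap	   *h2 = &ctx.heaps[2];

	printf("%s\n", context_of(h2, 2)->name);	/* prints "demo" */
	return 0;
}
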
diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c
index 0835dd9246dfaf2405dae7a96f718fcb13a47cd2..447989bbd264a5daa1a8e94aa220e5f77605990d 100644
--- a/src/backend/utils/mmgr/mcxt.c
+++ b/src/backend/utils/mmgr/mcxt.c
@@ -836,6 +836,20 @@ repalloc(void *pointer, Size size)
        Assert(pointer != NULL);
        Assert(pointer == (void *) MAXALIGN(pointer));
 
+       /*
+        * If allocator regions are in use, then this pointer might be within
+        * such a region, in which case it won't have a chunk header.  So, we have
+        * to test for and handle that case first.
+        */
+       if (aregion_private_pages_allocated > 0)
+       {
+               AllocatorRegion *aregion;
+
+               aregion = LookupAllocatorRegion(pointer);
+               if (aregion != NULL)
+                       return BlockAllocatorRealloc(aregion, pointer, size);
+       }
+
        /*
         * OK, it's probably safe to look at the chunk header.
         */
@@ -901,6 +915,20 @@ repalloc_huge(void *pointer, Size size)
        Assert(pointer != NULL);
        Assert(pointer == (void *) MAXALIGN(pointer));
 
+       /*
+        * If allocator regions are in use, then this pointer might be within
+        * such a region, in which case it won't have a chunk header.  So, we have
+        * to test for and handle that case first.
+        */
+       if (aregion_private_pages_allocated > 0)
+       {
+               AllocatorRegion *aregion;
+
+               aregion = LookupAllocatorRegion(pointer);
+               if (aregion != NULL)
+                       return BlockAllocatorRealloc(aregion, pointer, size);
+       }
+
        /*
         * OK, it's probably safe to look at the chunk header.
         */
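
Both repalloc and repalloc_huge now share the same shape: test region
membership before touching the chunk header, because region pointers have
none.  A self-contained sketch of the pattern, with an invented bump arena
standing in for AllocatorRegion and plain realloc standing in for the
header-based path:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define ARENA_SIZE 4096
#define MAX_CHUNKS 64

static char arena[ARENA_SIZE];
static size_t arena_used;
static char *chunk_ptr[MAX_CHUNKS];
static size_t chunk_size[MAX_CHUNKS];
static int nchunks;

static void *
region_alloc(size_t size)
{
	char	   *p = arena + arena_used;

	arena_used += size;
	chunk_ptr[nchunks] = p;		/* headerless: sizes kept in a side table */
	chunk_size[nchunks++] = size;
	return p;
}

static size_t
region_chunk_size(void *ptr)
{
	for (int i = 0; i < nchunks; i++)
		if (chunk_ptr[i] == ptr)
			return chunk_size[i];
	abort();
}

static void *
reallocate(void *ptr, size_t size)
{
	char	   *p = ptr;

	/* Region pointers carry no header; recognize them by address first. */
	if (p >= arena && p < arena + ARENA_SIZE)
	{
		size_t		obsize = region_chunk_size(ptr);
		void	   *newptr = region_alloc(size);

		memcpy(newptr, ptr, obsize < size ? obsize : size);
		return newptr;
	}

	/* Ordinary chunk: the header-based path (plain realloc here). */
	return realloc(ptr, size);
}

int
main(void)
{
	char	   *a = region_alloc(8);
	char	   *b = malloc(8);

	strcpy(a, "region");
	strcpy(b, "malloc");
	a = reallocate(a, 64);
	b = reallocate(b, 64);
	printf("%s %s\n", a, b);	/* prints "region malloc" */
	free(b);
	return 0;
}
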
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index d4e9591ac9d3b0fb822c0707fb541fb984fff443..5dd1672c9658c2ab4ca5ebdfbf2cfa39df7151ba 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
 #include "executor/executor.h"
 #include "miscadmin.h"
 #include "pg_trace.h"
+#include "utils/balloc.h"
 #include "utils/datum.h"
 #include "utils/logtape.h"
 #include "utils/lsyscache.h"
 #include "utils/memutils.h"
 #include "utils/pg_rusage.h"
 #include "utils/rel.h"
-#include "utils/balloc.h"
 #include "utils/sortsupport.h"
 #include "utils/tuplesort.h"
 
@@ -217,7 +217,6 @@ struct Tuplesortstate
        int                     maxTapes;               /* number of tapes (Knuth's T) */
        int                     tapeRange;              /* maxTapes-1 (Knuth's P) */
        MemoryContext sortcontext;      /* memory context holding all sort data */
-       MemoryContext sortallocator; /* block allocator for sort data */
        LogicalTapeSet *tapeset;        /* logtape.c object for tapes in a temp file */
 
        /*
@@ -544,11 +543,8 @@ tuplesort_begin_common(int workMem, bool randomAccess)
         * Create a working memory context for this sort operation. All data
         * needed by the sort will live inside this context.
         */
-       sortcontext = AllocSetContextCreate(CurrentMemoryContext,
-                                                                               "TupleSort",
-                                                                               ALLOCSET_DEFAULT_MINSIZE,
-                                                                               ALLOCSET_DEFAULT_INITSIZE,
-                                                                               ALLOCSET_DEFAULT_MAXSIZE);
+       sortcontext = BlockAllocatorContextCreate(CurrentMemoryContext,
+                                                                                         "TupleSort");
 
        /*
         * Make the Tuplesortstate within the per-sort context.  This way, we
@@ -730,8 +726,6 @@ tuplesort_begin_index_btree(Relation heapRel,
 {
        Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
        MemoryContext oldcontext;
-       state->sortallocator = BlockAllocatorContextCreate(CurrentMemoryContext,
-                                                                                                          "sort allocator");
 
        oldcontext = MemoryContextSwitchTo(state->sortcontext);
 
@@ -1852,11 +1846,8 @@ inittapes(Tuplesortstate *state)
 
 #ifdef TRACE_SORT
        if (trace_sort)
-       {
-               int64 spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
-               elog(LOG, "switching to external sort with %d tapes (%ld KB used): %s",
-                        maxTapes, spaceUsed, pg_rusage_show(&state->ru_start));
-       }
+               elog(LOG, "switching to external sort with %d tapes: %s",
+                        maxTapes, pg_rusage_show(&state->ru_start));
 #endif
 
        /*
@@ -3351,9 +3342,9 @@ copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup)
        IndexTuple      newtuple;
 
        /* copy the tuple into sort storage */
-       newtuple = (IndexTuple) BlockAllocatorAlloc(state->sortallocator, tuplen);
+       newtuple = (IndexTuple) palloc(tuplen);
        memcpy(newtuple, tuple, tuplen);
-       USEMEM(state, BlockAllocatorGetAllocSpace(tuplen));
+       USEMEM(state, GetMemoryChunkSpace(newtuple));
        stup->tuple = (void *) newtuple;
        /* set up first-column key value */
        stup->datum1 = index_getattr(newtuple,
@@ -3386,9 +3377,8 @@ readtup_index(Tuplesortstate *state, SortTuple *stup,
                          int tapenum, unsigned int len)
 {
        unsigned int tuplen = len - sizeof(unsigned int);
-       IndexTuple      tuple;
+       IndexTuple      tuple = (IndexTuple) palloc(tuplen);
 
-       tuple = (IndexTuple) BlockAllocatorAlloc(state->sortallocator, tuplen);
        USEMEM(state, GetMemoryChunkSpace(tuple));
        LogicalTapeReadExact(state->tapeset, tapenum,
                                                 tuple, tuplen);
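
The accounting change in copytup_index deserves a note: charging USEMEM with
GetMemoryChunkSpace(newtuple) rather than tuplen bills the sort for the
chunk's real footprint, which a size-class allocator rounds up from the
request.  A small sketch with invented class sizes (the real ones live in
balloc.c):

#include <stdio.h>

static const unsigned size_classes[] = {8, 16, 24, 32, 48, 64, 96, 128};

static unsigned
chunk_space(unsigned request)
{
	for (unsigned i = 0; i < sizeof(size_classes) / sizeof(size_classes[0]); i++)
		if (size_classes[i] >= request)
			return size_classes[i];
	return request;				/* large requests: page-rounded in reality */
}

int
main(void)
{
	unsigned	tuplen = 33;
	unsigned	used = 0;

	used += chunk_space(tuplen);	/* USEMEM-style charge: 48, not 33 */
	printf("requested %u, charged %u\n", tuplen, used);
	used -= chunk_space(tuplen);	/* FREEMEM later releases the same amount */
	return 0;
}
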
diff --git a/src/include/utils/balloc.h b/src/include/utils/balloc.h
index 7845a1e28a7e5d91ac2ded3f41571d8b6ee3bb5d..983a06313bad16b0d7a22d16d528bfb9c0afee3d 100644
--- a/src/include/utils/balloc.h
+++ b/src/include/utils/balloc.h
@@ -29,7 +29,8 @@ extern MemoryContext BlockAllocatorContextCreate(MemoryContext parent,
 /* MemoryContextMethods for block allocator */
 extern void *BlockAllocatorAlloc(MemoryContext, Size);
 extern void BlockAllocatorFree(struct AllocatorRegion *, void *ptr);
-/* REALLOC IS MISSING! */
+extern void *BlockAllocatorRealloc(struct AllocatorRegion *, void *ptr,
+                                         Size size);
 extern void BlockAllocatorInit(MemoryContext);
 extern void BlockAllocatorReset(MemoryContext);
 extern void BlockAllocatorDelete(MemoryContext);
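
With the prototype above in place, callers needn't invoke
BlockAllocatorRealloc directly; repalloc routes to it whenever the pointer
falls inside an allocator region.  A hypothetical caller, not runnable
outside the backend, assuming a context created the way tuplesort.c now
does:

static void
demo(void)
{
	MemoryContext cxt = BlockAllocatorContextCreate(CurrentMemoryContext,
													"demo");
	MemoryContext old = MemoryContextSwitchTo(cxt);
	char	   *buf = palloc(32);

	buf = repalloc(buf, 128);	/* routed to BlockAllocatorRealloc */
	pfree(buf);					/* a size-0 repalloc would also free it */
	MemoryContextSwitchTo(old);
	MemoryContextDelete(cxt);
}
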