static char *sb_alloc_guts(char *base, sb_region *region,
sb_allocator *a, int size_class);
static void sb_init_span(char *base, sb_span *span, sb_heap *heap,
- Size first_page, Size npages, uint16 size_class);
+ char *ptr, Size npages, uint16 size_class);
static void sb_out_of_memory_error(sb_allocator *a);
static bool sb_transfer_first_span(char *base, sb_heap *heap,
int fromclass, int toclass);
uint16 size_class;
char *result;
+ Assert(size > 0);
+
/*
 * For shared memory allocation, pointers are relative to the start of the
 * region, so we must first work out where the region starts. For
int heapno = heapproc * SB_NUM_SIZE_CLASSES + size_class;
sb_heap *heap = &a->heaps[heapno];
LWLock *lock = relptr_access(base, heap->lock);
+ void *ptr;
/* Obtain a span object. */
span = (sb_span *) sb_alloc_guts(base, region, a,
sb_out_of_memory_error(a);
return NULL;
}
+ ptr = fpm_page_to_pointer(fpm_segment_base(region->fpm), first_page);
/* Initialize span and pagemap. */
if (lock != NULL)
LWLockAcquire(lock, LW_EXCLUSIVE);
- sb_init_span(base, span, heap, first_page, npages, size_class);
+ sb_init_span(base, span, heap, ptr, npages, size_class);
if (lock != NULL)
LWLockRelease(lock);
- sb_map_set(region->pagemap, first_page, ((char *) span) - base);
+ sb_map_set(region->pagemap, first_page, span);
- return fpm_page_to_pointer(fpm_segment_base(region->fpm),
- first_page);
+ return ptr;
}
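/*
 * A minimal sketch of the page/pointer correspondence relied on above,
 * assuming fpm_page_to_pointer(fpm_base, pageno) is equivalent to
 * fpm_base + pageno * FPM_PAGE_SIZE:
 *
 *     char *obj = fpm_page_to_pointer(fpm_segment_base(region->fpm),
 *                                     first_page);
 *     Size pageno = (obj - fpm_segment_base(region->fpm)) / FPM_PAGE_SIZE;
 *
 * Here pageno == first_page, so any address within the span can later be
 * mapped back to the sb_span stored by sb_map_set() above.
 */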
/* Map allocation to a size class. */
- if (size < lengthof(sb_size_class_map) * SB_SIZE_CLASS_MAP_QUANTUM)
+ if (size <= lengthof(sb_size_class_map) * SB_SIZE_CLASS_MAP_QUANTUM)
{
int mapidx;
- mapidx = (size + SB_SIZE_CLASS_MAP_QUANTUM - 1) /
- SB_SIZE_CLASS_MAP_QUANTUM;
+ mapidx = ((size + SB_SIZE_CLASS_MAP_QUANTUM - 1) /
+ SB_SIZE_CLASS_MAP_QUANTUM) - 1;
size_class = sb_size_class_map[mapidx];
}
else
size_class = min;
}
Assert(size <= sb_size_classes[size_class]);
+ Assert(size_class == 0 || size > sb_size_classes[size_class - 1]);
size_class += SB_SCLASS_FIRST_REGULAR;
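/*
 * Worked example of the lookup above, with an illustrative quantum of
 * 8 bytes: sizes 1..8 yield mapidx 0, sizes 9..16 yield mapidx 1, and
 * so on.  The "- 1" converts the 1-based count of quanta into a 0-based
 * array index, and the "<=" test admits a request of exactly
 * lengthof(sb_size_class_map) * SB_SIZE_CLASS_MAP_QUANTUM bytes, which
 * the old "<" wrongly routed to the large-allocation path.
 */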
/* Attempt the actual allocation. */
Assert(active_sb != NULL);
Assert(active_sb->nused < nmax);
- Assert(active_sb->nused < active_sb->ninitialized);
+ Assert(active_sb->nused <= active_sb->ninitialized);
superblock = base + active_sb->first_page * FPM_PAGE_SIZE;
if (active_sb->firstfree < nmax)
{
result = superblock + active_sb->firstfree * obsize;
}
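/*
 * Why "<=" is the right bound above: assuming, as the field names
 * suggest, that nused counts objects currently allocated while
 * ninitialized counts objects ever carved from the superblock, the two
 * are equal whenever the free list is empty, for example immediately
 * after the first allocation from a fresh superblock (both counters 1).
 * The old "<" assertion would fire in exactly that common state.
 */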
/*
- * Allocate an object of the requeted size class from the given allocator.
+ * Allocate an object of the requested size class from the given allocator.
* If necessary, steal or create another superblock.
*/
static char *
Size npages = 1;
Size first_page;
Size i;
+ void *ptr;
/*
* Get an sb_span object to describe the new superblock... unless
*/
if (size_class != SB_SCLASS_SPAN_OF_SPANS)
{
- span = (sb_span *) sb_alloc_guts(base, region, a,
+ sb_region *span_region = a->private ? NULL : region;
+
+ span = (sb_span *) sb_alloc_guts(base, span_region, a,
SB_SCLASS_SPAN_OF_SPANS);
if (span == NULL)
return NULL;
/* XXX. Free the span, if any. */
return NULL;
}
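/*
 * Note on termination: the nested sb_alloc_guts() call above requests
 * SB_SCLASS_SPAN_OF_SPANS, and that size class skips this block, so the
 * recursion is at most one level deep; a span-of-spans instead has its
 * descriptor carved out of its own storage below.
 */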
+ ptr = fpm_page_to_pointer(fpm_segment_base(region->fpm),
+ first_page);
/*
* If this is a span-of-spans, carve the descriptor right out of
* the allocated space.
*/
if (size_class == SB_SCLASS_SPAN_OF_SPANS)
- {
- char *fpm_base = fpm_segment_base(region->fpm);
- span = (sb_span *) fpm_page_to_pointer(fpm_base, first_page);
- }
+ span = (sb_span *) ptr;
/* Initialize span and pagemap. */
- sb_init_span(base, span, heap, first_page, npages, size_class);
+ sb_init_span(base, span, heap, ptr, npages, size_class);
for (i = 0; i < npages; ++i)
- sb_map_set(region->pagemap, first_page + i,
- ((char *) span) - base);
+ sb_map_set(region->pagemap, first_page + i, span);
/* For a span-of-spans, record that we allocated ourselves. */
if (size_class == SB_SCLASS_SPAN_OF_SPANS)
if (lock != NULL)
LWLockRelease(lock);
- return NULL;
+ return result;
}
/*
* Add a new span to fullness class 1 of the indicated heap.
*/
static void
-sb_init_span(char *base, sb_span *span, sb_heap *heap, Size first_page,
+sb_init_span(char *base, sb_span *span, sb_heap *heap, char *ptr,
Size npages, uint16 size_class)
{
sb_span *head = relptr_access(base, heap->spans[1]);
relptr_store(base, span->parent, heap);
relptr_store(base, span->nextspan, head);
relptr_store(base, span->prevspan, (sb_span *) NULL);
- span->first_page = first_page;
+ relptr_store(base, heap->spans[1], span);
+ span->first_page = (ptr - base) / FPM_PAGE_SIZE;
span->npages = npages;
span->size_class = size_class;
span->ninitialized = 0;
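/*
 * A sketch of the arithmetic above: the span's storage starts on a page
 * boundary, so with base the start of the segment,
 *
 *     ptr == base + first_page * FPM_PAGE_SIZE
 *     ==> (ptr - base) / FPM_PAGE_SIZE == first_page
 *
 * For a backend-private heap, base is presumably NULL, and first_page
 * then encodes the absolute address in page-size units.
 */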
struct sb_map
{
+	relptr(sb_map) self;	/* own address, lets us recover base */
	Size		npages;		/* number of pages covered */
+	bool		use64;		/* 64-bit entries (sb_map64 layout)? */
};
/* Map layout used when offsets may not fit in 32 bits. */
uint64 map[FLEXIBLE_ARRAY_MEMBER];
} sb_map64;
+#define sb_map_base(m) \
+	(((char *) (m)) - (m)->self.relptr_off)
+
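/*
 * A worked derivation of sb_map_base(), assuming relptr_store() records
 * the offset of the stored pointer from the supplied base: since
 * sb_map_initialize() stores the map's own address in "self",
 *
 *     m->self.relptr_off == ((char *) m) - base
 *     ==> ((char *) m) - m->self.relptr_off == base
 *
 * When base is NULL (backend-private memory), the stored offset is the
 * raw address and the subtraction again yields NULL.  This is what lets
 * sb_map_set() and sb_map_get() recover base without taking it as a
 * parameter.
 */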
/*
* Compute the amount of space required for an sb_map covering a given
- * number of pages. Note we assume that the maximum offset we'll be asked
- * to store is governed by that number of pages also.
+ * number of pages. Note that for shared memory (i.e. when base != NULL),
+ * we assume that the pointers will always point to addresses within that
+ * same segment, but for backend-private memory that might not be the case.
*/
Size
-sb_map_size(Size npages)
+sb_map_size(char *base, Size npages)
{
Size map_bytes;
- if (npages < maxpages_4b)
+ if (sizeof(Size) <= 4 || (base != NULL && npages < maxpages_4b))
map_bytes = add_size(offsetof(sb_map32, map),
mul_size(npages, sizeof(uint32)));
else
* been set.
*/
void
-sb_map_initialize(sb_map *m, Size npages)
+sb_map_initialize(sb_map *m, char *base, Size npages)
{
+ relptr_store(base, m->self, m);
m->npages = npages;
+ if (sizeof(Size) <= 4 || (base != NULL && npages < maxpages_4b))
+ m->use64 = false;
+ else
+ m->use64 = true;
}
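/*
 * Example of the layout choice above: on a 32-bit platform
 * (sizeof(Size) <= 4), every offset or address fits in 32 bits, so the
 * 32-bit map always suffices.  On 64-bit platforms, 32-bit entries are
 * safe only for a shared segment (base != NULL) with fewer than
 * maxpages_4b pages, where entries are bounded by the segment size; a
 * backend-private map stores absolute addresses and needs 64 bits.
 */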
/*
* Store a value into an sb_map.
*/
void
-sb_map_set(sb_map *m, Size pageno, Size offset)
+sb_map_set(sb_map *m, Size pageno, void *ptr)
{
+ char *base = sb_map_base(m);
Assert(pageno < m->npages);
- Assert(offset / FPM_PAGE_SIZE < m->npages);
- if (m->npages < maxpages_4b)
- ((sb_map32 *) m)->map[pageno] = (uint32) offset;
+ if (m->use64)
+ ((sb_map64 *) m)->map[pageno] = (uint64) (((char *) ptr) - base);
else
- ((sb_map64 *) m)->map[pageno] = (uint32) offset;
+ ((sb_map32 *) m)->map[pageno] = (uint32) (((char *) ptr) - base);
}
/*
* Get a value from an sb_map. Getting a value not previously stored will
* produce an undefined result, so don't do that.
*/
-Size
+void *
sb_map_get(sb_map *m, Size pageno)
{
+ char *base = sb_map_base(m);
Assert(pageno < m->npages);
- if (m->npages < maxpages_4b)
- return ((sb_map32 *) m)->map[pageno];
+ if (m->use64)
+ return base + ((sb_map64 *) m)->map[pageno];
else
- return ((sb_map64 *) m)->map[pageno];
+ return base + ((sb_map32 *) m)->map[pageno];
}
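/*
 * A minimal round-trip sketch of the revised interface, assuming
 * map_space points to sb_map_size(base, npages) bytes within the
 * segment; span and pageno are illustrative:
 *
 *     sb_map *m = (sb_map *) map_space;
 *     sb_map_initialize(m, base, npages);
 *     sb_map_set(m, pageno, span);
 *     Assert(sb_map_get(m, pageno) == (void *) span);
 *
 * Because the map remembers its own position via "self", callers no
 * longer pass base on every get or set.
 */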