#define NUM_PRIVATE_FREELISTS 16
static dlist_head private_freelist[NUM_PRIVATE_FREELISTS];
+/*
+ * Constants to set the size of backend-private regions. Superblocks are
+ * 16 pages each (64kB), and we want a number of superblocks to fit inside
+ * each region, so these need to be pretty good-sized. The actual
+ * allocations will be a bit larger than the values indicated here, because
+ * we add a bit of space for bookkeeping. These values are in units of
+ * FPM_PAGE_SIZE.
+ */
+#define SB_REGION_INITSIZE (16 * SB_PAGES_PER_SUPERBLOCK)
+#define SB_REGION_MAXSIZE ((64 * 1024 * 1024) / FPM_PAGE_SIZE)
+
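+/*
+ * Running total of usable pages in all backend-private regions; used to
+ * decide how large to make the next region we create.
+ */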
+static Size sb_private_pages_allocated;
+
/* Static functions. */
static bool sb_adjust_lookup(sb_region *region, bool insert);
static bool sb_adjust_lookup_leaf(sb_lookup_leaf *leaf, sb_region *region,
sb_private_region_for_allocator(Size npages)
{
int freelist = Min(fls(npages), NUM_PRIVATE_FREELISTS);
+ Size new_region_net_pages;
+ Size metadata_bytes;
+ char *region_start;
+ Size region_size;
+ sb_region *region;
Assert(npages > 0);
/*
* There is no existing backend-private region with enough freespace
- * to satisfy the allocation request. Create a new one.
+ * to satisfy the request, so we'll need to create a new one. First
+ * step is to figure out how many pages we should try to obtain.
*/
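+ /*
+ * Scale the region size up with the amount we've already allocated,
+ * doubling from SB_REGION_INITSIZE but not beyond SB_REGION_MAXSIZE;
+ * whatever size that produces, it must be able to cover this request.
+ */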
+ for (new_region_net_pages = SB_REGION_INITSIZE;
+ new_region_net_pages < sb_private_pages_allocated &&
+ new_region_net_pages < SB_REGION_MAXSIZE; new_region_net_pages *= 2)
+ ;
+ if (new_region_net_pages < npages)
+ new_region_net_pages = npages;
+
+ /* Try to allocate space from the operating system. */
+ for (;;)
+ {
+ /*
+ * Compute space required for metadata and determine raw allocation
+ * size.
+ */
+ metadata_bytes = MAXALIGN(sizeof(sb_region));
+ metadata_bytes += MAXALIGN(sizeof(FreePageManager));
+ metadata_bytes += MAXALIGN(sb_map_size(new_region_net_pages));
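+ /* Round up so that the usable space begins on an FPM page boundary. */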
+ if (metadata_bytes % FPM_PAGE_SIZE != 0)
+ metadata_bytes += FPM_PAGE_SIZE - (metadata_bytes % FPM_PAGE_SIZE);
+ region_size = new_region_net_pages * FPM_PAGE_SIZE + metadata_bytes;
+
+ /* Try to allocate memory. */
+ region_start = malloc(region_size);
+ if (region_start != NULL)
+ break;
- /* XXX. Not implemented yet. */
+ /* Couldn't get that much; if possible, loop and try a smaller allocation. */
+ if (new_region_net_pages == npages)
+ return NULL;
+ new_region_net_pages = Max(new_region_net_pages / 2, npages);
+ }
- return NULL;
+ /*
+ * Initialize region object.
+ *
+ * NB: We temporarily set region->contiguous_pages to a value one more
+ * than the actual number. This is because calling FreePageManagerPut
+ * will provoke a callback to sb_report_contiguous_freespace, which we
+ * want to exit quickly and, in particular, without deallocating the
+ * region.
+ */
+ region = (sb_region *) region_start;
+ region->region_start = region_start;
+ region->region_size = region_size;
+ region->usable_pages = new_region_net_pages;
+ region->seg = NULL;
+ region->allocator = NULL;
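+ /* The FreePageManager and sb_map live just after the sb_region header. */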
+ region->fpm = (FreePageManager *)
+ (region_start + MAXALIGN(sizeof(sb_region)));
+ region->pagemap = (sb_map *)
+ (((char *) region->fpm) + MAXALIGN(sizeof(FreePageManager)));
+ region->contiguous_pages = new_region_net_pages + 1;
+
+ /* Initialize supporting data structures. */
+ FreePageManagerInitialize(region->fpm, region->region_start, NULL, false);
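+ /* All of the usable pages, which follow the metadata, start out free. */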
+ FreePageManagerPut(region->fpm, metadata_bytes / FPM_PAGE_SIZE,
+ new_region_net_pages);
+ sb_map_initialize(region->pagemap, new_region_net_pages);
+ region->contiguous_pages = new_region_net_pages; /* Now fix the value. */
+ freelist = Min(fls(new_region_net_pages), NUM_PRIVATE_FREELISTS);
+ dlist_push_head(&private_freelist[freelist], &region->fl_node);
+ sb_adjust_lookup(region, true);
+
+ /* Count this region's usable pages against the backend-private total. */
+ sb_private_pages_allocated += new_region_net_pages;
+
+ /* Time to rock and roll. */
+ return region;
}
/*
return;
/*
- * If the entire region is free, deallocate it. Any FreePageManager
- * or sb_map for this region is stored within the region itself, so
- * we needn't do anything special to get rid of them.
+ * If the entire region is free, deallocate it. The sb_region,
+ * FreePageManager, and sb_map for the region are stored within it, so
+ * they all go away when we free the managed space.
*/
if (npages == region->usable_pages)
{
- /* Return the managed space to the operating system. */
- free(region->region_start);
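+ /*
+ * The sb_region object is stored within the region itself, so remember
+ * the start address now and finish all other cleanup before freeing it.
+ */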
+ char *region_start = region->region_start;
/* Pull the region out of the lookup table. */
sb_adjust_lookup(region, false);
/* Remove the region object from the private freelist. */
dlist_delete(&region->fl_node);
- /* Finally, free the region object itself. */
- free(region);
+ /* Decrement count of private pages allocated. */
+ Assert(sb_private_pages_allocated >= region->usable_pages);
+ sb_private_pages_allocated -= region->usable_pages;
+
+ /* Return the managed space to the operating system. */
+ free(region_start);
return;
}