*/
buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
InvalidBuffer, options, bistate,
- &vmbuffer, NULL);
+ &vmbuffer, NULL,
+ 0);
/*
* We're about to do the actual insert -- but check for conflict first, to
return tup;
}
+/*
+ * Helper for heap_multi_insert() that computes the number of entire pages
+ * that inserting the remaining heaptuples requires. Used to determine how
+ * much the relation needs to be extended by.
+ *
+ * 'done' is the index of the first not-yet-inserted tuple; tuples in
+ * [done, ntuples) are counted.  Each page is assumed to provide
+ * BLCKSZ - SizeOfPageHeaderData - saveFreeSpace bytes of usable space.
+ *
+ * Returns the worst-case number of pages needed (always >= 1).
+ */
+static int
+heap_multi_insert_pages(HeapTuple *heaptuples, int done, int ntuples, Size saveFreeSpace)
+{
+ size_t page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
+ int npages = 1;
+
+ for (int i = done; i < ntuples; i++)
+ {
+ /* Space one tuple consumes: its line pointer plus the aligned tuple. */
+ size_t tup_sz = sizeof(ItemIdData) + MAXALIGN(heaptuples[i]->t_len);
+
+ if (page_avail < tup_sz)
+ {
+ /* Doesn't fit on the current page; account for a fresh one. */
+ npages++;
+ page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
+ }
+ /*
+ * NOTE(review): if a single tuple were larger than an empty page's
+ * usable space, this unsigned subtraction would wrap; presumably
+ * oversized tuples are toasted before reaching here — confirm in
+ * the caller.
+ */
+ page_avail -= tup_sz;
+ }
+
+ return npages;
+}
+
/*
* heap_multi_insert - insert multiple tuples into a heap
*
Size saveFreeSpace;
bool need_tuple_data = RelationIsLogicallyLogged(relation);
bool need_cids = RelationIsAccessibleInLogicalDecoding(relation);
+ bool starting_with_empty_page = false;
+ int npages = 0;
+ int npages_used = 0;
/* currently not needed (thus unsupported) for heap_multi_insert() */
Assert(!(options & HEAP_INSERT_NO_LOGICAL));
while (ndone < ntuples)
{
Buffer buffer;
- bool starting_with_empty_page;
bool all_visible_cleared = false;
bool all_frozen_set = false;
int nthispage;
CHECK_FOR_INTERRUPTS();
+ /*
+ * Compute number of pages needed to fit the to-be-inserted tuples in
+ * the worst case. This will be used to determine how much to extend
+ * the relation by in RelationGetBufferForTuple(), if needed. If we
+ * filled a prior page from scratch, we can just update our last
+ * computation, but if we started with a partially filled page,
+ * recompute from scratch, as the number of potentially required pages
+ * can vary due to tuples needing to fit onto the page, page headers,
+ * etc.
+ */
+ if (ndone == 0 || !starting_with_empty_page)
+ {
+ npages = heap_multi_insert_pages(heaptuples, ndone, ntuples,
+ saveFreeSpace);
+ npages_used = 0;
+ }
+ else
+ npages_used++;
+
/*
* Find buffer where at least the next tuple will fit. If the page is
* all-visible, this will also pin the requisite visibility map page.
*/
buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
InvalidBuffer, options, bistate,
- &vmbuffer, NULL);
+ &vmbuffer, NULL,
+ npages - npages_used);
page = BufferGetPage(buffer);
starting_with_empty_page = PageGetMaxOffsetNumber(page) == 0;
/* It doesn't fit, must use RelationGetBufferForTuple. */
newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
buffer, 0, NULL,
- &vmbuffer_new, &vmbuffer);
+ &vmbuffer_new, &vmbuffer,
+ 0);
/* We're all done. */
break;
}
* Returns pinned and exclusive-locked buffer of a page in given relation
* with free space >= given len.
*
+ * If num_pages is > 1, we will try to extend the relation by at least that
+ * many pages when we decide to extend the relation. This is more efficient
+ * for callers that know they will need multiple pages
+ * (e.g. heap_multi_insert()).
+ *
* If otherBuffer is not InvalidBuffer, then it references a previously
* pinned buffer of another page in the same relation; on return, this
* buffer will also be exclusive-locked. (This case is used by heap_update;
RelationGetBufferForTuple(Relation relation, Size len,
Buffer otherBuffer, int options,
BulkInsertState bistate,
- Buffer *vmbuffer, Buffer *vmbuffer_other)
+ Buffer *vmbuffer, Buffer *vmbuffer_other,
+ int num_pages)
{
bool use_fsm = !(options & HEAP_INSERT_SKIP_FSM);
Buffer buffer = InvalidBuffer;