* overhead would be unchanged, that doesn't seem necessarily
* worthwhile.
*/
- if (PageIsAllVisible(BufferGetPage(buffer)) &&
+ if (PageIsAllVisible(page) &&
visibilitymap_clear(relation, block, vmbuffer,
VISIBILITYMAP_ALL_FROZEN))
cleared_all_frozen = true;
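The hunk above can clear the bit without I/O only because vmbuffer already points at the right visibility map page, pinned before the buffer lock was taken. A minimal sketch of that pin-then-lock protocol, reusing the hunk's variable names (illustration only, not part of the patch):

	/* Pin first: visibilitymap_pin may have to read the VM page in (I/O). */
	if (PageIsAllVisible(page))
		visibilitymap_pin(relation, block, &vmbuffer);

	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

	/* With the pin held, clearing the bit under the lock needs no I/O. */
	if (PageIsAllVisible(page) &&
		visibilitymap_clear(relation, block, vmbuffer,
							VISIBILITYMAP_ALL_FROZEN))
		cleared_all_frozen = true;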
* first". To implement this, we must do RelationGetBufferForTuple
* while not holding the lock on the old page, and we must rely on it
* to get the locks on both pages in the correct order.
+ *
+ * Another consideration is that we need visibility map page pin(s) if
+ * we will have to clear the all-visible flag on either page. If we
+ * call RelationGetBufferForTuple, we rely on it to acquire any such
+ * pins; but if we don't, we have to handle that here. Hence we need
+ * a loop.
*/
- if (newtupsize > pagefree)
- {
- /* Assume there's no chance to put heaptup on same page. */
- newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
- buffer, 0, NULL,
- &vmbuffer_new, &vmbuffer);
- }
- else
+ for (;;)
{
+ if (newtupsize > pagefree)
+ {
+ /* It doesn't fit, must use RelationGetBufferForTuple. */
+ newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
+ buffer, 0, NULL,
+ &vmbuffer_new, &vmbuffer);
+ /* We're all done. */
+ break;
+ }
+ /* Acquire VM page pin if needed and we don't have it. */
+ if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
+ visibilitymap_pin(relation, block, &vmbuffer);
/* Re-acquire the lock on the old tuple's page. */
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/* Re-check using the up-to-date free space */
pagefree = PageGetHeapFreeSpace(page);
- if (newtupsize > pagefree)
+ if (newtupsize > pagefree ||
+ (vmbuffer == InvalidBuffer && PageIsAllVisible(page)))
{
/*
- * Rats, it doesn't fit anymore. We must now unlock and
- * relock to avoid deadlock. Fortunately, this path should
- * seldom be taken.
+ * Rats, it doesn't fit anymore, or somebody just now set the
+ * all-visible flag. We must now unlock and loop to avoid
+ * deadlock. Fortunately, this path should seldom be taken.
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
- newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
- buffer, 0, NULL,
- &vmbuffer_new, &vmbuffer);
}
else
{
- /* OK, it fits here, so we're done. */
+ /* We're all done. */
newbuf = buffer;
+ break;
}
}
}
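For reference, the "lowest-numbered page first" rule that the loop above delegates to RelationGetBufferForTuple can be sketched as a hypothetical helper (illustration only; the real function interleaves the ordering with page selection and VM pinning):

	static void
	lock_two_buffers_in_order(Buffer a, Buffer b)	/* hypothetical */
	{
		/*
		 * Every backend acquires the pair in the same global order, so two
		 * updaters working across the same two pages cannot deadlock.
		 */
		if (BufferGetBlockNumber(a) < BufferGetBlockNumber(b))
		{
			LockBuffer(a, BUFFER_LOCK_EXCLUSIVE);
			LockBuffer(b, BUFFER_LOCK_EXCLUSIVE);
		}
		else
		{
			LockBuffer(b, BUFFER_LOCK_EXCLUSIVE);
			LockBuffer(a, BUFFER_LOCK_EXCLUSIVE);
		}
	}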
* happen if space is freed in that page after heap_update finds there's not
* enough there). In that case, the page will be pinned and locked only once.
*
- * For the vmbuffer and vmbuffer_other arguments, we avoid deadlock by
- * locking them only after locking the corresponding heap page, and taking
- * no further lwlocks while they are locked.
+ * We also handle the possibility that the all-visible flag will need to be
+ * cleared on one or both pages. If so, a pin on the associated visibility map
+ * page must be acquired before acquiring buffer lock(s), to avoid possibly
+ * doing I/O while holding buffer locks. The pins are passed back to the
+ * caller using the input-output arguments vmbuffer and vmbuffer_other.
+ * Note that in some cases the caller might have already acquired such pins,
+ * which is indicated by these arguments not being InvalidBuffer on entry.
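+ *
+ * For example, a single-tuple caller might use these arguments like this
+ * (a hypothetical sketch, not taken verbatim from any existing caller):
+ *
+ *		vmbuffer = InvalidBuffer;
+ *		buf = RelationGetBufferForTuple(rel, len, InvalidBuffer, 0, NULL,
+ *										&vmbuffer, NULL);
+ *		... place the tuple; if its page is all-visible, clear the bit
+ *		... using the already-pinned vmbuffer ...
+ *		if (BufferIsValid(vmbuffer))
+ *			ReleaseBuffer(vmbuffer);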
*
* We normally use FSM to help us find free space. However,
* if HEAP_INSERT_SKIP_FSM is specified, we just append a new empty page to
if (otherBuffer != InvalidBuffer)
{
Assert(otherBuffer != buffer);
+ targetBlock = BufferGetBlockNumber(buffer);
+ Assert(targetBlock > otherBlock);
if (unlikely(!ConditionalLockBuffer(otherBuffer)))
{
			LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
			LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
		}
/*
- * Because the buffer was unlocked for a while, it's possible,
- * although unlikely, that the page was filled. If so, just retry
- * from start.
+ * Because the buffers were unlocked for a while, it's possible,
+ * although unlikely, that an all-visible flag became set or that
+ * somebody used up the available space in the new page. We can
+ * use GetVisibilityMapPins to deal with the first case. In the
+ * second case, just retry from start.
*/
+ GetVisibilityMapPins(relation, otherBuffer, buffer,
+ otherBlock, targetBlock, vmbuffer_other,
+ vmbuffer);
+
if (len > PageGetHeapFreeSpace(page))
{
LockBuffer(otherBuffer, BUFFER_LOCK_UNLOCK);
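For orientation: GetVisibilityMapPins cannot pin a visibility map page while the heap buffers are locked, because pinning may require a read. Its shape is roughly a check-unlock-pin-relock loop; the paraphrase below uses its apparent parameter names and omits its handling of an invalid second buffer and of lock ordering:

	for (;;)
	{
		/* Under the heap locks, see which VM pins are still missing. */
		bool	need1 = PageIsAllVisible(BufferGetPage(buffer1)) &&
			!visibilitymap_pin_ok(block1, *vmbuffer1);
		bool	need2 = PageIsAllVisible(BufferGetPage(buffer2)) &&
			!visibilitymap_pin_ok(block2, *vmbuffer2);

		if (!need1 && !need2)
			break;

		/* visibilitymap_pin may do I/O, so drop the heap locks first. */
		LockBuffer(buffer1, BUFFER_LOCK_UNLOCK);
		LockBuffer(buffer2, BUFFER_LOCK_UNLOCK);
		if (need1)
			visibilitymap_pin(relation, block1, vmbuffer1);
		if (need2)
			visibilitymap_pin(relation, block2, vmbuffer2);
		LockBuffer(buffer1, BUFFER_LOCK_EXCLUSIVE);
		LockBuffer(buffer2, BUFFER_LOCK_EXCLUSIVE);
		/* Loop: the all-visible flags may have changed meanwhile. */
	}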