if (blkno == P_NEW)
elog(ERROR, "hash AM does not use P_NEW");
- buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO, NULL);
-
- LockBuffer(buf, HASH_WRITE);
+ buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO_AND_LOCK,
+ NULL);
/* ref count and lock type are correct */
if (BufferGetBlockNumber(buf) != blkno)
elog(ERROR, "unexpected hash relation size: %u, should be %u",
BufferGetBlockNumber(buf), blkno);
+ LockBuffer(buf, HASH_WRITE);
}
else
- buf = ReadBufferExtended(rel, forkNum, blkno, RBM_ZERO, NULL);
-
- LockBuffer(buf, HASH_WRITE);
+ {
+ buf = ReadBufferExtended(rel, forkNum, blkno, RBM_ZERO_AND_LOCK,
+ NULL);
+ }
/* ref count and lock type are correct */
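
For illustration, a hedged sketch of the caller-side change these hash hunks make, reusing the names that appear above (rel, blkno, buf, HASH_WRITE); it is a sketch, not part of the patch. The old API needed a zero-fill read followed by a separate lock call; with RBM_ZERO_AND_LOCK the buffer is returned already exclusively locked:

/* Old pattern: zero-fill the page, then lock it in a second step. */
buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO, NULL);
LockBuffer(buf, HASH_WRITE);

/* New pattern: the page comes back zeroed and locked in one call. */
buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO_AND_LOCK, NULL);
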
{
XLogReadBufferForRedoExtended(lsn, record, 0,
target_node, MAIN_FORKNUM, blkno,
- RBM_ZERO, false, &buffer);
+ RBM_ZERO_AND_LOCK, false, &buffer);
page = BufferGetPage(buffer);
PageInit(page, BufferGetPageSize(buffer), 0);
action = BLK_NEEDS_REDO;
{
XLogReadBufferForRedoExtended(lsn, record, 0,
rnode, MAIN_FORKNUM, blkno,
- RBM_ZERO, false, &buffer);
+ RBM_ZERO_AND_LOCK, false, &buffer);
page = BufferGetPage(buffer);
PageInit(page, BufferGetPageSize(buffer), 0);
action = BLK_NEEDS_REDO;
{
XLogReadBufferForRedoExtended(lsn, record, 1,
rnode, MAIN_FORKNUM, newblk,
- RBM_ZERO, false, &nbuffer);
+ RBM_ZERO_AND_LOCK, false, &nbuffer);
page = (Page) BufferGetPage(nbuffer);
PageInit(page, BufferGetPageSize(nbuffer), 0);
newaction = BLK_NEEDS_REDO;
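
These three redo hunks share one shape; below is a hedged sketch of the full pattern they sit in, reusing the XLogReadBufferForRedoExtended signature shown above. Here rnode, blkno, lsn and record are assumed to come from the WAL record being replayed, and the elided middle is the record-specific page reconstruction:

Buffer buffer;
Page page;

/* Zero the target page; in this mode it is returned exclusively locked. */
XLogReadBufferForRedoExtended(lsn, record, 0,
                              rnode, MAIN_FORKNUM, blkno,
                              RBM_ZERO_AND_LOCK, false, &buffer);
page = BufferGetPage(buffer);
PageInit(page, BufferGetPageSize(buffer), 0);

/* ... rebuild the record-specific contents of the page here ... */

PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
UnlockReleaseBuffer(buffer);
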
* XLogReadBufferForRedoExtended
* Like XLogReadBufferForRedo, but with extra options.
*
- * If mode is RBM_ZERO or RBM_ZERO_ON_ERROR, if the page doesn't exist, the
- * relation is extended with all-zeroes pages up to the referenced block
- * number. In RBM_ZERO mode, the return value is always BLK_NEEDS_REDO.
+ * In RBM_ZERO_* modes, if the page doesn't exist, the relation is extended
+ * with all-zeroes pages up to the referenced block number. In
+ * RBM_ZERO_AND_LOCK and RBM_ZERO_AND_CLEANUP_LOCK modes, the return value
+ * is always BLK_NEEDS_REDO.
+ *
+ * (The RBM_ZERO_AND_CLEANUP_LOCK mode is redundant with the get_cleanup_lock
+ * parameter. Do not use an inconsistent combination!)
*
* If 'get_cleanup_lock' is true, a "cleanup lock" is acquired on the buffer
* using LockBufferForCleanup(), instead of a regular exclusive lock.
*buf = XLogReadBufferExtended(rnode, forkno, blkno, mode);
if (BufferIsValid(*buf))
{
- if (get_cleanup_lock)
- LockBufferForCleanup(*buf);
- else
- LockBuffer(*buf, BUFFER_LOCK_EXCLUSIVE);
+ if (mode != RBM_ZERO_AND_LOCK && mode != RBM_ZERO_AND_CLEANUP_LOCK)
+ {
+ if (get_cleanup_lock)
+ LockBufferForCleanup(*buf);
+ else
+ LockBuffer(*buf, BUFFER_LOCK_EXCLUSIVE);
+ }
if (lsn <= PageGetLSN(BufferGetPage(*buf)))
return BLK_DONE;
else
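
For non-zeroing callers the return value still matters; a hedged sketch of the expected dispatch, with BLK_RESTORED handling omitted for brevity. In RBM_NORMAL mode the function takes the exclusive lock itself (per the code above), and in the RBM_ZERO_* modes it always reports BLK_NEEDS_REDO, as the comment documents:

XLogRedoAction action;
Buffer buffer;

action = XLogReadBufferForRedoExtended(lsn, record, 0,
                                       rnode, MAIN_FORKNUM, blkno,
                                       RBM_NORMAL, false, &buffer);
if (action == BLK_NEEDS_REDO)
{
    Page page = BufferGetPage(buffer);

    /* ... apply the logged change to the page ... */
    PageSetLSN(page, lsn);
    MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
    UnlockReleaseBuffer(buffer);
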
* The returned buffer is exclusively-locked.
*
* For historical reasons, instead of a ReadBufferMode argument, this only
- * supports RBM_ZERO (init == true) and RBM_NORMAL (init == false) modes.
+ * supports RBM_ZERO_AND_LOCK (init == true) and RBM_NORMAL (init == false)
+ * modes.
*/
Buffer
XLogReadBuffer(RelFileNode rnode, BlockNumber blkno, bool init)
Buffer buf;
buf = XLogReadBufferExtended(rnode, MAIN_FORKNUM, blkno,
- init ? RBM_ZERO : RBM_NORMAL);
- if (BufferIsValid(buf))
+ init ? RBM_ZERO_AND_LOCK : RBM_NORMAL);
+ if (BufferIsValid(buf) && !init)
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
return buf;
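
The caller-visible contract is unchanged; a hedged illustration from a hypothetical caller's side, with rnode and blkno assumed: a valid buffer returned by XLogReadBuffer is exclusively locked either way, the difference being that in the init case the lock is now taken inside ReadBuffer via RBM_ZERO_AND_LOCK instead of by the explicit LockBuffer call above:

Buffer buffer;

/* init == true: zero-filled, relation extended if needed, already locked */
buffer = XLogReadBuffer(rnode, blkno, true);

/* init == false: normal read; XLogReadBuffer still locks it explicitly */
buffer = XLogReadBuffer(rnode, blkno, false);
if (BufferIsValid(buffer))
{
    /* page exists and is exclusively locked */
}
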
* dropped or truncated. If we don't see evidence of that later in the WAL
* sequence, we'll complain at the end of WAL replay.)
*
- * In RBM_ZERO and RBM_ZERO_ON_ERROR modes, if the page doesn't exist, the
- * relation is extended with all-zeroes pages up to the given block number.
+ * In RBM_ZERO_* modes, if the page doesn't exist, the relation is extended
+ * with all-zeroes pages up to the given block number.
*
* In RBM_NORMAL_NO_LOG mode, we return InvalidBuffer if the page doesn't
* exist, and we don't check for all-zeroes. Thus, no log entry is made
do
{
if (buffer != InvalidBuffer)
+ {
+ if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
+ }
buffer = ReadBufferWithoutRelcache(rnode, forknum,
P_NEW, mode, NULL);
}
/* Handle the corner case that P_NEW returns non-consecutive pages */
if (BufferGetBlockNumber(buffer) != blkno)
{
+ if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno,
mode, NULL);
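
A hedged illustration of the extension behavior the comment above describes, with rnode and blkno assumed to come from a WAL record; the block may lie past the fork's current end during replay:

Buffer buffer;

/* Extends the fork with zeroed pages up to blkno if necessary; in
 * RBM_ZERO_AND_LOCK (or RBM_ZERO_AND_CLEANUP_LOCK) mode the returned
 * buffer is already locked. */
buffer = XLogReadBufferExtended(rnode, MAIN_FORKNUM, blkno, RBM_ZERO_AND_LOCK);
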
Page page;
buffer = XLogReadBufferExtended(bkpb.node, bkpb.fork, bkpb.block,
- RBM_ZERO);
+ get_cleanup_lock ? RBM_ZERO_AND_CLEANUP_LOCK : RBM_ZERO_AND_LOCK);
Assert(BufferIsValid(buffer));
- if (get_cleanup_lock)
- LockBufferForCleanup(buffer);
- else
- LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
page = (Page) BufferGetPage(buffer);
* valid, the page is zeroed instead of throwing an error. This is intended
* for non-critical data, where the caller is prepared to repair errors.
*
- * In RBM_ZERO mode, if the page isn't in buffer cache already, it's filled
- * with zeros instead of reading it from disk. Useful when the caller is
- * going to fill the page from scratch, since this saves I/O and avoids
+ * In RBM_ZERO_AND_LOCK mode, if the page isn't in buffer cache already, it's
+ * filled with zeros instead of reading it from disk. Useful when the caller
+ * is going to fill the page from scratch, since this saves I/O and avoids
* unnecessary failure if the page-on-disk has corrupt page headers.
+ * The page is returned locked to ensure that the caller has a chance to
+ * initialize the page before it's made visible to others.
* Caution: do not use this mode to read a page that is beyond the relation's
* current physical EOF; that is likely to cause problems in md.c when
* the page is modified and written out. P_NEW is OK, though.
*
+ * RBM_ZERO_AND_CLEANUP_LOCK is the same as RBM_ZERO_AND_LOCK, but acquires
+ * a cleanup-strength lock on the page.
+ *
* RBM_NORMAL_NO_LOG mode is treated the same as RBM_NORMAL here.
*
* If strategy is not NULL, a nondefault buffer access strategy is used.
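
A hedged usage sketch of the new mode as described above, for a caller that is about to rebuild a page whose block number it already knows; rel and blkno are assumed, and the page is only unlocked once it has been fully initialized:

Buffer buf;
Page page;

buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO_AND_LOCK, NULL);
page = BufferGetPage(buf);
PageInit(page, BufferGetPageSize(buf), 0);
/* ... fill in the page; no other backend sees it until the lock is dropped ... */
MarkBufferDirty(buf);
UnlockReleaseBuffer(buf);
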
isExtend,
found);
+ /*
+ * In RBM_ZERO_AND_LOCK mode the caller expects the page to
+ * be locked on return.
+ */
+ if (!isLocalBuf)
+ {
+ if (mode == RBM_ZERO_AND_LOCK)
+ LWLockAcquire(bufHdr->content_lock, LW_EXCLUSIVE);
+ else if (mode == RBM_ZERO_AND_CLEANUP_LOCK)
+ LockBufferForCleanup(BufferDescriptorGetBuffer(bufHdr));
+ }
+
return BufferDescriptorGetBuffer(bufHdr);
}
* Read in the page, unless the caller intends to overwrite it and
* just wants us to allocate a buffer.
*/
- if (mode == RBM_ZERO)
+ if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
MemSet((char *) bufBlock, 0, BLCKSZ);
else
{
}
}
+ /*
+ * In RBM_ZERO_AND_LOCK mode, grab the buffer content lock before marking
+ * the page as valid, to make sure that no other backend sees the zeroed
+ * page before the caller has had a chance to initialize it.
+ *
+ * Since no-one else can be looking at the page contents yet, there is no
+ * difference between an exclusive lock and a cleanup-strength lock.
+ * (Note that we cannot use LockBuffer() or LockBufferForCleanup() here,
+ * because they assert that the buffer is already valid.)
+ */
+ if ((mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK) &&
+ !isLocalBuf)
+ {
+ LWLockAcquire(bufHdr->content_lock, LW_EXCLUSIVE);
+ }
+
if (isLocalBuf)
{
/* Only need to adjust flags */
typedef enum
{
RBM_NORMAL, /* Normal read */
- RBM_ZERO, /* Don't read from disk, caller will
- * initialize */
+ RBM_ZERO_AND_LOCK, /* Don't read from disk, caller will
+ * initialize. Also locks the page. */
+ RBM_ZERO_AND_CLEANUP_LOCK, /* Like RBM_ZERO_AND_LOCK, but locks the page
+ * in "cleanup" mode */
RBM_ZERO_ON_ERROR, /* Read, but return an all-zeros page on error */
RBM_NORMAL_NO_LOG /* Don't log page as invalid during WAL
* replay; otherwise same as RBM_NORMAL */
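
Finally, a hedged sketch of choosing between the two new values, mirroring the get_cleanup_lock selection used in the xlog changes above; need_cleanup_lock is a hypothetical flag, and rnode and blkno are assumed:

ReadBufferMode mode;
Buffer buffer;

mode = need_cleanup_lock ? RBM_ZERO_AND_CLEANUP_LOCK : RBM_ZERO_AND_LOCK;
buffer = XLogReadBufferExtended(rnode, MAIN_FORKNUM, blkno, mode);
/* In either mode the page is zero-filled and returned locked. */
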