BufferDescPadded *BufferDescriptors;
char *BufferBlocks;
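+
+/*
+ * Tranche definitions for the per-buffer content and I/O-in-progress
+ * locks, which are embedded in the buffer descriptors themselves; both
+ * tranches are registered in InitBufferPool().
+ */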
+static LWLockTranche BufferContentLWLockTranche;
+static LWLockTranche BufferIOLWLockTranche;
/*
* Initialize shared buffer pool
*
* This is called once during shared-memory initialization (either in the
- * postmaster, or in a standalone backend).
+ * postmaster, or in a standalone backend). It is also called by a backend
+ * forked from the postmaster in the EXEC_BACKEND case.
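+ * In that case the tranche registrations performed below must be redone
+ * in each backend, since they live in process-local memory.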
*/
void
InitBufferPool(void)
*/
buf->freeNext = i + 1;
- buf->io_in_progress_lock = LWLockAssign();
- buf->content_lock = LWLockAssign();
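+ /*
+ * The content and I/O locks are now fields of the buffer descriptor,
+ * so initialize them in place, tagging each with its tranche ID.
+ */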
+ LWLockInitialize(&buf->io_in_progress_lock,
+ LWTRANCHE_BUFFER_IO_IN_PROGRESS);
+ LWLockInitialize(&buf->content_lock, LWTRANCHE_BUFFER_CONTENT);
}
/* Correct last entry of linked list */
GetBufferDescriptor(NBuffers - 1)->freeNext = FREENEXT_END_OF_LIST;
}
+ /* Register LWLock tranches. */
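+ /*
+ * array_base and array_stride let lwlock.c compute, from a lock's
+ * address, which buffer the lock belongs to.
+ */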
+ BufferContentLWLockTranche.name = "buffer_content";
+ BufferContentLWLockTranche.array_base = BufferDescriptors;
+ BufferContentLWLockTranche.array_stride = sizeof(BufferDescPadded);
+ LWLockRegisterTranche(LWTRANCHE_BUFFER_CONTENT,
+ &BufferContentLWLockTranche);
+ BufferIOLWLockTranche.name = "buffer_io";
+ BufferIOLWLockTranche.array_base = BufferDescriptors;
+ BufferIOLWLockTranche.array_stride = sizeof(BufferDescPadded);
+ LWLockRegisterTranche(LWTRANCHE_BUFFER_IO_IN_PROGRESS,
+ &BufferIOLWLockTranche);
+
/* Init other shared buffer-management stuff */
StrategyInitialize(!foundDescs);
}
if (!isLocalBuf)
{
if (mode == RBM_ZERO_AND_LOCK)
- LWLockAcquire(bufHdr->content_lock, LW_EXCLUSIVE);
+ LWLockAcquire(&bufHdr->content_lock, LW_EXCLUSIVE);
else if (mode == RBM_ZERO_AND_CLEANUP_LOCK)
LockBufferForCleanup(BufferDescriptorGetBuffer(bufHdr));
}
if ((mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK) &&
!isLocalBuf)
{
- LWLockAcquire(bufHdr->content_lock, LW_EXCLUSIVE);
+ LWLockAcquire(&bufHdr->content_lock, LW_EXCLUSIVE);
}
if (isLocalBuf)
* happens to be trying to split the page the first one got from
* StrategyGetBuffer.)
*/
- if (LWLockConditionalAcquire(buf->content_lock, LW_SHARED))
+ if (LWLockConditionalAcquire(&buf->content_lock, LW_SHARED))
{
/*
* If using a nondefault strategy, and writing the buffer
StrategyRejectBuffer(strategy, buf))
{
/* Drop lock/pin and loop around for another buffer */
- LWLockRelease(buf->content_lock);
+ LWLockRelease(&buf->content_lock);
UnpinBuffer(buf, true);
continue;
}
smgr->smgr_rnode.node.relNode);
FlushBuffer(buf, NULL);
- LWLockRelease(buf->content_lock);
+ LWLockRelease(&buf->content_lock);
TRACE_POSTGRESQL_BUFFER_WRITE_DIRTY_DONE(forkNum, blockNum,
smgr->smgr_rnode.node.spcNode,
Assert(BufferIsPinned(buffer));
/* unfortunately we can't check if the lock is held exclusively */
- Assert(LWLockHeldByMe(bufHdr->content_lock));
+ Assert(LWLockHeldByMe(&bufHdr->content_lock));
LockBufHdr(bufHdr);
if (ref->refcount == 0)
{
/* I'd better not still hold any locks on the buffer */
- Assert(!LWLockHeldByMe(buf->content_lock));
- Assert(!LWLockHeldByMe(buf->io_in_progress_lock));
+ Assert(!LWLockHeldByMe(&buf->content_lock));
+ Assert(!LWLockHeldByMe(&buf->io_in_progress_lock));
LockBufHdr(buf);
* buffer is clean by the time we've locked it.)
*/
PinBuffer_Locked(bufHdr);
- LWLockAcquire(bufHdr->content_lock, LW_SHARED);
+ LWLockAcquire(&bufHdr->content_lock, LW_SHARED);
FlushBuffer(bufHdr, NULL);
- LWLockRelease(bufHdr->content_lock);
+ LWLockRelease(&bufHdr->content_lock);
UnpinBuffer(bufHdr, true);
return result | BUF_WRITTEN;
(bufHdr->flags & BM_VALID) && (bufHdr->flags & BM_DIRTY))
{
PinBuffer_Locked(bufHdr);
- LWLockAcquire(bufHdr->content_lock, LW_SHARED);
+ LWLockAcquire(&bufHdr->content_lock, LW_SHARED);
FlushBuffer(bufHdr, rel->rd_smgr);
- LWLockRelease(bufHdr->content_lock);
+ LWLockRelease(&bufHdr->content_lock);
UnpinBuffer(bufHdr, true);
}
else
(bufHdr->flags & BM_VALID) && (bufHdr->flags & BM_DIRTY))
{
PinBuffer_Locked(bufHdr);
- LWLockAcquire(bufHdr->content_lock, LW_SHARED);
+ LWLockAcquire(&bufHdr->content_lock, LW_SHARED);
FlushBuffer(bufHdr, NULL);
- LWLockRelease(bufHdr->content_lock);
+ LWLockRelease(&bufHdr->content_lock);
UnpinBuffer(bufHdr, true);
}
else
Assert(GetPrivateRefCount(buffer) > 0);
/* here, either share or exclusive lock is OK */
- Assert(LWLockHeldByMe(bufHdr->content_lock));
+ Assert(LWLockHeldByMe(&bufHdr->content_lock));
/*
* This routine might get called many times on the same page, if we are
buf = GetBufferDescriptor(buffer - 1);
if (mode == BUFFER_LOCK_UNLOCK)
- LWLockRelease(buf->content_lock);
+ LWLockRelease(&buf->content_lock);
else if (mode == BUFFER_LOCK_SHARE)
- LWLockAcquire(buf->content_lock, LW_SHARED);
+ LWLockAcquire(&buf->content_lock, LW_SHARED);
else if (mode == BUFFER_LOCK_EXCLUSIVE)
- LWLockAcquire(buf->content_lock, LW_EXCLUSIVE);
+ LWLockAcquire(&buf->content_lock, LW_EXCLUSIVE);
else
elog(ERROR, "unrecognized buffer lock mode: %d", mode);
}
buf = GetBufferDescriptor(buffer - 1);
- return LWLockConditionalAcquire(buf->content_lock, LW_EXCLUSIVE);
+ return LWLockConditionalAcquire(&buf->content_lock, LW_EXCLUSIVE);
}
/*
UnlockBufHdr(buf);
if (!(sv_flags & BM_IO_IN_PROGRESS))
break;
- LWLockAcquire(buf->io_in_progress_lock, LW_SHARED);
- LWLockRelease(buf->io_in_progress_lock);
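+ /*
+ * The backend doing the I/O holds io_in_progress_lock exclusively for
+ * the duration, so briefly acquiring it in shared mode amounts to
+ * waiting for that I/O to finish.
+ */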
+ LWLockAcquire(&buf->io_in_progress_lock, LW_SHARED);
+ LWLockRelease(&buf->io_in_progress_lock);
}
}
* Grab the io_in_progress lock so that other processes can wait for
* me to finish the I/O.
*/
- LWLockAcquire(buf->io_in_progress_lock, LW_EXCLUSIVE);
+ LWLockAcquire(&buf->io_in_progress_lock, LW_EXCLUSIVE);
LockBufHdr(buf);
* him to get unwedged.
*/
UnlockBufHdr(buf);
- LWLockRelease(buf->io_in_progress_lock);
+ LWLockRelease(&buf->io_in_progress_lock);
WaitIO(buf);
}
{
/* someone else already did the I/O */
UnlockBufHdr(buf);
- LWLockRelease(buf->io_in_progress_lock);
+ LWLockRelease(&buf->io_in_progress_lock);
return false;
}
InProgressBuf = NULL;
- LWLockRelease(buf->io_in_progress_lock);
+ LWLockRelease(&buf->io_in_progress_lock);
}
/*
* we can use TerminateBufferIO. Anyone who's executing WaitIO on the
* buffer will be in a busy spin until we succeed in doing this.
*/
- LWLockAcquire(buf->io_in_progress_lock, LW_EXCLUSIVE);
+ LWLockAcquire(&buf->io_in_progress_lock, LW_EXCLUSIVE);
LockBufHdr(buf);
Assert(buf->flags & BM_IO_IN_PROGRESS);
/* Initialize all LWLocks in main array */
for (id = 0, lock = MainLWLockArray; id < numLocks; id++, lock++)
- LWLockInitialize(&lock->lock, 0);
+ LWLockInitialize(&lock->lock, LWTRANCHE_MAIN);
/*
* Initialize the dynamic-allocation counters, which are stored just
* before the first LWLock.
*/
LWLockCounter = (int *) ((char *) MainLWLockArray - 3 * sizeof(int));
LWLockCounter[0] = NUM_FIXED_LWLOCKS;
LWLockCounter[1] = numLocks;
- LWLockCounter[2] = 1; /* 0 is the main array */
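+ /* Dynamically assigned tranche IDs begin after the built-in ones. */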
+ LWLockCounter[2] = LWTRANCHE_LAST_BUILTIN_ID + 1;
}
if (LWLockTrancheArray == NULL)
LWLockTrancheArray = (LWLockTranche **)
MemoryContextAlloc(TopMemoryContext,
LWLockTranchesAllocated * sizeof(LWLockTranche *));
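+ /* The initial allocation must have room for all built-in tranches. */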
+ Assert(LWLockTranchesAllocated > LWTRANCHE_LAST_BUILTIN_ID);
}
MainLWLockTranche.name = "main";
MainLWLockTranche.array_base = MainLWLockArray;
MainLWLockTranche.array_stride = sizeof(LWLockPadded);
- LWLockRegisterTranche(0, &MainLWLockTranche);
+ LWLockRegisterTranche(LWTRANCHE_MAIN, &MainLWLockTranche);
}
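+
+/*
+ * In outline, a module that wants LWLocks in its own tranche can now do
+ * the following (all names below are illustrative):
+ *
+ *		static LWLockTranche mytranche;
+ *		LWLockPadded *mylocks;		/* allocated in shared memory */
+ *		int trancheid;
+ *
+ *		trancheid = LWLockNewTrancheId();
+ *		mytranche.name = "mymodule";
+ *		mytranche.array_base = mylocks;
+ *		mytranche.array_stride = sizeof(LWLockPadded);
+ *		LWLockRegisterTranche(trancheid, &mytranche);
+ *		LWLockInitialize(&mylocks[0].lock, trancheid);
+ */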
/*