*/
typedef struct pgssSharedState
{
- LWLockId lock; /* protects hashtable search/modification */
+ FlexLockId lock; /* protects hashtable search/modification */
int query_size; /* max query length in bytes */
} pgssSharedState;
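For extension code like this, only the declared type of the lock field changes; the lock is still taken and dropped through the unchanged LWLock calls. A minimal usage sketch, assuming a hypothetical pgss pointer to the shared struct above:

	/* Hypothetical caller sketch -- not part of the patch. */
	static pgssSharedState *pgss;	/* set up at shared-memory init time */

	static void
	pgss_lookup_example(void)
	{
		/* a FlexLockId works with the existing LWLock API */
		LWLockAcquire(pgss->lock, LW_SHARED);
		/* ... search the shared hashtable ... */
		LWLockRelease(pgss->lock);
	}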
</varlistentry>
<varlistentry>
- <term><varname>trace_lwlocks</varname> (<type>boolean</type>)</term>
+ <term><varname>trace_flexlocks</varname> (<type>boolean</type>)</term>
<indexterm>
- <primary><varname>trace_lwlocks</> configuration parameter</primary>
+ <primary><varname>trace_flexlocks</> configuration parameter</primary>
</indexterm>
<listitem>
<para>
- If on, emit information about lightweight lock usage. Lightweight
- locks are intended primarily to provide mutual exclusion of access
+ If on, emit information about FlexLock usage. FlexLocks
+ are intended primarily to provide mutual exclusion of access
to shared-memory data structures.
</para>
<para>
or kilobytes of memory used for an internal sort.</entry>
</row>
<row>
- <entry>lwlock-acquire</entry>
- <entry>(LWLockId, LWLockMode)</entry>
- <entry>Probe that fires when an LWLock has been acquired.
- arg0 is the LWLock's ID.
- arg1 is the requested lock mode, either exclusive or shared.</entry>
+ <entry>flexlock-acquire</entry>
+ <entry>(FlexLockId, FlexLockMode)</entry>
+ <entry>Probe that fires when a FlexLock has been acquired.
+ arg0 is the FlexLock's ID.
+ arg1 is the requested lock mode.</entry>
</row>
<row>
- <entry>lwlock-release</entry>
- <entry>(LWLockId)</entry>
- <entry>Probe that fires when an LWLock has been released (but note
+ <entry>flexlock-release</entry>
+ <entry>(FlexLockId)</entry>
+ <entry>Probe that fires when a FlexLock has been released (but note
that any released waiters have not yet been awakened).
- arg0 is the LWLock's ID.</entry>
+ arg0 is the FlexLock's ID.</entry>
</row>
<row>
- <entry>lwlock-wait-start</entry>
- <entry>(LWLockId, LWLockMode)</entry>
- <entry>Probe that fires when an LWLock was not immediately available and
+ <entry>flexlock-wait-start</entry>
+ <entry>(FlexLockId, FlexLockMode)</entry>
+ <entry>Probe that fires when a FlexLock was not immediately available and
a server process has begun to wait for the lock to become available.
- arg0 is the LWLock's ID.
+ arg0 is the FlexLock's ID.
arg1 is the requested lock mode, either exclusive or shared.</entry>
</row>
<row>
- <entry>lwlock-wait-done</entry>
- <entry>(LWLockId, LWLockMode)</entry>
+ <entry>flexlock-wait-done</entry>
+ <entry>(FlexLockId, FlexLockMode)</entry>
<entry>Probe that fires when a server process has been released from its
- wait for an LWLock (it does not actually have the lock yet).
- arg0 is the LWLock's ID.
+ wait for a FlexLock (it does not actually have the lock yet).
+ arg0 is the FlexLock's ID.
arg1 is the requested lock mode, either exclusive or shared.</entry>
</row>
<row>
- <entry>lwlock-condacquire</entry>
- <entry>(LWLockId, LWLockMode)</entry>
- <entry>Probe that fires when an LWLock was successfully acquired when the
- caller specified no waiting.
- arg0 is the LWLock's ID.
+ <entry>flexlock-condacquire</entry>
+ <entry>(FlexLockId, FlexLockMode)</entry>
+ <entry>Probe that fires when a FlexLock was successfully acquired when
+ the caller specified no waiting.
+ arg0 is the FlexLock's ID.
arg1 is the requested lock mode, either exclusive or shared.</entry>
</row>
<row>
- <entry>lwlock-condacquire-fail</entry>
- <entry>(LWLockId, LWLockMode)</entry>
- <entry>Probe that fires when an LWLock was not successfully acquired when
- the caller specified no waiting.
- arg0 is the LWLock's ID.
+ <entry>flexlock-condacquire-fail</entry>
+ <entry>(FlexLockId, FlexLockMode)</entry>
+ <entry>Probe that fires when a FlexLock was not successfully acquired
+ when the caller specified no waiting.
+ arg0 is the FlexLock's ID.
arg1 is the requested lock mode, either exclusive or shared.</entry>
</row>
<row>
<entry>unsigned int</entry>
</row>
<row>
- <entry>LWLockId</entry>
+ <entry>FlexLockId</entry>
<entry>int</entry>
</row>
<row>
- <entry>LWLockMode</entry>
+ <entry>FlexLockMode</entry>
<entry>int</entry>
</row>
<row>
sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */
sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */
sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */
- sz += MAXALIGN(nslots * sizeof(LWLockId)); /* buffer_locks[] */
+ sz += MAXALIGN(nslots * sizeof(FlexLockId)); /* buffer_locks[] */
if (nlsns > 0)
sz += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr)); /* group_lsn[] */
void
SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
- LWLockId ctllock, const char *subdir)
+ FlexLockId ctllock, const char *subdir)
{
SlruShared shared;
bool found;
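SLRU callers pass one of the fixed lock IDs as the control lock, exactly as before; only the parameter's declared type changes. For instance, clog.c-style initialization still reads (sketch, following the core call):

	/* Sketch of an unchanged SLRU caller (cf. CLOGShmemInit in clog.c). */
	SimpleLruInit(ClogCtl, "CLOG Ctl", NUM_CLOG_BUFFERS, 0,
				  CLogControlLock, "pg_clog");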
offset += MAXALIGN(nslots * sizeof(int));
shared->page_lru_count = (int *) (ptr + offset);
offset += MAXALIGN(nslots * sizeof(int));
- shared->buffer_locks = (LWLockId *) (ptr + offset);
- offset += MAXALIGN(nslots * sizeof(LWLockId));
+ shared->buffer_locks = (FlexLockId *) (ptr + offset);
+ offset += MAXALIGN(nslots * sizeof(FlexLockId));
if (nlsns > 0)
{
gxact->proc.roleId = owner;
gxact->proc.inCommit = false;
gxact->proc.vacuumFlags = 0;
- gxact->proc.lwWaiting = false;
- gxact->proc.lwExclusive = false;
- gxact->proc.lwWaitLink = NULL;
+ gxact->proc.flWaitResult = 0;
+ gxact->proc.flWaitMode = 0;
+ gxact->proc.flWaitLink = NULL;
gxact->proc.waitLock = NULL;
gxact->proc.waitProcLock = NULL;
for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
* Releasing LW locks is critical since we might try to grab them again
* while cleaning up!
*/
- LWLockReleaseAll();
+ FlexLockReleaseAll();
/* Clean up buffer I/O and buffer context locks, too */
AbortBufferIO();
* FIXME This may be incorrect --- Are there some locks we should keep?
* Buffer locks, for example? I don't think so but I'm not sure.
*/
- LWLockReleaseAll();
+ FlexLockReleaseAll();
AbortBufferIO();
UnlockBuffers();
* Begin shutdown of an auxiliary process. This is approximately the equivalent
* of ShutdownPostgres() in postinit.c. We can't run transactions in an
* auxiliary process, so most of the work of AbortTransaction() is not needed,
- * but we do need to make sure we've released any LWLocks we are holding.
+ * but we do need to make sure we've released any flex locks we are holding.
* (This is only critical during an error exit.)
*/
static void
ShutdownAuxiliaryProcess(int code, Datum arg)
{
- LWLockReleaseAll();
+ FlexLockReleaseAll();
}
/* ----------------------------------------------------------------
#include "storage/lmgr.h"
#include "storage/proc.h"
#include "storage/procarray.h"
+#include "storage/procarraylock.h"
#include "utils/acl.h"
#include "utils/attoptcache.h"
#include "utils/datum.h"
/*
* OK, let's do it. First let other backends know I'm in ANALYZE.
*/
- LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+ ProcArrayLockAcquire(PAL_EXCLUSIVE);
MyProc->vacuumFlags |= PROC_IN_ANALYZE;
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
/*
* Do the normal non-recursive ANALYZE.
* Reset my PGPROC flag. Note: we need this here, and not in vacuum_rel,
* because the vacuum flag is cleared by the end-of-xact code.
*/
- LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+ ProcArrayLockAcquire(PAL_EXCLUSIVE);
MyProc->vacuumFlags &= ~PROC_IN_ANALYZE;
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
}
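The ProcArrayLock* calls introduced here come from the new storage/procarraylock.h header, which this excerpt never shows. A hedged sketch of the interface implied by the call sites in this patch; the real header may differ:

	/* Interface implied by the call sites in this patch. */
	typedef enum
	{
		PAL_SHARED,
		PAL_EXCLUSIVE
	} ProcArrayLockMode;

	extern void ProcArrayLockAcquire(ProcArrayLockMode mode);
	extern void ProcArrayLockRelease(void);
	extern void ProcArrayLockClearTransaction(TransactionId latestXid);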
/*
#include "storage/lmgr.h"
#include "storage/proc.h"
#include "storage/procarray.h"
+#include "storage/procarraylock.h"
#include "utils/acl.h"
#include "utils/fmgroids.h"
#include "utils/guc.h"
* MyProc->xid/xmin, else OldestXmin might appear to go backwards,
* which is probably Not Good.
*/
- LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+ ProcArrayLockAcquire(PAL_EXCLUSIVE);
MyProc->vacuumFlags |= PROC_IN_VACUUM;
if (for_wraparound)
MyProc->vacuumFlags |= PROC_VACUUM_FOR_WRAPAROUND;
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
}
/*
/*
* These operations are really just a minimal subset of
* AbortTransaction(). We don't have very many resources to worry
- * about in bgwriter, but we do have LWLocks, buffers, and temp files.
+ * about in bgwriter, but we do have flex locks, buffers, and temp
+ * files.
*/
- LWLockReleaseAll();
+ FlexLockReleaseAll();
AbortBufferIO();
UnlockBuffers();
/* buffer pins are released here: */
/*
* These operations are really just a minimal subset of
* AbortTransaction(). We don't have very many resources to worry
- * about in checkpointer, but we do have LWLocks, buffers, and temp files.
+ * about in checkpointer, but we do have flex locks, buffers, and temp
+ * files.
*/
- LWLockReleaseAll();
+ FlexLockReleaseAll();
AbortBufferIO();
UnlockBuffers();
/* buffer pins are released here: */
bool *skip_slot;
/* must hold BgWriterCommLock in exclusive mode */
- Assert(LWLockHeldByMe(BgWriterCommLock));
+ Assert(FlexLockHeldByMe(BgWriterCommLock));
/* Initialize temporary hash table */
MemSet(&ctl, 0, sizeof(ctl));
#include "postmaster/syslogger.h"
#include "replication/walsender.h"
#include "storage/fd.h"
+#include "storage/flexlock_internals.h"
#include "storage/ipc.h"
#include "storage/pg_shmem.h"
#include "storage/pmsignal.h"
typedef int InheritableSocket;
#endif
-typedef struct LWLock LWLock; /* ugly kluge */
-
/*
* Structure contains all variables passed to exec:ed backends
*/
slock_t *ShmemLock;
VariableCache ShmemVariableCache;
Backend *ShmemBackendArray;
- LWLock *LWLockArray;
+ FlexLock *FlexLockArray;
slock_t *ProcStructLock;
PROC_HDR *ProcGlobal;
PGPROC *AuxiliaryProcs;
* functions
*/
extern slock_t *ShmemLock;
-extern LWLock *LWLockArray;
extern slock_t *ProcStructLock;
extern PGPROC *AuxiliaryProcs;
extern PMSignalData *PMSignalState;
param->ShmemVariableCache = ShmemVariableCache;
param->ShmemBackendArray = ShmemBackendArray;
- param->LWLockArray = LWLockArray;
+ param->FlexLockArray = FlexLockArray;
param->ProcStructLock = ProcStructLock;
param->ProcGlobal = ProcGlobal;
param->AuxiliaryProcs = AuxiliaryProcs;
ShmemVariableCache = param->ShmemVariableCache;
ShmemBackendArray = param->ShmemBackendArray;
- LWLockArray = param->LWLockArray;
+ FlexLockArray = param->FlexLockArray;
ProcStructLock = param->ProcStructLock;
ProcGlobal = param->ProcGlobal;
AuxiliaryProcs = param->AuxiliaryProcs;
/*
* These operations are really just a minimal subset of
* AbortTransaction(). We don't have very many resources to worry
- * about in walwriter, but we do have LWLocks, and perhaps buffers?
+ * about in walwriter, but we do have flex locks, and perhaps buffers?
*/
- LWLockReleaseAll();
+ FlexLockReleaseAll();
AbortBufferIO();
UnlockBuffers();
/* buffer pins are released here: */
{
BufferTag newTag; /* identity of requested block */
uint32 newHash; /* hash value for newTag */
- LWLockId newPartitionLock; /* buffer partition lock for it */
+ FlexLockId newPartitionLock; /* buffer partition lock for it */
int buf_id;
/* create a tag so we can lookup the buffer */
{
BufferTag newTag; /* identity of requested block */
uint32 newHash; /* hash value for newTag */
- LWLockId newPartitionLock; /* buffer partition lock for it */
+ FlexLockId newPartitionLock; /* buffer partition lock for it */
BufferTag oldTag; /* previous identity of selected buffer */
uint32 oldHash; /* hash value for oldTag */
- LWLockId oldPartitionLock; /* buffer partition lock for it */
+ FlexLockId oldPartitionLock; /* buffer partition lock for it */
BufFlags oldFlags;
int buf_id;
volatile BufferDesc *buf;
{
BufferTag oldTag;
uint32 oldHash; /* hash value for oldTag */
- LWLockId oldPartitionLock; /* buffer partition lock for it */
+ FlexLockId oldPartitionLock; /* buffer partition lock for it */
BufFlags oldFlags;
/* Save the original buffer tag before dropping the spinlock */
Assert(PrivateRefCount[buffer - 1] > 0);
/* unfortunately we can't check if the lock is held exclusively */
- Assert(LWLockHeldByMe(bufHdr->content_lock));
+ Assert(FlexLockHeldByMe(bufHdr->content_lock));
LockBufHdr(bufHdr);
if (PrivateRefCount[b] == 0)
{
/* I'd better not still hold any locks on the buffer */
- Assert(!LWLockHeldByMe(buf->content_lock));
- Assert(!LWLockHeldByMe(buf->io_in_progress_lock));
+ Assert(!FlexLockHeldByMe(buf->content_lock));
+ Assert(!FlexLockHeldByMe(buf->io_in_progress_lock));
LockBufHdr(buf);
Assert(PrivateRefCount[buffer - 1] > 0);
/* here, either share or exclusive lock is OK */
- Assert(LWLockHeldByMe(bufHdr->content_lock));
+ Assert(FlexLockHeldByMe(bufHdr->content_lock));
/*
* This routine might get called many times on the same page, if we are
size = add_size(size, SUBTRANSShmemSize());
size = add_size(size, TwoPhaseShmemSize());
size = add_size(size, MultiXactShmemSize());
- size = add_size(size, LWLockShmemSize());
+ size = add_size(size, FlexLockShmemSize());
size = add_size(size, ProcArrayShmemSize());
size = add_size(size, BackendStatusShmemSize());
size = add_size(size, SInvalShmemSize());
* needed for InitShmemIndex.
*/
if (!IsUnderPostmaster)
- CreateLWLocks();
+ CreateFlexLocks();
/*
* Set up shmem.c index hashtable
#include "access/twophase.h"
#include "miscadmin.h"
#include "storage/procarray.h"
+#include "storage/procarraylock.h"
#include "storage/spin.h"
#include "utils/builtins.h"
#include "utils/snapmgr.h"
{
ProcArrayStruct *arrayP = procArray;
- LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+ ProcArrayLockAcquire(PAL_EXCLUSIVE);
if (arrayP->numProcs >= arrayP->maxProcs)
{
* fixed supply of PGPROC structs too, and so we should have failed
* earlier.)
*/
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
ereport(FATAL,
(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
errmsg("sorry, too many clients already")));
arrayP->procs[arrayP->numProcs] = proc;
arrayP->numProcs++;
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
}
/*
DisplayXidCache();
#endif
- LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+ ProcArrayLockAcquire(PAL_EXCLUSIVE);
if (TransactionIdIsValid(latestXid))
{
arrayP->procs[index] = arrayP->procs[arrayP->numProcs - 1];
arrayP->procs[arrayP->numProcs - 1] = NULL; /* for debugging */
arrayP->numProcs--;
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
return;
}
}
/* Ooops */
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
elog(LOG, "failed to find proc %p in ProcArray", proc);
}
{
if (TransactionIdIsValid(latestXid))
{
- /*
- * We must lock ProcArrayLock while clearing proc->xid, so that we do
- * not exit the set of "running" transactions while someone else is
- * taking a snapshot. See discussion in
- * src/backend/access/transam/README.
- */
- Assert(TransactionIdIsValid(proc->xid));
-
- LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
-
- proc->xid = InvalidTransactionId;
- proc->lxid = InvalidLocalTransactionId;
- proc->xmin = InvalidTransactionId;
- /* must be cleared with xid/xmin: */
- proc->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
- proc->inCommit = false; /* be sure this is cleared in abort */
- proc->recoveryConflictPending = false;
-
- /* Clear the subtransaction-XID cache too while holding the lock */
- proc->subxids.nxids = 0;
- proc->subxids.overflowed = false;
-
- /* Also advance global latestCompletedXid while holding the lock */
- if (TransactionIdPrecedes(ShmemVariableCache->latestCompletedXid,
- latestXid))
- ShmemVariableCache->latestCompletedXid = latestXid;
-
- LWLockRelease(ProcArrayLock);
+ Assert(proc == MyProc);
+ ProcArrayLockClearTransaction(latestXid);
}
else
- {
- /*
- * If we have no XID, we don't need to lock, since we won't affect
- * anyone else's calculation of a snapshot. We might change their
- * estimate of global xmin, but that's OK.
- */
- Assert(!TransactionIdIsValid(proc->xid));
-
- proc->lxid = InvalidLocalTransactionId;
proc->xmin = InvalidTransactionId;
- /* must be cleared with xid/xmin: */
- proc->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
- proc->inCommit = false; /* be sure this is cleared in abort */
- proc->recoveryConflictPending = false;
- Assert(proc->subxids.nxids == 0);
- Assert(proc->subxids.overflowed == false);
- }
+ proc->lxid = InvalidLocalTransactionId;
+ proc->inCommit = false; /* be sure this is cleared in abort */
+ proc->recoveryConflictPending = false;
}
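The xid-clearing logic deleted above does not vanish; it moves behind ProcArrayLockClearTransaction, which can do the clearing while it already holds (or waits on) the proc-array lock. A hedged sketch of the behavior the call site depends on, reconstructed from the removed lines (the caller still clears lxid, inCommit, and recoveryConflictPending itself afterward, and the real version in procarraylock.c may batch this work for several backends):

	void
	ProcArrayLockClearTransaction(TransactionId latestXid)
	{
		volatile PGPROC *proc = MyProc;

		/* ... acquire the proc-array flex lock in exclusive mode ... */
		proc->xid = InvalidTransactionId;
		proc->xmin = InvalidTransactionId;
		/* must be cleared with xid/xmin: */
		proc->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;

		/* clear the subtransaction-XID cache too while holding the lock */
		proc->subxids.nxids = 0;
		proc->subxids.overflowed = false;

		/* also advance global latestCompletedXid while holding the lock */
		if (TransactionIdPrecedes(ShmemVariableCache->latestCompletedXid,
								  latestXid))
			ShmemVariableCache->latestCompletedXid = latestXid;
		/* ... release the lock, waking any waiters ... */
	}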
/*
* Nobody else is running yet, but take locks anyhow
*/
- LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+ ProcArrayLockAcquire(PAL_EXCLUSIVE);
/*
* KnownAssignedXids is sorted so we cannot just add the xids, we have to
Assert(TransactionIdIsNormal(ShmemVariableCache->latestCompletedXid));
Assert(TransactionIdIsValid(ShmemVariableCache->nextXid));
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
KnownAssignedXidsDisplay(trace_recovery(DEBUG3));
if (standbyState == STANDBY_SNAPSHOT_READY)
/*
* Uses same locking as transaction commit
*/
- LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+ ProcArrayLockAcquire(PAL_EXCLUSIVE);
/*
* Remove subxids from known-assigned-xacts.
if (TransactionIdPrecedes(procArray->lastOverflowedXid, max_xid))
procArray->lastOverflowedXid = max_xid;
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
}
/*
errmsg("out of memory")));
}
- LWLockAcquire(ProcArrayLock, LW_SHARED);
+ ProcArrayLockAcquire(PAL_SHARED);
/*
* Now that we have the lock, we can check latestCompletedXid; if the
*/
if (TransactionIdPrecedes(ShmemVariableCache->latestCompletedXid, xid))
{
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
xc_by_latest_xid_inc();
return true;
}
*/
if (TransactionIdEquals(pxid, xid))
{
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
xc_by_main_xid_inc();
return true;
}
if (TransactionIdEquals(cxid, xid))
{
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
xc_by_child_xid_inc();
return true;
}
if (KnownAssignedXidExists(xid))
{
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
xc_by_known_assigned_inc();
return true;
}
nxids = KnownAssignedXidsGet(xids, xid);
}
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
/*
* If none of the relevant caches overflowed, we know the Xid is not
if (TransactionIdPrecedes(xid, RecentXmin))
return false;
- LWLockAcquire(ProcArrayLock, LW_SHARED);
+ ProcArrayLockAcquire(PAL_SHARED);
for (i = 0; i < arrayP->numProcs; i++)
{
}
}
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
return result;
}
/* Cannot look for individual databases during recovery */
Assert(allDbs || !RecoveryInProgress());
- LWLockAcquire(ProcArrayLock, LW_SHARED);
+ ProcArrayLockAcquire(PAL_SHARED);
/*
* We initialize the MIN() calculation with latestCompletedXid + 1. This
*/
TransactionId kaxmin = KnownAssignedXidsGetOldestXmin();
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
if (TransactionIdIsNormal(kaxmin) &&
TransactionIdPrecedes(kaxmin, result))
/*
* No other information needed, so release the lock immediately.
*/
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
/*
* Compute the cutoff XID by subtracting vacuum_defer_cleanup_age,
* It is sufficient to get shared lock on ProcArrayLock, even if we are
* going to set MyProc->xmin.
*/
- LWLockAcquire(ProcArrayLock, LW_SHARED);
+ ProcArrayLockAcquire(PAL_SHARED);
/* xmax is always latestCompletedXid + 1 */
xmax = ShmemVariableCache->latestCompletedXid;
if (!TransactionIdIsValid(MyProc->xmin))
MyProc->xmin = TransactionXmin = xmin;
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
/*
* Update globalxmin to include actual process xids. This is a slightly
return false;
/* Get lock so source xact can't end while we're doing this */
- LWLockAcquire(ProcArrayLock, LW_SHARED);
+ ProcArrayLockAcquire(PAL_SHARED);
for (index = 0; index < arrayP->numProcs; index++)
{
break;
}
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
return result;
}
* Ensure that no xids enter or leave the procarray while we obtain
* snapshot.
*/
- LWLockAcquire(ProcArrayLock, LW_SHARED);
+ ProcArrayLockAcquire(PAL_SHARED);
LWLockAcquire(XidGenLock, LW_SHARED);
latestCompletedXid = ShmemVariableCache->latestCompletedXid;
CurrentRunningXacts->latestCompletedXid = latestCompletedXid;
/* We don't release XidGenLock here, the caller is responsible for that */
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
Assert(TransactionIdIsValid(CurrentRunningXacts->nextXid));
Assert(TransactionIdIsValid(CurrentRunningXacts->oldestRunningXid));
Assert(!RecoveryInProgress());
- LWLockAcquire(ProcArrayLock, LW_SHARED);
+ ProcArrayLockAcquire(PAL_SHARED);
oldestRunningXid = ShmemVariableCache->nextXid;
*/
}
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
return oldestRunningXid;
}
xids = (TransactionId *) palloc(arrayP->maxProcs * sizeof(TransactionId));
nxids = 0;
- LWLockAcquire(ProcArrayLock, LW_SHARED);
+ ProcArrayLockAcquire(PAL_SHARED);
for (index = 0; index < arrayP->numProcs; index++)
{
xids[nxids++] = pxid;
}
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
*xids_p = xids;
return nxids;
ProcArrayStruct *arrayP = procArray;
int index;
- LWLockAcquire(ProcArrayLock, LW_SHARED);
+ ProcArrayLockAcquire(PAL_SHARED);
for (index = 0; index < arrayP->numProcs; index++)
{
}
}
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
return result;
}
if (pid == 0) /* never match dummy PGPROCs */
return NULL;
- LWLockAcquire(ProcArrayLock, LW_SHARED);
+ ProcArrayLockAcquire(PAL_SHARED);
for (index = 0; index < arrayP->numProcs; index++)
{
}
}
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
return result;
}
if (xid == InvalidTransactionId) /* never match invalid xid */
return 0;
- LWLockAcquire(ProcArrayLock, LW_SHARED);
+ ProcArrayLockAcquire(PAL_SHARED);
for (index = 0; index < arrayP->numProcs; index++)
{
}
}
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
return result;
}
vxids = (VirtualTransactionId *)
palloc(sizeof(VirtualTransactionId) * arrayP->maxProcs);
- LWLockAcquire(ProcArrayLock, LW_SHARED);
+ ProcArrayLockAcquire(PAL_SHARED);
for (index = 0; index < arrayP->numProcs; index++)
{
}
}
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
*nvxids = count;
return vxids;
errmsg("out of memory")));
}
- LWLockAcquire(ProcArrayLock, LW_SHARED);
+ ProcArrayLockAcquire(PAL_SHARED);
for (index = 0; index < arrayP->numProcs; index++)
{
}
}
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
/* add the terminator */
vxids[count].backendId = InvalidBackendId;
int index;
pid_t pid = 0;
- LWLockAcquire(ProcArrayLock, LW_SHARED);
+ ProcArrayLockAcquire(PAL_SHARED);
for (index = 0; index < arrayP->numProcs; index++)
{
}
}
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
return pid;
}
int count = 0;
int index;
- LWLockAcquire(ProcArrayLock, LW_SHARED);
+ ProcArrayLockAcquire(PAL_SHARED);
for (index = 0; index < arrayP->numProcs; index++)
{
count++;
}
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
return count;
}
pid_t pid = 0;
/* tell all backends to die */
- LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+ ProcArrayLockAcquire(PAL_EXCLUSIVE);
for (index = 0; index < arrayP->numProcs; index++)
{
}
}
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
}
/*
int count = 0;
int index;
- LWLockAcquire(ProcArrayLock, LW_SHARED);
+ ProcArrayLockAcquire(PAL_SHARED);
for (index = 0; index < arrayP->numProcs; index++)
{
count++;
}
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
return count;
}
*nbackends = *nprepared = 0;
- LWLockAcquire(ProcArrayLock, LW_SHARED);
+ ProcArrayLockAcquire(PAL_SHARED);
for (index = 0; index < arrayP->numProcs; index++)
{
}
}
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
if (!found)
return false; /* no conflicting backends, so done */
* to abort subtransactions, but pending closer analysis we'd best be
* conservative.
*/
- LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+ ProcArrayLockAcquire(PAL_EXCLUSIVE);
/*
* Under normal circumstances xid and xids[] will be in increasing order,
latestXid))
ShmemVariableCache->latestCompletedXid = latestXid;
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
}
#ifdef XIDCACHE_DEBUG
/*
* Uses same locking as transaction commit
*/
- LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+ ProcArrayLockAcquire(PAL_EXCLUSIVE);
KnownAssignedXidsRemoveTree(xid, nsubxids, subxids);
max_xid))
ShmemVariableCache->latestCompletedXid = max_xid;
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
}
/*
void
ExpireAllKnownAssignedTransactionIds(void)
{
- LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+ ProcArrayLockAcquire(PAL_EXCLUSIVE);
KnownAssignedXidsRemovePreceding(InvalidTransactionId);
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
}
/*
void
ExpireOldKnownAssignedTransactionIds(TransactionId xid)
{
- LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+ ProcArrayLockAcquire(PAL_EXCLUSIVE);
KnownAssignedXidsRemovePreceding(xid);
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
}
{
/* must hold lock to compress */
if (!exclusive_lock)
- LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+ ProcArrayLockAcquire(PAL_EXCLUSIVE);
KnownAssignedXidsCompress(true);
/* note: we no longer care about the tail pointer */
if (!exclusive_lock)
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
/*
* If it still won't fit then we're out of memory
top_builddir = ../../../..
include $(top_builddir)/src/Makefile.global
-OBJS = lmgr.o lock.o proc.o deadlock.o lwlock.o spin.o s_lock.o predicate.o
+OBJS = flexlock.o lmgr.o lock.o proc.o deadlock.o lwlock.o spin.o s_lock.o \
+ procarraylock.o predicate.o
include $(top_srcdir)/src/backend/common.mk
bool found;
ResourceOwner owner;
uint32 hashcode;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
int status;
bool log_lock = false;
LOCALLOCK *locallock;
LOCK *lock;
PROCLOCK *proclock;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
bool wakeupNeeded;
if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
*/
for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
{
- LWLockId partitionLock = FirstLockMgrLock + partition;
+ FlexLockId partitionLock = FirstLockMgrLock + partition;
SHM_QUEUE *procLocks = &(MyProc->myProcLocks[partition]);
proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
uint32 hashcode)
{
- LWLockId partitionLock = LockHashPartitionLock(hashcode);
+ FlexLockId partitionLock = LockHashPartitionLock(hashcode);
Oid relid = locktag->locktag_field2;
uint32 i;
LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
LOCKTAG *locktag = &locallock->tag.lock;
PROCLOCK *proclock = NULL;
- LWLockId partitionLock = LockHashPartitionLock(locallock->hashcode);
+ FlexLockId partitionLock = LockHashPartitionLock(locallock->hashcode);
Oid relid = locktag->locktag_field2;
uint32 f;
SHM_QUEUE *procLocks;
PROCLOCK *proclock;
uint32 hashcode;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
int count = 0;
int fast_count = 0;
PROCLOCKTAG proclocktag;
uint32 hashcode;
uint32 proclock_hashcode;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
bool wakeupNeeded;
hashcode = LockTagHashCode(locktag);
*/
for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
{
- LWLockId partitionLock = FirstLockMgrLock + partition;
+ FlexLockId partitionLock = FirstLockMgrLock + partition;
SHM_QUEUE *procLocks = &(MyProc->myProcLocks[partition]);
proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
uint32 hashcode;
uint32 proclock_hashcode;
int partition;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
LockMethod lockMethodTable;
Assert(len == sizeof(TwoPhaseLockRecord));
*/
#include "postgres.h"
-#include "access/clog.h"
-#include "access/multixact.h"
-#include "access/subtrans.h"
-#include "commands/async.h"
#include "miscadmin.h"
#include "pg_trace.h"
+#include "storage/flexlock_internals.h"
#include "storage/ipc.h"
-#include "storage/predicate.h"
#include "storage/proc.h"
#include "storage/spin.h"
-
-/* We use the ShmemLock spinlock to protect LWLockAssign */
-extern slock_t *ShmemLock;
-
-
typedef struct LWLock
{
- slock_t mutex; /* Protects LWLock and queue of PGPROCs */
- bool releaseOK; /* T if ok to release waiters */
+ FlexLock flex; /* common FlexLock infrastructure */
char exclusive; /* # of exclusive holders (0 or 1) */
int shared; /* # of shared holders (0..MaxBackends) */
- PGPROC *head; /* head of list of waiting PGPROCs */
- PGPROC *tail; /* tail of list of waiting PGPROCs */
- /* tail is undefined when head is NULL */
} LWLock;
-/*
- * All the LWLock structs are allocated as an array in shared memory.
- * (LWLockIds are indexes into the array.) We force the array stride to
- * be a power of 2, which saves a few cycles in indexing, but more
- * importantly also ensures that individual LWLocks don't cross cache line
- * boundaries. This reduces cache contention problems, especially on AMD
- * Opterons. (Of course, we have to also ensure that the array start
- * address is suitably aligned.)
- *
- * LWLock is between 16 and 32 bytes on all known platforms, so these two
- * cases are sufficient.
- */
-#define LWLOCK_PADDED_SIZE (sizeof(LWLock) <= 16 ? 16 : 32)
-
-typedef union LWLockPadded
-{
- LWLock lock;
- char pad[LWLOCK_PADDED_SIZE];
-} LWLockPadded;
-
-/*
- * This points to the array of LWLocks in shared memory. Backends inherit
- * the pointer by fork from the postmaster (except in the EXEC_BACKEND case,
- * where we have special measures to pass it down).
- */
-NON_EXEC_STATIC LWLockPadded *LWLockArray = NULL;
-
-
-/*
- * We use this structure to keep track of locked LWLocks for release
- * during error recovery. The maximum size could be determined at runtime
- * if necessary, but it seems unlikely that more than a few locks could
- * ever be held simultaneously.
- */
-#define MAX_SIMUL_LWLOCKS 100
-
-static int num_held_lwlocks = 0;
-static LWLockId held_lwlocks[MAX_SIMUL_LWLOCKS];
-
-static int lock_addin_request = 0;
-static bool lock_addin_request_allowed = true;
+#define LWLockPointer(lockid) \
+ (AssertMacro(FlexLockArray[lockid].flex.locktype == FLEXLOCK_TYPE_LWLOCK), \
+ (volatile LWLock *) &FlexLockArray[lockid])
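The flex member and LWLockPointer() presuppose a common header shared by every FlexLock type, declared in storage/flexlock_internals.h (not shown in this excerpt). A sketch of the fields this file actually touches; the real layout may differ:

	/* Assumed common FlexLock header; see flexlock_internals.h. */
	typedef struct FlexLock
	{
		char		locktype;	/* FLEXLOCK_TYPE_* identifier */
		slock_t		mutex;		/* protects the lock and its wait queue */
		bool		releaseOK;	/* T if ok to release waiters */
		PGPROC	   *head;		/* head of list of waiting PGPROCs */
		PGPROC	   *tail;		/* tail of list (undefined if head is NULL) */
	} FlexLock;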
#ifdef LWLOCK_STATS
static int counts_for_pid = 0;
#endif
#ifdef LOCK_DEBUG
-bool Trace_lwlocks = false;
-
inline static void
-PRINT_LWDEBUG(const char *where, LWLockId lockid, const volatile LWLock *lock)
+PRINT_LWDEBUG(const char *where, FlexLockId lockid, const volatile LWLock *lock)
{
- if (Trace_lwlocks)
+ if (Trace_flexlocks)
elog(LOG, "%s(%d): excl %d shared %d head %p rOK %d",
where, (int) lockid,
- (int) lock->exclusive, lock->shared, lock->head,
- (int) lock->releaseOK);
-}
-
-inline static void
-LOG_LWDEBUG(const char *where, LWLockId lockid, const char *msg)
-{
- if (Trace_lwlocks)
- elog(LOG, "%s(%d): %s", where, (int) lockid, msg);
+ (int) lock->exclusive, lock->shared, lock->flex.head,
+ (int) lock->flex.releaseOK);
}
#else /* not LOCK_DEBUG */
#define PRINT_LWDEBUG(a,b,c)
-#define LOG_LWDEBUG(a,b,c)
#endif /* LOCK_DEBUG */
#ifdef LWLOCK_STATS
print_lwlock_stats(int code, Datum arg)
{
int i;
- int *LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
- int numLocks = LWLockCounter[1];
+ int *FlexLockCounter = (int *) ((char *) FlexLockArray - 2 * sizeof(int));
+ int numLocks = FlexLockCounter[1];
/* Grab an LWLock to keep different backends from mixing reports */
LWLockAcquire(0, LW_EXCLUSIVE);
}
#endif /* LWLOCK_STATS */
-
-/*
- * Compute number of LWLocks to allocate.
- */
-int
-NumLWLocks(void)
-{
- int numLocks;
-
- /*
- * Possibly this logic should be spread out among the affected modules,
- * the same way that shmem space estimation is done. But for now, there
- * are few enough users of LWLocks that we can get away with just keeping
- * the knowledge here.
- */
-
- /* Predefined LWLocks */
- numLocks = (int) NumFixedLWLocks;
-
- /* bufmgr.c needs two for each shared buffer */
- numLocks += 2 * NBuffers;
-
- /* proc.c needs one for each backend or auxiliary process */
- numLocks += MaxBackends + NUM_AUXILIARY_PROCS;
-
- /* clog.c needs one per CLOG buffer */
- numLocks += NUM_CLOG_BUFFERS;
-
- /* subtrans.c needs one per SubTrans buffer */
- numLocks += NUM_SUBTRANS_BUFFERS;
-
- /* multixact.c needs two SLRU areas */
- numLocks += NUM_MXACTOFFSET_BUFFERS + NUM_MXACTMEMBER_BUFFERS;
-
- /* async.c needs one per Async buffer */
- numLocks += NUM_ASYNC_BUFFERS;
-
- /* predicate.c needs one per old serializable xid buffer */
- numLocks += NUM_OLDSERXID_BUFFERS;
-
- /*
- * Add any requested by loadable modules; for backwards-compatibility
- * reasons, allocate at least NUM_USER_DEFINED_LWLOCKS of them even if
- * there are no explicit requests.
- */
- lock_addin_request_allowed = false;
- numLocks += Max(lock_addin_request, NUM_USER_DEFINED_LWLOCKS);
-
- return numLocks;
-}
-
-
-/*
- * RequestAddinLWLocks
- * Request that extra LWLocks be allocated for use by
- * a loadable module.
- *
- * This is only useful if called from the _PG_init hook of a library that
- * is loaded into the postmaster via shared_preload_libraries. Once
- * shared memory has been allocated, calls will be ignored. (We could
- * raise an error, but it seems better to make it a no-op, so that
- * libraries containing such calls can be reloaded if needed.)
- */
-void
-RequestAddinLWLocks(int n)
-{
- if (IsUnderPostmaster || !lock_addin_request_allowed)
- return; /* too late */
- lock_addin_request += n;
-}
-
-
-/*
- * Compute shmem space needed for LWLocks.
- */
-Size
-LWLockShmemSize(void)
-{
- Size size;
- int numLocks = NumLWLocks();
-
- /* Space for the LWLock array. */
- size = mul_size(numLocks, sizeof(LWLockPadded));
-
- /* Space for dynamic allocation counter, plus room for alignment. */
- size = add_size(size, 2 * sizeof(int) + LWLOCK_PADDED_SIZE);
-
- return size;
-}
-
-
-/*
- * Allocate shmem space for LWLocks and initialize the locks.
- */
-void
-CreateLWLocks(void)
-{
- int numLocks = NumLWLocks();
- Size spaceLocks = LWLockShmemSize();
- LWLockPadded *lock;
- int *LWLockCounter;
- char *ptr;
- int id;
-
- /* Allocate space */
- ptr = (char *) ShmemAlloc(spaceLocks);
-
- /* Leave room for dynamic allocation counter */
- ptr += 2 * sizeof(int);
-
- /* Ensure desired alignment of LWLock array */
- ptr += LWLOCK_PADDED_SIZE - ((uintptr_t) ptr) % LWLOCK_PADDED_SIZE;
-
- LWLockArray = (LWLockPadded *) ptr;
-
- /*
- * Initialize all LWLocks to "unlocked" state
- */
- for (id = 0, lock = LWLockArray; id < numLocks; id++, lock++)
- {
- SpinLockInit(&lock->lock.mutex);
- lock->lock.releaseOK = true;
- lock->lock.exclusive = 0;
- lock->lock.shared = 0;
- lock->lock.head = NULL;
- lock->lock.tail = NULL;
- }
-
- /*
- * Initialize the dynamic-allocation counter, which is stored just before
- * the first LWLock.
- */
- LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
- LWLockCounter[0] = (int) NumFixedLWLocks;
- LWLockCounter[1] = numLocks;
-}
-
-
/*
- * LWLockAssign - assign a dynamically-allocated LWLock number
- *
- * We interlock this using the same spinlock that is used to protect
- * ShmemAlloc(). Interlocking is not really necessary during postmaster
- * startup, but it is needed if any user-defined code tries to allocate
- * LWLocks after startup.
+ * LWLockAssign - initialize a new lwlock and return its ID
*/
-LWLockId
+FlexLockId
LWLockAssign(void)
{
- LWLockId result;
-
- /* use volatile pointer to prevent code rearrangement */
- volatile int *LWLockCounter;
-
- LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
- SpinLockAcquire(ShmemLock);
- if (LWLockCounter[0] >= LWLockCounter[1])
- {
- SpinLockRelease(ShmemLock);
- elog(ERROR, "no more LWLockIds available");
- }
- result = (LWLockId) (LWLockCounter[0]++);
- SpinLockRelease(ShmemLock);
- return result;
+ return FlexLockAssign(FLEXLOCK_TYPE_LWLOCK);
}
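Dynamically allocated LWLocks keep the old allocation pattern; only the returned ID type is new. A hypothetical extension shmem-startup hook would still look like this:

	/* Hypothetical extension code; only the ID type changes. */
	static FlexLockId my_lock;

	static void
	my_shmem_startup(void)
	{
		/* ... ShmemInitStruct() etc. ... */
		my_lock = LWLockAssign();	/* now returns a FlexLockId */
	}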
-
/*
* LWLockAcquire - acquire a lightweight lock in the specified mode
*
* Side effect: cancel/die interrupts are held off until lock release.
*/
void
-LWLockAcquire(LWLockId lockid, LWLockMode mode)
+LWLockAcquire(FlexLockId lockid, LWLockMode mode)
{
- volatile LWLock *lock = &(LWLockArray[lockid].lock);
+ volatile LWLock *lock = LWLockPointer(lockid);
PGPROC *proc = MyProc;
bool retry = false;
int extraWaits = 0;
/* Set up local count state first time through in a given process */
if (counts_for_pid != MyProcPid)
{
- int *LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
- int numLocks = LWLockCounter[1];
+ int *FlexLockCounter = (int *) ((char *) FlexLockArray - 2 * sizeof(int));
+ int numLocks = FlexLockCounter[1];
sh_acquire_counts = calloc(numLocks, sizeof(int));
ex_acquire_counts = calloc(numLocks, sizeof(int));
*/
Assert(!(proc == NULL && IsUnderPostmaster));
- /* Ensure we will have room to remember the lock */
- if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
- elog(ERROR, "too many LWLocks taken");
-
/*
* Lock out cancel/die interrupts until we exit the code section protected
* by the LWLock. This ensures that interrupts will not interfere with
bool mustwait;
/* Acquire mutex. Time spent holding mutex should be short! */
- SpinLockAcquire(&lock->mutex);
+ SpinLockAcquire(&lock->flex.mutex);
/* If retrying, allow LWLockRelease to release waiters again */
if (retry)
- lock->releaseOK = true;
+ lock->flex.releaseOK = true;
/* If I can get the lock, do so quickly. */
if (mode == LW_EXCLUSIVE)
if (!mustwait)
break; /* got the lock */
- /*
- * Add myself to wait queue.
- *
- * If we don't have a PGPROC structure, there's no way to wait. This
- * should never occur, since MyProc should only be null during shared
- * memory initialization.
- */
- if (proc == NULL)
- elog(PANIC, "cannot wait without a PGPROC structure");
-
- proc->lwWaiting = true;
- proc->lwExclusive = (mode == LW_EXCLUSIVE);
- proc->lwWaitLink = NULL;
- if (lock->head == NULL)
- lock->head = proc;
- else
- lock->tail->lwWaitLink = proc;
- lock->tail = proc;
+ /* Add myself to wait queue. */
+ FlexLockJoinWaitQueue(lock, (int) mode);
/* Can release the mutex now */
- SpinLockRelease(&lock->mutex);
-
- /*
- * Wait until awakened.
- *
- * Since we share the process wait semaphore with the regular lock
- * manager and ProcWaitForSignal, and we may need to acquire an LWLock
- * while one of those is pending, it is possible that we get awakened
- * for a reason other than being signaled by LWLockRelease. If so,
- * loop back and wait again. Once we've gotten the LWLock,
- * re-increment the sema by the number of additional signals received,
- * so that the lock manager or signal manager will see the received
- * signal when it next waits.
- */
- LOG_LWDEBUG("LWLockAcquire", lockid, "waiting");
+ SpinLockRelease(&lock->flex.mutex);
+
+ /* Wait until awakened. */
+ extraWaits += FlexLockWait(lockid, mode);
#ifdef LWLOCK_STATS
block_counts[lockid]++;
#endif
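FlexLockWait, called just above, absorbs the semaphore loop deleted below: it blocks on the process semaphore until the waker sets flWaitResult, and returns how many unrelated wakeups it absorbed so the caller can re-increment the semaphore afterward. A hedged sketch of that contract (the real code lives in flexlock.c and also fires the wait-start/wait-done probes):

	/* Sketch of FlexLockWait's contract; the real code is in flexlock.c. */
	int
	FlexLockWait(FlexLockId lockid, int mode)
	{
		int			extraWaits = 0;

		TRACE_POSTGRESQL_FLEXLOCK_WAIT_START(lockid, mode);

		for (;;)
		{
			/* "false" means cannot accept cancel/die interrupt here */
			PGSemaphoreLock(&MyProc->sem, false);
			if (MyProc->flWaitResult)
				break;			/* the lock releaser woke us */
			extraWaits++;		/* absorbed an unrelated wakeup */
		}

		TRACE_POSTGRESQL_FLEXLOCK_WAIT_DONE(lockid, mode);

		return extraWaits;
	}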
- TRACE_POSTGRESQL_LWLOCK_WAIT_START(lockid, mode);
-
- for (;;)
- {
- /* "false" means cannot accept cancel/die interrupt here. */
- PGSemaphoreLock(&proc->sem, false);
- if (!proc->lwWaiting)
- break;
- extraWaits++;
- }
-
- TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(lockid, mode);
-
- LOG_LWDEBUG("LWLockAcquire", lockid, "awakened");
-
/* Now loop back and try to acquire lock again. */
retry = true;
}
/* We are done updating shared state of the lock itself. */
- SpinLockRelease(&lock->mutex);
+ SpinLockRelease(&lock->flex.mutex);
- TRACE_POSTGRESQL_LWLOCK_ACQUIRE(lockid, mode);
+ TRACE_POSTGRESQL_FLEXLOCK_ACQUIRE(lockid, mode);
/* Add lock to list of locks held by this backend */
- held_lwlocks[num_held_lwlocks++] = lockid;
+ FlexLockRemember(lockid);
/*
* Fix the process wait semaphore's count for any absorbed wakeups.
* If successful, cancel/die interrupts are held off until lock release.
*/
bool
-LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
+LWLockConditionalAcquire(FlexLockId lockid, LWLockMode mode)
{
- volatile LWLock *lock = &(LWLockArray[lockid].lock);
+ volatile LWLock *lock = LWLockPointer(lockid);
bool mustwait;
PRINT_LWDEBUG("LWLockConditionalAcquire", lockid, lock);
- /* Ensure we will have room to remember the lock */
- if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
- elog(ERROR, "too many LWLocks taken");
-
/*
* Lock out cancel/die interrupts until we exit the code section protected
* by the LWLock. This ensures that interrupts will not interfere with
HOLD_INTERRUPTS();
/* Acquire mutex. Time spent holding mutex should be short! */
- SpinLockAcquire(&lock->mutex);
+ SpinLockAcquire(&lock->flex.mutex);
/* If I can get the lock, do so quickly. */
if (mode == LW_EXCLUSIVE)
}
/* We are done updating shared state of the lock itself. */
- SpinLockRelease(&lock->mutex);
+ SpinLockRelease(&lock->flex.mutex);
if (mustwait)
{
/* Failed to get lock, so release interrupt holdoff */
RESUME_INTERRUPTS();
- LOG_LWDEBUG("LWLockConditionalAcquire", lockid, "failed");
- TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL(lockid, mode);
+ FlexLockDebug("LWLockConditionalAcquire", lockid, "failed");
+ TRACE_POSTGRESQL_FLEXLOCK_CONDACQUIRE_FAIL(lockid, mode);
}
else
{
/* Add lock to list of locks held by this backend */
- held_lwlocks[num_held_lwlocks++] = lockid;
- TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE(lockid, mode);
+ FlexLockRemember(lockid);
+ TRACE_POSTGRESQL_FLEXLOCK_CONDACQUIRE(lockid, mode);
}
return !mustwait;
* LWLockRelease - release a previously acquired lock
*/
void
-LWLockRelease(LWLockId lockid)
+LWLockRelease(FlexLockId lockid)
{
- volatile LWLock *lock = &(LWLockArray[lockid].lock);
+ volatile LWLock *lock = LWLockPointer(lockid);
PGPROC *head;
PGPROC *proc;
- int i;
PRINT_LWDEBUG("LWLockRelease", lockid, lock);
- /*
- * Remove lock from list of locks held. Usually, but not always, it will
- * be the latest-acquired lock; so search array backwards.
- */
- for (i = num_held_lwlocks; --i >= 0;)
- {
- if (lockid == held_lwlocks[i])
- break;
- }
- if (i < 0)
- elog(ERROR, "lock %d is not held", (int) lockid);
- num_held_lwlocks--;
- for (; i < num_held_lwlocks; i++)
- held_lwlocks[i] = held_lwlocks[i + 1];
+ FlexLockForget(lockid);
/* Acquire mutex. Time spent holding mutex should be short! */
- SpinLockAcquire(&lock->mutex);
+ SpinLockAcquire(&lock->flex.mutex);
/* Release my hold on lock */
if (lock->exclusive > 0)
* if someone has already awakened waiters that haven't yet acquired the
* lock.
*/
- head = lock->head;
+ head = lock->flex.head;
if (head != NULL)
{
- if (lock->exclusive == 0 && lock->shared == 0 && lock->releaseOK)
+ if (lock->exclusive == 0 && lock->shared == 0 && lock->flex.releaseOK)
{
/*
* Remove the to-be-awakened PGPROCs from the queue. If the front
* as many waiters as want shared access.
*/
proc = head;
- if (!proc->lwExclusive)
+ if (proc->flWaitMode != LW_EXCLUSIVE)
{
- while (proc->lwWaitLink != NULL &&
- !proc->lwWaitLink->lwExclusive)
- proc = proc->lwWaitLink;
+ while (proc->flWaitLink != NULL &&
+ proc->flWaitLink->flWaitMode != LW_EXCLUSIVE)
+ proc = proc->flWaitLink;
}
/* proc is now the last PGPROC to be released */
- lock->head = proc->lwWaitLink;
- proc->lwWaitLink = NULL;
+ lock->flex.head = proc->flWaitLink;
+ proc->flWaitLink = NULL;
/* prevent additional wakeups until retryer gets to run */
- lock->releaseOK = false;
+ lock->flex.releaseOK = false;
}
else
{
}
/* We are done updating shared state of the lock itself. */
- SpinLockRelease(&lock->mutex);
+ SpinLockRelease(&lock->flex.mutex);
- TRACE_POSTGRESQL_LWLOCK_RELEASE(lockid);
+ TRACE_POSTGRESQL_FLEXLOCK_RELEASE(lockid);
/*
* Awaken any waiters I removed from the queue.
*/
while (head != NULL)
{
- LOG_LWDEBUG("LWLockRelease", lockid, "release waiter");
+ FlexLockDebug("LWLockRelease", lockid, "release waiter");
proc = head;
- head = proc->lwWaitLink;
- proc->lwWaitLink = NULL;
- proc->lwWaiting = false;
+ head = proc->flWaitLink;
+ proc->flWaitLink = NULL;
+ proc->flWaitResult = 1; /* any non-zero value will do */
PGSemaphoreUnlock(&proc->sem);
}
*/
RESUME_INTERRUPTS();
}
-
-
-/*
- * LWLockReleaseAll - release all currently-held locks
- *
- * Used to clean up after ereport(ERROR). An important difference between this
- * function and retail LWLockRelease calls is that InterruptHoldoffCount is
- * unchanged by this operation. This is necessary since InterruptHoldoffCount
- * has been set to an appropriate level earlier in error recovery. We could
- * decrement it below zero if we allow it to drop for each released lock!
- */
-void
-LWLockReleaseAll(void)
-{
- while (num_held_lwlocks > 0)
- {
- HOLD_INTERRUPTS(); /* match the upcoming RESUME_INTERRUPTS */
-
- LWLockRelease(held_lwlocks[num_held_lwlocks - 1]);
- }
-}
-
-
-/*
- * LWLockHeldByMe - test whether my process currently holds a lock
- *
- * This is meant as debug support only. We do not distinguish whether the
- * lock is held shared or exclusive.
- */
-bool
-LWLockHeldByMe(LWLockId lockid)
-{
- int i;
-
- for (i = 0; i < num_held_lwlocks; i++)
- {
- if (held_lwlocks[i] == lockid)
- return true;
- }
- return false;
-}
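LWLockReleaseAll() and LWLockHeldByMe() are deleted here because type-neutral replacements, FlexLockReleaseAll() and FlexLockHeldByMe(), now live in flexlock.c alongside the held-locks bookkeeping (FlexLockRemember/FlexLockForget above). A sketch of the held-by-me side, assuming flexlock.c keeps the same array scheme under new names:

	/* Sketch; real definitions are in flexlock.c. */
	#define MAX_SIMUL_FLEXLOCKS 100		/* analog of the deleted MAX_SIMUL_LWLOCKS */

	static int		num_held_flexlocks = 0;
	static FlexLockId held_flexlocks[MAX_SIMUL_FLEXLOCKS];

	bool
	FlexLockHeldByMe(FlexLockId lockid)
	{
		int			i;

		for (i = 0; i < num_held_flexlocks; i++)
		{
			if (held_flexlocks[i] == lockid)
				return true;
		}
		return false;
	}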
#define PredicateLockHashPartition(hashcode) \
((hashcode) % NUM_PREDICATELOCK_PARTITIONS)
#define PredicateLockHashPartitionLock(hashcode) \
- ((LWLockId) (FirstPredicateLockMgrLock + PredicateLockHashPartition(hashcode)))
+ ((FlexLockId) (FirstPredicateLockMgrLock + PredicateLockHashPartition(hashcode)))
#define NPREDICATELOCKTARGETENTS() \
mul_size(max_predicate_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
{
PREDICATELOCKTARGETTAG targettag;
uint32 targettaghash;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
PREDICATELOCKTARGET *target;
SET_PREDICATELOCKTARGETTAG_PAGE(targettag,
{
bool found;
- Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
+ Assert(FlexLockHeldByMe(SerializablePredicateLockListLock));
if (!lockheld)
LWLockAcquire(ScratchPartitionLock, LW_EXCLUSIVE);
{
bool found;
- Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
+ Assert(FlexLockHeldByMe(SerializablePredicateLockListLock));
if (!lockheld)
LWLockAcquire(ScratchPartitionLock, LW_EXCLUSIVE);
{
PREDICATELOCKTARGET *rmtarget;
- Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
+ Assert(FlexLockHeldByMe(SerializablePredicateLockListLock));
/* Can't remove it until no locks at this target. */
if (!SHMQueueEmpty(&target->predicateLocks))
if (TargetTagIsCoveredBy(oldtargettag, *newtargettag))
{
uint32 oldtargettaghash;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
PREDICATELOCK *rmpredlock;
oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag);
PREDICATELOCKTARGET *target;
PREDICATELOCKTAG locktag;
PREDICATELOCK *lock;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
bool found;
partitionLock = PredicateLockHashPartitionLock(targettaghash);
PREDICATELOCK *nextpredlock;
bool found;
- Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
- Assert(LWLockHeldByMe(PredicateLockHashPartitionLock(targettaghash)));
+ Assert(FlexLockHeldByMe(SerializablePredicateLockListLock));
+ Assert(FlexLockHeldByMe(PredicateLockHashPartitionLock(targettaghash)));
predlock = (PREDICATELOCK *)
SHMQueueNext(&(target->predicateLocks),
bool removeOld)
{
uint32 oldtargettaghash;
- LWLockId oldpartitionLock;
+ FlexLockId oldpartitionLock;
PREDICATELOCKTARGET *oldtarget;
uint32 newtargettaghash;
- LWLockId newpartitionLock;
+ FlexLockId newpartitionLock;
bool found;
bool outOfShmem = false;
- Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
+ Assert(FlexLockHeldByMe(SerializablePredicateLockListLock));
oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag);
newtargettaghash = PredicateLockTargetTagHashCode(&newtargettag);
{
SERIALIZABLEXACT *sxact;
- Assert(LWLockHeldByMe(SerializableXactHashLock));
+ Assert(FlexLockHeldByMe(SerializableXactHashLock));
PredXact->SxactGlobalXmin = InvalidTransactionId;
PredXact->SxactGlobalXminCount = 0;
PREDICATELOCKTARGET *target;
PREDICATELOCKTARGETTAG targettag;
uint32 targettaghash;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
tag = predlock->tag;
target = tag.myTarget;
Assert(sxact != NULL);
Assert(SxactIsRolledBack(sxact) || SxactIsCommitted(sxact));
- Assert(LWLockHeldByMe(SerializableFinishedListLock));
+ Assert(FlexLockHeldByMe(SerializableFinishedListLock));
/*
* First release all the predicate locks held by this xact (or transfer
PREDICATELOCKTARGET *target;
PREDICATELOCKTARGETTAG targettag;
uint32 targettaghash;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
nextpredlock = (PREDICATELOCK *)
SHMQueueNext(&(sxact->predicateLocks),
CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
{
uint32 targettaghash;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
PREDICATELOCKTARGET *target;
PREDICATELOCK *predlock;
PREDICATELOCK *mypredlock = NULL;
bool failure;
RWConflict conflict;
- Assert(LWLockHeldByMe(SerializableXactHashLock));
+ Assert(FlexLockHeldByMe(SerializableXactHashLock));
failure = false;
#include "storage/pmsignal.h"
#include "storage/proc.h"
#include "storage/procarray.h"
+#include "storage/procarraylock.h"
#include "storage/procsignal.h"
#include "storage/spin.h"
#include "utils/timestamp.h"
/* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
if (IsAutoVacuumWorkerProcess())
MyProc->vacuumFlags |= PROC_IS_AUTOVACUUM;
- MyProc->lwWaiting = false;
- MyProc->lwExclusive = false;
- MyProc->lwWaitLink = NULL;
+ MyProc->flWaitResult = 0;
+ MyProc->flWaitMode = 0;
+ MyProc->flWaitLink = NULL;
MyProc->waitLock = NULL;
MyProc->waitProcLock = NULL;
#ifdef USE_ASSERT_CHECKING
MyProc->roleId = InvalidOid;
MyProc->inCommit = false;
MyProc->vacuumFlags = 0;
- MyProc->lwWaiting = false;
- MyProc->lwExclusive = false;
- MyProc->lwWaitLink = NULL;
+ MyProc->flWaitMode = 0;
+ MyProc->flWaitResult = 0;
+ MyProc->flWaitLink = NULL;
MyProc->waitLock = NULL;
MyProc->waitProcLock = NULL;
#ifdef USE_ASSERT_CHECKING
void
LockWaitCancel(void)
{
- LWLockId partitionLock;
+ FlexLockId partitionLock;
/* Nothing to do if we weren't waiting for a lock */
if (lockAwaited == NULL)
#endif
/*
- * Release any LW locks I am holding. There really shouldn't be any, but
- * it's cheap to check again before we cut the knees off the LWLock
+ * Release any flex locks I am holding. There really shouldn't be any, but
+ * it's cheap to check again before we cut the knees off the flex lock
* facility by releasing our PGPROC ...
*/
- LWLockReleaseAll();
+ FlexLockReleaseAll();
/* Release ownership of the process's latch, too */
DisownLatch(&MyProc->procLatch);
Assert(MyProc == auxproc);
- /* Release any LW locks I am holding (see notes above) */
- LWLockReleaseAll();
+ /* Release any flex locks I am holding (see notes above) */
+ FlexLockReleaseAll();
/* Release ownership of the process's latch, too */
DisownLatch(&MyProc->procLatch);
LOCK *lock = locallock->lock;
PROCLOCK *proclock = locallock->proclock;
uint32 hashcode = locallock->hashcode;
- LWLockId partitionLock = LockHashPartitionLock(hashcode);
+ FlexLockId partitionLock = LockHashPartitionLock(hashcode);
PROC_QUEUE *waitQueue = &(lock->waitProcs);
LOCKMASK myHeldLocks = MyProc->heldLocks;
bool early_deadlock = false;
{
PGPROC *autovac = GetBlockingAutoVacuumPgproc();
- LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+ ProcArrayLockAcquire(PAL_EXCLUSIVE);
/*
* Only do it if the worker is not working to protect against Xid
pid);
/* don't hold the lock across the kill() syscall */
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
/* send the autovacuum worker Back to Old Kent Road */
if (kill(pid, SIGINT) < 0)
}
}
else
- LWLockRelease(ProcArrayLock);
+ ProcArrayLockRelease();
/* prevent signal from being resent more than once */
allow_autovacuum_cancel = false;
INTENTIONALLY_NOT_INCLUDED="autocommit debug_deadlocks \
is_superuser lc_collate lc_ctype lc_messages lc_monetary lc_numeric lc_time \
pre_auth_delay role seed server_encoding server_version server_version_int \
-session_authorization trace_lock_oidmin trace_lock_table trace_locks trace_lwlocks \
+session_authorization trace_lock_oidmin trace_lock_table trace_locks trace_flexlocks \
trace_notify trace_userlocks transaction_isolation transaction_read_only \
zero_damaged_pages"
#include "replication/walreceiver.h"
#include "replication/walsender.h"
#include "storage/bufmgr.h"
+#include "storage/flexlock_internals.h"
#include "storage/standby.h"
#include "storage/fd.h"
#include "storage/predicate.h"
NULL, NULL, NULL
},
{
- {"trace_lwlocks", PGC_SUSET, DEVELOPER_OPTIONS,
+ {"trace_flexlocks", PGC_SUSET, DEVELOPER_OPTIONS,
gettext_noop("No description available."),
NULL,
GUC_NOT_IN_SAMPLE
},
- &Trace_lwlocks,
+ &Trace_flexlocks,
false,
NULL, NULL, NULL
},
* in probe definitions, as they cause compilation errors on Mac OS X 10.5.
*/
#define LocalTransactionId unsigned int
-#define LWLockId int
-#define LWLockMode int
+#define FlexLockId int
+#define FlexLockMode int
#define LOCKMODE int
#define BlockNumber unsigned int
#define Oid unsigned int
probe transaction__commit(LocalTransactionId);
probe transaction__abort(LocalTransactionId);
- probe lwlock__acquire(LWLockId, LWLockMode);
- probe lwlock__release(LWLockId);
- probe lwlock__wait__start(LWLockId, LWLockMode);
- probe lwlock__wait__done(LWLockId, LWLockMode);
- probe lwlock__condacquire(LWLockId, LWLockMode);
- probe lwlock__condacquire__fail(LWLockId, LWLockMode);
+ probe flexlock__acquire(FlexLockId, FlexLockMode);
+ probe flexlock__release(FlexLockId);
+ probe flexlock__wait__start(FlexLockId, FlexLockMode);
+ probe flexlock__wait__done(FlexLockId, FlexLockMode);
+ probe flexlock__condacquire(FlexLockId, FlexLockMode);
+ probe flexlock__condacquire__fail(FlexLockId, FlexLockMode);
probe lock__wait__start(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, LOCKMODE);
probe lock__wait__done(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, LOCKMODE);
*/
typedef struct SlruSharedData
{
- LWLockId ControlLock;
+ FlexLockId ControlLock;
/* Number of buffers managed by this SLRU structure */
int num_slots;
bool *page_dirty;
int *page_number;
int *page_lru_count;
- LWLockId *buffer_locks;
+ FlexLockId *buffer_locks;
/*
* Optional array of WAL flush LSNs associated with entries in the SLRU
extern Size SimpleLruShmemSize(int nslots, int nlsns);
extern void SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
- LWLockId ctllock, const char *subdir);
+ FlexLockId ctllock, const char *subdir);
extern int SimpleLruZeroPage(SlruCtl ctl, int pageno);
extern int SimpleLruReadPage(SlruCtl ctl, int pageno, bool write_ok,
TransactionId xid);
#define SEQ_MINVALUE (-SEQ_MAXVALUE)
/*
- * Number of spare LWLocks to allocate for user-defined add-on code.
+ * Number of spare FlexLocks to allocate for user-defined add-on code.
*/
-#define NUM_USER_DEFINED_LWLOCKS 4
+#define NUM_USER_DEFINED_FLEXLOCKS 4
/*
* Define this if you want to allow the lo_import and lo_export SQL
#define BufTableHashPartition(hashcode) \
((hashcode) % NUM_BUFFER_PARTITIONS)
#define BufMappingPartitionLock(hashcode) \
- ((LWLockId) (FirstBufMappingLock + BufTableHashPartition(hashcode)))
+ ((FlexLockId) (FirstBufMappingLock + BufTableHashPartition(hashcode)))
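The partition-lock macros now produce FlexLockIds, but their callers are untouched; the bufmgr.c lookup pattern (see the BufferAlloc fragment earlier) remains, sketched here:

	/* Unchanged caller pattern, cf. BufferAlloc; sketch only. */
	BufferTag	newTag;			/* identity of requested block */
	uint32		newHash;
	FlexLockId	newPartitionLock;
	int			buf_id;

	/* ... fill in newTag ... */
	newHash = BufTableHashCode(&newTag);
	newPartitionLock = BufMappingPartitionLock(newHash);
	LWLockAcquire(newPartitionLock, LW_SHARED);
	buf_id = BufTableLookup(&newTag, newHash);
	LWLockRelease(newPartitionLock);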
/*
* BufferDesc -- shared descriptor/state data for a single shared buffer.
int buf_id; /* buffer's index number (from 0) */
int freeNext; /* link in freelist chain */
- LWLockId io_in_progress_lock; /* to wait for I/O to complete */
- LWLockId content_lock; /* to lock access to buffer contents */
+ FlexLockId io_in_progress_lock; /* to wait for I/O to complete */
+ FlexLockId content_lock; /* to lock access to buffer contents */
} BufferDesc;
#define BufferDescriptorGetBuffer(bdesc) ((bdesc)->buf_id + 1)
#define LockHashPartition(hashcode) \
((hashcode) % NUM_LOCK_PARTITIONS)
#define LockHashPartitionLock(hashcode) \
- ((LWLockId) (FirstLockMgrLock + LockHashPartition(hashcode)))
+ ((FlexLockId) (FirstLockMgrLock + LockHashPartition(hashcode)))
/*
#ifndef LWLOCK_H
#define LWLOCK_H
-/*
- * It's a bit odd to declare NUM_BUFFER_PARTITIONS and NUM_LOCK_PARTITIONS
- * here, but we need them to set up enum LWLockId correctly, and having
- * this file include lock.h or bufmgr.h would be backwards.
- */
-
-/* Number of partitions of the shared buffer mapping hashtable */
-#define NUM_BUFFER_PARTITIONS 16
-
-/* Number of partitions the shared lock tables are divided into */
-#define LOG2_NUM_LOCK_PARTITIONS 4
-#define NUM_LOCK_PARTITIONS (1 << LOG2_NUM_LOCK_PARTITIONS)
-
-/* Number of partitions the shared predicate lock tables are divided into */
-#define LOG2_NUM_PREDICATELOCK_PARTITIONS 4
-#define NUM_PREDICATELOCK_PARTITIONS (1 << LOG2_NUM_PREDICATELOCK_PARTITIONS)
-
-/*
- * We have a number of predefined LWLocks, plus a bunch of LWLocks that are
- * dynamically assigned (e.g., for shared buffers). The LWLock structures
- * live in shared memory (since they contain shared data) and are identified
- * by values of this enumerated type. We abuse the notion of an enum somewhat
- * by allowing values not listed in the enum declaration to be assigned.
- * The extra value MaxDynamicLWLock is there to keep the compiler from
- * deciding that the enum can be represented as char or short ...
- *
- * If you remove a lock, please replace it with a placeholder. This retains
- * the lock numbering, which is helpful for DTrace and other external
- * debugging scripts.
- */
-typedef enum LWLockId
-{
- BufFreelistLock,
- ShmemIndexLock,
- OidGenLock,
- XidGenLock,
- ProcArrayLock,
- SInvalReadLock,
- SInvalWriteLock,
- WALInsertLock,
- WALWriteLock,
- ControlFileLock,
- CheckpointLock,
- CLogControlLock,
- SubtransControlLock,
- MultiXactGenLock,
- MultiXactOffsetControlLock,
- MultiXactMemberControlLock,
- RelCacheInitLock,
- BgWriterCommLock,
- TwoPhaseStateLock,
- TablespaceCreateLock,
- BtreeVacuumLock,
- AddinShmemInitLock,
- AutovacuumLock,
- AutovacuumScheduleLock,
- SyncScanLock,
- RelationMappingLock,
- AsyncCtlLock,
- AsyncQueueLock,
- SerializableXactHashLock,
- SerializableFinishedListLock,
- SerializablePredicateLockListLock,
- OldSerXidLock,
- SyncRepLock,
- /* Individual lock IDs end here */
- FirstBufMappingLock,
- FirstLockMgrLock = FirstBufMappingLock + NUM_BUFFER_PARTITIONS,
- FirstPredicateLockMgrLock = FirstLockMgrLock + NUM_LOCK_PARTITIONS,
-
- /* must be last except for MaxDynamicLWLock: */
- NumFixedLWLocks = FirstPredicateLockMgrLock + NUM_PREDICATELOCK_PARTITIONS,
-
- MaxDynamicLWLock = 1000000000
-} LWLockId;
-
+#include "storage/flexlock.h"
typedef enum LWLockMode
{
LW_SHARED
} LWLockMode;
-
-#ifdef LOCK_DEBUG
-extern bool Trace_lwlocks;
-#endif
-
-extern LWLockId LWLockAssign(void);
-extern void LWLockAcquire(LWLockId lockid, LWLockMode mode);
-extern bool LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode);
-extern void LWLockRelease(LWLockId lockid);
-extern void LWLockReleaseAll(void);
-extern bool LWLockHeldByMe(LWLockId lockid);
-
-extern int NumLWLocks(void);
-extern Size LWLockShmemSize(void);
-extern void CreateLWLocks(void);
-
-extern void RequestAddinLWLocks(int n);
+extern FlexLockId LWLockAssign(void);
+extern void LWLockAcquire(FlexLockId lockid, LWLockMode mode);
+extern bool LWLockConditionalAcquire(FlexLockId lockid, LWLockMode mode);
+extern void LWLockRelease(FlexLockId lockid);
#endif /* LWLOCK_H */
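The fixed-lock enum removed above is assumed to reappear essentially unchanged as FlexLockId in the new storage/flexlock.h, so every named lock (ProcArrayLock, CLogControlLock, the FirstBufMappingLock/FirstLockMgrLock/FirstPredicateLockMgrLock partition bases, and so on) keeps its identity. A rough sketch of the assumed shape; the member names past the partition bases are guesses:

	/* Assumed shape of storage/flexlock.h (not shown in this excerpt). */
	typedef enum FlexLockId
	{
		BufFreelistLock,
		ShmemIndexLock,
		/* ... the other fixed IDs from the deleted LWLockId enum ... */
		FirstBufMappingLock,
		FirstLockMgrLock = FirstBufMappingLock + NUM_BUFFER_PARTITIONS,
		FirstPredicateLockMgrLock = FirstLockMgrLock + NUM_LOCK_PARTITIONS,

		/* must be last except for MaxDynamicFlexLock: */
		NumFixedFlexLocks = FirstPredicateLockMgrLock + NUM_PREDICATELOCK_PARTITIONS,

		MaxDynamicFlexLock = 1000000000
	} FlexLockId;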
*/
bool recoveryConflictPending;
- /* Info about LWLock the process is currently waiting for, if any. */
- bool lwWaiting; /* true if waiting for an LW lock */
- bool lwExclusive; /* true if waiting for exclusive access */
- struct PGPROC *lwWaitLink; /* next waiter for same LW lock */
+ /* Info about FlexLock the process is currently waiting for, if any. */
+ int flWaitResult; /* result of wait, or 0 if still waiting */
+ int flWaitMode; /* lock mode sought */
+ struct PGPROC *flWaitLink; /* next waiter for same FlexLock */
/* Info about lock the process is currently waiting for, if any. */
/* waitLock and waitProcLock are NULL if not currently waiting. */
struct XidCache subxids; /* cache for subtransaction XIDs */
/* Per-backend LWLock. Protects fields below. */
- LWLockId backendLock; /* protects the fields below */
+ FlexLockId backendLock; /* protects the fields below */
/* Lock manager data, recording fast-path locks taken by this backend. */
uint64 fpLockBits; /* lock modes held for each fast-path slot */