*/
typedef struct pgssSharedState
{
- LWLockId lock; /* protects hashtable search/modification */
+ FlexLockId lock; /* protects hashtable search/modification */
int query_size; /* max query length in bytes */
} pgssSharedState;
* resources in pgss_shmem_startup().
*/
RequestAddinShmemSpace(pgss_memsize());
- RequestAddinLWLocks(1);
+ RequestAddinFlexLocks(1);
/*
* Install hooks.
</varlistentry>
<varlistentry>
- <term><varname>trace_lwlocks</varname> (<type>boolean</type>)</term>
+ <term><varname>trace_flexlocks</varname> (<type>boolean</type>)</term>
<indexterm>
- <primary><varname>trace_lwlocks</> configuration parameter</primary>
+ <primary><varname>trace_flexlocks</> configuration parameter</primary>
</indexterm>
<listitem>
<para>
- If on, emit information about lightweight lock usage. Lightweight
- locks are intended primarily to provide mutual exclusion of access
+ If on, emit information about FlexLock usage. FlexLocks
+ are intended primarily to provide mutual exclusion of access
to shared-memory data structures.
</para>
<para>
or kilobytes of memory used for an internal sort.</entry>
</row>
<row>
- <entry>lwlock-acquire</entry>
- <entry>(LWLockId, LWLockMode)</entry>
- <entry>Probe that fires when an LWLock has been acquired.
- arg0 is the LWLock's ID.
- arg1 is the requested lock mode, either exclusive or shared.</entry>
+ <entry>flexlock-acquire</entry>
+ <entry>(FlexLockId, FlexLockMode)</entry>
+ <entry>Probe that fires when a FlexLock has been acquired.
+ arg0 is the FlexLock's ID.
+ arg1 is the requested lock mode.</entry>
</row>
<row>
- <entry>lwlock-release</entry>
- <entry>(LWLockId)</entry>
- <entry>Probe that fires when an LWLock has been released (but note
+ <entry>flexlock-release</entry>
+ <entry>(FlexLockId)</entry>
+ <entry>Probe that fires when a FlexLock has been released (but note
that any released waiters have not yet been awakened).
- arg0 is the LWLock's ID.</entry>
+ arg0 is the FlexLock's ID.</entry>
</row>
<row>
- <entry>lwlock-wait-start</entry>
- <entry>(LWLockId, LWLockMode)</entry>
- <entry>Probe that fires when an LWLock was not immediately available and
+ <entry>flexlock-wait-start</entry>
+ <entry>(FlexLockId, FlexLockMode)</entry>
+ <entry>Probe that fires when a FlexLock was not immediately available and
a server process has begun to wait for the lock to become available.
- arg0 is the LWLock's ID.
+ arg0 is the FlexLock's ID.
arg1 is the requested lock mode, either exclusive or shared.</entry>
</row>
<row>
- <entry>lwlock-wait-done</entry>
- <entry>(LWLockId, LWLockMode)</entry>
+ <entry>flexlock-wait-done</entry>
+ <entry>(FlexLockId, FlexLockMode)</entry>
<entry>Probe that fires when a server process has been released from its
- wait for an LWLock (it does not actually have the lock yet).
- arg0 is the LWLock's ID.
+ wait for a FlexLock (it does not actually have the lock yet).
+ arg0 is the FlexLock's ID.
arg1 is the requested lock mode, either exclusive or shared.</entry>
</row>
<row>
- <entry>lwlock-condacquire</entry>
- <entry>(LWLockId, LWLockMode)</entry>
- <entry>Probe that fires when an LWLock was successfully acquired when the
- caller specified no waiting.
- arg0 is the LWLock's ID.
+ <entry>flexlock-condacquire</entry>
+ <entry>(FlexLockId, FlexLockMode)</entry>
+ <entry>Probe that fires when a FlexLock was successfully acquired when
+ the caller specified no waiting.
+ arg0 is the FlexLock's ID.
arg1 is the requested lock mode, either exclusive or shared.</entry>
</row>
<row>
- <entry>lwlock-condacquire-fail</entry>
- <entry>(LWLockId, LWLockMode)</entry>
- <entry>Probe that fires when an LWLock was not successfully acquired when
- the caller specified no waiting.
- arg0 is the LWLock's ID.
+ <entry>flexlock-condacquire-fail</entry>
+ <entry>(FlexLockId, FlexLockMode)</entry>
+ <entry>Probe that fires when a FlexLock was not successfully acquired
+ when the caller specified no waiting.
+ arg0 is the FlexLock's ID.
arg1 is the requested lock mode, either exclusive or shared.</entry>
</row>
<row>
<entry>unsigned int</entry>
</row>
<row>
- <entry>LWLockId</entry>
+ <entry>FlexLockId</entry>
<entry>int</entry>
</row>
<row>
- <entry>LWLockMode</entry>
+ <entry>FlexLockMode</entry>
<entry>int</entry>
</row>
<row>
sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */
sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */
sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */
- sz += MAXALIGN(nslots * sizeof(LWLockId)); /* buffer_locks[] */
+ sz += MAXALIGN(nslots * sizeof(FlexLockId)); /* buffer_locks[] */
if (nlsns > 0)
sz += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr)); /* group_lsn[] */
void
SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
- LWLockId ctllock, const char *subdir)
+ FlexLockId ctllock, const char *subdir)
{
SlruShared shared;
bool found;
offset += MAXALIGN(nslots * sizeof(int));
shared->page_lru_count = (int *) (ptr + offset);
offset += MAXALIGN(nslots * sizeof(int));
- shared->buffer_locks = (LWLockId *) (ptr + offset);
- offset += MAXALIGN(nslots * sizeof(LWLockId));
+ shared->buffer_locks = (FlexLockId *) (ptr + offset);
+ offset += MAXALIGN(nslots * sizeof(FlexLockId));
if (nlsns > 0)
{
proc->backendId = InvalidBackendId;
proc->databaseId = databaseid;
proc->roleId = owner;
- proc->lwWaiting = false;
- proc->lwExclusive = false;
- proc->lwWaitLink = NULL;
+ proc->flWaitResult = 0;
+ proc->flWaitMode = 0;
+ proc->flWaitLink = NULL;
proc->waitLock = NULL;
proc->waitProcLock = NULL;
for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
* Releasing LW locks is critical since we might try to grab them again
* while cleaning up!
*/
- LWLockReleaseAll();
+ FlexLockReleaseAll();
/* Clean up buffer I/O and buffer context locks, too */
AbortBufferIO();
* FIXME This may be incorrect --- Are there some locks we should keep?
* Buffer locks, for example? I don't think so but I'm not sure.
*/
- LWLockReleaseAll();
+ FlexLockReleaseAll();
AbortBufferIO();
UnlockBuffers();
* Begin shutdown of an auxiliary process. This is approximately the equivalent
* of ShutdownPostgres() in postinit.c. We can't run transactions in an
* auxiliary process, so most of the work of AbortTransaction() is not needed,
- * but we do need to make sure we've released any LWLocks we are holding.
+ * but we do need to make sure we've released any flex locks we are holding.
* (This is only critical during an error exit.)
*/
static void
ShutdownAuxiliaryProcess(int code, Datum arg)
{
- LWLockReleaseAll();
+ FlexLockReleaseAll();
}
/* ----------------------------------------------------------------
/*
* These operations are really just a minimal subset of
* AbortTransaction(). We don't have very many resources to worry
- * about in bgwriter, but we do have LWLocks, buffers, and temp files.
+ * about in bgwriter, but we do have flex locks, buffers, and temp
+ * files.
*/
- LWLockReleaseAll();
+ FlexLockReleaseAll();
AbortBufferIO();
UnlockBuffers();
/* buffer pins are released here: */
/*
* These operations are really just a minimal subset of
* AbortTransaction(). We don't have very many resources to worry
- * about in checkpointer, but we do have LWLocks, buffers, and temp files.
+ * about in checkpointer, but we do have flex locks, buffers, and temp
+ * files.
*/
- LWLockReleaseAll();
+ FlexLockReleaseAll();
AbortBufferIO();
UnlockBuffers();
/* buffer pins are released here: */
typedef int InheritableSocket;
#endif
-typedef struct LWLock LWLock; /* ugly kluge */
-
/*
* Structure contains all variables passed to exec:ed backends
*/
slock_t *ShmemLock;
VariableCache ShmemVariableCache;
Backend *ShmemBackendArray;
- LWLock *LWLockArray;
+ FlexLock *FlexLockArray;
slock_t *ProcStructLock;
PROC_HDR *ProcGlobal;
PGPROC *AuxiliaryProcs;
* functions
*/
extern slock_t *ShmemLock;
-extern LWLock *LWLockArray;
extern slock_t *ProcStructLock;
extern PGPROC *AuxiliaryProcs;
extern PMSignalData *PMSignalState;
param->ShmemVariableCache = ShmemVariableCache;
param->ShmemBackendArray = ShmemBackendArray;
- param->LWLockArray = LWLockArray;
+ param->FlexLockArray = FlexLockArray;
param->ProcStructLock = ProcStructLock;
param->ProcGlobal = ProcGlobal;
param->AuxiliaryProcs = AuxiliaryProcs;
ShmemVariableCache = param->ShmemVariableCache;
ShmemBackendArray = param->ShmemBackendArray;
- LWLockArray = param->LWLockArray;
+ FlexLockArray = param->FlexLockArray;
ProcStructLock = param->ProcStructLock;
ProcGlobal = param->ProcGlobal;
AuxiliaryProcs = param->AuxiliaryProcs;
/*
* These operations are really just a minimal subset of
* AbortTransaction(). We don't have very many resources to worry
- * about in walwriter, but we do have LWLocks, and perhaps buffers?
+ * about in walwriter, but we do have flex locks, and perhaps buffers?
*/
- LWLockReleaseAll();
+ FlexLockReleaseAll();
AbortBufferIO();
UnlockBuffers();
/* buffer pins are released here: */
{
BufferTag newTag; /* identity of requested block */
uint32 newHash; /* hash value for newTag */
- LWLockId newPartitionLock; /* buffer partition lock for it */
+ FlexLockId newPartitionLock; /* buffer partition lock for it */
int buf_id;
/* create a tag so we can lookup the buffer */
{
BufferTag newTag; /* identity of requested block */
uint32 newHash; /* hash value for newTag */
- LWLockId newPartitionLock; /* buffer partition lock for it */
+ FlexLockId newPartitionLock; /* buffer partition lock for it */
BufferTag oldTag; /* previous identity of selected buffer */
uint32 oldHash; /* hash value for oldTag */
- LWLockId oldPartitionLock; /* buffer partition lock for it */
+ FlexLockId oldPartitionLock; /* buffer partition lock for it */
BufFlags oldFlags;
int buf_id;
volatile BufferDesc *buf;
{
BufferTag oldTag;
uint32 oldHash; /* hash value for oldTag */
- LWLockId oldPartitionLock; /* buffer partition lock for it */
+ FlexLockId oldPartitionLock; /* buffer partition lock for it */
BufFlags oldFlags;
/* Save the original buffer tag before dropping the spinlock */
size = add_size(size, SUBTRANSShmemSize());
size = add_size(size, TwoPhaseShmemSize());
size = add_size(size, MultiXactShmemSize());
- size = add_size(size, LWLockShmemSize());
+ size = add_size(size, FlexLockShmemSize());
size = add_size(size, ProcArrayShmemSize());
size = add_size(size, BackendStatusShmemSize());
size = add_size(size, SInvalShmemSize());
* needed for InitShmemIndex.
*/
if (!IsUnderPostmaster)
- CreateLWLocks();
+ CreateFlexLocks();
/*
* Set up shmem.c index hashtable
top_builddir = ../../../..
include $(top_builddir)/src/Makefile.global
-OBJS = lmgr.o lock.o proc.o deadlock.o lwlock.o spin.o s_lock.o predicate.o
+OBJS = flexlock.o lmgr.o lock.o proc.o deadlock.o lwlock.o spin.o s_lock.o \
+ predicate.o
include $(top_srcdir)/src/backend/common.mk
--- /dev/null
+/*-------------------------------------------------------------------------
+ *
+ * flexlock.c
+ * Low-level routines for managing flex locks.
+ *
+ * Flex locks are intended primarily to provide mutual exclusion of access
+ * to shared-memory data structures. Most, but not all, flex locks are
+ * lightweight locks (LWLocks). This file contains support routines that
+ * are used for all types of flex locks, including LWLocks. User-level
+ * locking should be done with the full lock manager --- which depends on
+ * LWLocks to protect its shared state.
+ *
+ * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ * src/backend/storage/lmgr/flexlock.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "miscadmin.h"
+#include "pg_trace.h"
+#include "access/clog.h"
+#include "access/multixact.h"
+#include "access/subtrans.h"
+#include "commands/async.h"
+#include "storage/flexlock.h"
+#include "storage/flexlock_internals.h"
+#include "storage/predicate.h"
+#include "storage/spin.h"
+
+/*
+ * We use this structure to keep track of flex locks held, for release
+ * during error recovery. The maximum size could be determined at runtime
+ * if necessary, but it seems unlikely that more than a few locks could
+ * ever be held simultaneously.
+ */
+#define MAX_SIMUL_FLEXLOCKS 100
+
+static int num_held_flexlocks = 0;
+static FlexLockId held_flexlocks[MAX_SIMUL_FLEXLOCKS];
+
+static int lock_addin_request = 0;
+static bool lock_addin_request_allowed = true;
+
+#ifdef LOCK_DEBUG
+bool Trace_flexlocks = false;
+#endif
+
+/*
+ * This points to the array of FlexLocks in shared memory. Backends inherit
+ * the pointer by fork from the postmaster (except in the EXEC_BACKEND case,
+ * where we have special measures to pass it down).
+ */
+FlexLockPadded *FlexLockArray = NULL;
+
+/* We use the ShmemLock spinlock to protect FlexLockAssign */
+extern slock_t *ShmemLock;
+
+static void FlexLockInit(FlexLock *flex, char locktype);
+
+/*
+ * Compute number of FlexLocks to allocate.
+ */
+int
+NumFlexLocks(void)
+{
+ int numLocks;
+
+ /*
+ * Possibly this logic should be spread out among the affected modules,
+ * the same way that shmem space estimation is done. But for now, there
+ * are few enough users of FlexLocks that we can get away with just keeping
+ * the knowledge here.
+ */
+
+ /* Predefined FlexLocks */
+ numLocks = (int) NumFixedFlexLocks;
+
+ /* bufmgr.c needs two for each shared buffer */
+ numLocks += 2 * NBuffers;
+
+ /* proc.c needs one for each backend or auxiliary process */
+ numLocks += MaxBackends + NUM_AUXILIARY_PROCS;
+
+ /* clog.c needs one per CLOG buffer */
+ numLocks += NUM_CLOG_BUFFERS;
+
+ /* subtrans.c needs one per SubTrans buffer */
+ numLocks += NUM_SUBTRANS_BUFFERS;
+
+ /* multixact.c needs two SLRU areas */
+ numLocks += NUM_MXACTOFFSET_BUFFERS + NUM_MXACTMEMBER_BUFFERS;
+
+ /* async.c needs one per Async buffer */
+ numLocks += NUM_ASYNC_BUFFERS;
+
+ /* predicate.c needs one per old serializable xid buffer */
+ numLocks += NUM_OLDSERXID_BUFFERS;
+
+ /*
+ * Add any requested by loadable modules; for backwards-compatibility
+ * reasons, allocate at least NUM_USER_DEFINED_FLEXLOCKS of them even if
+ * there are no explicit requests.
+ */
+ lock_addin_request_allowed = false;
+ numLocks += Max(lock_addin_request, NUM_USER_DEFINED_FLEXLOCKS);
+
+ return numLocks;
+}
+
+
+/*
+ * RequestAddinFlexLocks
+ * Request that extra FlexLocks be allocated for use by
+ * a loadable module.
+ *
+ * This is only useful if called from the _PG_init hook of a library that
+ * is loaded into the postmaster via shared_preload_libraries. Once
+ * shared memory has been allocated, calls will be ignored. (We could
+ * raise an error, but it seems better to make it a no-op, so that
+ * libraries containing such calls can be reloaded if needed.)
+ */
+void
+RequestAddinFlexLocks(int n)
+{
+ if (IsUnderPostmaster || !lock_addin_request_allowed)
+ return; /* too late */
+ lock_addin_request += n;
+}
+
+
+/*
+ * Compute shmem space needed for FlexLocks.
+ */
+Size
+FlexLockShmemSize(void)
+{
+ Size size;
+ int numLocks = NumFlexLocks();
+
+ /* Space for the FlexLock array. */
+ size = mul_size(numLocks, FLEX_LOCK_BYTES);
+
+ /* Space for dynamic allocation counter, plus room for alignment. */
+ size = add_size(size, 2 * sizeof(int) + FLEX_LOCK_BYTES);
+
+ return size;
+}
+
+/*
+ * Allocate shmem space for FlexLocks and initialize the locks.
+ */
+void
+CreateFlexLocks(void)
+{
+ int numLocks = NumFlexLocks();
+ Size spaceLocks = FlexLockShmemSize();
+ FlexLockPadded *lock;
+ int *FlexLockCounter;
+ char *ptr;
+ int id;
+
+ /* Allocate and zero space */
+ ptr = (char *) ShmemAlloc(spaceLocks);
+ memset(ptr, 0, spaceLocks);
+
+ /* Leave room for dynamic allocation counter */
+ ptr += 2 * sizeof(int);
+
+ /* Ensure desired alignment of FlexLock array */
+ ptr += FLEX_LOCK_BYTES - ((uintptr_t) ptr) % FLEX_LOCK_BYTES;
+
+ FlexLockArray = (FlexLockPadded *) ptr;
+
+ /* All of the "fixed" FlexLocks are LWLocks. */
+ for (id = 0, lock = FlexLockArray; id < NumFixedFlexLocks; id++, lock++)
+ FlexLockInit(&lock->flex, FLEXLOCK_TYPE_LWLOCK);
+
+ /*
+ * Initialize the dynamic-allocation counter, which is stored just before
+ * the first FlexLock.
+ */
+ FlexLockCounter = (int *) ((char *) FlexLockArray - 2 * sizeof(int));
+ FlexLockCounter[0] = (int) NumFixedFlexLocks;
+ FlexLockCounter[1] = numLocks;
+}
+
+/*
+ * FlexLockAssign - assign a dynamically-allocated FlexLock number
+ *
+ * We interlock this using the same spinlock that is used to protect
+ * ShmemAlloc(). Interlocking is not really necessary during postmaster
+ * startup, but it is needed if any user-defined code tries to allocate
+ * LWLocks after startup.
+ */
+FlexLockId
+FlexLockAssign(char locktype)
+{
+ FlexLockId result;
+
+ /* use volatile pointer to prevent code rearrangement */
+ volatile int *FlexLockCounter;
+
+ FlexLockCounter = (int *) ((char *) FlexLockArray - 2 * sizeof(int));
+ SpinLockAcquire(ShmemLock);
+ if (FlexLockCounter[0] >= FlexLockCounter[1])
+ {
+ SpinLockRelease(ShmemLock);
+ elog(ERROR, "no more FlexLockIds available");
+ }
+ result = (FlexLockId) (FlexLockCounter[0]++);
+ SpinLockRelease(ShmemLock);
+
+ FlexLockInit(&FlexLockArray[result].flex, locktype);
+
+ return result;
+}
+
+/*
+ * Initialize a FlexLock.
+ */
+static void
+FlexLockInit(FlexLock *flex, char locktype)
+{
+ SpinLockInit(&flex->mutex);
+ flex->releaseOK = true;
+ flex->locktype = locktype;
+ /*
+ * We might need to think a little harder about what should happen here
+ * if some future type of FlexLock requires more initialization than this.
+ * For now, this will suffice.
+ */
+}
+
+/*
+ * Add lock to list of locks held by this backend.
+ */
+void
+FlexLockRemember(FlexLockId id)
+{
+ if (num_held_flexlocks >= MAX_SIMUL_FLEXLOCKS)
+ elog(PANIC, "too many FlexLocks taken");
+ held_flexlocks[num_held_flexlocks++] = id;
+}
+
+/*
+ * Remove lock from list of locks held. Usually, but not always, it will
+ * be the latest-acquired lock; so search array backwards.
+ */
+void
+FlexLockForget(FlexLockId id)
+{
+ int i;
+
+ for (i = num_held_flexlocks; --i >= 0;)
+ {
+ if (id == held_flexlocks[i])
+ break;
+ }
+ if (i < 0)
+ elog(ERROR, "lock %d is not held", (int) id);
+ num_held_flexlocks--;
+ for (; i < num_held_flexlocks; i++)
+ held_flexlocks[i] = held_flexlocks[i + 1];
+}
+
+/*
+ * FlexLockWait - wait until awakened
+ *
+ * Since we share the process wait semaphore with the regular lock manager
+ * and ProcWaitForSignal, and we may need to acquire a FlexLock while one of
+ * those is pending, it is possible that we get awakened for a reason other
+ * than being signaled by a FlexLock release. If so, loop back and wait again.
+ *
+ * Returns the number of "extra" waits absorbed, so that once we've gotten the
+ * FlexLock, we can re-increment the sema by the number of additional signals
+ * received; the lock manager or signal manager will then see the received
+ * signal when it next waits.
+ */
+int
+FlexLockWait(FlexLockId id, int mode)
+{
+ int extraWaits = 0;
+
+ FlexLockDebug("LWLockAcquire", id, "waiting");
+ TRACE_POSTGRESQL_FLEXLOCK_WAIT_START(id, mode);
+
+ for (;;)
+ {
+ /* "false" means cannot accept cancel/die interrupt here. */
+ PGSemaphoreLock(&MyProc->sem, false);
+ /*
+ * FLEXTODO: I think we should return this, instead of ignoring it.
+ * Any non-zero value means "wake up".
+ */
+ if (MyProc->flWaitResult)
+ break;
+ extraWaits++;
+ }
+
+ TRACE_POSTGRESQL_FLEXLOCK_WAIT_DONE(id, mode);
+ FlexLockDebug("LWLockAcquire", id, "awakened");
+
+ return extraWaits;
+}
+
+/*
+ * FlexLockReleaseAll - release all currently-held locks
+ *
+ * Used to clean up after ereport(ERROR). An important difference between this
+ * function and retail LWLockRelease calls is that InterruptHoldoffCount is
+ * unchanged by this operation. This is necessary since InterruptHoldoffCount
+ * has been set to an appropriate level earlier in error recovery. We could
+ * decrement it below zero if we allow it to drop for each released lock!
+ */
+void
+FlexLockReleaseAll(void)
+{
+ while (num_held_flexlocks > 0)
+ {
+ HOLD_INTERRUPTS(); /* match the upcoming RESUME_INTERRUPTS */
+
+ /*
+ * FLEXTODO: When we have multiple types of flex locks, this will
+ * need to call the appropriate release function for each lock type.
+ */
+ LWLockRelease(held_flexlocks[num_held_flexlocks - 1]);
+ }
+}
+
+/*
+ * FlexLockHeldByMe - test whether my process currently holds a lock
+ *
+ * This is meant as debug support only. We do not consider the lock mode.
+ */
+bool
+FlexLockHeldByMe(FlexLockId id)
+{
+ int i;
+
+ for (i = 0; i < num_held_flexlocks; i++)
+ {
+ if (held_flexlocks[i] == id)
+ return true;
+ }
+ return false;
+}
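For reference, a loadable module consumes the add-in API above exactly the way the pg_stat_statements hunk at the top of this patch does: reserve the lock from _PG_init() while the library is loaded via shared_preload_libraries, then assign and use it once shared memory exists. The following is a minimal sketch of that calling pattern; the extension name, state struct, and hook function are hypothetical and are not part of this patch.

	#include "postgres.h"
	#include "fmgr.h"
	#include "miscadmin.h"
	#include "storage/flexlock.h"	/* FlexLockId, RequestAddinFlexLocks */
	#include "storage/ipc.h"		/* shmem_startup_hook, RequestAddinShmemSpace */
	#include "storage/lwlock.h"		/* LWLockAssign, LWLockAcquire, LWLockRelease */
	#include "storage/shmem.h"		/* ShmemInitStruct */

	PG_MODULE_MAGIC;

	typedef struct MyExtState
	{
		FlexLockId	lock;			/* protects counter */
		int64		counter;
	} MyExtState;

	static MyExtState *my_state = NULL;
	static shmem_startup_hook_type prev_shmem_startup_hook = NULL;

	static void my_ext_shmem_startup(void);

	void
	_PG_init(void)
	{
		/* Only effective from shared_preload_libraries; later calls are no-ops. */
		RequestAddinShmemSpace(MAXALIGN(sizeof(MyExtState)));
		RequestAddinFlexLocks(1);

		prev_shmem_startup_hook = shmem_startup_hook;
		shmem_startup_hook = my_ext_shmem_startup;
	}

	static void
	my_ext_shmem_startup(void)
	{
		bool		found;

		if (prev_shmem_startup_hook)
			prev_shmem_startup_hook();

		LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);
		my_state = ShmemInitStruct("my_ext state", sizeof(MyExtState), &found);
		if (!found)
		{
			my_state->lock = LWLockAssign();	/* hands back a FlexLockId */
			my_state->counter = 0;
		}
		LWLockRelease(AddinShmemInitLock);
	}

The only API difference from the pre-FlexLock code is the RequestAddinFlexLocks() name and the FlexLockId type; LWLockAssign(), LWLockAcquire(), and LWLockRelease() keep their existing signatures.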
bool found;
ResourceOwner owner;
uint32 hashcode;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
int status;
bool log_lock = false;
LOCALLOCK *locallock;
LOCK *lock;
PROCLOCK *proclock;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
bool wakeupNeeded;
if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
*/
for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
{
- LWLockId partitionLock = FirstLockMgrLock + partition;
+ FlexLockId partitionLock = FirstLockMgrLock + partition;
SHM_QUEUE *procLocks = &(MyProc->myProcLocks[partition]);
proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
uint32 hashcode)
{
- LWLockId partitionLock = LockHashPartitionLock(hashcode);
+ FlexLockId partitionLock = LockHashPartitionLock(hashcode);
Oid relid = locktag->locktag_field2;
uint32 i;
LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
LOCKTAG *locktag = &locallock->tag.lock;
PROCLOCK *proclock = NULL;
- LWLockId partitionLock = LockHashPartitionLock(locallock->hashcode);
+ FlexLockId partitionLock = LockHashPartitionLock(locallock->hashcode);
Oid relid = locktag->locktag_field2;
uint32 f;
SHM_QUEUE *procLocks;
PROCLOCK *proclock;
uint32 hashcode;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
int count = 0;
int fast_count = 0;
PROCLOCKTAG proclocktag;
uint32 hashcode;
uint32 proclock_hashcode;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
bool wakeupNeeded;
hashcode = LockTagHashCode(locktag);
*/
for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
{
- LWLockId partitionLock = FirstLockMgrLock + partition;
+ FlexLockId partitionLock = FirstLockMgrLock + partition;
SHM_QUEUE *procLocks = &(MyProc->myProcLocks[partition]);
proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
uint32 hashcode;
uint32 proclock_hashcode;
int partition;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
LockMethod lockMethodTable;
Assert(len == sizeof(TwoPhaseLockRecord));
*/
#include "postgres.h"
-#include "access/clog.h"
-#include "access/multixact.h"
-#include "access/subtrans.h"
-#include "commands/async.h"
#include "miscadmin.h"
#include "pg_trace.h"
+#include "storage/flexlock_internals.h"
#include "storage/ipc.h"
-#include "storage/predicate.h"
#include "storage/proc.h"
#include "storage/spin.h"
-
-/* We use the ShmemLock spinlock to protect LWLockAssign */
-extern slock_t *ShmemLock;
-
-
typedef struct LWLock
{
- slock_t mutex; /* Protects LWLock and queue of PGPROCs */
- bool releaseOK; /* T if ok to release waiters */
+ FlexLock flex; /* common FlexLock infrastructure */
char exclusive; /* # of exclusive holders (0 or 1) */
int shared; /* # of shared holders (0..MaxBackends) */
- PGPROC *head; /* head of list of waiting PGPROCs */
- PGPROC *tail; /* tail of list of waiting PGPROCs */
- /* tail is undefined when head is NULL */
} LWLock;
-/*
- * All the LWLock structs are allocated as an array in shared memory.
- * (LWLockIds are indexes into the array.) We force the array stride to
- * be a power of 2, which saves a few cycles in indexing, but more
- * importantly also ensures that individual LWLocks don't cross cache line
- * boundaries. This reduces cache contention problems, especially on AMD
- * Opterons. (Of course, we have to also ensure that the array start
- * address is suitably aligned.)
- *
- * LWLock is between 16 and 32 bytes on all known platforms, so these two
- * cases are sufficient.
- */
-#define LWLOCK_PADDED_SIZE (sizeof(LWLock) <= 16 ? 16 : 32)
-
-typedef union LWLockPadded
-{
- LWLock lock;
- char pad[LWLOCK_PADDED_SIZE];
-} LWLockPadded;
-
-/*
- * This points to the array of LWLocks in shared memory. Backends inherit
- * the pointer by fork from the postmaster (except in the EXEC_BACKEND case,
- * where we have special measures to pass it down).
- */
-NON_EXEC_STATIC LWLockPadded *LWLockArray = NULL;
-
-
-/*
- * We use this structure to keep track of locked LWLocks for release
- * during error recovery. The maximum size could be determined at runtime
- * if necessary, but it seems unlikely that more than a few locks could
- * ever be held simultaneously.
- */
-#define MAX_SIMUL_LWLOCKS 100
-
-static int num_held_lwlocks = 0;
-static LWLockId held_lwlocks[MAX_SIMUL_LWLOCKS];
-
-static int lock_addin_request = 0;
-static bool lock_addin_request_allowed = true;
+#define LWLockPointer(lockid) \
+ (AssertMacro(FlexLockArray[lockid].flex.locktype == FLEXLOCK_TYPE_LWLOCK), \
+ (volatile LWLock *) &FlexLockArray[lockid])
#ifdef LWLOCK_STATS
static int counts_for_pid = 0;
#endif
#ifdef LOCK_DEBUG
-bool Trace_lwlocks = false;
-
inline static void
-PRINT_LWDEBUG(const char *where, LWLockId lockid, const volatile LWLock *lock)
+PRINT_LWDEBUG(const char *where, FlexLockId lockid, const volatile LWLock *lock)
{
- if (Trace_lwlocks)
+ if (Trace_flexlocks)
elog(LOG, "%s(%d): excl %d shared %d head %p rOK %d",
where, (int) lockid,
- (int) lock->exclusive, lock->shared, lock->head,
- (int) lock->releaseOK);
-}
-
-inline static void
-LOG_LWDEBUG(const char *where, LWLockId lockid, const char *msg)
-{
- if (Trace_lwlocks)
- elog(LOG, "%s(%d): %s", where, (int) lockid, msg);
+ (int) lock->exclusive, lock->shared, lock->flex.head,
+ (int) lock->flex.releaseOK);
}
#else /* not LOCK_DEBUG */
#define PRINT_LWDEBUG(a,b,c)
-#define LOG_LWDEBUG(a,b,c)
#endif /* LOCK_DEBUG */
#ifdef LWLOCK_STATS
print_lwlock_stats(int code, Datum arg)
{
int i;
- int *LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
- int numLocks = LWLockCounter[1];
+ int *FlexLockCounter = (int *) ((char *) FlexLockArray - 2 * sizeof(int));
+ int numLocks = FlexLockCounter[1];
/* Grab an LWLock to keep different backends from mixing reports */
LWLockAcquire(0, LW_EXCLUSIVE);
}
#endif /* LWLOCK_STATS */
-
/*
- * Compute number of LWLocks to allocate.
+ * LWLockAssign - initialize a new lwlock and return its ID
*/
-int
-NumLWLocks(void)
-{
- int numLocks;
-
- /*
- * Possibly this logic should be spread out among the affected modules,
- * the same way that shmem space estimation is done. But for now, there
- * are few enough users of LWLocks that we can get away with just keeping
- * the knowledge here.
- */
-
- /* Predefined LWLocks */
- numLocks = (int) NumFixedLWLocks;
-
- /* bufmgr.c needs two for each shared buffer */
- numLocks += 2 * NBuffers;
-
- /* proc.c needs one for each backend or auxiliary process */
- numLocks += MaxBackends + NUM_AUXILIARY_PROCS;
-
- /* clog.c needs one per CLOG buffer */
- numLocks += NUM_CLOG_BUFFERS;
-
- /* subtrans.c needs one per SubTrans buffer */
- numLocks += NUM_SUBTRANS_BUFFERS;
-
- /* multixact.c needs two SLRU areas */
- numLocks += NUM_MXACTOFFSET_BUFFERS + NUM_MXACTMEMBER_BUFFERS;
-
- /* async.c needs one per Async buffer */
- numLocks += NUM_ASYNC_BUFFERS;
-
- /* predicate.c needs one per old serializable xid buffer */
- numLocks += NUM_OLDSERXID_BUFFERS;
-
- /*
- * Add any requested by loadable modules; for backwards-compatibility
- * reasons, allocate at least NUM_USER_DEFINED_LWLOCKS of them even if
- * there are no explicit requests.
- */
- lock_addin_request_allowed = false;
- numLocks += Max(lock_addin_request, NUM_USER_DEFINED_LWLOCKS);
-
- return numLocks;
-}
-
-
-/*
- * RequestAddinLWLocks
- * Request that extra LWLocks be allocated for use by
- * a loadable module.
- *
- * This is only useful if called from the _PG_init hook of a library that
- * is loaded into the postmaster via shared_preload_libraries. Once
- * shared memory has been allocated, calls will be ignored. (We could
- * raise an error, but it seems better to make it a no-op, so that
- * libraries containing such calls can be reloaded if needed.)
- */
-void
-RequestAddinLWLocks(int n)
-{
- if (IsUnderPostmaster || !lock_addin_request_allowed)
- return; /* too late */
- lock_addin_request += n;
-}
-
-
-/*
- * Compute shmem space needed for LWLocks.
- */
-Size
-LWLockShmemSize(void)
-{
- Size size;
- int numLocks = NumLWLocks();
-
- /* Space for the LWLock array. */
- size = mul_size(numLocks, sizeof(LWLockPadded));
-
- /* Space for dynamic allocation counter, plus room for alignment. */
- size = add_size(size, 2 * sizeof(int) + LWLOCK_PADDED_SIZE);
-
- return size;
-}
-
-
-/*
- * Allocate shmem space for LWLocks and initialize the locks.
- */
-void
-CreateLWLocks(void)
-{
- int numLocks = NumLWLocks();
- Size spaceLocks = LWLockShmemSize();
- LWLockPadded *lock;
- int *LWLockCounter;
- char *ptr;
- int id;
-
- /* Allocate space */
- ptr = (char *) ShmemAlloc(spaceLocks);
-
- /* Leave room for dynamic allocation counter */
- ptr += 2 * sizeof(int);
-
- /* Ensure desired alignment of LWLock array */
- ptr += LWLOCK_PADDED_SIZE - ((uintptr_t) ptr) % LWLOCK_PADDED_SIZE;
-
- LWLockArray = (LWLockPadded *) ptr;
-
- /*
- * Initialize all LWLocks to "unlocked" state
- */
- for (id = 0, lock = LWLockArray; id < numLocks; id++, lock++)
- {
- SpinLockInit(&lock->lock.mutex);
- lock->lock.releaseOK = true;
- lock->lock.exclusive = 0;
- lock->lock.shared = 0;
- lock->lock.head = NULL;
- lock->lock.tail = NULL;
- }
-
- /*
- * Initialize the dynamic-allocation counter, which is stored just before
- * the first LWLock.
- */
- LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
- LWLockCounter[0] = (int) NumFixedLWLocks;
- LWLockCounter[1] = numLocks;
-}
-
-
-/*
- * LWLockAssign - assign a dynamically-allocated LWLock number
- *
- * We interlock this using the same spinlock that is used to protect
- * ShmemAlloc(). Interlocking is not really necessary during postmaster
- * startup, but it is needed if any user-defined code tries to allocate
- * LWLocks after startup.
- */
-LWLockId
+FlexLockId
LWLockAssign(void)
{
- LWLockId result;
-
- /* use volatile pointer to prevent code rearrangement */
- volatile int *LWLockCounter;
-
- LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
- SpinLockAcquire(ShmemLock);
- if (LWLockCounter[0] >= LWLockCounter[1])
- {
- SpinLockRelease(ShmemLock);
- elog(ERROR, "no more LWLockIds available");
- }
- result = (LWLockId) (LWLockCounter[0]++);
- SpinLockRelease(ShmemLock);
- return result;
+ return FlexLockAssign(FLEXLOCK_TYPE_LWLOCK);
}
-
/*
* LWLockAcquire - acquire a lightweight lock in the specified mode
*
* Side effect: cancel/die interrupts are held off until lock release.
*/
void
-LWLockAcquire(LWLockId lockid, LWLockMode mode)
+LWLockAcquire(FlexLockId lockid, LWLockMode mode)
{
- volatile LWLock *lock = &(LWLockArray[lockid].lock);
+ volatile LWLock *lock = LWLockPointer(lockid);
PGPROC *proc = MyProc;
bool retry = false;
int extraWaits = 0;
/* Set up local count state first time through in a given process */
if (counts_for_pid != MyProcPid)
{
- int *LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
- int numLocks = LWLockCounter[1];
+ int *FlexLockCounter = (int *) ((char *) FlexLockArray - 2 * sizeof(int));
+ int numLocks = FlexLockCounter[1];
sh_acquire_counts = calloc(numLocks, sizeof(int));
ex_acquire_counts = calloc(numLocks, sizeof(int));
*/
Assert(!(proc == NULL && IsUnderPostmaster));
- /* Ensure we will have room to remember the lock */
- if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
- elog(ERROR, "too many LWLocks taken");
-
/*
* Lock out cancel/die interrupts until we exit the code section protected
* by the LWLock. This ensures that interrupts will not interfere with
bool mustwait;
/* Acquire mutex. Time spent holding mutex should be short! */
- SpinLockAcquire(&lock->mutex);
+ SpinLockAcquire(&lock->flex.mutex);
/* If retrying, allow LWLockRelease to release waiters again */
if (retry)
- lock->releaseOK = true;
+ lock->flex.releaseOK = true;
/* If I can get the lock, do so quickly. */
if (mode == LW_EXCLUSIVE)
if (!mustwait)
break; /* got the lock */
- /*
- * Add myself to wait queue.
- *
- * If we don't have a PGPROC structure, there's no way to wait. This
- * should never occur, since MyProc should only be null during shared
- * memory initialization.
- */
- if (proc == NULL)
- elog(PANIC, "cannot wait without a PGPROC structure");
-
- proc->lwWaiting = true;
- proc->lwExclusive = (mode == LW_EXCLUSIVE);
- proc->lwWaitLink = NULL;
- if (lock->head == NULL)
- lock->head = proc;
- else
- lock->tail->lwWaitLink = proc;
- lock->tail = proc;
+ /* Add myself to wait queue. */
+ FlexLockJoinWaitQueue(lock, (int) mode);
/* Can release the mutex now */
- SpinLockRelease(&lock->mutex);
-
- /*
- * Wait until awakened.
- *
- * Since we share the process wait semaphore with the regular lock
- * manager and ProcWaitForSignal, and we may need to acquire an LWLock
- * while one of those is pending, it is possible that we get awakened
- * for a reason other than being signaled by LWLockRelease. If so,
- * loop back and wait again. Once we've gotten the LWLock,
- * re-increment the sema by the number of additional signals received,
- * so that the lock manager or signal manager will see the received
- * signal when it next waits.
- */
- LOG_LWDEBUG("LWLockAcquire", lockid, "waiting");
+ SpinLockRelease(&lock->flex.mutex);
+
+ /* Wait until awakened. */
+ extraWaits += FlexLockWait(lockid, mode);
#ifdef LWLOCK_STATS
block_counts[lockid]++;
#endif
- TRACE_POSTGRESQL_LWLOCK_WAIT_START(lockid, mode);
-
- for (;;)
- {
- /* "false" means cannot accept cancel/die interrupt here. */
- PGSemaphoreLock(&proc->sem, false);
- if (!proc->lwWaiting)
- break;
- extraWaits++;
- }
-
- TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(lockid, mode);
-
- LOG_LWDEBUG("LWLockAcquire", lockid, "awakened");
-
/* Now loop back and try to acquire lock again. */
retry = true;
}
/* We are done updating shared state of the lock itself. */
- SpinLockRelease(&lock->mutex);
+ SpinLockRelease(&lock->flex.mutex);
- TRACE_POSTGRESQL_LWLOCK_ACQUIRE(lockid, mode);
+ TRACE_POSTGRESQL_FLEXLOCK_ACQUIRE(lockid, mode);
/* Add lock to list of locks held by this backend */
- held_lwlocks[num_held_lwlocks++] = lockid;
+ FlexLockRemember(lockid);
/*
* Fix the process wait semaphore's count for any absorbed wakeups.
* If successful, cancel/die interrupts are held off until lock release.
*/
bool
-LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
+LWLockConditionalAcquire(FlexLockId lockid, LWLockMode mode)
{
- volatile LWLock *lock = &(LWLockArray[lockid].lock);
+ volatile LWLock *lock = LWLockPointer(lockid);
bool mustwait;
PRINT_LWDEBUG("LWLockConditionalAcquire", lockid, lock);
- /* Ensure we will have room to remember the lock */
- if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
- elog(ERROR, "too many LWLocks taken");
-
/*
* Lock out cancel/die interrupts until we exit the code section protected
* by the LWLock. This ensures that interrupts will not interfere with
HOLD_INTERRUPTS();
/* Acquire mutex. Time spent holding mutex should be short! */
- SpinLockAcquire(&lock->mutex);
+ SpinLockAcquire(&lock->flex.mutex);
/* If I can get the lock, do so quickly. */
if (mode == LW_EXCLUSIVE)
}
/* We are done updating shared state of the lock itself. */
- SpinLockRelease(&lock->mutex);
+ SpinLockRelease(&lock->flex.mutex);
if (mustwait)
{
/* Failed to get lock, so release interrupt holdoff */
RESUME_INTERRUPTS();
- LOG_LWDEBUG("LWLockConditionalAcquire", lockid, "failed");
- TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL(lockid, mode);
+ FlexLockDebug("LWLockConditionalAcquire", lockid, "failed");
+ TRACE_POSTGRESQL_FLEXLOCK_CONDACQUIRE_FAIL(lockid, mode);
}
else
{
/* Add lock to list of locks held by this backend */
- held_lwlocks[num_held_lwlocks++] = lockid;
- TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE(lockid, mode);
+ FlexLockRemember(lockid);
+ TRACE_POSTGRESQL_FLEXLOCK_CONDACQUIRE(lockid, mode);
}
return !mustwait;
* LWLockRelease - release a previously acquired lock
*/
void
-LWLockRelease(LWLockId lockid)
+LWLockRelease(FlexLockId lockid)
{
- volatile LWLock *lock = &(LWLockArray[lockid].lock);
+ volatile LWLock *lock = LWLockPointer(lockid);
PGPROC *head;
PGPROC *proc;
- int i;
PRINT_LWDEBUG("LWLockRelease", lockid, lock);
- /*
- * Remove lock from list of locks held. Usually, but not always, it will
- * be the latest-acquired lock; so search array backwards.
- */
- for (i = num_held_lwlocks; --i >= 0;)
- {
- if (lockid == held_lwlocks[i])
- break;
- }
- if (i < 0)
- elog(ERROR, "lock %d is not held", (int) lockid);
- num_held_lwlocks--;
- for (; i < num_held_lwlocks; i++)
- held_lwlocks[i] = held_lwlocks[i + 1];
+ FlexLockForget(lockid);
/* Acquire mutex. Time spent holding mutex should be short! */
- SpinLockAcquire(&lock->mutex);
+ SpinLockAcquire(&lock->flex.mutex);
/* Release my hold on lock */
if (lock->exclusive > 0)
* if someone has already awakened waiters that haven't yet acquired the
* lock.
*/
- head = lock->head;
+ head = lock->flex.head;
if (head != NULL)
{
- if (lock->exclusive == 0 && lock->shared == 0 && lock->releaseOK)
+ if (lock->exclusive == 0 && lock->shared == 0 && lock->flex.releaseOK)
{
/*
* Remove the to-be-awakened PGPROCs from the queue. If the front
* as many waiters as want shared access.
*/
proc = head;
- if (!proc->lwExclusive)
+ if (proc->flWaitMode != LW_EXCLUSIVE)
{
- while (proc->lwWaitLink != NULL &&
- !proc->lwWaitLink->lwExclusive)
- proc = proc->lwWaitLink;
+ while (proc->flWaitLink != NULL &&
+ proc->flWaitLink->flWaitMode != LW_EXCLUSIVE)
+ proc = proc->flWaitLink;
}
/* proc is now the last PGPROC to be released */
- lock->head = proc->lwWaitLink;
- proc->lwWaitLink = NULL;
+ lock->flex.head = proc->flWaitLink;
+ proc->flWaitLink = NULL;
/* prevent additional wakeups until retryer gets to run */
- lock->releaseOK = false;
+ lock->flex.releaseOK = false;
}
else
{
}
/* We are done updating shared state of the lock itself. */
- SpinLockRelease(&lock->mutex);
+ SpinLockRelease(&lock->flex.mutex);
- TRACE_POSTGRESQL_LWLOCK_RELEASE(lockid);
+ TRACE_POSTGRESQL_FLEXLOCK_RELEASE(lockid);
/*
* Awaken any waiters I removed from the queue.
*/
while (head != NULL)
{
- LOG_LWDEBUG("LWLockRelease", lockid, "release waiter");
+ FlexLockDebug("LWLockRelease", lockid, "release waiter");
proc = head;
- head = proc->lwWaitLink;
- proc->lwWaitLink = NULL;
- proc->lwWaiting = false;
+ head = proc->flWaitLink;
+ proc->flWaitLink = NULL;
+ proc->flWaitResult = 1; /* any non-zero value will do */
PGSemaphoreUnlock(&proc->sem);
}
RESUME_INTERRUPTS();
}
-
-/*
- * LWLockReleaseAll - release all currently-held locks
- *
- * Used to clean up after ereport(ERROR). An important difference between this
- * function and retail LWLockRelease calls is that InterruptHoldoffCount is
- * unchanged by this operation. This is necessary since InterruptHoldoffCount
- * has been set to an appropriate level earlier in error recovery. We could
- * decrement it below zero if we allow it to drop for each released lock!
- */
-void
-LWLockReleaseAll(void)
-{
- while (num_held_lwlocks > 0)
- {
- HOLD_INTERRUPTS(); /* match the upcoming RESUME_INTERRUPTS */
-
- LWLockRelease(held_lwlocks[num_held_lwlocks - 1]);
- }
-}
-
-
/*
* LWLockHeldByMe - test whether my process currently holds a lock
*
- * This is meant as debug support only. We do not distinguish whether the
- * lock is held shared or exclusive.
+ * The following convenience routine would not be worthwhile but for the fact
+ * that we've had a function by this name since long before FlexLocks existed.
+ * Callers who want to check whether an arbitrary FlexLock (that may or may not
+ * be an LWLock) is held can use FlexLockHeldByMe directly.
*/
bool
-LWLockHeldByMe(LWLockId lockid)
+LWLockHeldByMe(FlexLockId lockid)
{
- int i;
-
- for (i = 0; i < num_held_lwlocks; i++)
- {
- if (held_lwlocks[i] == lockid)
- return true;
- }
- return false;
+ AssertMacro(FlexLockArray[lockid].flex.locktype == FLEXLOCK_TYPE_LWLOCK);
+ return FlexLockHeldByMe(lockid);
}
#define PredicateLockHashPartition(hashcode) \
((hashcode) % NUM_PREDICATELOCK_PARTITIONS)
#define PredicateLockHashPartitionLock(hashcode) \
- ((LWLockId) (FirstPredicateLockMgrLock + PredicateLockHashPartition(hashcode)))
+ ((FlexLockId) (FirstPredicateLockMgrLock + PredicateLockHashPartition(hashcode)))
#define NPREDICATELOCKTARGETENTS() \
mul_size(max_predicate_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
{
PREDICATELOCKTARGETTAG targettag;
uint32 targettaghash;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
PREDICATELOCKTARGET *target;
SET_PREDICATELOCKTARGETTAG_PAGE(targettag,
if (TargetTagIsCoveredBy(oldtargettag, *newtargettag))
{
uint32 oldtargettaghash;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
PREDICATELOCK *rmpredlock;
oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag);
PREDICATELOCKTARGET *target;
PREDICATELOCKTAG locktag;
PREDICATELOCK *lock;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
bool found;
partitionLock = PredicateLockHashPartitionLock(targettaghash);
bool removeOld)
{
uint32 oldtargettaghash;
- LWLockId oldpartitionLock;
+ FlexLockId oldpartitionLock;
PREDICATELOCKTARGET *oldtarget;
uint32 newtargettaghash;
- LWLockId newpartitionLock;
+ FlexLockId newpartitionLock;
bool found;
bool outOfShmem = false;
PREDICATELOCKTARGET *target;
PREDICATELOCKTARGETTAG targettag;
uint32 targettaghash;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
tag = predlock->tag;
target = tag.myTarget;
PREDICATELOCKTARGET *target;
PREDICATELOCKTARGETTAG targettag;
uint32 targettaghash;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
nextpredlock = (PREDICATELOCK *)
SHMQueueNext(&(sxact->predicateLocks),
CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
{
uint32 targettaghash;
- LWLockId partitionLock;
+ FlexLockId partitionLock;
PREDICATELOCKTARGET *target;
PREDICATELOCK *predlock;
PREDICATELOCK *mypredlock = NULL;
/* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
if (IsAutoVacuumWorkerProcess())
MyPgXact->vacuumFlags |= PROC_IS_AUTOVACUUM;
- MyProc->lwWaiting = false;
- MyProc->lwExclusive = false;
- MyProc->lwWaitLink = NULL;
+ MyProc->flWaitResult = 0;
+ MyProc->flWaitMode = 0;
+ MyProc->flWaitLink = NULL;
MyProc->waitLock = NULL;
MyProc->waitProcLock = NULL;
#ifdef USE_ASSERT_CHECKING
MyProc->roleId = InvalidOid;
MyPgXact->inCommit = false;
MyPgXact->vacuumFlags = 0;
- MyProc->lwWaiting = false;
- MyProc->lwExclusive = false;
- MyProc->lwWaitLink = NULL;
+ MyProc->flWaitMode = 0;
+ MyProc->flWaitResult = 0;
+ MyProc->flWaitLink = NULL;
MyProc->waitLock = NULL;
MyProc->waitProcLock = NULL;
#ifdef USE_ASSERT_CHECKING
void
LockWaitCancel(void)
{
- LWLockId partitionLock;
+ FlexLockId partitionLock;
/* Nothing to do if we weren't waiting for a lock */
if (lockAwaited == NULL)
#endif
/*
- * Release any LW locks I am holding. There really shouldn't be any, but
- * it's cheap to check again before we cut the knees off the LWLock
+ * Release any flex locks I am holding. There really shouldn't be any, but
+ * it's cheap to check again before we cut the knees off the flex lock
* facility by releasing our PGPROC ...
*/
- LWLockReleaseAll();
+ FlexLockReleaseAll();
/* Release ownership of the process's latch, too */
DisownLatch(&MyProc->procLatch);
Assert(MyProc == auxproc);
- /* Release any LW locks I am holding (see notes above) */
- LWLockReleaseAll();
+ /* Release any flex locks I am holding (see notes above) */
+ FlexLockReleaseAll();
/* Release ownership of the process's latch, too */
DisownLatch(&MyProc->procLatch);
LOCK *lock = locallock->lock;
PROCLOCK *proclock = locallock->proclock;
uint32 hashcode = locallock->hashcode;
- LWLockId partitionLock = LockHashPartitionLock(hashcode);
+ FlexLockId partitionLock = LockHashPartitionLock(hashcode);
PROC_QUEUE *waitQueue = &(lock->waitProcs);
LOCKMASK myHeldLocks = MyProc->heldLocks;
bool early_deadlock = false;
INTENTIONALLY_NOT_INCLUDED="autocommit debug_deadlocks \
is_superuser lc_collate lc_ctype lc_messages lc_monetary lc_numeric lc_time \
pre_auth_delay role seed server_encoding server_version server_version_int \
-session_authorization trace_lock_oidmin trace_lock_table trace_locks trace_lwlocks \
+session_authorization trace_lock_oidmin trace_lock_table trace_locks trace_flexlocks \
trace_notify trace_userlocks transaction_isolation transaction_read_only \
zero_damaged_pages"
#include "replication/walreceiver.h"
#include "replication/walsender.h"
#include "storage/bufmgr.h"
+#include "storage/flexlock_internals.h"
#include "storage/standby.h"
#include "storage/fd.h"
#include "storage/predicate.h"
NULL, NULL, NULL
},
{
- {"trace_lwlocks", PGC_SUSET, DEVELOPER_OPTIONS,
+ {"trace_flexlocks", PGC_SUSET, DEVELOPER_OPTIONS,
gettext_noop("No description available."),
NULL,
GUC_NOT_IN_SAMPLE
},
- &Trace_lwlocks,
+ &Trace_flexlocks,
false,
NULL, NULL, NULL
},
* in probe definitions, as they cause compilation errors on Mac OS X 10.5.
*/
#define LocalTransactionId unsigned int
-#define LWLockId int
-#define LWLockMode int
+#define FlexLockId int
+#define FlexLockMode int
#define LOCKMODE int
#define BlockNumber unsigned int
#define Oid unsigned int
probe transaction__commit(LocalTransactionId);
probe transaction__abort(LocalTransactionId);
- probe lwlock__acquire(LWLockId, LWLockMode);
- probe lwlock__release(LWLockId);
- probe lwlock__wait__start(LWLockId, LWLockMode);
- probe lwlock__wait__done(LWLockId, LWLockMode);
- probe lwlock__condacquire(LWLockId, LWLockMode);
- probe lwlock__condacquire__fail(LWLockId, LWLockMode);
+ probe flexlock__acquire(FlexLockId, FlexLockMode);
+ probe flexlock__release(FlexLockId);
+ probe flexlock__wait__start(FlexLockId, FlexLockMode);
+ probe flexlock__wait__done(FlexLockId, FlexLockMode);
+ probe flexlock__condacquire(FlexLockId, FlexLockMode);
+ probe flexlock__condacquire__fail(FlexLockId, FlexLockMode);
probe lock__wait__start(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, LOCKMODE);
probe lock__wait__done(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, LOCKMODE);
*/
typedef struct SlruSharedData
{
- LWLockId ControlLock;
+ FlexLockId ControlLock;
/* Number of buffers managed by this SLRU structure */
int num_slots;
bool *page_dirty;
int *page_number;
int *page_lru_count;
- LWLockId *buffer_locks;
+ FlexLockId *buffer_locks;
/*
* Optional array of WAL flush LSNs associated with entries in the SLRU
extern Size SimpleLruShmemSize(int nslots, int nlsns);
extern void SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
- LWLockId ctllock, const char *subdir);
+ FlexLockId ctllock, const char *subdir);
extern int SimpleLruZeroPage(SlruCtl ctl, int pageno);
extern int SimpleLruReadPage(SlruCtl ctl, int pageno, bool write_ok,
TransactionId xid);
#define SEQ_MINVALUE (-SEQ_MAXVALUE)
/*
- * Number of spare LWLocks to allocate for user-defined add-on code.
+ * Number of spare FlexLocks to allocate for user-defined add-on code.
*/
-#define NUM_USER_DEFINED_LWLOCKS 4
+#define NUM_USER_DEFINED_FLEXLOCKS 4
/*
* Define this if you want to allow the lo_import and lo_export SQL
#define BufTableHashPartition(hashcode) \
((hashcode) % NUM_BUFFER_PARTITIONS)
#define BufMappingPartitionLock(hashcode) \
- ((LWLockId) (FirstBufMappingLock + BufTableHashPartition(hashcode)))
+ ((FlexLockId) (FirstBufMappingLock + BufTableHashPartition(hashcode)))
/*
* BufferDesc -- shared descriptor/state data for a single shared buffer.
int buf_id; /* buffer's index number (from 0) */
int freeNext; /* link in freelist chain */
- LWLockId io_in_progress_lock; /* to wait for I/O to complete */
- LWLockId content_lock; /* to lock access to buffer contents */
+ FlexLockId io_in_progress_lock; /* to wait for I/O to complete */
+ FlexLockId content_lock; /* to lock access to buffer contents */
} BufferDesc;
#define BufferDescriptorGetBuffer(bdesc) ((bdesc)->buf_id + 1)
--- /dev/null
+/*-------------------------------------------------------------------------
+ *
+ * flexlock.h
+ * Flex lock manager
+ *
+ * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/storage/flexlock.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef FLEXLOCK_H
+#define FLEXLOCK_H
+
+/*
+ * It's a bit odd to declare NUM_BUFFER_PARTITIONS and NUM_LOCK_PARTITIONS
+ * here, but we need them to set up enum FlexLockId correctly, and having
+ * this file include lock.h or bufmgr.h would be backwards.
+ */
+
+/* Number of partitions of the shared buffer mapping hashtable */
+#define NUM_BUFFER_PARTITIONS 16
+
+/* Number of partitions the shared lock tables are divided into */
+#define LOG2_NUM_LOCK_PARTITIONS 4
+#define NUM_LOCK_PARTITIONS (1 << LOG2_NUM_LOCK_PARTITIONS)
+
+/* Number of partitions the shared predicate lock tables are divided into */
+#define LOG2_NUM_PREDICATELOCK_PARTITIONS 4
+#define NUM_PREDICATELOCK_PARTITIONS (1 << LOG2_NUM_PREDICATELOCK_PARTITIONS)
+
+/*
+ * We have a number of predefined FlexLocks, plus a bunch of locks that are
+ * dynamically assigned (e.g., for shared buffers). The FlexLock structures
+ * live in shared memory (since they contain shared data) and are identified
+ * by values of this enumerated type. We abuse the notion of an enum somewhat
+ * by allowing values not listed in the enum declaration to be assigned.
+ * The extra value MaxDynamicFlexLock is there to keep the compiler from
+ * deciding that the enum can be represented as char or short ...
+ *
+ * If you remove a lock, please replace it with a placeholder. This retains
+ * the lock numbering, which is helpful for DTrace and other external
+ * debugging scripts.
+ */
+typedef enum FlexLockId
+{
+ BufFreelistLock,
+ ShmemIndexLock,
+ OidGenLock,
+ XidGenLock,
+ ProcArrayLock,
+ SInvalReadLock,
+ SInvalWriteLock,
+ WALInsertLock,
+ WALWriteLock,
+ ControlFileLock,
+ CheckpointLock,
+ CLogControlLock,
+ SubtransControlLock,
+ MultiXactGenLock,
+ MultiXactOffsetControlLock,
+ MultiXactMemberControlLock,
+ RelCacheInitLock,
+ BgWriterCommLock,
+ TwoPhaseStateLock,
+ TablespaceCreateLock,
+ BtreeVacuumLock,
+ AddinShmemInitLock,
+ AutovacuumLock,
+ AutovacuumScheduleLock,
+ SyncScanLock,
+ RelationMappingLock,
+ AsyncCtlLock,
+ AsyncQueueLock,
+ SerializableXactHashLock,
+ SerializableFinishedListLock,
+ SerializablePredicateLockListLock,
+ OldSerXidLock,
+ SyncRepLock,
+ /* Individual lock IDs end here */
+ FirstBufMappingLock,
+ FirstLockMgrLock = FirstBufMappingLock + NUM_BUFFER_PARTITIONS,
+ FirstPredicateLockMgrLock = FirstLockMgrLock + NUM_LOCK_PARTITIONS,
+
+ /* must be last except for MaxDynamicFlexLock: */
+ NumFixedFlexLocks = FirstPredicateLockMgrLock + NUM_PREDICATELOCK_PARTITIONS,
+
+ MaxDynamicFlexLock = 1000000000
+} FlexLockId;
+
+/* Shared memory setup. */
+extern int NumFlexLocks(void);
+extern Size FlexLockShmemSize(void);
+extern void RequestAddinFlexLocks(int n);
+extern void CreateFlexLocks(void);
+
+/* Error recovery and debugging support functions. */
+extern void FlexLockReleaseAll(void);
+extern bool FlexLockHeldByMe(FlexLockId id);
+
+#endif /* FLEXLOCK_H */
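To make the fixed-ID layout above concrete: SyncRepLock is the last individually named lock, FirstBufMappingLock immediately follows it, and the three partition ranges stack on top of one another (FirstLockMgrLock = FirstBufMappingLock + NUM_BUFFER_PARTITIONS, FirstPredicateLockMgrLock = FirstLockMgrLock + NUM_LOCK_PARTITIONS, NumFixedFlexLocks = FirstPredicateLockMgrLock + NUM_PREDICATELOCK_PARTITIONS). A lock-manager partition lock for a given hash code is therefore just (FlexLockId) (FirstLockMgrLock + hashcode % NUM_LOCK_PARTITIONS), which is exactly what the LockHashPartitionLock() macro in lock.h expands to, and IDs from NumFixedFlexLocks upward are handed out dynamically by FlexLockAssign().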
--- /dev/null
+/*-------------------------------------------------------------------------
+ *
+ * flexlock_internals.h
+ * Flex lock internals. Only files which implement a FlexLock
+ * type should need to include this. Merging this with flexlock.h
+ * creates a circular header dependency, but even if it didn't, this
+ * is cleaner.
+ *
+ * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/storage/flexlock_internals.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef FLEXLOCK_INTERNALS_H
+#define FLEXLOCK_INTERNALS_H
+
+#include "storage/proc.h"
+#include "storage/s_lock.h"
+
+/*
+ * Individual FlexLock implementations each get this many bytes to store
+ * its state; of course, a given implementation could also allocate additional
+ * shmem elsewhere, but we provide this many bytes within the array. The
+ * header fields common to all FlexLock types are included in this number.
+ * A power of two should probably be chosen, to avoid alignment issues and
+ * cache line splitting. It might be useful to increase this on systems where
+ * a cache line is more than 64 bytes in size.
+ */
+#define FLEX_LOCK_BYTES 64
+
+typedef struct FlexLock
+{
+ char locktype; /* see FLEXLOCK_TYPE_* constants */
+ slock_t mutex; /* Protects FlexLock state and wait queues */
+ bool releaseOK; /* T if ok to release waiters */
+ PGPROC *head; /* head of list of waiting PGPROCs */
+ PGPROC *tail; /* tail of list of waiting PGPROCs */
+ /* tail is undefined when head is NULL */
+} FlexLock;
+
+#define FLEXLOCK_TYPE_LWLOCK 'l'
+
+typedef union FlexLockPadded
+{
+ FlexLock flex;
+ char pad[FLEX_LOCK_BYTES];
+} FlexLockPadded;
+
+extern FlexLockPadded *FlexLockArray;
+
+extern FlexLockId FlexLockAssign(char locktype);
+extern void FlexLockRemember(FlexLockId id);
+extern void FlexLockForget(FlexLockId id);
+extern int FlexLockWait(FlexLockId id, int mode);
+
+/*
+ * We must join the wait queue while holding the spinlock, so we define this
+ * as a macro, for speed.
+ */
+#define FlexLockJoinWaitQueue(lock, mode) \
+ do { \
+ Assert(MyProc != NULL); \
+ MyProc->flWaitResult = 0; \
+ MyProc->flWaitMode = mode; \
+ MyProc->flWaitLink = NULL; \
+ if (lock->flex.head == NULL) \
+ lock->flex.head = MyProc; \
+ else \
+ lock->flex.tail->flWaitLink = MyProc; \
+ lock->flex.tail = MyProc; \
+ } while (0)
+
+#ifdef LOCK_DEBUG
+extern bool Trace_flexlocks;
+#define FlexLockDebug(where, id, msg) \
+ do { \
+ if (Trace_flexlocks) \
+ elog(LOG, "%s(%d): %s", where, (int) id, msg); \
+ } while (0)
+#else
+#define FlexLockDebug(where, id, msg)
+#endif
+
+#endif /* FLEXLOCK_INTERNALS_H */
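To show how a second FlexLock type would plug into these internals, here is a condensed, illustrative acquire path modeled on what LWLockAcquire does elsewhere in this patch. The MyLock type and MyLockAcquire function are hypothetical; a real lock type would also define its own FLEXLOCK_TYPE_* constant, obtain its ID via FlexLockAssign(), and provide a release routine that pops waiters off flex.head, sets their flWaitResult, and calls PGSemaphoreUnlock(), just as LWLockRelease does.

	#include "postgres.h"
	#include "miscadmin.h"					/* HOLD_INTERRUPTS */
	#include "storage/flexlock_internals.h"
	#include "storage/pg_sema.h"			/* PGSemaphoreUnlock */
	#include "storage/spin.h"

	/* Hypothetical FlexLock type: a simple exclusive-only lock. */
	typedef struct MyLock
	{
		FlexLock	flex;		/* common FlexLock header; must come first */
		char		held;		/* nonzero if some backend holds the lock */
	} MyLock;

	static void
	MyLockAcquire(FlexLockId id)
	{
		volatile MyLock *lock = (volatile MyLock *) &FlexLockArray[id];
		int			extraWaits = 0;
		bool		retry = false;

		/* Hold off cancel/die interrupts until the lock is released. */
		HOLD_INTERRUPTS();

		for (;;)
		{
			SpinLockAcquire(&lock->flex.mutex);

			/* If retrying, allow a releaser to wake waiters again. */
			if (retry)
				lock->flex.releaseOK = true;

			if (!lock->held)
			{
				lock->held = 1;
				SpinLockRelease(&lock->flex.mutex);
				break;			/* got the lock */
			}

			/* Join the wait queue while still holding the spinlock. */
			FlexLockJoinWaitQueue(lock, 0);
			SpinLockRelease(&lock->flex.mutex);

			/* Sleep until a releaser wakes us, then retry from the top. */
			extraWaits += FlexLockWait(id, 0);
			retry = true;
		}

		FlexLockRemember(id);

		/* Re-credit any wakeups that were really meant for other facilities. */
		while (extraWaits-- > 0)
			PGSemaphoreUnlock(&MyProc->sem);
	}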
#define LockHashPartition(hashcode) \
((hashcode) % NUM_LOCK_PARTITIONS)
#define LockHashPartitionLock(hashcode) \
- ((LWLockId) (FirstLockMgrLock + LockHashPartition(hashcode)))
+ ((FlexLockId) (FirstLockMgrLock + LockHashPartition(hashcode)))
/*
#ifndef LWLOCK_H
#define LWLOCK_H
-/*
- * It's a bit odd to declare NUM_BUFFER_PARTITIONS and NUM_LOCK_PARTITIONS
- * here, but we need them to set up enum LWLockId correctly, and having
- * this file include lock.h or bufmgr.h would be backwards.
- */
-
-/* Number of partitions of the shared buffer mapping hashtable */
-#define NUM_BUFFER_PARTITIONS 16
-
-/* Number of partitions the shared lock tables are divided into */
-#define LOG2_NUM_LOCK_PARTITIONS 4
-#define NUM_LOCK_PARTITIONS (1 << LOG2_NUM_LOCK_PARTITIONS)
-
-/* Number of partitions the shared predicate lock tables are divided into */
-#define LOG2_NUM_PREDICATELOCK_PARTITIONS 4
-#define NUM_PREDICATELOCK_PARTITIONS (1 << LOG2_NUM_PREDICATELOCK_PARTITIONS)
-
-/*
- * We have a number of predefined LWLocks, plus a bunch of LWLocks that are
- * dynamically assigned (e.g., for shared buffers). The LWLock structures
- * live in shared memory (since they contain shared data) and are identified
- * by values of this enumerated type. We abuse the notion of an enum somewhat
- * by allowing values not listed in the enum declaration to be assigned.
- * The extra value MaxDynamicLWLock is there to keep the compiler from
- * deciding that the enum can be represented as char or short ...
- *
- * If you remove a lock, please replace it with a placeholder. This retains
- * the lock numbering, which is helpful for DTrace and other external
- * debugging scripts.
- */
-typedef enum LWLockId
-{
- BufFreelistLock,
- ShmemIndexLock,
- OidGenLock,
- XidGenLock,
- ProcArrayLock,
- SInvalReadLock,
- SInvalWriteLock,
- WALInsertLock,
- WALWriteLock,
- ControlFileLock,
- CheckpointLock,
- CLogControlLock,
- SubtransControlLock,
- MultiXactGenLock,
- MultiXactOffsetControlLock,
- MultiXactMemberControlLock,
- RelCacheInitLock,
- BgWriterCommLock,
- TwoPhaseStateLock,
- TablespaceCreateLock,
- BtreeVacuumLock,
- AddinShmemInitLock,
- AutovacuumLock,
- AutovacuumScheduleLock,
- SyncScanLock,
- RelationMappingLock,
- AsyncCtlLock,
- AsyncQueueLock,
- SerializableXactHashLock,
- SerializableFinishedListLock,
- SerializablePredicateLockListLock,
- OldSerXidLock,
- SyncRepLock,
- /* Individual lock IDs end here */
- FirstBufMappingLock,
- FirstLockMgrLock = FirstBufMappingLock + NUM_BUFFER_PARTITIONS,
- FirstPredicateLockMgrLock = FirstLockMgrLock + NUM_LOCK_PARTITIONS,
-
- /* must be last except for MaxDynamicLWLock: */
- NumFixedLWLocks = FirstPredicateLockMgrLock + NUM_PREDICATELOCK_PARTITIONS,
-
- MaxDynamicLWLock = 1000000000
-} LWLockId;
-
+#include "storage/flexlock.h"
typedef enum LWLockMode
{
LW_SHARED
} LWLockMode;
-
-#ifdef LOCK_DEBUG
-extern bool Trace_lwlocks;
-#endif
-
-extern LWLockId LWLockAssign(void);
-extern void LWLockAcquire(LWLockId lockid, LWLockMode mode);
-extern bool LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode);
-extern void LWLockRelease(LWLockId lockid);
-extern void LWLockReleaseAll(void);
-extern bool LWLockHeldByMe(LWLockId lockid);
-
-extern int NumLWLocks(void);
-extern Size LWLockShmemSize(void);
-extern void CreateLWLocks(void);
-
-extern void RequestAddinLWLocks(int n);
+extern FlexLockId LWLockAssign(void);
+extern void LWLockAcquire(FlexLockId lockid, LWLockMode mode);
+extern bool LWLockConditionalAcquire(FlexLockId lockid, LWLockMode mode);
+extern void LWLockRelease(FlexLockId lockid);
+extern bool LWLockHeldByMe(FlexLockId lockid);
#endif /* LWLOCK_H */
*/
bool recoveryConflictPending;
- /* Info about LWLock the process is currently waiting for, if any. */
- bool lwWaiting; /* true if waiting for an LW lock */
- bool lwExclusive; /* true if waiting for exclusive access */
- struct PGPROC *lwWaitLink; /* next waiter for same LW lock */
+ /* Info about FlexLock the process is currently waiting for, if any. */
+ int flWaitResult; /* result of wait, or 0 if still waiting */
+ int flWaitMode; /* lock mode sought */
+ struct PGPROC *flWaitLink; /* next waiter for same FlexLock */
/* Info about lock the process is currently waiting for, if any. */
/* waitLock and waitProcLock are NULL if not currently waiting. */
struct XidCache subxids; /* cache for subtransaction XIDs */
/* Per-backend LWLock. Protects fields below. */
- LWLockId backendLock; /* protects the fields below */
+ FlexLockId backendLock; /* protects the fields below */
/* Lock manager data, recording fast-path locks taken by this backend. */
uint64 fpLockBits; /* lock modes held for each fast-path slot */