Introduce FlexLocks.
author	Robert Haas <rhaas@postgresql.org>
	Tue, 8 Nov 2011 18:53:21 +0000 (13:53 -0500)
committer	Robert Haas <rhaas@postgresql.org>
	Fri, 2 Dec 2011 11:35:30 +0000 (06:35 -0500)
This commit separates the LWLock code into an upper level and a lower
level.  The lower level, called FlexLocks, provides some generic
infrastructure for lock waits and error handling, and allows multiple
types of locks to be implemented on top of the basic infrastructure.
Currently, the only client of this infrastructure is the existing
LWLock system, from which most of the FlexLock code was extracted.
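
To make the layering concrete, here is a sketch of the two levels as they appear in this patch. The LWLock definition is taken verbatim from the lwlock.c diff below; the FlexLock fields are inferred from the code in flexlock.c, since flexlock_internals.h itself is not reproduced on this page:

    /* Lower level: state common to every type of FlexLock. */
    typedef struct FlexLock
    {
        char        locktype;   /* e.g. FLEXLOCK_TYPE_LWLOCK */
        slock_t     mutex;      /* protects the FlexLock and its wait queue */
        bool        releaseOK;  /* T if ok to release waiters */
        PGPROC     *head;       /* head of list of waiting PGPROCs */
        PGPROC     *tail;       /* tail of list of waiting PGPROCs */
    } FlexLock;

    /* Upper level: an LWLock is just a FlexLock plus LWLock-specific state. */
    typedef struct LWLock
    {
        FlexLock    flex;       /* common FlexLock infrastructure */
        char        exclusive;  /* # of exclusive holders (0 or 1) */
        int         shared;     /* # of shared holders (0..MaxBackends) */
    } LWLock;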

30 files changed:
contrib/pg_stat_statements/pg_stat_statements.c
doc/src/sgml/config.sgml
doc/src/sgml/monitoring.sgml
src/backend/access/transam/slru.c
src/backend/access/transam/twophase.c
src/backend/access/transam/xact.c
src/backend/bootstrap/bootstrap.c
src/backend/postmaster/bgwriter.c
src/backend/postmaster/checkpointer.c
src/backend/postmaster/postmaster.c
src/backend/postmaster/walwriter.c
src/backend/storage/buffer/bufmgr.c
src/backend/storage/ipc/ipci.c
src/backend/storage/lmgr/Makefile
src/backend/storage/lmgr/flexlock.c [new file with mode: 0644]
src/backend/storage/lmgr/lock.c
src/backend/storage/lmgr/lwlock.c
src/backend/storage/lmgr/predicate.c
src/backend/storage/lmgr/proc.c
src/backend/utils/misc/check_guc
src/backend/utils/misc/guc.c
src/backend/utils/probes.d
src/include/access/slru.h
src/include/pg_config_manual.h
src/include/storage/buf_internals.h
src/include/storage/flexlock.h [new file with mode: 0644]
src/include/storage/flexlock_internals.h [new file with mode: 0644]
src/include/storage/lock.h
src/include/storage/lwlock.h
src/include/storage/proc.h

index 8dc3054e372ac12d4bf4216aa86b65a188b44cbc..6167e364cde69f9f16922c26e9b0d864580ccde6 100644 (file)
@@ -105,7 +105,7 @@ typedef struct pgssEntry
  */
 typedef struct pgssSharedState
 {
-       LWLockId        lock;                   /* protects hashtable search/modification */
+       FlexLockId      lock;                   /* protects hashtable search/modification */
        int                     query_size;             /* max query length in bytes */
 } pgssSharedState;
 
@@ -260,7 +260,7 @@ _PG_init(void)
         * resources in pgss_shmem_startup().
         */
        RequestAddinShmemSpace(pgss_memsize());
-       RequestAddinLWLocks(1);
+       RequestAddinFlexLocks(1);
 
        /*
         * Install hooks.
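
For a loadable module, the usage pattern is unchanged apart from the renamed request function and the ID type. A condensed sketch of what pg_stat_statements now does (abbreviated from this diff plus the extension's shmem startup hook, which is not shown on this page):

    /* In _PG_init(), while shared_preload_libraries is being processed: */
    RequestAddinShmemSpace(pgss_memsize());
    RequestAddinFlexLocks(1);       /* formerly RequestAddinLWLocks(1) */

    /* Later, in pgss_shmem_startup(), claim the reserved lock: */
    pgss->lock = LWLockAssign();    /* now returns a FlexLockId */
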
index d1e628fefcab1ff77931b1e291e7258bcba5d3b6..8517b3633186a980264f36f17ece7d5326f87a90 100644 (file)
@@ -6199,14 +6199,14 @@ LOG:  CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
      </varlistentry>
 
      <varlistentry>
-      <term><varname>trace_lwlocks</varname> (<type>boolean</type>)</term>
+      <term><varname>trace_flexlocks</varname> (<type>boolean</type>)</term>
       <indexterm>
-       <primary><varname>trace_lwlocks</> configuration parameter</primary>
+       <primary><varname>trace_flexlocks</> configuration parameter</primary>
       </indexterm>
       <listitem>
        <para>
-        If on, emit information about lightweight lock usage.  Lightweight
-        locks are intended primarily to provide mutual exclusion of access
+        If on, emit information about FlexLock usage.  FlexLocks
+        are intended primarily to provide mutual exclusion of access
         to shared-memory data structures.
        </para>
        <para>
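
As with trace_lwlocks before it, this parameter is only compiled in when the server is built with LOCK_DEBUG defined (note the #ifdef around Trace_flexlocks in flexlock.c below). In such a build it can be switched on directly:

    # postgresql.conf, in a server built with LOCK_DEBUG defined
    trace_flexlocks = on
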
index b9dc1d20013ae354f528b64bd8b225693865da64..98ed0d37ecece2e8a5c600435cfba095a9435396 100644 (file)
@@ -1724,49 +1724,49 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS procpid,
       or kilobytes of memory used for an internal sort.</entry>
     </row>
     <row>
-     <entry>lwlock-acquire</entry>
-     <entry>(LWLockId, LWLockMode)</entry>
-     <entry>Probe that fires when an LWLock has been acquired.
-      arg0 is the LWLock's ID.
-      arg1 is the requested lock mode, either exclusive or shared.</entry>
+     <entry>flexlock-acquire</entry>
+     <entry>(FlexLockId, FlexLockMode)</entry>
+     <entry>Probe that fires when a FlexLock has been acquired.
+      arg0 is the FlexLock's ID.
+      arg1 is the requested lock mode.</entry>
     </row>
     <row>
-     <entry>lwlock-release</entry>
-     <entry>(LWLockId)</entry>
-     <entry>Probe that fires when an LWLock has been released (but note
+     <entry>flexlock-release</entry>
+     <entry>(FlexLockId)</entry>
+     <entry>Probe that fires when a FlexLock has been released (but note
       that any released waiters have not yet been awakened).
-      arg0 is the LWLock's ID.</entry>
+      arg0 is the FlexLock's ID.</entry>
     </row>
     <row>
-     <entry>lwlock-wait-start</entry>
-     <entry>(LWLockId, LWLockMode)</entry>
-     <entry>Probe that fires when an LWLock was not immediately available and
+     <entry>flexlock-wait-start</entry>
+     <entry>(FlexLockId, FlexLockMode)</entry>
+     <entry>Probe that fires when a FlexLock was not immediately available and
       a server process has begun to wait for the lock to become available.
-      arg0 is the LWLock's ID.
+      arg0 is the FlexLock's ID.
       arg1 is the requested lock mode, either exclusive or shared.</entry>
     </row>
     <row>
-     <entry>lwlock-wait-done</entry>
-     <entry>(LWLockId, LWLockMode)</entry>
+     <entry>flexlock-wait-done</entry>
+     <entry>(FlexLockId, FlexLockMode)</entry>
      <entry>Probe that fires when a server process has been released from its
-      wait for an LWLock (it does not actually have the lock yet).
-      arg0 is the LWLock's ID.
+      wait for a FlexLock (it does not actually have the lock yet).
+      arg0 is the FlexLock's ID.
       arg1 is the requested lock mode, either exclusive or shared.</entry>
     </row>
     <row>
-     <entry>lwlock-condacquire</entry>
-     <entry>(LWLockId, LWLockMode)</entry>
-     <entry>Probe that fires when an LWLock was successfully acquired when the
-      caller specified no waiting.
-      arg0 is the LWLock's ID.
+     <entry>flexlock-condacquire</entry>
+     <entry>(FlexLockId, FlexLockMode)</entry>
+     <entry>Probe that fires when a FlexLock was successfully acquired when
+      the caller specified no waiting.
+      arg0 is the FlexLock's ID.
       arg1 is the requested lock mode, either exclusive or shared.</entry>
     </row>
     <row>
-     <entry>lwlock-condacquire-fail</entry>
-     <entry>(LWLockId, LWLockMode)</entry>
-     <entry>Probe that fires when an LWLock was not successfully acquired when
-      the caller specified no waiting.
-      arg0 is the LWLock's ID.
+     <entry>flexlock-condacquire-fail</entry>
+     <entry>(FlexLockId, FlexLockMode)</entry>
+     <entry>Probe that fires when a FlexLock was not successfully acquired
+      when the caller specified no waiting.
+      arg0 is the FlexLock's ID.
       arg1 is the requested lock mode, either exclusive or shared.</entry>
     </row>
     <row>
@@ -1813,11 +1813,11 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS procpid,
      <entry>unsigned int</entry>
     </row>
     <row>
-     <entry>LWLockId</entry>
+     <entry>FlexLockId</entry>
      <entry>int</entry>
     </row>
     <row>
-     <entry>LWLockMode</entry>
+     <entry>FlexLockMode</entry>
      <entry>int</entry>
     </row>
     <row>
index f7caa341e164c9edccb509655c19281303a0d07e..09d58626ba4b78683b0593af2a822178cfce49f6 100644 (file)
@@ -151,7 +151,7 @@ SimpleLruShmemSize(int nslots, int nlsns)
        sz += MAXALIGN(nslots * sizeof(bool));          /* page_dirty[] */
        sz += MAXALIGN(nslots * sizeof(int));           /* page_number[] */
        sz += MAXALIGN(nslots * sizeof(int));           /* page_lru_count[] */
-       sz += MAXALIGN(nslots * sizeof(LWLockId));      /* buffer_locks[] */
+       sz += MAXALIGN(nslots * sizeof(FlexLockId));            /* buffer_locks[] */
 
        if (nlsns > 0)
                sz += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr));    /* group_lsn[] */
@@ -161,7 +161,7 @@ SimpleLruShmemSize(int nslots, int nlsns)
 
 void
 SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
-                         LWLockId ctllock, const char *subdir)
+                         FlexLockId ctllock, const char *subdir)
 {
        SlruShared      shared;
        bool            found;
@@ -202,8 +202,8 @@ SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
                offset += MAXALIGN(nslots * sizeof(int));
                shared->page_lru_count = (int *) (ptr + offset);
                offset += MAXALIGN(nslots * sizeof(int));
-               shared->buffer_locks = (LWLockId *) (ptr + offset);
-               offset += MAXALIGN(nslots * sizeof(LWLockId));
+               shared->buffer_locks = (FlexLockId *) (ptr + offset);
+               offset += MAXALIGN(nslots * sizeof(FlexLockId));
 
                if (nlsns > 0)
                {
index d2fecb1ecb9171fc2c6dd2887d846c8a16779bb0..943929b4198d12b81aa79b64f6be86ea8aafefaa 100644 (file)
@@ -326,9 +326,9 @@ MarkAsPreparing(TransactionId xid, const char *gid,
        proc->backendId = InvalidBackendId;
        proc->databaseId = databaseid;
        proc->roleId = owner;
-       proc->lwWaiting = false;
-       proc->lwExclusive = false;
-       proc->lwWaitLink = NULL;
+       proc->flWaitResult = 0;
+       proc->flWaitMode = 0;
+       proc->flWaitLink = NULL;
        proc->waitLock = NULL;
        proc->waitProcLock = NULL;
        for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
index c383011b5f6538fa2b90fa5f7778da7ff59fa679..0da2ae5a5e3cdea3b8a84904e7bff06426a4f291 100644 (file)
@@ -2248,7 +2248,7 @@ AbortTransaction(void)
         * Releasing LW locks is critical since we might try to grab them again
         * while cleaning up!
         */
-       LWLockReleaseAll();
+       FlexLockReleaseAll();
 
        /* Clean up buffer I/O and buffer context locks, too */
        AbortBufferIO();
@@ -4138,7 +4138,7 @@ AbortSubTransaction(void)
         * FIXME This may be incorrect --- Are there some locks we should keep?
         * Buffer locks, for example?  I don't think so but I'm not sure.
         */
-       LWLockReleaseAll();
+       FlexLockReleaseAll();
 
        AbortBufferIO();
        UnlockBuffers();
index 6bf2421f65fdc4e448da76879b2f55f1c61113ea..9ceee9113aba0f43291eb74f650889b83ecc4e7f 100644 (file)
@@ -562,13 +562,13 @@ bootstrap_signals(void)
  * Begin shutdown of an auxiliary process.     This is approximately the equivalent
  * of ShutdownPostgres() in postinit.c.  We can't run transactions in an
  * auxiliary process, so most of the work of AbortTransaction() is not needed,
- * but we do need to make sure we've released any LWLocks we are holding.
+ * but we do need to make sure we've released any flex locks we are holding.
  * (This is only critical during an error exit.)
  */
 static void
 ShutdownAuxiliaryProcess(int code, Datum arg)
 {
-       LWLockReleaseAll();
+       FlexLockReleaseAll();
 }
 
 /* ----------------------------------------------------------------
index cacedab202561edce542e1ffa8c9993a55887617..f33f573601d3b4bc5b279738c5af0cc066ff30a8 100644 (file)
@@ -176,9 +176,10 @@ BackgroundWriterMain(void)
                /*
                 * These operations are really just a minimal subset of
                 * AbortTransaction().  We don't have very many resources to worry
-                * about in bgwriter, but we do have LWLocks, buffers, and temp files.
+                * about in bgwriter, but we do have flex locks, buffers, and temp
+                * files.
                 */
-               LWLockReleaseAll();
+               FlexLockReleaseAll();
                AbortBufferIO();
                UnlockBuffers();
                /* buffer pins are released here: */
index e9ae1e8ca0b1e7b4abc7a4473c77e04d7a763185..49f07a768cca80953e1553ee8531143b4a53618c 100644 (file)
@@ -281,9 +281,10 @@ CheckpointerMain(void)
                /*
                 * These operations are really just a minimal subset of
                 * AbortTransaction().  We don't have very many resources to worry
-                * about in checkpointer, but we do have LWLocks, buffers, and temp files.
+                * about in checkpointer, but we do have flex locks, buffers, and temp
+                * files.
                 */
-               LWLockReleaseAll();
+               FlexLockReleaseAll();
                AbortBufferIO();
                UnlockBuffers();
                /* buffer pins are released here: */
index 963189d4150799c63f24a7f0cb0cdb24cf91943a..a07a4c994a43f1d982792f673ad62da06cbd8b9a 100644 (file)
@@ -404,8 +404,6 @@ typedef struct
 typedef int InheritableSocket;
 #endif
 
-typedef struct LWLock LWLock;  /* ugly kluge */
-
 /*
  * Structure contains all variables passed to exec:ed backends
  */
@@ -426,7 +424,7 @@ typedef struct
        slock_t    *ShmemLock;
        VariableCache ShmemVariableCache;
        Backend    *ShmemBackendArray;
-       LWLock     *LWLockArray;
+       FlexLock   *FlexLockArray;
        slock_t    *ProcStructLock;
        PROC_HDR   *ProcGlobal;
        PGPROC     *AuxiliaryProcs;
@@ -4676,7 +4674,6 @@ MaxLivePostmasterChildren(void)
  * functions
  */
 extern slock_t *ShmemLock;
-extern LWLock *LWLockArray;
 extern slock_t *ProcStructLock;
 extern PGPROC *AuxiliaryProcs;
 extern PMSignalData *PMSignalState;
@@ -4721,7 +4718,7 @@ save_backend_variables(BackendParameters *param, Port *port,
        param->ShmemVariableCache = ShmemVariableCache;
        param->ShmemBackendArray = ShmemBackendArray;
 
-       param->LWLockArray = LWLockArray;
+       param->FlexLockArray = FlexLockArray;
        param->ProcStructLock = ProcStructLock;
        param->ProcGlobal = ProcGlobal;
        param->AuxiliaryProcs = AuxiliaryProcs;
@@ -4945,7 +4942,7 @@ restore_backend_variables(BackendParameters *param, Port *port)
        ShmemVariableCache = param->ShmemVariableCache;
        ShmemBackendArray = param->ShmemBackendArray;
 
-       LWLockArray = param->LWLockArray;
+       FlexLockArray = param->FlexLockArray;
        ProcStructLock = param->ProcStructLock;
        ProcGlobal = param->ProcGlobal;
        AuxiliaryProcs = param->AuxiliaryProcs;
index 157728e20e7b3eb80a71bb07d701ff99f5d71832..587443d3a7101cc7cda0c0352a7b5c3bf2d02756 100644 (file)
@@ -167,9 +167,9 @@ WalWriterMain(void)
                /*
                 * These operations are really just a minimal subset of
                 * AbortTransaction().  We don't have very many resources to worry
-                * about in walwriter, but we do have LWLocks, and perhaps buffers?
+                * about in walwriter, but we do have flex locks, and perhaps buffers?
                 */
-               LWLockReleaseAll();
+               FlexLockReleaseAll();
                AbortBufferIO();
                UnlockBuffers();
                /* buffer pins are released here: */
index 71fe8c665ecd4511d5b03dda7f70e5aea7ae5703..4c4959c915c2a2c3f18f9b85e5ff15fd32a840d8 100644 (file)
@@ -141,7 +141,7 @@ PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
        {
                BufferTag       newTag;         /* identity of requested block */
                uint32          newHash;        /* hash value for newTag */
-               LWLockId        newPartitionLock;       /* buffer partition lock for it */
+               FlexLockId      newPartitionLock;       /* buffer partition lock for it */
                int                     buf_id;
 
                /* create a tag so we can lookup the buffer */
@@ -514,10 +514,10 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 {
        BufferTag       newTag;                 /* identity of requested block */
        uint32          newHash;                /* hash value for newTag */
-       LWLockId        newPartitionLock;               /* buffer partition lock for it */
+       FlexLockId      newPartitionLock;               /* buffer partition lock for it */
        BufferTag       oldTag;                 /* previous identity of selected buffer */
        uint32          oldHash;                /* hash value for oldTag */
-       LWLockId        oldPartitionLock;               /* buffer partition lock for it */
+       FlexLockId      oldPartitionLock;               /* buffer partition lock for it */
        BufFlags        oldFlags;
        int                     buf_id;
        volatile BufferDesc *buf;
@@ -857,7 +857,7 @@ InvalidateBuffer(volatile BufferDesc *buf)
 {
        BufferTag       oldTag;
        uint32          oldHash;                /* hash value for oldTag */
-       LWLockId        oldPartitionLock;               /* buffer partition lock for it */
+       FlexLockId      oldPartitionLock;               /* buffer partition lock for it */
        BufFlags        oldFlags;
 
        /* Save the original buffer tag before dropping the spinlock */
index bb8b832065b3a2de81bcc865fa96ba077d425421..a2c570ad9faf70c77ff5550d240f526e7a2f894a 100644 (file)
@@ -113,7 +113,7 @@ CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
                size = add_size(size, SUBTRANSShmemSize());
                size = add_size(size, TwoPhaseShmemSize());
                size = add_size(size, MultiXactShmemSize());
-               size = add_size(size, LWLockShmemSize());
+               size = add_size(size, FlexLockShmemSize());
                size = add_size(size, ProcArrayShmemSize());
                size = add_size(size, BackendStatusShmemSize());
                size = add_size(size, SInvalShmemSize());
@@ -179,7 +179,7 @@ CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
         * needed for InitShmemIndex.
         */
        if (!IsUnderPostmaster)
-               CreateLWLocks();
+               CreateFlexLocks();
 
        /*
         * Set up shmem.c index hashtable
index e12a8549f741020944ba81b583cd7ab66e3f0a6e..3730e51c7e42ad10bf19a561ffe9681be9a436d1 100644 (file)
@@ -12,7 +12,8 @@ subdir = src/backend/storage/lmgr
 top_builddir = ../../../..
 include $(top_builddir)/src/Makefile.global
 
-OBJS = lmgr.o lock.o proc.o deadlock.o lwlock.o spin.o s_lock.o predicate.o
+OBJS = flexlock.o lmgr.o lock.o proc.o deadlock.o lwlock.o spin.o s_lock.o \
+       predicate.o
 
 include $(top_srcdir)/src/backend/common.mk
 
diff --git a/src/backend/storage/lmgr/flexlock.c b/src/backend/storage/lmgr/flexlock.c
new file mode 100644 (file)
index 0000000..1bd3dc7
--- /dev/null
@@ -0,0 +1,351 @@
+/*-------------------------------------------------------------------------
+ *
+ * flexlock.c
+ *       Low-level routines for managing flex locks.
+ *
+ * Flex locks are intended primarily to provide mutual exclusion of access
+ * to shared-memory data structures.  Most, but not all, flex locks are
+ * lightweight locks (LWLocks).  This file contains support routines that
+ * are used for all types of flex locks, including lwlocks.  User-level
+ * locking should be done with the full lock manager --- which depends on
+ * LWLocks to protect its shared state.
+ *
+ * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *       src/backend/storage/lmgr/flexlock.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "miscadmin.h"
+#include "pg_trace.h"
+#include "access/clog.h"
+#include "access/multixact.h"
+#include "access/subtrans.h"
+#include "commands/async.h"
+#include "storage/flexlock.h"
+#include "storage/flexlock_internals.h"
+#include "storage/predicate.h"
+#include "storage/spin.h"
+
+/*
+ * We use this structure to keep track of flex locks held, for release
+ * during error recovery.  The maximum size could be determined at runtime
+ * if necessary, but it seems unlikely that more than a few locks could
+ * ever be held simultaneously.
+ */
+#define MAX_SIMUL_FLEXLOCKS    100
+
+static int     num_held_flexlocks = 0;
+static FlexLockId held_flexlocks[MAX_SIMUL_FLEXLOCKS];
+
+static int     lock_addin_request = 0;
+static bool lock_addin_request_allowed = true;
+
+#ifdef LOCK_DEBUG
+bool           Trace_flexlocks = false;
+#endif
+
+/*
+ * This points to the array of FlexLocks in shared memory.  Backends inherit
+ * the pointer by fork from the postmaster (except in the EXEC_BACKEND case,
+ * where we have special measures to pass it down).
+ */
+FlexLockPadded *FlexLockArray = NULL;
+
+/* We use the ShmemLock spinlock to protect FlexLockAssign */
+extern slock_t *ShmemLock;
+
+static void FlexLockInit(FlexLock *flex, char locktype);
+
+/*
+ * Compute number of FlexLocks to allocate.
+ */
+int
+NumFlexLocks(void)
+{
+       int                     numLocks;
+
+       /*
+        * Possibly this logic should be spread out among the affected modules,
+        * the same way that shmem space estimation is done.  But for now, there
+        * are few enough users of FlexLocks that we can get away with just keeping
+        * the knowledge here.
+        */
+
+       /* Predefined FlexLocks */
+       numLocks = (int) NumFixedFlexLocks;
+
+       /* bufmgr.c needs two for each shared buffer */
+       numLocks += 2 * NBuffers;
+
+       /* proc.c needs one for each backend or auxiliary process */
+       numLocks += MaxBackends + NUM_AUXILIARY_PROCS;
+
+       /* clog.c needs one per CLOG buffer */
+       numLocks += NUM_CLOG_BUFFERS;
+
+       /* subtrans.c needs one per SubTrans buffer */
+       numLocks += NUM_SUBTRANS_BUFFERS;
+
+       /* multixact.c needs two SLRU areas */
+       numLocks += NUM_MXACTOFFSET_BUFFERS + NUM_MXACTMEMBER_BUFFERS;
+
+       /* async.c needs one per Async buffer */
+       numLocks += NUM_ASYNC_BUFFERS;
+
+       /* predicate.c needs one per old serializable xid buffer */
+       numLocks += NUM_OLDSERXID_BUFFERS;
+
+       /*
+        * Add any requested by loadable modules; for backwards-compatibility
+        * reasons, allocate at least NUM_USER_DEFINED_FLEXLOCKS of them even if
+        * there are no explicit requests.
+        */
+       lock_addin_request_allowed = false;
+       numLocks += Max(lock_addin_request, NUM_USER_DEFINED_FLEXLOCKS);
+
+       return numLocks;
+}
+
+
+/*
+ * RequestAddinFlexLocks
+ *             Request that extra FlexLocks be allocated for use by
+ *             a loadable module.
+ *
+ * This is only useful if called from the _PG_init hook of a library that
+ * is loaded into the postmaster via shared_preload_libraries. Once
+ * shared memory has been allocated, calls will be ignored.  (We could
+ * raise an error, but it seems better to make it a no-op, so that
+ * libraries containing such calls can be reloaded if needed.)
+ */
+void
+RequestAddinFlexLocks(int n)
+{
+       if (IsUnderPostmaster || !lock_addin_request_allowed)
+               return;                                 /* too late */
+       lock_addin_request += n;
+}
+
+
+/*
+ * Compute shmem space needed for FlexLocks.
+ */
+Size
+FlexLockShmemSize(void)
+{
+       Size            size;
+       int                     numLocks = NumFlexLocks();
+
+       /* Space for the FlexLock array. */
+       size = mul_size(numLocks, FLEX_LOCK_BYTES);
+
+       /* Space for dynamic allocation counter, plus room for alignment. */
+       size = add_size(size, 2 * sizeof(int) + FLEX_LOCK_BYTES);
+
+       return size;
+}
+
+/*
+ * Allocate shmem space for FlexLocks and initialize the locks.
+ */
+void
+CreateFlexLocks(void)
+{
+       int                     numLocks = NumFlexLocks();
+       Size            spaceLocks = FlexLockShmemSize();
+       FlexLockPadded *lock;
+       int                *FlexLockCounter;
+       char       *ptr;
+       int                     id;
+
+       /* Allocate and zero space */
+       ptr = (char *) ShmemAlloc(spaceLocks);
+       memset(ptr, 0, spaceLocks);
+
+       /* Leave room for dynamic allocation counter */
+       ptr += 2 * sizeof(int);
+
+       /* Ensure desired alignment of FlexLock array */
+       ptr += FLEX_LOCK_BYTES - ((uintptr_t) ptr) % FLEX_LOCK_BYTES;
+
+       FlexLockArray = (FlexLockPadded *) ptr;
+
+       /* All of the "fixed" FlexLocks are LWLocks. */
+       for (id = 0, lock = FlexLockArray; id < NumFixedFlexLocks; id++, lock++)
+               FlexLockInit(&lock->flex, FLEXLOCK_TYPE_LWLOCK);
+
+       /*
+        * Initialize the dynamic-allocation counter, which is stored just before
+        * the first FlexLock.
+        */
+       FlexLockCounter = (int *) ((char *) FlexLockArray - 2 * sizeof(int));
+       FlexLockCounter[0] = (int) NumFixedFlexLocks;
+       FlexLockCounter[1] = numLocks;
+}
+
+/*
+ * FlexLockAssign - assign a dynamically-allocated FlexLock number
+ *
+ * We interlock this using the same spinlock that is used to protect
+ * ShmemAlloc().  Interlocking is not really necessary during postmaster
+ * startup, but it is needed if any user-defined code tries to allocate
+ * LWLocks after startup.
+ */
+FlexLockId
+FlexLockAssign(char locktype)
+{
+       FlexLockId      result;
+
+       /* use volatile pointer to prevent code rearrangement */
+       volatile int *FlexLockCounter;
+
+       FlexLockCounter = (int *) ((char *) FlexLockArray - 2 * sizeof(int));
+       SpinLockAcquire(ShmemLock);
+       if (FlexLockCounter[0] >= FlexLockCounter[1])
+       {
+               SpinLockRelease(ShmemLock);
+               elog(ERROR, "no more FlexLockIds available");
+       }
+       result = (FlexLockId) (FlexLockCounter[0]++);
+       SpinLockRelease(ShmemLock);
+
+       FlexLockInit(&FlexLockArray[result].flex, locktype);
+
+       return result;
+}
+
+/*
+ * Initialize a FlexLock.
+ */
+static void
+FlexLockInit(FlexLock *flex, char locktype)
+{
+       SpinLockInit(&flex->mutex);
+       flex->releaseOK = true;
+       flex->locktype = locktype;
+       /*
+        * We might need to think a little harder about what should happen here
+        * if some future type of FlexLock requires more initialization than this.
+        * For now, this will suffice.
+        */
+}
+
+/*
+ * Add lock to list of locks held by this backend.
+ */
+void
+FlexLockRemember(FlexLockId id)
+{
+       if (num_held_flexlocks >= MAX_SIMUL_FLEXLOCKS)
+               elog(PANIC, "too many FlexLocks taken");
+       held_flexlocks[num_held_flexlocks++] = id;
+}
+
+/*
+ * Remove lock from list of locks held.  Usually, but not always, it will
+ * be the latest-acquired lock; so search array backwards.
+ */
+void
+FlexLockForget(FlexLockId id)
+{
+       int                     i;
+
+       for (i = num_held_flexlocks; --i >= 0;)
+       {
+               if (id == held_flexlocks[i])
+                       break;
+       }
+       if (i < 0)
+               elog(ERROR, "lock %d is not held", (int) id);
+       num_held_flexlocks--;
+       for (; i < num_held_flexlocks; i++)
+               held_flexlocks[i] = held_flexlocks[i + 1];
+}
+
+/*
+ * FlexLockWait - wait until awakened
+ *
+ * Since we share the process wait semaphore with the regular lock manager
+ * and ProcWaitForSignal, and we may need to acquire a FlexLock while one of
+ * those is pending, it is possible that we get awakened for a reason other
+ * than being signaled by a FlexLock release.  If so, loop back and wait again.
+ *
+ * Returns the number of "extra" waits absorbed so that, once we've gotten the
+ * FlexLock, we can re-increment the sema by the number of additional signals
+ * received, so that the lock manager or signal manager will see the received
+ * signal when it next waits.
+ */
+int
+FlexLockWait(FlexLockId id, int mode)
+{
+       int             extraWaits = 0;
+
+       FlexLockDebug("LWLockAcquire", id, "waiting");
+       TRACE_POSTGRESQL_FLEXLOCK_WAIT_START(id, mode);
+
+       for (;;)
+       {
+               /* "false" means cannot accept cancel/die interrupt here. */
+               PGSemaphoreLock(&MyProc->sem, false);
+               /*
+                * FLEXTODO: I think we should return this, instead of ignoring it.
+                * Any non-zero value means "wake up".
+                */
+               if (MyProc->flWaitResult)
+                       break;
+               extraWaits++;
+       }
+
+       TRACE_POSTGRESQL_FLEXLOCK_WAIT_DONE(id, mode);
+       FlexLockDebug("LWLockAcquire", id, "awakened");
+
+       return extraWaits;
+}
+
+/*
+ * FlexLockReleaseAll - release all currently-held locks
+ *
+ * Used to clean up after ereport(ERROR). An important difference between this
+ * function and retail LWLockRelease calls is that InterruptHoldoffCount is
+ * unchanged by this operation.  This is necessary since InterruptHoldoffCount
+ * has been set to an appropriate level earlier in error recovery. We could
+ * decrement it below zero if we allow it to drop for each released lock!
+ */
+void
+FlexLockReleaseAll(void)
+{
+       while (num_held_flexlocks > 0)
+       {
+               HOLD_INTERRUPTS();              /* match the upcoming RESUME_INTERRUPTS */
+
+               /*
+                * FLEXTODO: When we have multiple types of flex locks, this will
+                * need to call the appropriate release function for each lock type.
+                */
+               LWLockRelease(held_flexlocks[num_held_flexlocks - 1]);
+       }
+}
+
+/*
+ * FlexLockHeldByMe - test whether my process currently holds a lock
+ *
+ * This is meant as debug support only.  We do not consider the lock mode.
+ */
+bool
+FlexLockHeldByMe(FlexLockId id)
+{
+       int                     i;
+
+       for (i = 0; i < num_held_flexlocks; i++)
+       {
+               if (held_flexlocks[i] == id)
+                       return true;
+       }
+       return false;
+}
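
The point of keeping flexlock.c generic is visible in FlexLockAssign and FlexLockInit: a future lock type only needs to embed FlexLock as its first member, fit within FLEX_LOCK_BYTES, and register a distinct locktype byte. A purely hypothetical sketch of such a client (FLEXLOCK_TYPE_MYLOCK and MyLock are invented names; as the commit message says, no second lock type exists yet):

    #define FLEXLOCK_TYPE_MYLOCK    'm'     /* hypothetical type tag */

    typedef struct MyLock
    {
        FlexLock    flex;       /* must come first: shared infrastructure */
        int         granted;    /* whatever type-specific state is needed */
    } MyLock;

    FlexLockId
    MyLockAssign(void)
    {
        /*
         * FlexLockAssign initializes the common fields and tags the type;
         * the type's acquire/release code would then build on
         * FlexLockRemember, FlexLockForget, and FlexLockWait from this file.
         */
        return FlexLockAssign(FLEXLOCK_TYPE_MYLOCK);
    }
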
index 3ba4671ac315f5a767f329b035c2a60bde404469..f594983a5c50b81d31825b5b780d45d719f53a44 100644 (file)
@@ -591,7 +591,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
        bool            found;
        ResourceOwner owner;
        uint32          hashcode;
-       LWLockId        partitionLock;
+       FlexLockId      partitionLock;
        int                     status;
        bool            log_lock = false;
 
@@ -1546,7 +1546,7 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
        LOCALLOCK  *locallock;
        LOCK       *lock;
        PROCLOCK   *proclock;
-       LWLockId        partitionLock;
+       FlexLockId      partitionLock;
        bool            wakeupNeeded;
 
        if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
@@ -1912,7 +1912,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
         */
        for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
        {
-               LWLockId        partitionLock = FirstLockMgrLock + partition;
+               FlexLockId      partitionLock = FirstLockMgrLock + partition;
                SHM_QUEUE  *procLocks = &(MyProc->myProcLocks[partition]);
 
                proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
@@ -2197,7 +2197,7 @@ static bool
 FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
                                          uint32 hashcode)
 {
-       LWLockId                partitionLock = LockHashPartitionLock(hashcode);
+       FlexLockId              partitionLock = LockHashPartitionLock(hashcode);
        Oid                             relid = locktag->locktag_field2;
        uint32                  i;
 
@@ -2281,7 +2281,7 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock)
        LockMethod              lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
        LOCKTAG            *locktag = &locallock->tag.lock;
        PROCLOCK           *proclock = NULL;
-       LWLockId                partitionLock = LockHashPartitionLock(locallock->hashcode);
+       FlexLockId              partitionLock = LockHashPartitionLock(locallock->hashcode);
        Oid                             relid = locktag->locktag_field2;
        uint32                  f;
 
@@ -2382,7 +2382,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
        SHM_QUEUE  *procLocks;
        PROCLOCK   *proclock;
        uint32          hashcode;
-       LWLockId        partitionLock;
+       FlexLockId      partitionLock;
        int                     count = 0;
        int                     fast_count = 0;
 
@@ -2593,7 +2593,7 @@ LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
        PROCLOCKTAG proclocktag;
        uint32          hashcode;
        uint32          proclock_hashcode;
-       LWLockId        partitionLock;
+       FlexLockId      partitionLock;
        bool            wakeupNeeded;
 
        hashcode = LockTagHashCode(locktag);
@@ -2827,7 +2827,7 @@ PostPrepare_Locks(TransactionId xid)
         */
        for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
        {
-               LWLockId        partitionLock = FirstLockMgrLock + partition;
+               FlexLockId      partitionLock = FirstLockMgrLock + partition;
                SHM_QUEUE  *procLocks = &(MyProc->myProcLocks[partition]);
 
                proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
@@ -3343,7 +3343,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
        uint32          hashcode;
        uint32          proclock_hashcode;
        int                     partition;
-       LWLockId        partitionLock;
+       FlexLockId      partitionLock;
        LockMethod      lockMethodTable;
 
        Assert(len == sizeof(TwoPhaseLockRecord));
index 079eb29163e75e271a746bb8808da99fa4575ac3..ce6c931e9200e1a920f9961c57ce719f50268bbb 100644 (file)
  */
 #include "postgres.h"
 
-#include "access/clog.h"
-#include "access/multixact.h"
-#include "access/subtrans.h"
-#include "commands/async.h"
 #include "miscadmin.h"
 #include "pg_trace.h"
+#include "storage/flexlock_internals.h"
 #include "storage/ipc.h"
-#include "storage/predicate.h"
 #include "storage/proc.h"
 #include "storage/spin.h"
 
-
-/* We use the ShmemLock spinlock to protect LWLockAssign */
-extern slock_t *ShmemLock;
-
-
 typedef struct LWLock
 {
-       slock_t         mutex;                  /* Protects LWLock and queue of PGPROCs */
-       bool            releaseOK;              /* T if ok to release waiters */
+       FlexLock        flex;                   /* common FlexLock infrastructure */
        char            exclusive;              /* # of exclusive holders (0 or 1) */
        int                     shared;                 /* # of shared holders (0..MaxBackends) */
-       PGPROC     *head;                       /* head of list of waiting PGPROCs */
-       PGPROC     *tail;                       /* tail of list of waiting PGPROCs */
-       /* tail is undefined when head is NULL */
 } LWLock;
 
-/*
- * All the LWLock structs are allocated as an array in shared memory.
- * (LWLockIds are indexes into the array.)     We force the array stride to
- * be a power of 2, which saves a few cycles in indexing, but more
- * importantly also ensures that individual LWLocks don't cross cache line
- * boundaries. This reduces cache contention problems, especially on AMD
- * Opterons.  (Of course, we have to also ensure that the array start
- * address is suitably aligned.)
- *
- * LWLock is between 16 and 32 bytes on all known platforms, so these two
- * cases are sufficient.
- */
-#define LWLOCK_PADDED_SIZE     (sizeof(LWLock) <= 16 ? 16 : 32)
-
-typedef union LWLockPadded
-{
-       LWLock          lock;
-       char            pad[LWLOCK_PADDED_SIZE];
-} LWLockPadded;
-
-/*
- * This points to the array of LWLocks in shared memory.  Backends inherit
- * the pointer by fork from the postmaster (except in the EXEC_BACKEND case,
- * where we have special measures to pass it down).
- */
-NON_EXEC_STATIC LWLockPadded *LWLockArray = NULL;
-
-
-/*
- * We use this structure to keep track of locked LWLocks for release
- * during error recovery.  The maximum size could be determined at runtime
- * if necessary, but it seems unlikely that more than a few locks could
- * ever be held simultaneously.
- */
-#define MAX_SIMUL_LWLOCKS      100
-
-static int     num_held_lwlocks = 0;
-static LWLockId held_lwlocks[MAX_SIMUL_LWLOCKS];
-
-static int     lock_addin_request = 0;
-static bool lock_addin_request_allowed = true;
+#define        LWLockPointer(lockid) \
+       (AssertMacro(FlexLockArray[lockid].flex.locktype == FLEXLOCK_TYPE_LWLOCK), \
+        (volatile LWLock *) &FlexLockArray[lockid])
 
 #ifdef LWLOCK_STATS
 static int     counts_for_pid = 0;
@@ -98,27 +47,17 @@ static int *block_counts;
 #endif
 
 #ifdef LOCK_DEBUG
-bool           Trace_lwlocks = false;
-
 inline static void
-PRINT_LWDEBUG(const char *where, LWLockId lockid, const volatile LWLock *lock)
+PRINT_LWDEBUG(const char *where, FlexLockId lockid, const volatile LWLock *lock)
 {
-       if (Trace_lwlocks)
+       if (Trace_flexlocks)
                elog(LOG, "%s(%d): excl %d shared %d head %p rOK %d",
                         where, (int) lockid,
-                        (int) lock->exclusive, lock->shared, lock->head,
-                        (int) lock->releaseOK);
-}
-
-inline static void
-LOG_LWDEBUG(const char *where, LWLockId lockid, const char *msg)
-{
-       if (Trace_lwlocks)
-               elog(LOG, "%s(%d): %s", where, (int) lockid, msg);
+                        (int) lock->exclusive, lock->shared, lock->flex.head,
+                        (int) lock->flex.releaseOK);
 }
 #else                                                  /* not LOCK_DEBUG */
 #define PRINT_LWDEBUG(a,b,c)
-#define LOG_LWDEBUG(a,b,c)
 #endif   /* LOCK_DEBUG */
 
 #ifdef LWLOCK_STATS
@@ -127,8 +66,8 @@ static void
 print_lwlock_stats(int code, Datum arg)
 {
        int                     i;
-       int                *LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
-       int                     numLocks = LWLockCounter[1];
+       int                *FlexLockCounter = (int *) ((char *) FlexLockArray - 2 * sizeof(int));
+       int                     numLocks = FlexLockCounter[1];
 
        /* Grab an LWLock to keep different backends from mixing reports */
        LWLockAcquire(0, LW_EXCLUSIVE);
@@ -145,173 +84,15 @@ print_lwlock_stats(int code, Datum arg)
 }
 #endif   /* LWLOCK_STATS */
 
-
 /*
- * Compute number of LWLocks to allocate.
+ * LWLockAssign - initialize a new lwlock and return its ID
  */
-int
-NumLWLocks(void)
-{
-       int                     numLocks;
-
-       /*
-        * Possibly this logic should be spread out among the affected modules,
-        * the same way that shmem space estimation is done.  But for now, there
-        * are few enough users of LWLocks that we can get away with just keeping
-        * the knowledge here.
-        */
-
-       /* Predefined LWLocks */
-       numLocks = (int) NumFixedLWLocks;
-
-       /* bufmgr.c needs two for each shared buffer */
-       numLocks += 2 * NBuffers;
-
-       /* proc.c needs one for each backend or auxiliary process */
-       numLocks += MaxBackends + NUM_AUXILIARY_PROCS;
-
-       /* clog.c needs one per CLOG buffer */
-       numLocks += NUM_CLOG_BUFFERS;
-
-       /* subtrans.c needs one per SubTrans buffer */
-       numLocks += NUM_SUBTRANS_BUFFERS;
-
-       /* multixact.c needs two SLRU areas */
-       numLocks += NUM_MXACTOFFSET_BUFFERS + NUM_MXACTMEMBER_BUFFERS;
-
-       /* async.c needs one per Async buffer */
-       numLocks += NUM_ASYNC_BUFFERS;
-
-       /* predicate.c needs one per old serializable xid buffer */
-       numLocks += NUM_OLDSERXID_BUFFERS;
-
-       /*
-        * Add any requested by loadable modules; for backwards-compatibility
-        * reasons, allocate at least NUM_USER_DEFINED_LWLOCKS of them even if
-        * there are no explicit requests.
-        */
-       lock_addin_request_allowed = false;
-       numLocks += Max(lock_addin_request, NUM_USER_DEFINED_LWLOCKS);
-
-       return numLocks;
-}
-
-
-/*
- * RequestAddinLWLocks
- *             Request that extra LWLocks be allocated for use by
- *             a loadable module.
- *
- * This is only useful if called from the _PG_init hook of a library that
- * is loaded into the postmaster via shared_preload_libraries. Once
- * shared memory has been allocated, calls will be ignored.  (We could
- * raise an error, but it seems better to make it a no-op, so that
- * libraries containing such calls can be reloaded if needed.)
- */
-void
-RequestAddinLWLocks(int n)
-{
-       if (IsUnderPostmaster || !lock_addin_request_allowed)
-               return;                                 /* too late */
-       lock_addin_request += n;
-}
-
-
-/*
- * Compute shmem space needed for LWLocks.
- */
-Size
-LWLockShmemSize(void)
-{
-       Size            size;
-       int                     numLocks = NumLWLocks();
-
-       /* Space for the LWLock array. */
-       size = mul_size(numLocks, sizeof(LWLockPadded));
-
-       /* Space for dynamic allocation counter, plus room for alignment. */
-       size = add_size(size, 2 * sizeof(int) + LWLOCK_PADDED_SIZE);
-
-       return size;
-}
-
-
-/*
- * Allocate shmem space for LWLocks and initialize the locks.
- */
-void
-CreateLWLocks(void)
-{
-       int                     numLocks = NumLWLocks();
-       Size            spaceLocks = LWLockShmemSize();
-       LWLockPadded *lock;
-       int                *LWLockCounter;
-       char       *ptr;
-       int                     id;
-
-       /* Allocate space */
-       ptr = (char *) ShmemAlloc(spaceLocks);
-
-       /* Leave room for dynamic allocation counter */
-       ptr += 2 * sizeof(int);
-
-       /* Ensure desired alignment of LWLock array */
-       ptr += LWLOCK_PADDED_SIZE - ((uintptr_t) ptr) % LWLOCK_PADDED_SIZE;
-
-       LWLockArray = (LWLockPadded *) ptr;
-
-       /*
-        * Initialize all LWLocks to "unlocked" state
-        */
-       for (id = 0, lock = LWLockArray; id < numLocks; id++, lock++)
-       {
-               SpinLockInit(&lock->lock.mutex);
-               lock->lock.releaseOK = true;
-               lock->lock.exclusive = 0;
-               lock->lock.shared = 0;
-               lock->lock.head = NULL;
-               lock->lock.tail = NULL;
-       }
-
-       /*
-        * Initialize the dynamic-allocation counter, which is stored just before
-        * the first LWLock.
-        */
-       LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
-       LWLockCounter[0] = (int) NumFixedLWLocks;
-       LWLockCounter[1] = numLocks;
-}
-
-
-/*
- * LWLockAssign - assign a dynamically-allocated LWLock number
- *
- * We interlock this using the same spinlock that is used to protect
- * ShmemAlloc().  Interlocking is not really necessary during postmaster
- * startup, but it is needed if any user-defined code tries to allocate
- * LWLocks after startup.
- */
-LWLockId
+FlexLockId
 LWLockAssign(void)
 {
-       LWLockId        result;
-
-       /* use volatile pointer to prevent code rearrangement */
-       volatile int *LWLockCounter;
-
-       LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
-       SpinLockAcquire(ShmemLock);
-       if (LWLockCounter[0] >= LWLockCounter[1])
-       {
-               SpinLockRelease(ShmemLock);
-               elog(ERROR, "no more LWLockIds available");
-       }
-       result = (LWLockId) (LWLockCounter[0]++);
-       SpinLockRelease(ShmemLock);
-       return result;
+       return FlexLockAssign(FLEXLOCK_TYPE_LWLOCK);
 }
 
-
 /*
  * LWLockAcquire - acquire a lightweight lock in the specified mode
  *
@@ -320,9 +101,9 @@ LWLockAssign(void)
  * Side effect: cancel/die interrupts are held off until lock release.
  */
 void
-LWLockAcquire(LWLockId lockid, LWLockMode mode)
+LWLockAcquire(FlexLockId lockid, LWLockMode mode)
 {
-       volatile LWLock *lock = &(LWLockArray[lockid].lock);
+       volatile LWLock *lock = LWLockPointer(lockid);
        PGPROC     *proc = MyProc;
        bool            retry = false;
        int                     extraWaits = 0;
@@ -333,8 +114,8 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
        /* Set up local count state first time through in a given process */
        if (counts_for_pid != MyProcPid)
        {
-               int                *LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
-               int                     numLocks = LWLockCounter[1];
+               int                *FlexLockCounter = (int *) ((char *) FlexLockArray - 2 * sizeof(int));
+               int                     numLocks = FlexLockCounter[1];
 
                sh_acquire_counts = calloc(numLocks, sizeof(int));
                ex_acquire_counts = calloc(numLocks, sizeof(int));
@@ -356,10 +137,6 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
         */
        Assert(!(proc == NULL && IsUnderPostmaster));
 
-       /* Ensure we will have room to remember the lock */
-       if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
-               elog(ERROR, "too many LWLocks taken");
-
        /*
         * Lock out cancel/die interrupts until we exit the code section protected
         * by the LWLock.  This ensures that interrupts will not interfere with
@@ -388,11 +165,11 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
                bool            mustwait;
 
                /* Acquire mutex.  Time spent holding mutex should be short! */
-               SpinLockAcquire(&lock->mutex);
+               SpinLockAcquire(&lock->flex.mutex);
 
                /* If retrying, allow LWLockRelease to release waiters again */
                if (retry)
-                       lock->releaseOK = true;
+                       lock->flex.releaseOK = true;
 
                /* If I can get the lock, do so quickly. */
                if (mode == LW_EXCLUSIVE)
@@ -419,72 +196,30 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
                if (!mustwait)
                        break;                          /* got the lock */
 
-               /*
-                * Add myself to wait queue.
-                *
-                * If we don't have a PGPROC structure, there's no way to wait. This
-                * should never occur, since MyProc should only be null during shared
-                * memory initialization.
-                */
-               if (proc == NULL)
-                       elog(PANIC, "cannot wait without a PGPROC structure");
-
-               proc->lwWaiting = true;
-               proc->lwExclusive = (mode == LW_EXCLUSIVE);
-               proc->lwWaitLink = NULL;
-               if (lock->head == NULL)
-                       lock->head = proc;
-               else
-                       lock->tail->lwWaitLink = proc;
-               lock->tail = proc;
+               /* Add myself to wait queue. */
+               FlexLockJoinWaitQueue(lock, (int) mode);
 
                /* Can release the mutex now */
-               SpinLockRelease(&lock->mutex);
-
-               /*
-                * Wait until awakened.
-                *
-                * Since we share the process wait semaphore with the regular lock
-                * manager and ProcWaitForSignal, and we may need to acquire an LWLock
-                * while one of those is pending, it is possible that we get awakened
-                * for a reason other than being signaled by LWLockRelease. If so,
-                * loop back and wait again.  Once we've gotten the LWLock,
-                * re-increment the sema by the number of additional signals received,
-                * so that the lock manager or signal manager will see the received
-                * signal when it next waits.
-                */
-               LOG_LWDEBUG("LWLockAcquire", lockid, "waiting");
+               SpinLockRelease(&lock->flex.mutex);
+
+               /* Wait until awakened. */
+               extraWaits += FlexLockWait(lockid, mode);
 
 #ifdef LWLOCK_STATS
                block_counts[lockid]++;
 #endif
 
-               TRACE_POSTGRESQL_LWLOCK_WAIT_START(lockid, mode);
-
-               for (;;)
-               {
-                       /* "false" means cannot accept cancel/die interrupt here. */
-                       PGSemaphoreLock(&proc->sem, false);
-                       if (!proc->lwWaiting)
-                               break;
-                       extraWaits++;
-               }
-
-               TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(lockid, mode);
-
-               LOG_LWDEBUG("LWLockAcquire", lockid, "awakened");
-
                /* Now loop back and try to acquire lock again. */
                retry = true;
        }
 
        /* We are done updating shared state of the lock itself. */
-       SpinLockRelease(&lock->mutex);
+       SpinLockRelease(&lock->flex.mutex);
 
-       TRACE_POSTGRESQL_LWLOCK_ACQUIRE(lockid, mode);
+       TRACE_POSTGRESQL_FLEXLOCK_ACQUIRE(lockid, mode);
 
        /* Add lock to list of locks held by this backend */
-       held_lwlocks[num_held_lwlocks++] = lockid;
+       FlexLockRemember(lockid);
 
        /*
         * Fix the process wait semaphore's count for any absorbed wakeups.
@@ -501,17 +236,13 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
  * If successful, cancel/die interrupts are held off until lock release.
  */
 bool
-LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
+LWLockConditionalAcquire(FlexLockId lockid, LWLockMode mode)
 {
-       volatile LWLock *lock = &(LWLockArray[lockid].lock);
+       volatile LWLock *lock = LWLockPointer(lockid);
        bool            mustwait;
 
        PRINT_LWDEBUG("LWLockConditionalAcquire", lockid, lock);
 
-       /* Ensure we will have room to remember the lock */
-       if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
-               elog(ERROR, "too many LWLocks taken");
-
        /*
         * Lock out cancel/die interrupts until we exit the code section protected
         * by the LWLock.  This ensures that interrupts will not interfere with
@@ -520,7 +251,7 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
        HOLD_INTERRUPTS();
 
        /* Acquire mutex.  Time spent holding mutex should be short! */
-       SpinLockAcquire(&lock->mutex);
+       SpinLockAcquire(&lock->flex.mutex);
 
        /* If I can get the lock, do so quickly. */
        if (mode == LW_EXCLUSIVE)
@@ -545,20 +276,20 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
        }
 
        /* We are done updating shared state of the lock itself. */
-       SpinLockRelease(&lock->mutex);
+       SpinLockRelease(&lock->flex.mutex);
 
        if (mustwait)
        {
                /* Failed to get lock, so release interrupt holdoff */
                RESUME_INTERRUPTS();
-               LOG_LWDEBUG("LWLockConditionalAcquire", lockid, "failed");
-               TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL(lockid, mode);
+               FlexLockDebug("LWLockConditionalAcquire", lockid, "failed");
+               TRACE_POSTGRESQL_FLEXLOCK_CONDACQUIRE_FAIL(lockid, mode);
        }
        else
        {
                /* Add lock to list of locks held by this backend */
-               held_lwlocks[num_held_lwlocks++] = lockid;
-               TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE(lockid, mode);
+               FlexLockRemember(lockid);
+               TRACE_POSTGRESQL_FLEXLOCK_CONDACQUIRE(lockid, mode);
        }
 
        return !mustwait;
@@ -568,32 +299,18 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
  * LWLockRelease - release a previously acquired lock
  */
 void
-LWLockRelease(LWLockId lockid)
+LWLockRelease(FlexLockId lockid)
 {
-       volatile LWLock *lock = &(LWLockArray[lockid].lock);
+       volatile LWLock *lock = LWLockPointer(lockid);
        PGPROC     *head;
        PGPROC     *proc;
-       int                     i;
 
        PRINT_LWDEBUG("LWLockRelease", lockid, lock);
 
-       /*
-        * Remove lock from list of locks held.  Usually, but not always, it will
-        * be the latest-acquired lock; so search array backwards.
-        */
-       for (i = num_held_lwlocks; --i >= 0;)
-       {
-               if (lockid == held_lwlocks[i])
-                       break;
-       }
-       if (i < 0)
-               elog(ERROR, "lock %d is not held", (int) lockid);
-       num_held_lwlocks--;
-       for (; i < num_held_lwlocks; i++)
-               held_lwlocks[i] = held_lwlocks[i + 1];
+       FlexLockForget(lockid);
 
        /* Acquire mutex.  Time spent holding mutex should be short! */
-       SpinLockAcquire(&lock->mutex);
+       SpinLockAcquire(&lock->flex.mutex);
 
        /* Release my hold on lock */
        if (lock->exclusive > 0)
@@ -610,10 +327,10 @@ LWLockRelease(LWLockId lockid)
         * if someone has already awakened waiters that haven't yet acquired the
         * lock.
         */
-       head = lock->head;
+       head = lock->flex.head;
        if (head != NULL)
        {
-               if (lock->exclusive == 0 && lock->shared == 0 && lock->releaseOK)
+               if (lock->exclusive == 0 && lock->shared == 0 && lock->flex.releaseOK)
                {
                        /*
                         * Remove the to-be-awakened PGPROCs from the queue.  If the front
@@ -621,17 +338,17 @@ LWLockRelease(LWLockId lockid)
                         * as many waiters as want shared access.
                         */
                        proc = head;
-                       if (!proc->lwExclusive)
+                       if (proc->flWaitMode != LW_EXCLUSIVE)
                        {
-                               while (proc->lwWaitLink != NULL &&
-                                          !proc->lwWaitLink->lwExclusive)
-                                       proc = proc->lwWaitLink;
+                               while (proc->flWaitLink != NULL &&
+                                          proc->flWaitLink->flWaitMode != LW_EXCLUSIVE)
+                                       proc = proc->flWaitLink;
                        }
                        /* proc is now the last PGPROC to be released */
-                       lock->head = proc->lwWaitLink;
-                       proc->lwWaitLink = NULL;
+                       lock->flex.head = proc->flWaitLink;
+                       proc->flWaitLink = NULL;
                        /* prevent additional wakeups until retryer gets to run */
-                       lock->releaseOK = false;
+                       lock->flex.releaseOK = false;
                }
                else
                {
@@ -641,20 +358,20 @@ LWLockRelease(LWLockId lockid)
        }
 
        /* We are done updating shared state of the lock itself. */
-       SpinLockRelease(&lock->mutex);
+       SpinLockRelease(&lock->flex.mutex);
 
-       TRACE_POSTGRESQL_LWLOCK_RELEASE(lockid);
+       TRACE_POSTGRESQL_FLEXLOCK_RELEASE(lockid);
 
        /*
         * Awaken any waiters I removed from the queue.
         */
        while (head != NULL)
        {
-               LOG_LWDEBUG("LWLockRelease", lockid, "release waiter");
+               FlexLockDebug("LWLockRelease", lockid, "release waiter");
                proc = head;
-               head = proc->lwWaitLink;
-               proc->lwWaitLink = NULL;
-               proc->lwWaiting = false;
+               head = proc->flWaitLink;
+               proc->flWaitLink = NULL;
+               proc->flWaitResult = 1;         /* any non-zero value will do */
                PGSemaphoreUnlock(&proc->sem);
        }
 
@@ -664,43 +381,17 @@ LWLockRelease(LWLockId lockid)
        RESUME_INTERRUPTS();
 }
 
-
-/*
- * LWLockReleaseAll - release all currently-held locks
- *
- * Used to clean up after ereport(ERROR). An important difference between this
- * function and retail LWLockRelease calls is that InterruptHoldoffCount is
- * unchanged by this operation.  This is necessary since InterruptHoldoffCount
- * has been set to an appropriate level earlier in error recovery. We could
- * decrement it below zero if we allow it to drop for each released lock!
- */
-void
-LWLockReleaseAll(void)
-{
-       while (num_held_lwlocks > 0)
-       {
-               HOLD_INTERRUPTS();              /* match the upcoming RESUME_INTERRUPTS */
-
-               LWLockRelease(held_lwlocks[num_held_lwlocks - 1]);
-       }
-}
-
-
 /*
  * LWLockHeldByMe - test whether my process currently holds a lock
  *
- * This is meant as debug support only.  We do not distinguish whether the
- * lock is held shared or exclusive.
+ * The following convenience routine might not be worthwhile but for the fact
+ * that we've had a function by this name since long before FlexLocks existed.
+ * Callers who want to check whether an arbitrary FlexLock (that may or may not
+ * be an LWLock) is held can use FlexLockHeldByMe directly.
  */
 bool
-LWLockHeldByMe(LWLockId lockid)
+LWLockHeldByMe(FlexLockId lockid)
 {
-       int                     i;
-
-       for (i = 0; i < num_held_lwlocks; i++)
-       {
-               if (held_lwlocks[i] == lockid)
-                       return true;
-       }
-       return false;
+       AssertMacro(FlexLockArray[lockid].flex.locktype == FLEXLOCK_TYPE_LWLOCK);
+       return FlexLockHeldByMe(lockid);
 }
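
For illustration, a minimal sketch of the intended use; ProcArrayLock is one
of the fixed ids declared in flexlock.h, and the surrounding context is
hypothetical:

    /* Callers typically use LWLockHeldByMe in assertions. */
    Assert(LWLockHeldByMe(ProcArrayLock));
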
index 345f6f56a69557269d51382e17f36a8630e5fcc5..15978a4f7e712d45970580f16efb0a6caf955aae 100644 (file)
 #define PredicateLockHashPartition(hashcode) \
        ((hashcode) % NUM_PREDICATELOCK_PARTITIONS)
 #define PredicateLockHashPartitionLock(hashcode) \
-       ((LWLockId) (FirstPredicateLockMgrLock + PredicateLockHashPartition(hashcode)))
+       ((FlexLockId) (FirstPredicateLockMgrLock + PredicateLockHashPartition(hashcode)))
 
 #define NPREDICATELOCKTARGETENTS() \
        mul_size(max_predicate_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
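
A hedged sketch of how the retyped partition-lock macro is used; the function
itself is illustrative, while real callers in predicate.c derive the hash from
a PREDICATELOCKTARGETTAG:

    static void
    example_partition_lookup(uint32 targettaghash)
    {
        FlexLockId  partitionLock = PredicateLockHashPartitionLock(targettaghash);

        /* Take just this partition's lock around a hashtable search. */
        LWLockAcquire(partitionLock, LW_SHARED);
        /* ... search the shared predicate-lock hashtable ... */
        LWLockRelease(partitionLock);
    }

BufMappingPartitionLock and LockHashPartitionLock below follow the same
FlexLockId arithmetic over their respective partition ranges.
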
@@ -1840,7 +1840,7 @@ PageIsPredicateLocked(Relation relation, BlockNumber blkno)
 {
        PREDICATELOCKTARGETTAG targettag;
        uint32          targettaghash;
-       LWLockId        partitionLock;
+       FlexLockId      partitionLock;
        PREDICATELOCKTARGET *target;
 
        SET_PREDICATELOCKTARGETTAG_PAGE(targettag,
@@ -2073,7 +2073,7 @@ DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag)
                if (TargetTagIsCoveredBy(oldtargettag, *newtargettag))
                {
                        uint32          oldtargettaghash;
-                       LWLockId        partitionLock;
+                       FlexLockId      partitionLock;
                        PREDICATELOCK *rmpredlock;
 
                        oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag);
@@ -2285,7 +2285,7 @@ CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
        PREDICATELOCKTARGET *target;
        PREDICATELOCKTAG locktag;
        PREDICATELOCK *lock;
-       LWLockId        partitionLock;
+       FlexLockId      partitionLock;
        bool            found;
 
        partitionLock = PredicateLockHashPartitionLock(targettaghash);
@@ -2586,10 +2586,10 @@ TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag,
                                                                  bool removeOld)
 {
        uint32          oldtargettaghash;
-       LWLockId        oldpartitionLock;
+       FlexLockId      oldpartitionLock;
        PREDICATELOCKTARGET *oldtarget;
        uint32          newtargettaghash;
-       LWLockId        newpartitionLock;
+       FlexLockId      newpartitionLock;
        bool            found;
        bool            outOfShmem = false;
 
@@ -3578,7 +3578,7 @@ ClearOldPredicateLocks(void)
                        PREDICATELOCKTARGET *target;
                        PREDICATELOCKTARGETTAG targettag;
                        uint32          targettaghash;
-                       LWLockId        partitionLock;
+                       FlexLockId      partitionLock;
 
                        tag = predlock->tag;
                        target = tag.myTarget;
@@ -3656,7 +3656,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
                PREDICATELOCKTARGET *target;
                PREDICATELOCKTARGETTAG targettag;
                uint32          targettaghash;
-               LWLockId        partitionLock;
+               FlexLockId      partitionLock;
 
                nextpredlock = (PREDICATELOCK *)
                        SHMQueueNext(&(sxact->predicateLocks),
@@ -4034,7 +4034,7 @@ static void
 CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
 {
        uint32          targettaghash;
-       LWLockId        partitionLock;
+       FlexLockId      partitionLock;
        PREDICATELOCKTARGET *target;
        PREDICATELOCK *predlock;
        PREDICATELOCK *mypredlock = NULL;
index bcbc80222bb0e90379785f33c9088e7e07d35003..b402999d8ec8c206bd2a5bc39455ae60c7757aea 100644 (file)
@@ -360,9 +360,9 @@ InitProcess(void)
        /* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
        if (IsAutoVacuumWorkerProcess())
                MyPgXact->vacuumFlags |= PROC_IS_AUTOVACUUM;
-       MyProc->lwWaiting = false;
-       MyProc->lwExclusive = false;
-       MyProc->lwWaitLink = NULL;
+       MyProc->flWaitResult = 0;
+       MyProc->flWaitMode = 0;
+       MyProc->flWaitLink = NULL;
        MyProc->waitLock = NULL;
        MyProc->waitProcLock = NULL;
 #ifdef USE_ASSERT_CHECKING
@@ -515,9 +515,9 @@ InitAuxiliaryProcess(void)
        MyProc->roleId = InvalidOid;
        MyPgXact->inCommit = false;
        MyPgXact->vacuumFlags = 0;
-       MyProc->lwWaiting = false;
-       MyProc->lwExclusive = false;
-       MyProc->lwWaitLink = NULL;
+       MyProc->flWaitMode = 0;
+       MyProc->flWaitResult = 0;
+       MyProc->flWaitLink = NULL;
        MyProc->waitLock = NULL;
        MyProc->waitProcLock = NULL;
 #ifdef USE_ASSERT_CHECKING
@@ -643,7 +643,7 @@ IsWaitingForLock(void)
 void
 LockWaitCancel(void)
 {
-       LWLockId        partitionLock;
+       FlexLockId      partitionLock;
 
        /* Nothing to do if we weren't waiting for a lock */
        if (lockAwaited == NULL)
@@ -754,11 +754,11 @@ ProcKill(int code, Datum arg)
 #endif
 
        /*
-        * Release any LW locks I am holding.  There really shouldn't be any, but
-        * it's cheap to check again before we cut the knees off the LWLock
+        * Release any flex locks I am holding.  There really shouldn't be any, but
+        * it's cheap to check again before we cut the knees off the flex lock
         * facility by releasing our PGPROC ...
         */
-       LWLockReleaseAll();
+       FlexLockReleaseAll();
 
        /* Release ownership of the process's latch, too */
        DisownLatch(&MyProc->procLatch);
@@ -815,8 +815,8 @@ AuxiliaryProcKill(int code, Datum arg)
 
        Assert(MyProc == auxproc);
 
-       /* Release any LW locks I am holding (see notes above) */
-       LWLockReleaseAll();
+       /* Release any flex locks I am holding (see notes above) */
+       FlexLockReleaseAll();
 
        /* Release ownership of the process's latch, too */
        DisownLatch(&MyProc->procLatch);
@@ -901,7 +901,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
        LOCK       *lock = locallock->lock;
        PROCLOCK   *proclock = locallock->proclock;
        uint32          hashcode = locallock->hashcode;
-       LWLockId        partitionLock = LockHashPartitionLock(hashcode);
+       FlexLockId      partitionLock = LockHashPartitionLock(hashcode);
        PROC_QUEUE *waitQueue = &(lock->waitProcs);
        LOCKMASK        myHeldLocks = MyProc->heldLocks;
        bool            early_deadlock = false;
index 293fb0363f8fbda2acad821490fa86821bd72a59..1a19e368263cdaf791de407be01a4d5bf5f387b0 100755 (executable)
@@ -19,7 +19,7 @@
 INTENTIONALLY_NOT_INCLUDED="autocommit debug_deadlocks \
 is_superuser lc_collate lc_ctype lc_messages lc_monetary lc_numeric lc_time \
 pre_auth_delay role seed server_encoding server_version server_version_int \
-session_authorization trace_lock_oidmin trace_lock_table trace_locks trace_lwlocks \
+session_authorization trace_lock_oidmin trace_lock_table trace_locks trace_flexlocks \
 trace_notify trace_userlocks transaction_isolation transaction_read_only \
 zero_damaged_pages"
 
index da7b6d4e90b41347e93631ad5826bc64f5032c75..52de233ccf99322d14b8f5c4cf98f398ba82ee07 100644 (file)
@@ -59,6 +59,7 @@
 #include "replication/walreceiver.h"
 #include "replication/walsender.h"
 #include "storage/bufmgr.h"
+#include "storage/flexlock_internals.h"
 #include "storage/standby.h"
 #include "storage/fd.h"
 #include "storage/predicate.h"
@@ -1071,12 +1072,12 @@ static struct config_bool ConfigureNamesBool[] =
                NULL, NULL, NULL
        },
        {
-               {"trace_lwlocks", PGC_SUSET, DEVELOPER_OPTIONS,
+               {"trace_flexlocks", PGC_SUSET, DEVELOPER_OPTIONS,
                        gettext_noop("No description available."),
                        NULL,
                        GUC_NOT_IN_SAMPLE
                },
-               &Trace_lwlocks,
+               &Trace_flexlocks,
                false,
                NULL, NULL, NULL
        },
index 71c5ab0bee75ee9763a432be8957270d65dd1ece..5b9cfe695a4a5568d488dfc869a5cc42452147aa 100644 (file)
@@ -15,8 +15,8 @@
  * in probe definitions, as they cause compilation errors on Mac OS X 10.5.
  */
 #define LocalTransactionId unsigned int
-#define LWLockId int
-#define LWLockMode int
+#define FlexLockId int
+#define FlexLockMode int
 #define LOCKMODE int
 #define BlockNumber unsigned int
 #define Oid unsigned int
@@ -29,12 +29,12 @@ provider postgresql {
        probe transaction__commit(LocalTransactionId);
        probe transaction__abort(LocalTransactionId);
 
-       probe lwlock__acquire(LWLockId, LWLockMode);
-       probe lwlock__release(LWLockId);
-       probe lwlock__wait__start(LWLockId, LWLockMode);
-       probe lwlock__wait__done(LWLockId, LWLockMode);
-       probe lwlock__condacquire(LWLockId, LWLockMode);
-       probe lwlock__condacquire__fail(LWLockId, LWLockMode);
+       probe flexlock__acquire(FlexLockId, FlexLockMode);
+       probe flexlock__release(FlexLockId);
+       probe flexlock__wait__start(FlexLockId, FlexLockMode);
+       probe flexlock__wait__done(FlexLockId, FlexLockMode);
+       probe flexlock__condacquire(FlexLockId, FlexLockMode);
+       probe flexlock__condacquire__fail(FlexLockId, FlexLockMode);
 
        probe lock__wait__start(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, LOCKMODE);
        probe lock__wait__done(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, LOCKMODE);
index e48743f55d7e6e3e00df0beb6f0c6d7c13dacbd4..680a87f550338c2aa2e3f9055ac45a4c93876b1c 100644 (file)
@@ -55,7 +55,7 @@ typedef enum
  */
 typedef struct SlruSharedData
 {
-       LWLockId        ControlLock;
+       FlexLockId      ControlLock;
 
        /* Number of buffers managed by this SLRU structure */
        int                     num_slots;
@@ -69,7 +69,7 @@ typedef struct SlruSharedData
        bool       *page_dirty;
        int                *page_number;
        int                *page_lru_count;
-       LWLockId   *buffer_locks;
+       FlexLockId *buffer_locks;
 
        /*
         * Optional array of WAL flush LSNs associated with entries in the SLRU
@@ -136,7 +136,7 @@ typedef SlruCtlData *SlruCtl;
 
 extern Size SimpleLruShmemSize(int nslots, int nlsns);
 extern void SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
-                         LWLockId ctllock, const char *subdir);
+                         FlexLockId ctllock, const char *subdir);
 extern int     SimpleLruZeroPage(SlruCtl ctl, int pageno);
 extern int SimpleLruReadPage(SlruCtl ctl, int pageno, bool write_ok,
                                  TransactionId xid);
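
A hedged illustration of the updated SimpleLruInit signature; the argument
values mimic the CLOG caller and are not part of this patch:

    /* The SLRU control lock is now passed as a FlexLockId. */
    SimpleLruInit(ClogCtl, "CLOG Ctl", NUM_CLOG_BUFFERS, CLOG_LSNS_PER_PAGE,
                  CLogControlLock, "pg_clog");
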
index 6c8e31269c51d5f4daa2be1f4ef30f38604c5a59..d3b74db5fa1001c590beeb93a88be4a0b849434a 100644 (file)
@@ -49,9 +49,9 @@
 #define SEQ_MINVALUE   (-SEQ_MAXVALUE)
 
 /*
- * Number of spare LWLocks to allocate for user-defined add-on code.
+ * Number of spare FlexLocks to allocate for user-defined add-on code.
  */
-#define NUM_USER_DEFINED_LWLOCKS       4
+#define NUM_USER_DEFINED_FLEXLOCKS     4
 
 /*
  * Define this if you want to allow the lo_import and lo_export SQL
index b7d4ea53a4d1749b93e464c5e941fa0f52b1f3f3..ac7f66501f561b09e4206645a62d9954bdeecb73 100644 (file)
@@ -103,7 +103,7 @@ typedef struct buftag
 #define BufTableHashPartition(hashcode) \
        ((hashcode) % NUM_BUFFER_PARTITIONS)
 #define BufMappingPartitionLock(hashcode) \
-       ((LWLockId) (FirstBufMappingLock + BufTableHashPartition(hashcode)))
+       ((FlexLockId) (FirstBufMappingLock + BufTableHashPartition(hashcode)))
 
 /*
  *     BufferDesc -- shared descriptor/state data for a single shared buffer.
@@ -143,8 +143,8 @@ typedef struct sbufdesc
        int                     buf_id;                 /* buffer's index number (from 0) */
        int                     freeNext;               /* link in freelist chain */
 
-       LWLockId        io_in_progress_lock;    /* to wait for I/O to complete */
-       LWLockId        content_lock;   /* to lock access to buffer contents */
+       FlexLockId      io_in_progress_lock;    /* to wait for I/O to complete */
+       FlexLockId      content_lock;   /* to lock access to buffer contents */
 } BufferDesc;
 
 #define BufferDescriptorGetBuffer(bdesc) ((bdesc)->buf_id + 1)
diff --git a/src/include/storage/flexlock.h b/src/include/storage/flexlock.h
new file mode 100644 (file)
index 0000000..612c21a
--- /dev/null
@@ -0,0 +1,102 @@
+/*-------------------------------------------------------------------------
+ *
+ * flexlock.h
+ *       Flex lock manager
+ *
+ * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/storage/flexlock.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef FLEXLOCK_H
+#define FLEXLOCK_H
+
+/*
+ * It's a bit odd to declare NUM_BUFFER_PARTITIONS and NUM_LOCK_PARTITIONS
+ * here, but we need them to set up enum FlexLockId correctly, and having
+ * this file include lock.h or bufmgr.h would be backwards.
+ */
+
+/* Number of partitions of the shared buffer mapping hashtable */
+#define NUM_BUFFER_PARTITIONS  16
+
+/* Number of partitions the shared lock tables are divided into */
+#define LOG2_NUM_LOCK_PARTITIONS  4
+#define NUM_LOCK_PARTITIONS  (1 << LOG2_NUM_LOCK_PARTITIONS)
+
+/* Number of partitions the shared predicate lock tables are divided into */
+#define LOG2_NUM_PREDICATELOCK_PARTITIONS  4
+#define NUM_PREDICATELOCK_PARTITIONS  (1 << LOG2_NUM_PREDICATELOCK_PARTITIONS)
+
+/*
+ * We have a number of predefined FlexLocks, plus a bunch of locks that are
+ * dynamically assigned (e.g., for shared buffers).  The FlexLock structures
+ * live in shared memory (since they contain shared data) and are identified
+ * by values of this enumerated type.  We abuse the notion of an enum somewhat
+ * by allowing values not listed in the enum declaration to be assigned.
+ * The extra value MaxDynamicFlexLock is there to keep the compiler from
+ * deciding that the enum can be represented as char or short ...
+ *
+ * If you remove a lock, please replace it with a placeholder. This retains
+ * the lock numbering, which is helpful for DTrace and other external
+ * debugging scripts.
+ */
+typedef enum FlexLockId
+{
+       BufFreelistLock,
+       ShmemIndexLock,
+       OidGenLock,
+       XidGenLock,
+       ProcArrayLock,
+       SInvalReadLock,
+       SInvalWriteLock,
+       WALInsertLock,
+       WALWriteLock,
+       ControlFileLock,
+       CheckpointLock,
+       CLogControlLock,
+       SubtransControlLock,
+       MultiXactGenLock,
+       MultiXactOffsetControlLock,
+       MultiXactMemberControlLock,
+       RelCacheInitLock,
+       BgWriterCommLock,
+       TwoPhaseStateLock,
+       TablespaceCreateLock,
+       BtreeVacuumLock,
+       AddinShmemInitLock,
+       AutovacuumLock,
+       AutovacuumScheduleLock,
+       SyncScanLock,
+       RelationMappingLock,
+       AsyncCtlLock,
+       AsyncQueueLock,
+       SerializableXactHashLock,
+       SerializableFinishedListLock,
+       SerializablePredicateLockListLock,
+       OldSerXidLock,
+       SyncRepLock,
+       /* Individual lock IDs end here */
+       FirstBufMappingLock,
+       FirstLockMgrLock = FirstBufMappingLock + NUM_BUFFER_PARTITIONS,
+       FirstPredicateLockMgrLock = FirstLockMgrLock + NUM_LOCK_PARTITIONS,
+
+       /* must be last except for MaxDynamicFlexLock: */
+       NumFixedFlexLocks = FirstPredicateLockMgrLock + NUM_PREDICATELOCK_PARTITIONS,
+
+       MaxDynamicFlexLock = 1000000000
+} FlexLockId;
+
+/* Shared memory setup. */
+extern int     NumFlexLocks(void);
+extern Size FlexLockShmemSize(void);
+extern void RequestAddinFlexLocks(int n);
+extern void CreateFlexLocks(void);
+
+/* Error recovery and debugging support functions. */
+extern void FlexLockReleaseAll(void);
+extern bool FlexLockHeldByMe(FlexLockId id);
+
+#endif   /* FLEXLOCK_H */
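
A hedged sketch of how an add-on obtains one of the dynamically assigned
FlexLocks; the hook and variable names are hypothetical, but
RequestAddinFlexLocks and LWLockAssign are the declared entry points:

    static FlexLockId my_lock;

    void
    _PG_init(void)
    {
        /* Reserve one FlexLock before shared memory is sized. */
        RequestAddinFlexLocks(1);
    }

    static void
    my_shmem_startup(void)
    {
        /* Hands out an id from the dynamic range beyond NumFixedFlexLocks. */
        my_lock = LWLockAssign();
    }
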
diff --git a/src/include/storage/flexlock_internals.h b/src/include/storage/flexlock_internals.h
new file mode 100644 (file)
index 0000000..4fcb342
--- /dev/null
@@ -0,0 +1,86 @@
+/*-------------------------------------------------------------------------
+ *
+ * flexlock_internals.h
+ *       Flex lock internals.  Only files that implement a FlexLock
+ *       type should need to include this.  Merging this with flexlock.h
+ *       would create a circular header dependency, but even if it didn't,
+ *       keeping the two apart is cleaner.
+ *
+ * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/storage/flexlock_internals.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef FLEXLOCK_INTERNALS_H
+#define FLEXLOCK_INTERNALS_H
+
+#include "storage/proc.h"
+#include "storage/s_lock.h"
+
+/*
+ * Individual FlexLock implementations each get this many bytes to store
+ * their state; of course, a given implementation could also allocate additional
+ * shmem elsewhere, but we provide this many bytes within the array.  The
+ * header fields common to all FlexLock types are included in this number.
+ * A power of two should probably be chosen, to avoid alignment issues and
+ * cache line splitting.  It might be useful to increase this on systems where
+ * a cache line is more than 64 bytes in size.
+ */
+#define FLEX_LOCK_BYTES                64
+
+typedef struct FlexLock
+{
+       char            locktype;               /* see FLEXLOCK_TYPE_* constants */
+       slock_t         mutex;                  /* Protects FlexLock state and wait queues */
+       bool            releaseOK;              /* T if ok to release waiters */
+       PGPROC     *head;                       /* head of list of waiting PGPROCs */
+       PGPROC     *tail;                       /* tail of list of waiting PGPROCs */
+       /* tail is undefined when head is NULL */
+} FlexLock;
+
+#define FLEXLOCK_TYPE_LWLOCK                   'l'
+
+typedef union FlexLockPadded
+{
+       FlexLock        flex;
+       char            pad[FLEX_LOCK_BYTES];
+} FlexLockPadded;
+
+extern FlexLockPadded *FlexLockArray;
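
To make the padding scheme concrete, a hedged sketch of a client lock type;
the fields after flex are illustrative, not the committed LWLock layout:

    typedef struct ExampleLock
    {
        FlexLock    flex;       /* common header: locktype, mutex, wait queue */
        bool        exclusive;  /* type-specific state; the whole struct must */
        int         shared;     /* fit within FLEX_LOCK_BYTES */
    } ExampleLock;
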
+
+extern FlexLockId FlexLockAssign(char locktype);
+extern void FlexLockRemember(FlexLockId id);
+extern void FlexLockForget(FlexLockId id);
+extern int FlexLockWait(FlexLockId id, int mode);
+
+/*
+ * We must join the wait queue while holding the spinlock, so we define this
+ * as a macro, for speed.
+ */
+#define FlexLockJoinWaitQueue(lock, mode) \
+       do { \
+               Assert(MyProc != NULL); \
+               MyProc->flWaitResult = 0; \
+               MyProc->flWaitMode = mode; \
+               MyProc->flWaitLink = NULL; \
+               if (lock->flex.head == NULL) \
+                       lock->flex.head = MyProc; \
+               else \
+                       lock->flex.tail->flWaitLink = MyProc; \
+               lock->flex.tail = MyProc; \
+       } while (0)
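
For orientation, a hedged sketch of the sleep path a client type builds from
this macro; the availability check and retry loop a real acquire function
needs are omitted:

    static void
    example_flexlock_sleep(FlexLockId id, int mode)
    {
        FlexLockPadded *lock = &FlexLockArray[id];

        SpinLockAcquire(&lock->flex.mutex);
        FlexLockJoinWaitQueue(lock, mode);  /* link MyProc onto the queue */
        SpinLockRelease(&lock->flex.mutex);
        (void) FlexLockWait(id, mode);      /* sleep until flWaitResult is set */
    }
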
+
+#ifdef LOCK_DEBUG
+extern bool    Trace_flexlocks;
+#define FlexLockDebug(where, id, msg) \
+       do { \
+               if (Trace_flexlocks) \
+                       elog(LOG, "%s(%d): %s", where, (int) id, msg); \
+       } while (0)
+#else
+#define FlexLockDebug(where, id, msg)
+#endif
+
+#endif   /* FLEXLOCK_INTERNALS_H */
index e106ad54019bb915edf84cfca014cae0eebdcaa6..ba87db2817f4bda7cefc30eb3b6ab885d63b9fc7 100644 (file)
@@ -471,7 +471,7 @@ typedef enum
 #define LockHashPartition(hashcode) \
        ((hashcode) % NUM_LOCK_PARTITIONS)
 #define LockHashPartitionLock(hashcode) \
-       ((LWLockId) (FirstLockMgrLock + LockHashPartition(hashcode)))
+       ((FlexLockId) (FirstLockMgrLock + LockHashPartition(hashcode)))
 
 
 /*
index 438a48d8dc99578af487e68a738e17898f91d1a2..f68cddc0deec95add899399d645b1d8fe607de1b 100644 (file)
 #ifndef LWLOCK_H
 #define LWLOCK_H
 
-/*
- * It's a bit odd to declare NUM_BUFFER_PARTITIONS and NUM_LOCK_PARTITIONS
- * here, but we need them to set up enum LWLockId correctly, and having
- * this file include lock.h or bufmgr.h would be backwards.
- */
-
-/* Number of partitions of the shared buffer mapping hashtable */
-#define NUM_BUFFER_PARTITIONS  16
-
-/* Number of partitions the shared lock tables are divided into */
-#define LOG2_NUM_LOCK_PARTITIONS  4
-#define NUM_LOCK_PARTITIONS  (1 << LOG2_NUM_LOCK_PARTITIONS)
-
-/* Number of partitions the shared predicate lock tables are divided into */
-#define LOG2_NUM_PREDICATELOCK_PARTITIONS  4
-#define NUM_PREDICATELOCK_PARTITIONS  (1 << LOG2_NUM_PREDICATELOCK_PARTITIONS)
-
-/*
- * We have a number of predefined LWLocks, plus a bunch of LWLocks that are
- * dynamically assigned (e.g., for shared buffers).  The LWLock structures
- * live in shared memory (since they contain shared data) and are identified
- * by values of this enumerated type.  We abuse the notion of an enum somewhat
- * by allowing values not listed in the enum declaration to be assigned.
- * The extra value MaxDynamicLWLock is there to keep the compiler from
- * deciding that the enum can be represented as char or short ...
- *
- * If you remove a lock, please replace it with a placeholder. This retains
- * the lock numbering, which is helpful for DTrace and other external
- * debugging scripts.
- */
-typedef enum LWLockId
-{
-       BufFreelistLock,
-       ShmemIndexLock,
-       OidGenLock,
-       XidGenLock,
-       ProcArrayLock,
-       SInvalReadLock,
-       SInvalWriteLock,
-       WALInsertLock,
-       WALWriteLock,
-       ControlFileLock,
-       CheckpointLock,
-       CLogControlLock,
-       SubtransControlLock,
-       MultiXactGenLock,
-       MultiXactOffsetControlLock,
-       MultiXactMemberControlLock,
-       RelCacheInitLock,
-       BgWriterCommLock,
-       TwoPhaseStateLock,
-       TablespaceCreateLock,
-       BtreeVacuumLock,
-       AddinShmemInitLock,
-       AutovacuumLock,
-       AutovacuumScheduleLock,
-       SyncScanLock,
-       RelationMappingLock,
-       AsyncCtlLock,
-       AsyncQueueLock,
-       SerializableXactHashLock,
-       SerializableFinishedListLock,
-       SerializablePredicateLockListLock,
-       OldSerXidLock,
-       SyncRepLock,
-       /* Individual lock IDs end here */
-       FirstBufMappingLock,
-       FirstLockMgrLock = FirstBufMappingLock + NUM_BUFFER_PARTITIONS,
-       FirstPredicateLockMgrLock = FirstLockMgrLock + NUM_LOCK_PARTITIONS,
-
-       /* must be last except for MaxDynamicLWLock: */
-       NumFixedLWLocks = FirstPredicateLockMgrLock + NUM_PREDICATELOCK_PARTITIONS,
-
-       MaxDynamicLWLock = 1000000000
-} LWLockId;
-
+#include "storage/flexlock.h"
 
 typedef enum LWLockMode
 {
@@ -97,22 +22,10 @@ typedef enum LWLockMode
        LW_SHARED
 } LWLockMode;
 
-
-#ifdef LOCK_DEBUG
-extern bool Trace_lwlocks;
-#endif
-
-extern LWLockId LWLockAssign(void);
-extern void LWLockAcquire(LWLockId lockid, LWLockMode mode);
-extern bool LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode);
-extern void LWLockRelease(LWLockId lockid);
-extern void LWLockReleaseAll(void);
-extern bool LWLockHeldByMe(LWLockId lockid);
-
-extern int     NumLWLocks(void);
-extern Size LWLockShmemSize(void);
-extern void CreateLWLocks(void);
-
-extern void RequestAddinLWLocks(int n);
+extern FlexLockId LWLockAssign(void);
+extern void LWLockAcquire(FlexLockId lockid, LWLockMode mode);
+extern bool LWLockConditionalAcquire(FlexLockId lockid, LWLockMode mode);
+extern void LWLockRelease(FlexLockId lockid);
+extern bool LWLockHeldByMe(FlexLockId lockid);
 
 #endif   /* LWLOCK_H */
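
The LWLock calling convention is unchanged apart from the FlexLockId type; a
minimal hedged sketch, using one of the fixed ids:

    LWLockAcquire(OidGenLock, LW_EXCLUSIVE);
    /* ... update the shared state the lock protects ... */
    LWLockRelease(OidGenLock);
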
index c7cddc79931e8f3c62ee497fde0e344530e3c962..1f3a71defe0aae89b6e411ba87af655cc8c6ecbd 100644 (file)
@@ -99,10 +99,10 @@ struct PGPROC
         */
        bool            recoveryConflictPending;
 
-       /* Info about LWLock the process is currently waiting for, if any. */
-       bool            lwWaiting;              /* true if waiting for an LW lock */
-       bool            lwExclusive;    /* true if waiting for exclusive access */
-       struct PGPROC *lwWaitLink;      /* next waiter for same LW lock */
+       /* Info about FlexLock the process is currently waiting for, if any. */
+       int                     flWaitResult;   /* result of wait, or 0 if still waiting */
+       int                     flWaitMode;             /* lock mode sought */
+       struct PGPROC *flWaitLink;      /* next waiter for same FlexLock */
 
        /* Info about lock the process is currently waiting for, if any. */
        /* waitLock and waitProcLock are NULL if not currently waiting. */
@@ -132,7 +132,7 @@ struct PGPROC
        struct XidCache subxids;        /* cache for subtransaction XIDs */
 
        /* Per-backend LWLock.  Protects fields below. */
-       LWLockId        backendLock;    /* protects the fields below */
+       FlexLockId      backendLock;    /* protects the fields below */
 
        /* Lock manager data, recording fast-path locks taken by this backend. */
        uint64          fpLockBits;             /* lock modes held for each fast-path slot */