Fix inplace update buffer self-deadlock.
author     Noah Misch <noah@leadboat.com>
Sat, 2 Nov 2024 16:04:56 +0000 (09:04 -0700)
committer  Noah Misch <noah@leadboat.com>
Sat, 2 Nov 2024 16:04:56 +0000 (09:04 -0700)
A CacheInvalidateHeapTuple* callee might call
CatalogCacheInitializeCache(), which needs a relcache entry.  Acquiring
a valid relcache entry might scan pg_class.  Hence, to prevent
undetected LWLock self-deadlock, CacheInvalidateHeapTuple* callers must
not hold BUFFER_LOCK_EXCLUSIVE on buffers of pg_class.  Move the
CacheInvalidateHeapTupleInplace() call before acquiring BUFFER_LOCK_EXCLUSIVE.  No
back-patch, since I've reverted commit
243e9b40f1b2dd09d6e5bf91ebf6e822a2cd3704 from non-master branches.

Reported by Alexander Lakhin.  Reviewed by Alexander Lakhin.

Discussion: https://postgr.es/m/10ec0bc3-5933-1189-6bb8-5dec4114558e@gmail.com
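
To make the hazard concrete, here is a condensed sketch of the call ordering
before and after this commit.  It is an illustration distilled from the
paragraph above and from the hunks below, not a verbatim excerpt of the
source; intermediate calls and declarations are abbreviated.

    /* Before: across heap_inplace_lock() and heap_inplace_update_and_unlock() */
    LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);      /* possibly a pg_class buffer */
    ...
    CacheInvalidateHeapTupleInplace(relation, tuple, NULL);
    /*
     * A callee of the line above may reach CatalogCacheInitializeCache(),
     * which needs a relcache entry; building that entry may scan pg_class
     * and try to lock the very buffer already held above, an undetected
     * LWLock self-deadlock.
     */

    /* After: heap_inplace_lock() constructs the inval first, then locks */
    CacheInvalidateHeapTupleInplace(relation, oldtup_ptr, NULL);
    LockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
    LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);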

src/backend/access/heap/heapam.c
src/backend/utils/cache/inval.c
src/include/utils/inval.h

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 1748eafa100bd4e98327e18bff1f080fc73d0f38..9a31cdcd096814744de6bd9511923900fad29774 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -6214,6 +6214,17 @@ heap_inplace_lock(Relation relation,
 
    Assert(BufferIsValid(buffer));
 
+   /*
+    * Construct shared cache inval if necessary.  Because we pass a tuple
+    * version without our own inplace changes or inplace changes other
+    * sessions complete while we wait for locks, inplace update mustn't
+    * change catcache lookup keys.  But we aren't bothering with index
+    * updates either, so that's true a fortiori.  After LockBuffer(), it
+    * would be too late, because this might reach a
+    * CatalogCacheInitializeCache() that locks "buffer".
+    */
+   CacheInvalidateHeapTupleInplace(relation, oldtup_ptr, NULL);
+
    LockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
    LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 
@@ -6309,6 +6320,7 @@ heap_inplace_lock(Relation relation,
    if (!ret)
    {
        UnlockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
+       ForgetInplace_Inval();
        InvalidateCatalogSnapshot();
    }
    return ret;
@@ -6345,14 +6357,6 @@ heap_inplace_update_and_unlock(Relation relation,
    dst = (char *) htup + htup->t_hoff;
    src = (char *) tuple->t_data + tuple->t_data->t_hoff;
 
-   /*
-    * Construct shared cache inval if necessary.  Note that because we only
-    * pass the new version of the tuple, this mustn't be used for any
-    * operations that could change catcache lookup keys.  But we aren't
-    * bothering with index updates either, so that's true a fortiori.
-    */
-   CacheInvalidateHeapTupleInplace(relation, tuple, NULL);
-
    /* Like RecordTransactionCommit(), log only if needed */
    if (XLogStandbyInfoActive())
        nmsgs = inplaceGetInvalidationMessages(&invalMessages,
@@ -6481,6 +6485,7 @@ heap_inplace_unlock(Relation relation,
 {
    LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    UnlockTuple(relation, &oldtup->t_self, InplaceUpdateTupleLock);
+   ForgetInplace_Inval();
 }
 
 #define        FRM_NOOP                0x0001
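
Putting the heapam.c hunks together, the resulting control flow is roughly as
follows.  This is a condensed sketch assembled from the hunks above;
signatures, declarations, and the unchanged validation logic are abbreviated,
so it is not compilable as written.

    bool
    heap_inplace_lock(Relation relation, HeapTuple oldtup_ptr, Buffer buffer, ...)
    {
        /* Enumerate invalidations optimistically, before any buffer lock. */
        CacheInvalidateHeapTupleInplace(relation, oldtup_ptr, NULL);

        LockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
        LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

        ...                 /* decide whether the update can proceed (ret) */

        if (!ret)
        {
            /* Not updating: release the tuple lock and drop the queued invals. */
            UnlockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
            ForgetInplace_Inval();
            InvalidateCatalogSnapshot();
        }
        return ret;
    }

    void
    heap_inplace_unlock(Relation relation, HeapTuple oldtup, Buffer buffer)
    {
        /* Caller chose not to update: same cleanup, including the new call. */
        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
        UnlockTuple(relation, &oldtup->t_self, InplaceUpdateTupleLock);
        ForgetInplace_Inval();
    }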
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 986850ccda95c56c58f664161699e99bf0cb7954..fc972ed17d60dd8ee29d382444a6c9bf618fb207 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -1201,6 +1201,18 @@ AtInplace_Inval(void)
    inplaceInvalInfo = NULL;
 }
 
+/*
+ * ForgetInplace_Inval
+ *     Alternative to PreInplace_Inval()+AtInplace_Inval(): discard queued-up
+ *     invalidations.  This lets inplace update enumerate invalidations
+ *     optimistically, before locking the buffer.
+ */
+void
+ForgetInplace_Inval(void)
+{
+   inplaceInvalInfo = NULL;
+}
+
 /*
  * AtEOSubXact_Inval
  *     Process queued-up invalidation messages at end of subtransaction.
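
For contrast with the success path, a minimal sketch of how the inval.c entry
points pair up, based only on the comments above and on the heapam.c hunks;
the real call sites contain more steps than shown here.

    /* update goes ahead (heap_inplace_update_and_unlock): */
    PreInplace_Inval();
    /* ... the inplace update itself happens here ... */
    AtInplace_Inval();          /* ends by resetting inplaceInvalInfo */

    /* update abandoned (heap_inplace_lock failure path, heap_inplace_unlock): */
    ForgetInplace_Inval();      /* only resets inplaceInvalInfo; queued invals are discarded */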
diff --git a/src/include/utils/inval.h b/src/include/utils/inval.h
index 3390e7ab8af9cf724339a2d756f531d312ae022f..299cd7585f23ee05cb6151de1dc917f56120503e 100644
--- a/src/include/utils/inval.h
+++ b/src/include/utils/inval.h
@@ -30,6 +30,7 @@ extern void AtEOXact_Inval(bool isCommit);
 
 extern void PreInplace_Inval(void);
 extern void AtInplace_Inval(void);
+extern void ForgetInplace_Inval(void);
 
 extern void AtEOSubXact_Inval(bool isCommit);