Fix race condition where heap_delete() fails to pin VM page.
author Jeff Davis <jdavis@postgresql.org>
Thu, 22 Sep 2022 17:58:49 +0000 (10:58 -0700)
committer Jeff Davis <jdavis@postgresql.org>
Thu, 22 Sep 2022 18:04:00 +0000 (11:04 -0700)
Similar to 5f12bc94dc, the code must re-check PageIsAllVisible() after the
buffer lock is re-acquired. Backpatching to the same version, 12.

Discussion: https://postgr.es/m/CAEP4nAw9jYQDKd_5Y+-s2E4YiUJq1vqiikFjYGpLShtp-K3gag@mail.gmail.com
Reported-by: Robins Tharakan
Reviewed-by: Robins Tharakan
Backpatch-through: 12

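As an illustrative sketch only (not PostgreSQL code; the names Page, delete_from_page and pin_vm_page below are hypothetical stand-ins), the following mimics the shape of the race being fixed: a flag corresponding to "page is all visible" can be set by a concurrent worker while the page lock is temporarily dropped to acquire an extra resource (the "VM pin"), so the flag must be re-checked every time the lock is re-acquired:

    /*
     * Sketch of the "recheck after re-locking" pattern; hypothetical names,
     * plain pthreads instead of PostgreSQL buffer locks.
     */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Page
    {
        pthread_mutex_t lock;        /* stands in for the buffer content lock */
        bool            all_visible; /* stands in for PD_ALL_VISIBLE */
    } Page;

    /* Hypothetical stand-in for pinning the visibility-map page. */
    static void *
    pin_vm_page(Page *page)
    {
        (void) page;
        /* In the real code this may do I/O, so it must not hold the lock. */
        return malloc(1);
    }

    static void
    delete_from_page(Page *page)
    {
        void *vm_pin = NULL;

        pthread_mutex_lock(&page->lock);

    retry:
        /*
         * If the page became all-visible while we were not holding the lock
         * and we have no VM pin yet, drop the lock, take the pin, re-lock,
         * and loop: the flag may have changed again in the meantime.
         */
        if (page->all_visible && vm_pin == NULL)
        {
            pthread_mutex_unlock(&page->lock);
            vm_pin = pin_vm_page(page);
            pthread_mutex_lock(&page->lock);
            goto retry;
        }

        /* ... delete the tuple; any later unlock/relock must goto retry ... */

        pthread_mutex_unlock(&page->lock);
        free(vm_pin);
    }

    int
    main(void)
    {
        Page page = {PTHREAD_MUTEX_INITIALIZER, true};

        delete_from_page(&page);
        printf("done\n");
        return 0;
    }

In the patch itself this is achieved by moving the l1: label above the PageIsAllVisible() recheck and by adding (vmbuffer == InvalidBuffer && PageIsAllVisible(page)) to the conditions that take goto l1 after waiting out a concurrent locker, so the recheck (and VM pin) is repeated on every restart.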
src/backend/access/heap/heapam.c

index eb811d751e5534cd83c691e28f2feb4f80b56a3c..75b214824dfa90dc68621f55b93eb1bd2c72f835 100644
@@ -2711,6 +2711,15 @@ heap_delete(Relation relation, ItemPointer tid,
 
        LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 
+       lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
+       Assert(ItemIdIsNormal(lp));
+
+       tp.t_tableOid = RelationGetRelid(relation);
+       tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
+       tp.t_len = ItemIdGetLength(lp);
+       tp.t_self = *tid;
+
+l1:
        /*
         * If we didn't pin the visibility map page and the page has become all
         * visible while we were busy locking the buffer, we'll have to unlock and
@@ -2724,15 +2733,6 @@ heap_delete(Relation relation, ItemPointer tid,
                LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
        }
 
-       lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
-       Assert(ItemIdIsNormal(lp));
-
-       tp.t_tableOid = RelationGetRelid(relation);
-       tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
-       tp.t_len = ItemIdGetLength(lp);
-       tp.t_self = *tid;
-
-l1:
        result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
 
        if (result == TM_Invisible)
@@ -2791,8 +2791,12 @@ l1:
                                 * If xwait had just locked the tuple then some other xact
                                 * could update this tuple before we get to this point.  Check
                                 * for xmax change, and start over if so.
+                                *
+                                * We also must start over if we didn't pin the VM page, and
+                                * the page has become all visible.
                                 */
-                               if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
+                               if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
+                                       xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
                                        !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
                                                                                 xwait))
                                        goto l1;
@@ -2824,8 +2828,12 @@ l1:
                         * xwait is done, but if xwait had just locked the tuple then some
                         * other xact could update this tuple before we get to this point.
                         * Check for xmax change, and start over if so.
+                        *
+                        * We also must start over if we didn't pin the VM page, and the
+                        * page has become all visible.
                         */
-                       if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
+                       if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
+                               xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
                                !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
                                                                         xwait))
                                goto l1;