Assert(BufferIsValid(buffer));
+ /*
+ * Construct shared cache inval if necessary. Because we pass a tuple
+ * version without our own inplace changes or inplace changes that other
+ * sessions complete while we wait for locks, inplace update mustn't
+ * change catcache lookup keys. But we aren't bothering with index
+ * updates either, so that's true a fortiori. After LockBuffer(), it
+ * would be too late, because this might reach a
+ * CatalogCacheInitializeCache() that locks "buffer".
+ */
+ CacheInvalidateHeapTupleInplace(relation, oldtup_ptr, NULL);
+
LockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
if (!ret)
{
UnlockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
+ ForgetInplace_Inval();
InvalidateCatalogSnapshot();
}
return ret;
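
For clarity, a condensed sketch of the ordering this hunk establishes in the locking path (every name below comes from the patch itself; the only thing added is the placeholder comment for the visibility/updatability checks that compute "ret", which are elided here):

    /*
     * Build the inval messages before taking any lock: constructing them
     * can populate a catcache via CatalogCacheInitializeCache(), which may
     * read -- and lock -- the very buffer we are about to lock.
     */
    CacheInvalidateHeapTupleInplace(relation, oldtup_ptr, NULL);

    LockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
    LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

    /* ... visibility/updatability checks set "ret" here (elided) ... */

    if (!ret)
    {
        UnlockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
        ForgetInplace_Inval();      /* discard the messages queued above */
        InvalidateCatalogSnapshot();
    }
    return ret;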
dst = (char *) htup + htup->t_hoff;
src = (char *) tuple->t_data + tuple->t_data->t_hoff;
- /*
- * Construct shared cache inval if necessary. Note that because we only
- * pass the new version of the tuple, this mustn't be used for any
- * operations that could change catcache lookup keys. But we aren't
- * bothering with index updates either, so that's true a fortiori.
- */
- CacheInvalidateHeapTupleInplace(relation, tuple, NULL);
-
/* Like RecordTransactionCommit(), log only if needed */
if (XLogStandbyInfoActive())
nmsgs = inplaceGetInvalidationMessages(&invalMessages,
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
UnlockTuple(relation, &oldtup->t_self, InplaceUpdateTupleLock);
+ ForgetInplace_Inval();
}
#define FRM_NOOP 0x0001
inplaceInvalInfo = NULL;
}
+/*
+ * ForgetInplace_Inval
+ * Alternative to PreInplace_Inval()+AtInplace_Inval(): discard queued-up
+ * invalidations. This lets inplace update enumerate invalidations
+ * optimistically, before locking the buffer.
+ */
+void
+ForgetInplace_Inval(void)
+{
+ inplaceInvalInfo = NULL;
+}
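
To illustrate the lifecycle this function completes -- queue invalidations optimistically, then either send them or throw them away -- here is a small standalone C sketch. It is not PostgreSQL code: "pending", queue_inval(), apply_inval(), and forget_inval() are hypothetical stand-ins for inplaceInvalInfo, CacheInvalidateHeapTupleInplace(), AtInplace_Inval(), and ForgetInplace_Inval() respectively.

    #include <stdbool.h>
    #include <stdio.h>

    static const char *pending;      /* plays the role of inplaceInvalInfo */

    /* Optimistic step: safe to run before any lock is taken. */
    static void queue_inval(const char *msg)
    {
        pending = msg;
    }

    /* Success path: the update went ahead, so send what was queued. */
    static void apply_inval(void)
    {
        printf("send: %s\n", pending);
        pending = NULL;
    }

    /* Abandon path: the update did not happen, so just discard. */
    static void forget_inval(void)
    {
        pending = NULL;
    }

    int main(void)
    {
        bool did_update = true;      /* pretend the lock and checks passed */

        queue_inval("catalog tuple changed");
        if (did_update)
            apply_inval();
        else
            forget_inval();
        return 0;
    }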
+
/*
* AtEOSubXact_Inval
* Process queued-up invalidation messages at end of subtransaction.