/* scan the relation */
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- HTSU_Result htsu;
+ TM_Result htsu;
TransactionId xmax;
uint16 infomask;
infomask = tuple->t_data->t_infomask;
/*
- * A tuple is locked if HTSU returns BeingUpdated.
+ * A tuple is locked if HTSU returns BeingModified.
*/
- if (htsu == HeapTupleBeingUpdated)
+ if (htsu == TM_BeingModified)
{
char **values;
LockTupleMode mode, bool is_update,
TransactionId *result_xmax, uint16 *result_infomask,
uint16 *result_infomask2);
-static HTSU_Result heap_lock_updated_tuple(Relation rel, HeapTuple tuple,
+static TM_Result heap_lock_updated_tuple(Relation rel, HeapTuple tuple,
ItemPointer ctid, TransactionId xid,
LockTupleMode mode);
static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
Snapshot snapshot,
HeapTuple tuple,
Buffer *userbuf,
- bool keep_buf,
Relation stats_relation)
{
ItemPointer tid = &(tuple->t_self);
if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
- if (keep_buf)
- *userbuf = buffer;
- else
- {
- ReleaseBuffer(buffer);
- *userbuf = InvalidBuffer;
- }
+ ReleaseBuffer(buffer);
+ *userbuf = InvalidBuffer;
tuple->t_data = NULL;
return false;
}
if (!ItemIdIsNormal(lp))
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
- if (keep_buf)
- *userbuf = buffer;
- else
- {
- ReleaseBuffer(buffer);
- *userbuf = InvalidBuffer;
- }
+ ReleaseBuffer(buffer);
+ *userbuf = InvalidBuffer;
tuple->t_data = NULL;
return false;
}
return true;
}
- /* Tuple failed time qual, but maybe caller wants to see it anyway. */
- if (keep_buf)
- *userbuf = buffer;
- else
- {
- ReleaseBuffer(buffer);
- *userbuf = InvalidBuffer;
- }
+ /* Tuple failed time qual */
+ ReleaseBuffer(buffer);
+ *userbuf = InvalidBuffer;
return false;
}
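With the keep_buf parameter gone, heap_fetch() always releases the buffer on failure and hands back InvalidBuffer. A minimal caller sketch under the new convention (the rel/snapshot/tid variables are illustrative, not from this patch):

    HeapTupleData tuple;
    Buffer buf;

    ItemPointerCopy(tid, &(tuple.t_self));
    if (!heap_fetch(rel, snapshot, &tuple, &buf, NULL))
        return false; /* not visible; buf is already InvalidBuffer */

    /* ... examine tuple ... */
    ReleaseBuffer(buf); /* drop the pin heap_fetch acquired */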
* The new tuple is stamped with current transaction ID and the specified
* command ID.
*
- * If the HEAP_INSERT_SKIP_WAL option is specified, the new tuple is not
- * logged in WAL, even for a non-temp relation. Safe usage of this behavior
- * requires that we arrange that all new tuples go into new pages not
- * containing any tuples from other transactions, and that the relation gets
- * fsync'd before commit. (See also heap_sync() comments)
- *
- * The HEAP_INSERT_SKIP_FSM option is passed directly to
- * RelationGetBufferForTuple, which see for more info.
- *
- * HEAP_INSERT_FROZEN should only be specified for inserts into
- * relfilenodes created during the current subtransaction and when
- * there are no prior snapshots or pre-existing portals open.
- * This causes rows to be frozen, which is an MVCC violation and
- * requires explicit options chosen by user.
- *
- * HEAP_INSERT_SPECULATIVE is used on so-called "speculative insertions",
- * which can be backed out afterwards without aborting the whole transaction.
- * Other sessions can wait for the speculative insertion to be confirmed,
- * turning it into a regular tuple, or aborted, as if it never existed.
- * Speculatively inserted tuples behave as "value locks" of short duration,
- * used to implement INSERT .. ON CONFLICT.
- *
- * HEAP_INSERT_NO_LOGICAL force-disables the emitting of logical decoding
- * information for the tuple. This should solely be used during table rewrites
- * where RelationIsLogicallyLogged(relation) is not yet accurate for the new
- * relation.
- *
- * Note that most of these options will be applied when inserting into the
- * heap's TOAST table, too, if the tuple requires any out-of-line data. Only
- * HEAP_INSERT_SPECULATIVE is explicitly ignored, as the toast data does not
- * partake in speculative insertion.
+ * See table_insert for comments about most of the input flags, except that
+ * this routine directly takes a tuple rather than a slot.
*
- * The BulkInsertState object (if any; bistate can be NULL for default
- * behavior) is also just passed through to RelationGetBufferForTuple.
+ * There are corresponding HEAP_INSERT_ options for all the TABLE_INSERT_
+ * options; additionally there is HEAP_INSERT_SPECULATIVE, which is used to
+ * implement table_insert_speculative().
*
* On return the header fields of *tup are updated to match the stored tuple;
* in particular tup->t_self receives the actual TID where the tuple was
/*
* heap_delete - delete a tuple
*
- * NB: do not call this directly unless you are prepared to deal with
- * concurrent-update conditions. Use simple_heap_delete instead.
+ * See table_delete() for an explanation of the parameters, except that this
+ * routine directly takes a tuple rather than a slot.
*
- * relation - table to be modified (caller must hold suitable lock)
- * tid - TID of tuple to be deleted
- * cid - delete command ID (used for visibility test, and stored into
- * cmax if successful)
- * crosscheck - if not InvalidSnapshot, also check tuple against this
- * wait - true if should wait for any conflicting update to commit/abort
- * hufd - output parameter, filled in failure cases (see below)
- * changingPart - true iff the tuple is being moved to another partition
- * table due to an update of the partition key. Otherwise, false.
- *
- * Normal, successful return value is HeapTupleMayBeUpdated, which
- * actually means we did delete it. Failure return codes are
- * HeapTupleSelfUpdated, HeapTupleUpdated, or HeapTupleBeingUpdated
- * (the last only possible if wait == false).
- *
- * In the failure cases, the routine fills *hufd with the tuple's t_ctid,
- * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax
- * (the last only for HeapTupleSelfUpdated, since we
- * cannot obtain cmax from a combocid generated by another transaction).
- * See comments for struct HeapUpdateFailureData for additional info.
+ * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
+ * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
+ * only for TM_SelfModified, since we cannot obtain cmax from a combocid
+ * generated by another transaction).
*/
-HTSU_Result
+TM_Result
heap_delete(Relation relation, ItemPointer tid,
CommandId cid, Snapshot crosscheck, bool wait,
- HeapUpdateFailureData *hufd, bool changingPart)
+ TM_FailureData *tmfd, bool changingPart)
{
- HTSU_Result result;
+ TM_Result result;
TransactionId xid = GetCurrentTransactionId();
ItemId lp;
HeapTupleData tp;
l1:
result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
- if (result == HeapTupleInvisible)
+ if (result == TM_Invisible)
{
UnlockReleaseBuffer(buffer);
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("attempted to delete invisible tuple")));
}
- else if (result == HeapTupleBeingUpdated && wait)
+ else if (result == TM_BeingModified && wait)
{
TransactionId xwait;
uint16 infomask;
if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
HEAP_XMAX_IS_LOCKED_ONLY(tp.t_data->t_infomask) ||
HeapTupleHeaderIsOnlyLocked(tp.t_data))
- result = HeapTupleMayBeUpdated;
+ result = TM_Ok;
+ else if (!ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid) ||
+ HeapTupleHeaderIndicatesMovedPartitions(tp.t_data))
+ result = TM_Updated;
else
- result = HeapTupleUpdated;
+ result = TM_Deleted;
}
- if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
+ if (crosscheck != InvalidSnapshot && result == TM_Ok)
{
/* Perform additional check for transaction-snapshot mode RI updates */
if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
- result = HeapTupleUpdated;
+ result = TM_Updated;
}
- if (result != HeapTupleMayBeUpdated)
+ if (result != TM_Ok)
{
- Assert(result == HeapTupleSelfUpdated ||
- result == HeapTupleUpdated ||
- result == HeapTupleBeingUpdated);
+ Assert(result == TM_SelfModified ||
+ result == TM_Updated ||
+ result == TM_Deleted ||
+ result == TM_BeingModified);
Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
- hufd->ctid = tp.t_data->t_ctid;
- hufd->xmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
- if (result == HeapTupleSelfUpdated)
- hufd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
+ Assert(result != TM_Updated ||
+ !ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid));
+ tmfd->ctid = tp.t_data->t_ctid;
+ tmfd->xmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
+ if (result == TM_SelfModified)
+ tmfd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
else
- hufd->cmax = InvalidCommandId;
+ tmfd->cmax = InvalidCommandId;
UnlockReleaseBuffer(buffer);
if (have_tuple_lock)
UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
if (old_key_tuple != NULL && old_key_copied)
heap_freetuple(old_key_tuple);
- return HeapTupleMayBeUpdated;
+ return TM_Ok;
}
/*
void
simple_heap_delete(Relation relation, ItemPointer tid)
{
- HTSU_Result result;
- HeapUpdateFailureData hufd;
+ TM_Result result;
+ TM_FailureData tmfd;
result = heap_delete(relation, tid,
GetCurrentCommandId(true), InvalidSnapshot,
true /* wait for commit */ ,
- &hufd, false /* changingPart */ );
+ &tmfd, false /* changingPart */ );
switch (result)
{
- case HeapTupleSelfUpdated:
+ case TM_SelfModified:
/* Tuple was already updated in current command? */
elog(ERROR, "tuple already updated by self");
break;
- case HeapTupleMayBeUpdated:
+ case TM_Ok:
/* done successfully */
break;
- case HeapTupleUpdated:
+ case TM_Updated:
elog(ERROR, "tuple concurrently updated");
break;
+ case TM_Deleted:
+ elog(ERROR, "tuple concurrently deleted");
+ break;
+
default:
elog(ERROR, "unrecognized heap_delete status: %u", result);
break;
/*
* heap_update - replace a tuple
*
- * NB: do not call this directly unless you are prepared to deal with
- * concurrent-update conditions. Use simple_heap_update instead.
- *
- * relation - table to be modified (caller must hold suitable lock)
- * otid - TID of old tuple to be replaced
- * newtup - newly constructed tuple data to store
- * cid - update command ID (used for visibility test, and stored into
- * cmax/cmin if successful)
- * crosscheck - if not InvalidSnapshot, also check old tuple against this
- * wait - true if should wait for any conflicting update to commit/abort
- * hufd - output parameter, filled in failure cases (see below)
- * lockmode - output parameter, filled with lock mode acquired on tuple
- *
- * Normal, successful return value is HeapTupleMayBeUpdated, which
- * actually means we *did* update it. Failure return codes are
- * HeapTupleSelfUpdated, HeapTupleUpdated, or HeapTupleBeingUpdated
- * (the last only possible if wait == false).
+ * See table_update() for an explanation of the parameters, except that this
+ * routine directly takes a tuple rather than a slot.
*
- * On success, the header fields of *newtup are updated to match the new
- * stored tuple; in particular, newtup->t_self is set to the TID where the
- * new tuple was inserted, and its HEAP_ONLY_TUPLE flag is set iff a HOT
- * update was done. However, any TOAST changes in the new tuple's
- * data are not reflected into *newtup.
- *
- * In the failure cases, the routine fills *hufd with the tuple's t_ctid,
- * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax
- * (the last only for HeapTupleSelfUpdated, since we
- * cannot obtain cmax from a combocid generated by another transaction).
- * See comments for struct HeapUpdateFailureData for additional info.
+ * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
+ * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
+ * only for TM_SelfModified, since we cannot obtain cmax from a combocid
+ * generated by another transaction).
*/
-HTSU_Result
+TM_Result
heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
CommandId cid, Snapshot crosscheck, bool wait,
- HeapUpdateFailureData *hufd, LockTupleMode *lockmode)
+ TM_FailureData *tmfd, LockTupleMode *lockmode)
{
- HTSU_Result result;
+ TM_Result result;
TransactionId xid = GetCurrentTransactionId();
Bitmapset *hot_attrs;
Bitmapset *key_attrs;
result = HeapTupleSatisfiesUpdate(&oldtup, cid, buffer);
/* see below about the "no wait" case */
- Assert(result != HeapTupleBeingUpdated || wait);
+ Assert(result != TM_BeingModified || wait);
- if (result == HeapTupleInvisible)
+ if (result == TM_Invisible)
{
UnlockReleaseBuffer(buffer);
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("attempted to update invisible tuple")));
}
- else if (result == HeapTupleBeingUpdated && wait)
+ else if (result == TM_BeingModified && wait)
{
TransactionId xwait;
uint16 infomask;
* MultiXact. In that case, we need to check whether it committed
* or aborted. If it aborted we are safe to update it again;
* otherwise there is an update conflict, and we have to return
- * HeapTupleUpdated below.
+ * TM_Updated or TM_Deleted below.
*
* In the LockTupleExclusive case, we still need to preserve the
* surviving members: those would include the tuple locks we had
can_continue = true;
}
- result = can_continue ? HeapTupleMayBeUpdated : HeapTupleUpdated;
+ if (can_continue)
+ result = TM_Ok;
+ else if (!ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid) ||
+ HeapTupleHeaderIndicatesMovedPartitions(oldtup.t_data))
+ result = TM_Updated;
+ else
+ result = TM_Deleted;
}
- if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
+ if (crosscheck != InvalidSnapshot && result == TM_Ok)
{
/* Perform additional check for transaction-snapshot mode RI updates */
if (!HeapTupleSatisfiesVisibility(&oldtup, crosscheck, buffer))
- result = HeapTupleUpdated;
+ {
+ result = TM_Updated;
+ Assert(!ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid));
+ }
}
- if (result != HeapTupleMayBeUpdated)
+ if (result != TM_Ok)
{
- Assert(result == HeapTupleSelfUpdated ||
- result == HeapTupleUpdated ||
- result == HeapTupleBeingUpdated);
+ Assert(result == TM_SelfModified ||
+ result == TM_Updated ||
+ result == TM_Deleted ||
+ result == TM_BeingModified);
Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
- hufd->ctid = oldtup.t_data->t_ctid;
- hufd->xmax = HeapTupleHeaderGetUpdateXid(oldtup.t_data);
- if (result == HeapTupleSelfUpdated)
- hufd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
+ Assert(result != TM_Updated ||
+ !ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid));
+ tmfd->ctid = oldtup.t_data->t_ctid;
+ tmfd->xmax = HeapTupleHeaderGetUpdateXid(oldtup.t_data);
+ if (result == TM_SelfModified)
+ tmfd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
else
- hufd->cmax = InvalidCommandId;
+ tmfd->cmax = InvalidCommandId;
UnlockReleaseBuffer(buffer);
if (have_tuple_lock)
UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
bms_free(modified_attrs);
bms_free(interesting_attrs);
- return HeapTupleMayBeUpdated;
+ return TM_Ok;
}
/*
void
simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
{
- HTSU_Result result;
- HeapUpdateFailureData hufd;
+ TM_Result result;
+ TM_FailureData tmfd;
LockTupleMode lockmode;
result = heap_update(relation, otid, tup,
GetCurrentCommandId(true), InvalidSnapshot,
true /* wait for commit */ ,
- &hufd, &lockmode);
+ &tmfd, &lockmode);
switch (result)
{
- case HeapTupleSelfUpdated:
+ case TM_SelfModified:
/* Tuple was already updated in current command? */
elog(ERROR, "tuple already updated by self");
break;
- case HeapTupleMayBeUpdated:
+ case TM_Ok:
/* done successfully */
break;
- case HeapTupleUpdated:
+ case TM_Updated:
elog(ERROR, "tuple concurrently updated");
break;
+ case TM_Deleted:
+ elog(ERROR, "tuple concurrently deleted");
+ break;
+
default:
elog(ERROR, "unrecognized heap_update status: %u", result);
break;
*
* Input parameters:
* relation: relation containing tuple (caller must hold suitable lock)
- * tuple->t_self: TID of tuple to lock (rest of struct need not be valid)
+ * tid: TID of tuple to lock
* cid: current command ID (used for visibility test, and stored into
* tuple's cmax if lock is successful)
* mode: indicates if shared or exclusive tuple lock is desired
* Output parameters:
* *tuple: all fields filled in
* *buffer: set to buffer holding tuple (pinned but not locked at exit)
- * *hufd: filled in failure cases (see below)
+ * *tmfd: filled in failure cases (see below)
*
- * Function result may be:
- * HeapTupleMayBeUpdated: lock was successfully acquired
- * HeapTupleInvisible: lock failed because tuple was never visible to us
- * HeapTupleSelfUpdated: lock failed because tuple updated by self
- * HeapTupleUpdated: lock failed because tuple updated by other xact
- * HeapTupleWouldBlock: lock couldn't be acquired and wait_policy is skip
+ * Function result codes are the same as for table_lock_tuple().
*
- * In the failure cases other than HeapTupleInvisible, the routine fills
- * *hufd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,
- * if necessary), and t_cmax (the last only for HeapTupleSelfUpdated,
+ * In the failure cases other than TM_Invisible, the routine fills
+ * *tmfd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,
+ * if necessary), and t_cmax (the last only for TM_SelfModified,
* since we cannot obtain cmax from a combocid generated by another
* transaction).
- * See comments for struct HeapUpdateFailureData for additional info.
+ * See comments for struct TM_FailureData for additional info.
*
* See README.tuplock for a thorough explanation of this mechanism.
*/
-HTSU_Result
+TM_Result
heap_lock_tuple(Relation relation, HeapTuple tuple,
CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
bool follow_updates,
- Buffer *buffer, HeapUpdateFailureData *hufd)
+ Buffer *buffer, TM_FailureData *tmfd)
{
- HTSU_Result result;
+ TM_Result result;
ItemPointer tid = &(tuple->t_self);
ItemId lp;
Page page;
l3:
result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
- if (result == HeapTupleInvisible)
+ if (result == TM_Invisible)
{
/*
* This is possible, but only when locking a tuple for ON CONFLICT
* order to give that case the opportunity to throw a more specific
* error.
*/
- result = HeapTupleInvisible;
+ result = TM_Invisible;
goto out_locked;
}
- else if (result == HeapTupleBeingUpdated || result == HeapTupleUpdated)
+ else if (result == TM_BeingModified ||
+ result == TM_Updated ||
+ result == TM_Deleted)
{
TransactionId xwait;
uint16 infomask;
if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
{
pfree(members);
- result = HeapTupleMayBeUpdated;
+ result = TM_Ok;
goto out_unlocked;
}
}
Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
HEAP_XMAX_IS_EXCL_LOCKED(infomask));
- result = HeapTupleMayBeUpdated;
+ result = TM_Ok;
goto out_unlocked;
case LockTupleShare:
if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
HEAP_XMAX_IS_EXCL_LOCKED(infomask))
{
- result = HeapTupleMayBeUpdated;
+ result = TM_Ok;
goto out_unlocked;
}
break;
case LockTupleNoKeyExclusive:
if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
{
- result = HeapTupleMayBeUpdated;
+ result = TM_Ok;
goto out_unlocked;
}
break;
if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
infomask2 & HEAP_KEYS_UPDATED)
{
- result = HeapTupleMayBeUpdated;
+ result = TM_Ok;
goto out_unlocked;
}
break;
*/
if (follow_updates && updated)
{
- HTSU_Result res;
+ TM_Result res;
res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
GetCurrentTransactionId(),
mode);
- if (res != HeapTupleMayBeUpdated)
+ if (res != TM_Ok)
{
result = res;
/* recovery code expects to have buffer lock held */
/*
* Time to sleep on the other transaction/multixact, if necessary.
*
- * If the other transaction is an update that's already committed,
- * then sleeping cannot possibly do any good: if we're required to
- * sleep, get out to raise an error instead.
+ * If the other transaction is an update/delete that's already
+ * committed, then sleeping cannot possibly do any good: if we're
+ * required to sleep, get out to raise an error instead.
*
* By here, we either have already acquired the buffer exclusive lock,
* or we must wait for the locking transaction or multixact; so below
* we ensure that we grab buffer lock after the sleep.
*/
- if (require_sleep && result == HeapTupleUpdated)
+ if (require_sleep && (result == TM_Updated || result == TM_Deleted))
{
LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
goto failed;
* This can only happen if wait_policy is Skip and the lock
* couldn't be obtained.
*/
- result = HeapTupleWouldBlock;
+ result = TM_WouldBlock;
/* recovery code expects to have buffer lock held */
LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
goto failed;
status, infomask, relation,
NULL))
{
- result = HeapTupleWouldBlock;
+ result = TM_WouldBlock;
/* recovery code expects to have buffer lock held */
LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
goto failed;
case LockWaitSkip:
if (!ConditionalXactLockTableWait(xwait))
{
- result = HeapTupleWouldBlock;
+ result = TM_WouldBlock;
/* recovery code expects to have buffer lock held */
LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
goto failed;
/* if there are updates, follow the update chain */
if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
{
- HTSU_Result res;
+ TM_Result res;
res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
GetCurrentTransactionId(),
mode);
- if (res != HeapTupleMayBeUpdated)
+ if (res != TM_Ok)
{
result = res;
/* recovery code expects to have buffer lock held */
(tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
HeapTupleHeaderIsOnlyLocked(tuple->t_data))
- result = HeapTupleMayBeUpdated;
+ result = TM_Ok;
+ else if (!ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid) ||
+ HeapTupleHeaderIndicatesMovedPartitions(tuple->t_data))
+ result = TM_Updated;
else
- result = HeapTupleUpdated;
+ result = TM_Deleted;
}
failed:
- if (result != HeapTupleMayBeUpdated)
+ if (result != TM_Ok)
{
- Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated ||
- result == HeapTupleWouldBlock);
+ Assert(result == TM_SelfModified || result == TM_Updated ||
+ result == TM_Deleted || result == TM_WouldBlock);
Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
- hufd->ctid = tuple->t_data->t_ctid;
- hufd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
- if (result == HeapTupleSelfUpdated)
- hufd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
+ Assert(result != TM_Updated ||
+ !ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid));
+ tmfd->ctid = tuple->t_data->t_ctid;
+ tmfd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
+ if (result == TM_SelfModified)
+ tmfd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
else
- hufd->cmax = InvalidCommandId;
+ tmfd->cmax = InvalidCommandId;
goto out_locked;
}
END_CRIT_SECTION();
- result = HeapTupleMayBeUpdated;
+ result = TM_Ok;
out_locked:
LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
* Given a hypothetical multixact status held by the transaction identified
* with the given xid, does the current transaction need to wait, fail, or can
* it continue if it wanted to acquire a lock of the given mode? "needwait"
- * is set to true if waiting is necessary; if it can continue, then
- * HeapTupleMayBeUpdated is returned. If the lock is already held by the
- * current transaction, return HeapTupleSelfUpdated. In case of a conflict
- * with another transaction, a different HeapTupleSatisfiesUpdate return code
- * is returned.
+ * is set to true if waiting is necessary; if it can continue, then TM_Ok is
+ * returned. If the lock is already held by the current transaction, return
+ * TM_SelfModified. In case of a conflict with another transaction, a
+ * different HeapTupleSatisfiesUpdate return code is returned.
*
* The held status is said to be hypothetical because it might correspond to a
* lock held by a single Xid, i.e. not a real MultiXactId; we express it this
* way for simplicity of API.
*/
-static HTSU_Result
+static TM_Result
test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
- LockTupleMode mode, bool *needwait)
+ LockTupleMode mode, HeapTuple tup,
+ bool *needwait)
{
MultiXactStatus wantedstatus;
* very rare but can happen if multiple transactions are trying to
* lock an ancient version of the same tuple.
*/
- return HeapTupleSelfUpdated;
+ return TM_SelfModified;
}
else if (TransactionIdIsInProgress(xid))
{
* If we set needwait above, then this value doesn't matter;
* otherwise, this value signals to caller that it's okay to proceed.
*/
- return HeapTupleMayBeUpdated;
+ return TM_Ok;
}
else if (TransactionIdDidAbort(xid))
- return HeapTupleMayBeUpdated;
+ return TM_Ok;
else if (TransactionIdDidCommit(xid))
{
/*
* always be checked.
*/
if (!ISUPDATE_from_mxstatus(status))
- return HeapTupleMayBeUpdated;
+ return TM_Ok;
if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
LOCKMODE_from_mxstatus(wantedstatus)))
+ {
/* bummer */
- return HeapTupleUpdated;
+ if (!ItemPointerEquals(&tup->t_self, &tup->t_data->t_ctid) ||
+ HeapTupleHeaderIndicatesMovedPartitions(tup->t_data))
+ return TM_Updated;
+ else
+ return TM_Deleted;
+ }
- return HeapTupleMayBeUpdated;
+ return TM_Ok;
}
/* Not in progress, not aborted, not committed -- must have crashed */
- return HeapTupleMayBeUpdated;
+ return TM_Ok;
}
* xid with the given mode; if this tuple is updated, recurse to lock the new
* version as well.
*/
-static HTSU_Result
+static TM_Result
heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
LockTupleMode mode)
{
- HTSU_Result result;
+ TM_Result result;
ItemPointerData tupid;
HeapTupleData mytup;
Buffer buf;
block = ItemPointerGetBlockNumber(&tupid);
ItemPointerCopy(&tupid, &(mytup.t_self));
- if (!heap_fetch(rel, SnapshotAny, &mytup, &buf, false, NULL))
+ if (!heap_fetch(rel, SnapshotAny, &mytup, &buf, NULL))
{
/*
* if we fail to find the updated version of the tuple, it's
* chain, and there's no further tuple to lock: return success to
* caller.
*/
- result = HeapTupleMayBeUpdated;
+ result = TM_Ok;
goto out_unlocked;
}
!TransactionIdEquals(HeapTupleHeaderGetXmin(mytup.t_data),
priorXmax))
{
- result = HeapTupleMayBeUpdated;
+ result = TM_Ok;
goto out_locked;
}
*/
if (TransactionIdDidAbort(HeapTupleHeaderGetXmin(mytup.t_data)))
{
- result = HeapTupleMayBeUpdated;
+ result = TM_Ok;
goto out_locked;
}
{
result = test_lockmode_for_conflict(members[i].status,
members[i].xid,
- mode, &needwait);
+ mode,
+ &mytup,
+ &needwait);
/*
* If the tuple was already locked by ourselves in a
* this tuple and continue locking the next version in the
* update chain.
*/
- if (result == HeapTupleSelfUpdated)
+ if (result == TM_SelfModified)
{
pfree(members);
goto next;
pfree(members);
goto l4;
}
- if (result != HeapTupleMayBeUpdated)
+ if (result != TM_Ok)
{
pfree(members);
goto out_locked;
}
result = test_lockmode_for_conflict(status, rawxmax, mode,
- &needwait);
+ &mytup, &needwait);
/*
* If the tuple was already locked by ourselves in a previous
* either. We just need to skip this tuple and continue
* locking the next version in the update chain.
*/
- if (result == HeapTupleSelfUpdated)
+ if (result == TM_SelfModified)
goto next;
if (needwait)
XLTW_LockUpdated);
goto l4;
}
- if (result != HeapTupleMayBeUpdated)
+ if (result != TM_Ok)
{
goto out_locked;
}
ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
HeapTupleHeaderIsOnlyLocked(mytup.t_data))
{
- result = HeapTupleMayBeUpdated;
+ result = TM_Ok;
goto out_locked;
}
UnlockReleaseBuffer(buf);
}
- result = HeapTupleMayBeUpdated;
+ result = TM_Ok;
out_locked:
UnlockReleaseBuffer(buf);
* transaction cannot be using repeatable read or serializable isolation
* levels, because that would lead to a serializability failure.
*/
-static HTSU_Result
+static TM_Result
heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
TransactionId xid, LockTupleMode mode)
{
}
/* nothing to lock */
- return HeapTupleMayBeUpdated;
+ return TM_Ok;
}
/*
* An explicit confirmation WAL record also makes logical decoding simpler.
*/
void
-heap_finish_speculative(Relation relation, HeapTuple tuple)
+heap_finish_speculative(Relation relation, ItemPointer tid)
{
Buffer buffer;
Page page;
ItemId lp = NULL;
HeapTupleHeader htup;
- buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
+ buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
page = (Page) BufferGetPage(buffer);
- offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
+ offnum = ItemPointerGetOffsetNumber(tid);
if (PageGetMaxOffsetNumber(page) >= offnum)
lp = PageGetItemId(page, offnum);
/* NO EREPORT(ERROR) from here till changes are logged */
START_CRIT_SECTION();
- Assert(HeapTupleHeaderIsSpeculative(tuple->t_data));
+ Assert(HeapTupleHeaderIsSpeculative(htup));
MarkBufferDirty(buffer);
* Replace the speculative insertion token with a real t_ctid, pointing to
* itself like it does on regular tuples.
*/
- htup->t_ctid = tuple->t_self;
+ htup->t_ctid = *tid;
/* XLOG stuff */
if (RelationNeedsWAL(relation))
xl_heap_confirm xlrec;
XLogRecPtr recptr;
- xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
+ xlrec.offnum = ItemPointerGetOffsetNumber(tid);
XLogBeginInsert();
* confirmation records.
*/
void
-heap_abort_speculative(Relation relation, HeapTuple tuple)
+heap_abort_speculative(Relation relation, ItemPointer tid)
{
TransactionId xid = GetCurrentTransactionId();
- ItemPointer tid = &(tuple->t_self);
ItemId lp;
HeapTupleData tp;
Page page;
#include "access/heapam.h"
#include "access/tableam.h"
+#include "access/xact.h"
#include "storage/bufmgr.h"
+#include "storage/lmgr.h"
#include "utils/builtins.h"
}
+/* ----------------------------------------------------------------------------
+ * Functions for manipulating physical tuples for the heap AM.
+ * ----------------------------------------------------------------------------
+ */
+
+static void
+heapam_tuple_insert(Relation relation, TupleTableSlot *slot, CommandId cid,
+ int options, BulkInsertState bistate)
+{
+ bool shouldFree = true;
+ HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
+
+ /* Update the tuple with table oid */
+ slot->tts_tableOid = RelationGetRelid(relation);
+ tuple->t_tableOid = slot->tts_tableOid;
+
+ /* Perform the insertion, and copy the resulting ItemPointer */
+ heap_insert(relation, tuple, cid, options, bistate);
+ ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
+
+ if (shouldFree)
+ pfree(tuple);
+}
+
+static void
+heapam_tuple_insert_speculative(Relation relation, TupleTableSlot *slot, CommandId cid,
+ int options, BulkInsertState bistate, uint32 specToken)
+{
+ bool shouldFree = true;
+ HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
+
+ /* Update the tuple with table oid */
+ slot->tts_tableOid = RelationGetRelid(relation);
+ tuple->t_tableOid = slot->tts_tableOid;
+
+ HeapTupleHeaderSetSpeculativeToken(tuple->t_data, specToken);
+ options |= HEAP_INSERT_SPECULATIVE;
+
+ /* Perform the insertion, and copy the resulting ItemPointer */
+ heap_insert(relation, tuple, cid, options, bistate);
+ ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
+
+ if (shouldFree)
+ pfree(tuple);
+}
+
+static void
+heapam_tuple_complete_speculative(Relation relation, TupleTableSlot *slot, uint32 specToken,
+ bool succeeded)
+{
+ bool shouldFree = true;
+ HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
+
+ /* adjust the tuple's state accordingly */
+ if (succeeded)
+ heap_finish_speculative(relation, &slot->tts_tid);
+ else
+ heap_abort_speculative(relation, &slot->tts_tid);
+
+ if (shouldFree)
+ pfree(tuple);
+}
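For context, the executor is expected to drive these two callbacks in pairs around the unique-index recheck; a hedged sketch of the intended flow (the conflict recheck itself is elided and variable names are illustrative):

    uint32 specToken;
    bool conflict;

    specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
    table_insert_speculative(rel, slot, GetCurrentCommandId(true),
                             0, NULL, specToken);
    conflict = false; /* in reality: recheck the arbiter indexes here */
    table_complete_speculative(rel, slot, specToken, !conflict);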
+
+static TM_Result
+heapam_tuple_delete(Relation relation, ItemPointer tid, CommandId cid,
+ Snapshot snapshot, Snapshot crosscheck, bool wait,
+ TM_FailureData *tmfd, bool changingPart)
+{
+ /*
+ * Currently, deleting of index tuples is handled at VACUUM time. If the
+ * storage were to clean up dead tuples by itself, this would be the
+ * place to also remove the corresponding index tuples.
+ */
+ return heap_delete(relation, tid, cid, crosscheck, wait, tmfd, changingPart);
+}
+
+
+static TM_Result
+heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
+ CommandId cid, Snapshot snapshot, Snapshot crosscheck,
+ bool wait, TM_FailureData *tmfd,
+ LockTupleMode *lockmode, bool *update_indexes)
+{
+ bool shouldFree = true;
+ HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
+ TM_Result result;
+
+ /* Update the tuple with table oid */
+ slot->tts_tableOid = RelationGetRelid(relation);
+ tuple->t_tableOid = slot->tts_tableOid;
+
+ result = heap_update(relation, otid, tuple, cid, crosscheck, wait,
+ tmfd, lockmode);
+ ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
+
+ /*
+ * Decide whether new index entries are needed for the tuple
+ *
+ * Note: heap_update returns the tid (location) of the new tuple in the
+ * t_self field.
+ *
+ * If it's a HOT update, we mustn't insert new index entries.
+ */
+ *update_indexes = result == TM_Ok && !HeapTupleIsHeapOnly(tuple);
+
+ if (shouldFree)
+ pfree(tuple);
+
+ return result;
+}
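Callers are expected to consult *update_indexes after a successful update; a sketch combining this with the new ExecInsertIndexTuples() signature from later in this patch (the snapshot/executor-state variables are illustrative):

    bool update_indexes;
    TM_Result result;

    result = table_update(rel, otid, slot, GetCurrentCommandId(true),
                          snapshot, InvalidSnapshot, true /* wait */ ,
                          &tmfd, &lockmode, &update_indexes);
    if (result == TM_Ok && update_indexes)
        recheckIndexes = ExecInsertIndexTuples(slot, estate,
                                               false, NULL, NIL);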
+
+static TM_Result
+heapam_tuple_lock(Relation relation, ItemPointer tid, Snapshot snapshot,
+ TupleTableSlot *slot, CommandId cid, LockTupleMode mode,
+ LockWaitPolicy wait_policy, uint8 flags,
+ TM_FailureData *tmfd)
+{
+ BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
+ TM_Result result;
+ Buffer buffer;
+ HeapTuple tuple = &bslot->base.tupdata;
+ bool follow_updates;
+
+ follow_updates = (flags & TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS) != 0;
+ tmfd->traversed = false;
+
+ Assert(TTS_IS_BUFFERTUPLE(slot));
+
+tuple_lock_retry:
+ tuple->t_self = *tid;
+ result = heap_lock_tuple(relation, tuple, cid, mode, wait_policy,
+ follow_updates, &buffer, tmfd);
+
+ if (result == TM_Updated &&
+ (flags & TUPLE_LOCK_FLAG_FIND_LAST_VERSION))
+ {
+ ReleaseBuffer(buffer);
+ /* Should not encounter speculative tuple on recheck */
+ Assert(!HeapTupleHeaderIsSpeculative(tuple->t_data));
+
+ if (!ItemPointerEquals(&tmfd->ctid, &tuple->t_self))
+ {
+ SnapshotData SnapshotDirty;
+ TransactionId priorXmax;
+
+ /* it was updated, so look at the updated version */
+ *tid = tmfd->ctid;
+ /* updated row should have xmin matching this xmax */
+ priorXmax = tmfd->xmax;
+
+ /* signal that a tuple later in the chain is getting locked */
+ tmfd->traversed = true;
+
+ /*
+ * fetch target tuple
+ *
+ * Loop here to deal with updated or busy tuples
+ */
+ InitDirtySnapshot(SnapshotDirty);
+ for (;;)
+ {
+ if (ItemPointerIndicatesMovedPartitions(tid))
+ ereport(ERROR,
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
+
+ tuple->t_self = *tid;
+ if (heap_fetch(relation, &SnapshotDirty, tuple, &buffer, NULL))
+ {
+ /*
+ * If xmin isn't what we're expecting, the slot must have
+ * been recycled and reused for an unrelated tuple. This
+ * implies that the latest version of the row was deleted,
+ * so we need do nothing. (Should be safe to examine xmin
+ * without getting buffer's content lock. We assume
+ * reading a TransactionId to be atomic, and Xmin never
+ * changes in an existing tuple, except to invalid or
+ * frozen, and neither of those can match priorXmax.)
+ */
+ if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple->t_data),
+ priorXmax))
+ {
+ ReleaseBuffer(buffer);
+ return TM_Deleted;
+ }
+
+ /* otherwise xmin should not be dirty... */
+ if (TransactionIdIsValid(SnapshotDirty.xmin))
+ elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
+
+ /*
+ * If tuple is being updated by other transaction then we
+ * have to wait for its commit/abort, or die trying.
+ */
+ if (TransactionIdIsValid(SnapshotDirty.xmax))
+ {
+ ReleaseBuffer(buffer);
+ switch (wait_policy)
+ {
+ case LockWaitBlock:
+ XactLockTableWait(SnapshotDirty.xmax,
+ relation, &tuple->t_self,
+ XLTW_FetchUpdated);
+ break;
+ case LockWaitSkip:
+ if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
+ /* skip instead of waiting */
+ return TM_WouldBlock;
+ break;
+ case LockWaitError:
+ if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
+ ereport(ERROR,
+ (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
+ errmsg("could not obtain lock on row in relation \"%s\"",
+ RelationGetRelationName(relation))));
+ break;
+ }
+ continue; /* loop back to repeat heap_fetch */
+ }
+
+ /*
+ * If tuple was inserted by our own transaction, we have
+ * to check cmin against cid: cmin >= current CID means
+ * our command cannot see the tuple, so we should ignore
+ * it. Otherwise heap_lock_tuple() will throw an error,
+ * and so would any later attempt to update or delete the
+ * tuple. (We need not check cmax because
+ * HeapTupleSatisfiesDirty will consider a tuple deleted
+ * by our transaction dead, regardless of cmax.) We just
+ * checked that priorXmax == xmin, so we can test that
+ * variable instead of doing HeapTupleHeaderGetXmin again.
+ */
+ if (TransactionIdIsCurrentTransactionId(priorXmax) &&
+ HeapTupleHeaderGetCmin(tuple->t_data) >= cid)
+ {
+ ReleaseBuffer(buffer);
+ return TM_Invisible;
+ }
+
+ /*
+ * This is a live tuple, so try to lock it again.
+ */
+ ReleaseBuffer(buffer);
+ goto tuple_lock_retry;
+ }
+
+ /*
+ * If the referenced slot was actually empty, the latest
+ * version of the row must have been deleted, so we need do
+ * nothing.
+ */
+ if (tuple->t_data == NULL)
+ {
+ return TM_Deleted;
+ }
+
+ /*
+ * As above, if xmin isn't what we're expecting, do nothing.
+ */
+ if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple->t_data),
+ priorXmax))
+ {
+ if (BufferIsValid(buffer))
+ ReleaseBuffer(buffer);
+ return TM_Deleted;
+ }
+
+ /*
+ * If we get here, the tuple was found but failed
+ * SnapshotDirty. Assuming the xmin is either a committed xact
+ * or our own xact (as it certainly should be if we're trying
+ * to modify the tuple), this must mean that the row was
+ * updated or deleted by either a committed xact or our own
+ * xact. If it was deleted, we can ignore it; if it was
+ * updated then chain up to the next version and repeat the
+ * whole process.
+ *
+ * As above, it should be safe to examine xmax and t_ctid
+ * without the buffer content lock, because they can't be
+ * changing.
+ */
+ if (ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid))
+ {
+ /* deleted, so forget about it */
+ if (BufferIsValid(buffer))
+ ReleaseBuffer(buffer);
+ return TM_Deleted;
+ }
+
+ /* updated, so look at the updated row */
+ *tid = tuple->t_data->t_ctid;
+ /* updated row should have xmin matching this xmax */
+ priorXmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
+ if (BufferIsValid(buffer))
+ ReleaseBuffer(buffer);
+ /* loop back to fetch next in chain */
+ }
+ }
+ else
+ {
+ /* tuple was deleted, so give up */
+ return TM_Deleted;
+ }
+ }
+
+ slot->tts_tableOid = RelationGetRelid(relation);
+ tuple->t_tableOid = slot->tts_tableOid;
+
+ /* store in slot, transferring existing pin */
+ ExecStorePinnedBufferHeapTuple(tuple, slot, buffer);
+
+ return result;
+}
+
+
/* ------------------------------------------------------------------------
* Definition of the heap table access method.
* ------------------------------------------------------------------------
.index_fetch_end = heapam_index_fetch_end,
.index_fetch_tuple = heapam_index_fetch_tuple,
+ .tuple_insert = heapam_tuple_insert,
+ .tuple_insert_speculative = heapam_tuple_insert_speculative,
+ .tuple_complete_speculative = heapam_tuple_complete_speculative,
+ .tuple_delete = heapam_tuple_delete,
+ .tuple_update = heapam_tuple_update,
+ .tuple_lock = heapam_tuple_lock,
+
.tuple_satisfies_snapshot = heapam_tuple_satisfies_snapshot,
};
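For reference, this routine reaches the executor through the table AM handler function; assuming the struct above is named heapam_methods (its name is not shown in this excerpt), the handler is just:

    Datum
    heap_tableam_handler(PG_FUNCTION_ARGS)
    {
        PG_RETURN_POINTER(&heapam_methods);
    }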
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/subtrans.h"
+#include "access/tableam.h"
#include "access/transam.h"
#include "access/xact.h"
#include "access/xlog.h"
*
* The possible return codes are:
*
- * HeapTupleInvisible: the tuple didn't exist at all when the scan started,
- * e.g. it was created by a later CommandId.
+ * TM_Invisible: the tuple didn't exist at all when the scan started, e.g. it
+ * was created by a later CommandId.
*
- * HeapTupleMayBeUpdated: The tuple is valid and visible, so it may be
- * updated.
+ * TM_Ok: The tuple is valid and visible, so it may be updated.
*
- * HeapTupleSelfUpdated: The tuple was updated by the current transaction,
- * after the current scan started.
+ * TM_SelfModified: The tuple was updated by the current transaction, after
+ * the current scan started.
*
- * HeapTupleUpdated: The tuple was updated by a committed transaction.
+ * TM_Updated: The tuple was updated by a committed transaction (including
+ * the case where the tuple was moved into a different partition).
*
- * HeapTupleBeingUpdated: The tuple is being updated by an in-progress
- * transaction other than the current transaction. (Note: this includes
- * the case where the tuple is share-locked by a MultiXact, even if the
- * MultiXact includes the current transaction. Callers that want to
- * distinguish that case must test for it themselves.)
+ * TM_Deleted: The tuple was deleted by a committed transaction.
+ *
+ * TM_BeingModified: The tuple is being updated by an in-progress transaction
+ * other than the current transaction. (Note: this includes the case where
+ * the tuple is share-locked by a MultiXact, even if the MultiXact includes
+ * the current transaction. Callers that want to distinguish that case must
+ * test for it themselves.)
*/
-HTSU_Result
+TM_Result
HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid,
Buffer buffer)
{
if (!HeapTupleHeaderXminCommitted(tuple))
{
if (HeapTupleHeaderXminInvalid(tuple))
- return HeapTupleInvisible;
+ return TM_Invisible;
/* Used by pre-9.0 binary upgrades */
if (tuple->t_infomask & HEAP_MOVED_OFF)
TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
if (TransactionIdIsCurrentTransactionId(xvac))
- return HeapTupleInvisible;
+ return TM_Invisible;
if (!TransactionIdIsInProgress(xvac))
{
if (TransactionIdDidCommit(xvac))
{
SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
InvalidTransactionId);
- return HeapTupleInvisible;
+ return TM_Invisible;
}
SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
InvalidTransactionId);
if (!TransactionIdIsCurrentTransactionId(xvac))
{
if (TransactionIdIsInProgress(xvac))
- return HeapTupleInvisible;
+ return TM_Invisible;
if (TransactionIdDidCommit(xvac))
SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
InvalidTransactionId);
{
SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
InvalidTransactionId);
- return HeapTupleInvisible;
+ return TM_Invisible;
}
}
}
else if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetRawXmin(tuple)))
{
if (HeapTupleHeaderGetCmin(tuple) >= curcid)
- return HeapTupleInvisible; /* inserted after scan started */
+ return TM_Invisible; /* inserted after scan started */
if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid */
- return HeapTupleMayBeUpdated;
+ return TM_Ok;
if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
{
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
if (MultiXactIdIsRunning(xmax, true))
- return HeapTupleBeingUpdated;
+ return TM_BeingModified;
else
- return HeapTupleMayBeUpdated;
+ return TM_Ok;
}
/*
* locked/updated.
*/
if (!TransactionIdIsInProgress(xmax))
- return HeapTupleMayBeUpdated;
- return HeapTupleBeingUpdated;
+ return TM_Ok;
+ return TM_BeingModified;
}
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
if (MultiXactIdIsRunning(HeapTupleHeaderGetRawXmax(tuple),
false))
- return HeapTupleBeingUpdated;
- return HeapTupleMayBeUpdated;
+ return TM_BeingModified;
+ return TM_Ok;
}
else
{
if (HeapTupleHeaderGetCmax(tuple) >= curcid)
- return HeapTupleSelfUpdated; /* updated after scan
- * started */
+ return TM_SelfModified; /* updated after scan started */
else
- return HeapTupleInvisible; /* updated before scan
- * started */
+ return TM_Invisible; /* updated before scan started */
}
}
/* deleting subtransaction must have aborted */
SetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
InvalidTransactionId);
- return HeapTupleMayBeUpdated;
+ return TM_Ok;
}
if (HeapTupleHeaderGetCmax(tuple) >= curcid)
- return HeapTupleSelfUpdated; /* updated after scan started */
+ return TM_SelfModified; /* updated after scan started */
else
- return HeapTupleInvisible; /* updated before scan started */
+ return TM_Invisible; /* updated before scan started */
}
else if (TransactionIdIsInProgress(HeapTupleHeaderGetRawXmin(tuple)))
- return HeapTupleInvisible;
+ return TM_Invisible;
else if (TransactionIdDidCommit(HeapTupleHeaderGetRawXmin(tuple)))
SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
HeapTupleHeaderGetRawXmin(tuple));
/* it must have aborted or crashed */
SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
InvalidTransactionId);
- return HeapTupleInvisible;
+ return TM_Invisible;
}
}
/* by here, the inserting transaction has committed */
if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid or aborted */
- return HeapTupleMayBeUpdated;
+ return TM_Ok;
if (tuple->t_infomask & HEAP_XMAX_COMMITTED)
{
if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
- return HeapTupleMayBeUpdated;
- return HeapTupleUpdated; /* updated by other */
+ return TM_Ok;
+ if (!ItemPointerEquals(&htup->t_self, &tuple->t_ctid) ||
+ HeapTupleHeaderIndicatesMovedPartitions(tuple))
+ return TM_Updated; /* updated by other */
+ else
+ return TM_Deleted; /* deleted by other */
}
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
TransactionId xmax;
if (HEAP_LOCKED_UPGRADED(tuple->t_infomask))
- return HeapTupleMayBeUpdated;
+ return TM_Ok;
if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
{
if (MultiXactIdIsRunning(HeapTupleHeaderGetRawXmax(tuple), true))
- return HeapTupleBeingUpdated;
+ return TM_BeingModified;
SetHintBits(tuple, buffer, HEAP_XMAX_INVALID, InvalidTransactionId);
- return HeapTupleMayBeUpdated;
+ return TM_Ok;
}
xmax = HeapTupleGetUpdateXid(tuple);
if (!TransactionIdIsValid(xmax))
{
if (MultiXactIdIsRunning(HeapTupleHeaderGetRawXmax(tuple), false))
- return HeapTupleBeingUpdated;
+ return TM_BeingModified;
}
/* not LOCKED_ONLY, so it has to have an xmax */
if (TransactionIdIsCurrentTransactionId(xmax))
{
if (HeapTupleHeaderGetCmax(tuple) >= curcid)
- return HeapTupleSelfUpdated; /* updated after scan started */
+ return TM_SelfModified; /* updated after scan started */
else
- return HeapTupleInvisible; /* updated before scan started */
+ return TM_Invisible; /* updated before scan started */
}
if (MultiXactIdIsRunning(HeapTupleHeaderGetRawXmax(tuple), false))
- return HeapTupleBeingUpdated;
+ return TM_BeingModified;
if (TransactionIdDidCommit(xmax))
- return HeapTupleUpdated;
+ {
+ if (!ItemPointerEquals(&htup->t_self, &tuple->t_ctid) ||
+ HeapTupleHeaderIndicatesMovedPartitions(tuple))
+ return TM_Updated;
+ else
+ return TM_Deleted;
+ }
/*
* By here, the update in the Xmax is either aborted or crashed, but
*/
SetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
InvalidTransactionId);
- return HeapTupleMayBeUpdated;
+ return TM_Ok;
}
else
{
/* There are lockers running */
- return HeapTupleBeingUpdated;
+ return TM_BeingModified;
}
}
if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetRawXmax(tuple)))
{
if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
- return HeapTupleBeingUpdated;
+ return TM_BeingModified;
if (HeapTupleHeaderGetCmax(tuple) >= curcid)
- return HeapTupleSelfUpdated; /* updated after scan started */
+ return TM_SelfModified; /* updated after scan started */
else
- return HeapTupleInvisible; /* updated before scan started */
+ return TM_Invisible; /* updated before scan started */
}
if (TransactionIdIsInProgress(HeapTupleHeaderGetRawXmax(tuple)))
- return HeapTupleBeingUpdated;
+ return TM_BeingModified;
if (!TransactionIdDidCommit(HeapTupleHeaderGetRawXmax(tuple)))
{
/* it must have aborted or crashed */
SetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
InvalidTransactionId);
- return HeapTupleMayBeUpdated;
+ return TM_Ok;
}
/* xmax transaction committed */
{
SetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
InvalidTransactionId);
- return HeapTupleMayBeUpdated;
+ return TM_Ok;
}
SetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
HeapTupleHeaderGetRawXmax(tuple));
- return HeapTupleUpdated; /* updated by other */
+ if (!ItemPointerEquals(&htup->t_self, &tuple->t_ctid) ||
+ HeapTupleHeaderIndicatesMovedPartitions(tuple))
+ return TM_Updated; /* updated by other */
+ else
+ return TM_Deleted; /* deleted by other */
}
/*
* Have a chunk, delete it
*/
if (is_speculative)
- heap_abort_speculative(toastrel, toasttup);
+ heap_abort_speculative(toastrel, &toasttup->t_self);
else
simple_heap_delete(toastrel, &toasttup->t_self);
}
}
+/* ----------------------------------------------------------------------------
+ * Functions to make modifications a bit simpler.
+ * ----------------------------------------------------------------------------
+ */
+
+/*
+ * simple_table_insert - insert a tuple
+ *
+ * Currently, this routine differs from table_insert only in supplying a
+ * default command ID and not allowing access to the speedup options.
+ */
+void
+simple_table_insert(Relation rel, TupleTableSlot *slot)
+{
+ table_insert(rel, slot, GetCurrentCommandId(true), 0, NULL);
+}
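A hedged usage sketch; the slot-construction helper shown here is an assumption, not part of this excerpt:

    TupleTableSlot *slot = table_slot_create(rel, NULL); /* assumed helper */

    /* fill slot->tts_values / slot->tts_isnull, then materialize */
    ExecStoreVirtualTuple(slot);
    simple_table_insert(rel, slot);
    ExecDropSingleTupleTableSlot(slot);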
+
+/*
+ * simple_table_delete - delete a tuple
+ *
+ * This routine may be used to delete a tuple when concurrent updates of
+ * the target tuple are not expected (for example, because we have a lock
+ * on the relation associated with the tuple). Any failure is reported
+ * via ereport().
+ */
+void
+simple_table_delete(Relation rel, ItemPointer tid, Snapshot snapshot)
+{
+ TM_Result result;
+ TM_FailureData tmfd;
+
+ result = table_delete(rel, tid,
+ GetCurrentCommandId(true),
+ snapshot, InvalidSnapshot,
+ true /* wait for commit */ ,
+ &tmfd, false /* changingPart */ );
+
+ switch (result)
+ {
+ case TM_SelfModified:
+ /* Tuple was already updated in current command? */
+ elog(ERROR, "tuple already updated by self");
+ break;
+
+ case TM_Ok:
+ /* done successfully */
+ break;
+
+ case TM_Updated:
+ elog(ERROR, "tuple concurrently updated");
+ break;
+
+ case TM_Deleted:
+ elog(ERROR, "tuple concurrently deleted");
+ break;
+
+ default:
+ elog(ERROR, "unrecognized table_delete status: %u", result);
+ break;
+ }
+}
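Usage differs from simple_heap_delete() only in the extra snapshot argument; a one-line sketch (the snapshot source and slot variable are illustrative):

    simple_table_delete(rel, &slot->tts_tid, GetActiveSnapshot());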
+
+/*
+ * simple_table_update - replace a tuple
+ *
+ * This routine may be used to update a tuple when concurrent updates of
+ * the target tuple are not expected (for example, because we have a lock
+ * on the relation associated with the tuple). Any failure is reported
+ * via ereport().
+ */
+void
+simple_table_update(Relation rel, ItemPointer otid,
+ TupleTableSlot *slot,
+ Snapshot snapshot,
+ bool *update_indexes)
+{
+ TM_Result result;
+ TM_FailureData tmfd;
+ LockTupleMode lockmode;
+
+ result = table_update(rel, otid, slot,
+ GetCurrentCommandId(true),
+ snapshot, InvalidSnapshot,
+ true /* wait for commit */ ,
+ &tmfd, &lockmode, update_indexes);
+
+ switch (result)
+ {
+ case TM_SelfModified:
+ /* Tuple was already updated in current command? */
+ elog(ERROR, "tuple already updated by self");
+ break;
+
+ case TM_Ok:
+ /* done successfully */
+ break;
+
+ case TM_Updated:
+ elog(ERROR, "tuple concurrently updated");
+ break;
+
+ case TM_Deleted:
+ elog(ERROR, "tuple concurrently deleted");
+ break;
+
+ default:
+ elog(ERROR, "unrecognized table_update status: %u", result);
+ break;
+ }
+}
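Usage mirrors simple_heap_update(), except that the caller now supplies a snapshot and acts on *update_indexes; a sketch (snapshot and executor-state variables are illustrative):

    bool update_indexes;

    simple_table_update(rel, otid, slot, GetActiveSnapshot(),
                        &update_indexes);
    if (update_indexes)
        recheckIndexes = ExecInsertIndexTuples(slot, estate,
                                               false, NULL, NIL);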
+
+
/* ----------------------------------------------------------------------------
* Helper functions to implement parallel scans for block oriented AMs.
* ----------------------------------------------------------------------------
Assert(routine->tuple_satisfies_snapshot != NULL);
+ Assert(routine->tuple_insert != NULL);
+
+ /*
+ * Could be made optional, but would require throwing error during
+ * parse-analysis.
+ */
+ Assert(routine->tuple_insert_speculative != NULL);
+ Assert(routine->tuple_complete_speculative != NULL);
+
+ Assert(routine->tuple_delete != NULL);
+ Assert(routine->tuple_update != NULL);
+ Assert(routine->tuple_lock != NULL);
+
return routine;
}
/* And create index entries for it */
if (resultRelInfo->ri_NumIndices > 0)
recheckIndexes = ExecInsertIndexTuples(slot,
- &(tuple->t_self),
estate,
false,
NULL,
cstate->cur_lineno = firstBufferedLineNo + i;
ExecStoreHeapTuple(bufferedTuples[i], myslot, false);
recheckIndexes =
- ExecInsertIndexTuples(myslot, &(bufferedTuples[i]->t_self),
+ ExecInsertIndexTuples(myslot,
estate, false, NULL, NIL);
ExecARInsertTriggers(estate, resultRelInfo,
myslot,
#include "access/genam.h"
#include "access/heapam.h"
+#include "access/tableam.h"
#include "access/sysattr.h"
#include "access/htup_details.h"
#include "access/xact.h"
TupleTableSlot **newSlot)
{
Relation relation = relinfo->ri_RelationDesc;
- HeapTuple tuple;
- Buffer buffer;
- BufferHeapTupleTableSlot *boldslot;
-
- Assert(TTS_IS_BUFFERTUPLE(oldslot));
- ExecClearTuple(oldslot);
- boldslot = (BufferHeapTupleTableSlot *) oldslot;
- tuple = &boldslot->base.tupdata;
if (newSlot != NULL)
{
- HTSU_Result test;
- HeapUpdateFailureData hufd;
+ TM_Result test;
+ TM_FailureData tmfd;
+ int lockflags = 0;
*newSlot = NULL;
/*
* lock tuple for update
*/
-ltrmark:;
- tuple->t_self = *tid;
- test = heap_lock_tuple(relation, tuple,
- estate->es_output_cid,
- lockmode, LockWaitBlock,
- false, &buffer, &hufd);
+ if (!IsolationUsesXactSnapshot())
+ lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
+ test = table_lock_tuple(relation, tid, estate->es_snapshot, oldslot,
+ estate->es_output_cid,
+ lockmode, LockWaitBlock,
+ lockflags,
+ &tmfd);
+
switch (test)
{
- case HeapTupleSelfUpdated:
+ case TM_SelfModified:
/*
* The target tuple was already updated or deleted by the
* enumerated in ExecUpdate and ExecDelete in
* nodeModifyTable.c.
*/
- if (hufd.cmax != estate->es_output_cid)
+ if (tmfd.cmax != estate->es_output_cid)
ereport(ERROR,
(errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
/* treat it as deleted; do not process */
- ReleaseBuffer(buffer);
return false;
- case HeapTupleMayBeUpdated:
- ExecStorePinnedBufferHeapTuple(tuple, oldslot, buffer);
-
- break;
-
- case HeapTupleUpdated:
- ReleaseBuffer(buffer);
- if (IsolationUsesXactSnapshot())
- ereport(ERROR,
- (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("could not serialize access due to concurrent update")));
- if (ItemPointerIndicatesMovedPartitions(&hufd.ctid))
- ereport(ERROR,
- (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
-
- if (!ItemPointerEquals(&hufd.ctid, &tuple->t_self))
+ case TM_Ok:
+ if (tmfd.traversed)
{
- /* it was updated, so look at the updated version */
TupleTableSlot *epqslot;
epqslot = EvalPlanQual(estate,
epqstate,
relation,
relinfo->ri_RangeTableIndex,
- lockmode,
- &hufd.ctid,
- hufd.xmax);
- if (!TupIsNull(epqslot))
- {
- *tid = hufd.ctid;
+ oldslot);
- *newSlot = epqslot;
+ /*
+ * If PlanQual failed for the updated tuple, we must not
+ * process this tuple!
+ */
+ if (TupIsNull(epqslot))
+ return false;
- /*
- * EvalPlanQual already locked the tuple, but we
- * re-call heap_lock_tuple anyway as an easy way of
- * re-fetching the correct tuple. Speed is hardly a
- * criterion in this path anyhow.
- */
- goto ltrmark;
- }
+ *newSlot = epqslot;
}
+ break;
- /*
- * if tuple was deleted or PlanQual failed for updated tuple -
- * we must not process this tuple!
- */
+ case TM_Updated:
+ if (IsolationUsesXactSnapshot())
+ ereport(ERROR,
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("could not serialize access due to concurrent update")));
+ elog(ERROR, "unexpected table_lock_tuple status: %u", test);
+ break;
+
+ case TM_Deleted:
+ if (IsolationUsesXactSnapshot())
+ ereport(ERROR,
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("could not serialize access due to concurrent delete")));
+ /* tuple was deleted */
return false;
- case HeapTupleInvisible:
+ case TM_Invisible:
elog(ERROR, "attempted to lock invisible tuple");
break;
default:
- ReleaseBuffer(buffer);
- elog(ERROR, "unrecognized heap_lock_tuple status: %u", test);
+ elog(ERROR, "unrecognized table_lock_tuple status: %u", test);
return false; /* keep compiler quiet */
}
}
{
Page page;
ItemId lp;
+ Buffer buffer;
+ BufferHeapTupleTableSlot *boldslot;
+ HeapTuple tuple;
+
+ Assert(TTS_IS_BUFFERTUPLE(oldslot));
+ ExecClearTuple(oldslot);
+ boldslot = (BufferHeapTupleTableSlot *) oldslot;
+ tuple = &boldslot->base.tupdata;
buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo);
ItemPointerCopy(&(event->ate_ctid1), &(tuple1.t_self));
- if (!heap_fetch(rel, SnapshotAny, &tuple1, &buffer, false, NULL))
+ if (!heap_fetch(rel, SnapshotAny, &tuple1, &buffer, NULL))
elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
ExecStorePinnedBufferHeapTuple(&tuple1,
LocTriggerData.tg_trigslot,
LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo);
ItemPointerCopy(&(event->ate_ctid2), &(tuple2.t_self));
- if (!heap_fetch(rel, SnapshotAny, &tuple2, &buffer, false, NULL))
+ if (!heap_fetch(rel, SnapshotAny, &tuple2, &buffer, NULL))
elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
ExecStorePinnedBufferHeapTuple(&tuple2,
LocTriggerData.tg_newslot,
*/
List *
ExecInsertIndexTuples(TupleTableSlot *slot,
- ItemPointer tupleid,
EState *estate,
bool noDupErr,
bool *specConflict,
List *arbiterIndexes)
{
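+ /* The new tuple's TID is now taken from the slot, not passed in. */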
+ ItemPointer tupleid = &slot->tts_tid;
List *result = NIL;
ResultRelInfo *resultRelInfo;
int i;
Datum values[INDEX_MAX_KEYS];
bool isnull[INDEX_MAX_KEYS];
+ Assert(ItemPointerIsValid(tupleid));
+
/*
* Get information from the result relation info structure.
*/
/*
- * Check a modified tuple to see if we want to process its updated version
- * under READ COMMITTED rules.
+ * Check the updated version of a tuple to see if we want to process it under
+ * READ COMMITTED rules.
*
* estate - outer executor state data
* epqstate - state for EvalPlanQual rechecking
* relation - table containing tuple
* rti - rangetable index of table containing tuple
- * lockmode - requested tuple lock mode
- * *tid - t_ctid from the outdated tuple (ie, next updated version)
- * priorXmax - t_xmax from the outdated tuple
+ * inputslot - tuple for processing - this can be the slot from
+ * EvalPlanQualSlot(), for increased efficiency.
*
- * *tid is also an output parameter: it's modified to hold the TID of the
- * latest version of the tuple (note this may be changed even on failure)
+ * This tests whether the tuple in inputslot still matches the relevant
+ * quals. For that result to be useful, the input tuple typically has to be
+ * the last row version (otherwise the result isn't particularly useful) and
+ * locked (otherwise the result might be out of date). That's typically
+ * achieved by using table_lock_tuple() with the
+ * TUPLE_LOCK_FLAG_FIND_LAST_VERSION flag (see the sketch after this
+ * function).
*
* Returns a slot containing the new candidate update/delete tuple, or
* NULL if we determine we shouldn't process the row.
*/
TupleTableSlot *
EvalPlanQual(EState *estate, EPQState *epqstate,
- Relation relation, Index rti, LockTupleMode lockmode,
- ItemPointer tid, TransactionId priorXmax)
+ Relation relation, Index rti, TupleTableSlot *inputslot)
{
TupleTableSlot *slot;
TupleTableSlot *testslot;
EvalPlanQualBegin(epqstate, estate);
/*
- * Get and lock the updated version of the row; if fail, return NULL.
+ * Callers will often use the EvalPlanQualSlot to store the tuple to avoid
+ * an unnecessary copy.
*/
testslot = EvalPlanQualSlot(epqstate, relation, rti);
- if (!EvalPlanQualFetch(estate, relation, lockmode, LockWaitBlock,
- tid, priorXmax,
- testslot))
- return NULL;
-
- /*
- * For UPDATE/DELETE we have to return tid of actual row we're executing
- * PQ for.
- */
- *tid = testslot->tts_tid;
+ if (testslot != inputslot)
+ ExecCopySlot(testslot, inputslot);
/*
* Fetch any non-locked source rows
return slot;
}
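/*
 * Hedged usage sketch of the new contract ("tid", "lockmode", "tmfd" and
 * "retslot" below are illustrative placeholders, not names from this
 * patch): lock the latest row version into the EPQ slot, then recheck the
 * quals with it.
 */
{
	TupleTableSlot *epqslot;
	TupleTableSlot *retslot;
	TM_FailureData tmfd;
	TM_Result	res;

	epqslot = EvalPlanQualSlot(epqstate, relation, rti);
	res = table_lock_tuple(relation, tid, estate->es_snapshot, epqslot,
						   estate->es_output_cid, lockmode, LockWaitBlock,
						   TUPLE_LOCK_FLAG_FIND_LAST_VERSION, &tmfd);
	if (res == TM_Ok && tmfd.traversed)
	{
		retslot = EvalPlanQual(estate, epqstate, relation, rti, epqslot);
		if (TupIsNull(retslot))
		{
			/* row version no longer passes the quals; skip it */
		}
	}
}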
-/*
- * Fetch a copy of the newest version of an outdated tuple
- *
- * estate - executor state data
- * relation - table containing tuple
- * lockmode - requested tuple lock mode
- * wait_policy - requested lock wait policy
- * *tid - t_ctid from the outdated tuple (ie, next updated version)
- * priorXmax - t_xmax from the outdated tuple
- * slot - slot to store newest tuple version
- *
- * Returns true, with slot containing the newest tuple version, or false if we
- * find that there is no newest version (ie, the row was deleted not updated).
- * We also return false if the tuple is locked and the wait policy is to skip
- * such tuples.
- *
- * If successful, we have locked the newest tuple version, so caller does not
- * need to worry about it changing anymore.
- */
-bool
-EvalPlanQualFetch(EState *estate, Relation relation, LockTupleMode lockmode,
- LockWaitPolicy wait_policy,
- ItemPointer tid, TransactionId priorXmax,
- TupleTableSlot *slot)
-{
- HeapTupleData tuple;
- SnapshotData SnapshotDirty;
-
- /*
- * fetch target tuple
- *
- * Loop here to deal with updated or busy tuples
- */
- InitDirtySnapshot(SnapshotDirty);
- tuple.t_self = *tid;
- for (;;)
- {
- Buffer buffer;
-
- if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
- {
- HTSU_Result test;
- HeapUpdateFailureData hufd;
-
- /*
- * If xmin isn't what we're expecting, the slot must have been
- * recycled and reused for an unrelated tuple. This implies that
- * the latest version of the row was deleted, so we need do
- * nothing. (Should be safe to examine xmin without getting
- * buffer's content lock. We assume reading a TransactionId to be
- * atomic, and Xmin never changes in an existing tuple, except to
- * invalid or frozen, and neither of those can match priorXmax.)
- */
- if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
- priorXmax))
- {
- ReleaseBuffer(buffer);
- return false;
- }
-
- /* otherwise xmin should not be dirty... */
- if (TransactionIdIsValid(SnapshotDirty.xmin))
- elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
-
- /*
- * If tuple is being updated by other transaction then we have to
- * wait for its commit/abort, or die trying.
- */
- if (TransactionIdIsValid(SnapshotDirty.xmax))
- {
- ReleaseBuffer(buffer);
- switch (wait_policy)
- {
- case LockWaitBlock:
- XactLockTableWait(SnapshotDirty.xmax,
- relation, &tuple.t_self,
- XLTW_FetchUpdated);
- break;
- case LockWaitSkip:
- if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
- return false; /* skip instead of waiting */
- break;
- case LockWaitError:
- if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
- ereport(ERROR,
- (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
- errmsg("could not obtain lock on row in relation \"%s\"",
- RelationGetRelationName(relation))));
- break;
- }
- continue; /* loop back to repeat heap_fetch */
- }
-
- /*
- * If tuple was inserted by our own transaction, we have to check
- * cmin against es_output_cid: cmin >= current CID means our
- * command cannot see the tuple, so we should ignore it. Otherwise
- * heap_lock_tuple() will throw an error, and so would any later
- * attempt to update or delete the tuple. (We need not check cmax
- * because HeapTupleSatisfiesDirty will consider a tuple deleted
- * by our transaction dead, regardless of cmax.) We just checked
- * that priorXmax == xmin, so we can test that variable instead of
- * doing HeapTupleHeaderGetXmin again.
- */
- if (TransactionIdIsCurrentTransactionId(priorXmax) &&
- HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
- {
- ReleaseBuffer(buffer);
- return false;
- }
-
- /*
- * This is a live tuple, so now try to lock it.
- */
- test = heap_lock_tuple(relation, &tuple,
- estate->es_output_cid,
- lockmode, wait_policy,
- false, &buffer, &hufd);
- /* We now have two pins on the buffer, get rid of one */
- ReleaseBuffer(buffer);
-
- switch (test)
- {
- case HeapTupleSelfUpdated:
-
- /*
- * The target tuple was already updated or deleted by the
- * current command, or by a later command in the current
- * transaction. We *must* ignore the tuple in the former
- * case, so as to avoid the "Halloween problem" of
- * repeated update attempts. In the latter case it might
- * be sensible to fetch the updated tuple instead, but
- * doing so would require changing heap_update and
- * heap_delete to not complain about updating "invisible"
- * tuples, which seems pretty scary (heap_lock_tuple will
- * not complain, but few callers expect
- * HeapTupleInvisible, and we're not one of them). So for
- * now, treat the tuple as deleted and do not process.
- */
- ReleaseBuffer(buffer);
- return false;
-
- case HeapTupleMayBeUpdated:
- /* successfully locked */
- break;
-
- case HeapTupleUpdated:
- ReleaseBuffer(buffer);
- if (IsolationUsesXactSnapshot())
- ereport(ERROR,
- (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("could not serialize access due to concurrent update")));
- if (ItemPointerIndicatesMovedPartitions(&hufd.ctid))
- ereport(ERROR,
- (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
-
- /* Should not encounter speculative tuple on recheck */
- Assert(!HeapTupleHeaderIsSpeculative(tuple.t_data));
- if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
- {
- /* it was updated, so look at the updated version */
- tuple.t_self = hufd.ctid;
- /* updated row should have xmin matching this xmax */
- priorXmax = hufd.xmax;
- continue;
- }
- /* tuple was deleted, so give up */
- return false;
-
- case HeapTupleWouldBlock:
- ReleaseBuffer(buffer);
- return false;
-
- case HeapTupleInvisible:
- elog(ERROR, "attempted to lock invisible tuple");
- break;
-
- default:
- ReleaseBuffer(buffer);
- elog(ERROR, "unrecognized heap_lock_tuple status: %u",
- test);
- return false; /* keep compiler quiet */
- }
-
- /*
- * We got tuple - store it for use by the recheck query.
- */
- ExecStorePinnedBufferHeapTuple(&tuple, slot, buffer);
- ExecMaterializeSlot(slot);
- break;
- }
-
- /*
- * If the referenced slot was actually empty, the latest version of
- * the row must have been deleted, so we need do nothing.
- */
- if (tuple.t_data == NULL)
- {
- ReleaseBuffer(buffer);
- return false;
- }
-
- /*
- * As above, if xmin isn't what we're expecting, do nothing.
- */
- if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
- priorXmax))
- {
- ReleaseBuffer(buffer);
- return false;
- }
-
- /*
- * If we get here, the tuple was found but failed SnapshotDirty.
- * Assuming the xmin is either a committed xact or our own xact (as it
- * certainly should be if we're trying to modify the tuple), this must
- * mean that the row was updated or deleted by either a committed xact
- * or our own xact. If it was deleted, we can ignore it; if it was
- * updated then chain up to the next version and repeat the whole
- * process.
- *
- * As above, it should be safe to examine xmax and t_ctid without the
- * buffer content lock, because they can't be changing.
- */
-
- /* check whether next version would be in a different partition */
- if (HeapTupleHeaderIndicatesMovedPartitions(tuple.t_data))
- ereport(ERROR,
- (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
-
- /* check whether tuple has been deleted */
- if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
- {
- /* deleted, so forget about it */
- ReleaseBuffer(buffer);
- return false;
- }
-
- /* updated, so look at the updated row */
- tuple.t_self = tuple.t_data->t_ctid;
- /* updated row should have xmin matching this xmax */
- priorXmax = HeapTupleHeaderGetUpdateXid(tuple.t_data);
- ReleaseBuffer(buffer);
- /* loop back to fetch next in chain */
- }
-
- /* signal success */
- return true;
-}
-
/*
* EvalPlanQualInit -- initialize during creation of a plan state node
* that might need to invoke EPQ processing.
tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
- false, NULL))
+ NULL))
elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
/* successful, store tuple */
#include "postgres.h"
#include "access/genam.h"
-#include "access/heapam.h"
#include "access/relscan.h"
#include "access/tableam.h"
#include "access/transam.h"
/* Found tuple, try to lock it in the lockmode. */
if (found)
{
- Buffer buf;
- HeapUpdateFailureData hufd;
- HTSU_Result res;
- HeapTupleData locktup;
- HeapTupleTableSlot *hslot = (HeapTupleTableSlot *)outslot;
-
- /* Only a heap tuple has item pointers. */
- Assert(TTS_IS_HEAPTUPLE(outslot) || TTS_IS_BUFFERTUPLE(outslot));
- ItemPointerCopy(&hslot->tuple->t_self, &locktup.t_self);
+ TM_FailureData tmfd;
+ TM_Result res;
PushActiveSnapshot(GetLatestSnapshot());
- res = heap_lock_tuple(rel, &locktup, GetCurrentCommandId(false),
- lockmode,
- LockWaitBlock,
- false /* don't follow updates */ ,
- &buf, &hufd);
- /* the tuple slot already has the buffer pinned */
- ReleaseBuffer(buf);
+ res = table_lock_tuple(rel, &(outslot->tts_tid), GetLatestSnapshot(),
+ outslot,
+ GetCurrentCommandId(false),
+ lockmode,
+ LockWaitBlock,
+ 0 /* don't follow updates */ ,
+ &tmfd);
PopActiveSnapshot();
switch (res)
{
- case HeapTupleMayBeUpdated:
+ case TM_Ok:
break;
- case HeapTupleUpdated:
+ case TM_Updated:
/* XXX: Improve handling here */
- if (ItemPointerIndicatesMovedPartitions(&hufd.ctid))
+ if (ItemPointerIndicatesMovedPartitions(&tmfd.ctid))
ereport(LOG,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("tuple to be locked was already moved to another partition due to concurrent update, retrying")));
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("concurrent update, retrying")));
goto retry;
- case HeapTupleInvisible:
+ case TM_Deleted:
+ /* XXX: Improve handling here */
+ ereport(LOG,
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("concurrent delete, retrying")));
+ goto retry;
+ case TM_Invisible:
elog(ERROR, "attempted to lock invisible tuple");
break;
default:
- elog(ERROR, "unexpected heap_lock_tuple status: %u", res);
+ elog(ERROR, "unexpected table_lock_tuple status: %u", res);
break;
}
}
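/*
 * Editorial note: the "goto retry" cases above jump back to the scan that
 * set "found", so a concurrently updated or deleted row is searched for
 * again and relocked under a fresh snapshot. Assumed shape of the loop:
 *
 *     retry:
 *         found = <rescan for the target row>;
 *         if (found)
 *             <PushActiveSnapshot(); table_lock_tuple(); switch as above>;
 */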
/* Found tuple, try to lock it in the lockmode. */
if (found)
{
- Buffer buf;
- HeapUpdateFailureData hufd;
- HTSU_Result res;
- HeapTupleData locktup;
- HeapTupleTableSlot *hslot = (HeapTupleTableSlot *)outslot;
-
- /* Only a heap tuple has item pointers. */
- Assert(TTS_IS_HEAPTUPLE(outslot) || TTS_IS_BUFFERTUPLE(outslot));
- ItemPointerCopy(&hslot->tuple->t_self, &locktup.t_self);
+ TM_FailureData tmfd;
+ TM_Result res;
PushActiveSnapshot(GetLatestSnapshot());
- res = heap_lock_tuple(rel, &locktup, GetCurrentCommandId(false),
- lockmode,
- LockWaitBlock,
- false /* don't follow updates */ ,
- &buf, &hufd);
- /* the tuple slot already has the buffer pinned */
- ReleaseBuffer(buf);
+ res = table_lock_tuple(rel, &(outslot->tts_tid), GetLatestSnapshot(),
+ outslot,
+ GetCurrentCommandId(false),
+ lockmode,
+ LockWaitBlock,
+ 0 /* don't follow updates */ ,
+ &tmfd);
PopActiveSnapshot();
switch (res)
{
- case HeapTupleMayBeUpdated:
+ case TM_Ok:
break;
- case HeapTupleUpdated:
+ case TM_Updated:
/* XXX: Improve handling here */
- if (ItemPointerIndicatesMovedPartitions(&hufd.ctid))
+ if (ItemPointerIndicatesMovedPartitions(&tmfd.ctid))
ereport(LOG,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("tuple to be locked was already moved to another partition due to concurrent update, retrying")));
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("concurrent update, retrying")));
goto retry;
- case HeapTupleInvisible:
+ case TM_Deleted:
+ /* XXX: Improve handling here */
+ ereport(LOG,
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("concurrent delete, retrying")));
+ goto retry;
+ case TM_Invisible:
elog(ERROR, "attempted to lock invisible tuple");
break;
default:
- elog(ERROR, "unexpected heap_lock_tuple status: %u", res);
+ elog(ERROR, "unexpected table_lock_tuple status: %u", res);
break;
}
}
ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot)
{
bool skip_tuple = false;
- HeapTuple tuple;
ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
Relation rel = resultRelInfo->ri_RelationDesc;
if (resultRelInfo->ri_PartitionCheck)
ExecPartitionCheck(resultRelInfo, slot, estate, true);
- /* Materialize slot into a tuple that we can scribble upon. */
- tuple = ExecFetchSlotHeapTuple(slot, true, NULL);
-
/* OK, store the tuple and create index entries for it */
- simple_heap_insert(rel, tuple);
- ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
+ simple_table_insert(resultRelInfo->ri_RelationDesc, slot);
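+ /* simple_table_insert() sets slot->tts_tid, used by the index code below */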
if (resultRelInfo->ri_NumIndices > 0)
- recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
- estate, false, NULL,
+ recheckIndexes = ExecInsertIndexTuples(slot, estate, false, NULL,
NIL);
/* AFTER ROW INSERT Triggers */
TupleTableSlot *searchslot, TupleTableSlot *slot)
{
bool skip_tuple = false;
- HeapTuple tuple;
ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
Relation rel = resultRelInfo->ri_RelationDesc;
- HeapTupleTableSlot *hsearchslot = (HeapTupleTableSlot *)searchslot;
-
- /* We expect the searchslot to contain a heap tuple. */
- Assert(TTS_IS_HEAPTUPLE(searchslot) || TTS_IS_BUFFERTUPLE(searchslot));
+ ItemPointer tid = &(searchslot->tts_tid);
/* For now we support only tables. */
Assert(rel->rd_rel->relkind == RELKIND_RELATION);
resultRelInfo->ri_TrigDesc->trig_update_before_row)
{
if (!ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
- &hsearchslot->tuple->t_self,
- NULL, slot))
+ tid, NULL, slot))
skip_tuple = true; /* "do nothing" */
}
if (!skip_tuple)
{
List *recheckIndexes = NIL;
+ bool update_indexes;
/* Check the constraints of the tuple */
if (rel->rd_att->constr)
if (resultRelInfo->ri_PartitionCheck)
ExecPartitionCheck(resultRelInfo, slot, estate, true);
- /* Materialize slot into a tuple that we can scribble upon. */
- tuple = ExecFetchSlotHeapTuple(slot, true, NULL);
+ simple_table_update(rel, tid, slot, estate->es_snapshot,
+ &update_indexes);
- /* OK, update the tuple and index entries for it */
- simple_heap_update(rel, &hsearchslot->tuple->t_self, tuple);
- ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
-
- if (resultRelInfo->ri_NumIndices > 0 &&
- !HeapTupleIsHeapOnly(tuple))
- recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
- estate, false, NULL,
+ if (resultRelInfo->ri_NumIndices > 0 && update_indexes)
+ recheckIndexes = ExecInsertIndexTuples(slot, estate, false, NULL,
NIL);
/* AFTER ROW UPDATE Triggers */
ExecARUpdateTriggers(estate, resultRelInfo,
- &(tuple->t_self),
- NULL, slot,
+ tid, NULL, slot,
recheckIndexes, NULL);
list_free(recheckIndexes);
bool skip_tuple = false;
ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
Relation rel = resultRelInfo->ri_RelationDesc;
- HeapTupleTableSlot *hsearchslot = (HeapTupleTableSlot *)searchslot;
-
- /* For now we support only tables and heap tuples. */
- Assert(rel->rd_rel->relkind == RELKIND_RELATION);
- Assert(TTS_IS_HEAPTUPLE(searchslot) || TTS_IS_BUFFERTUPLE(searchslot));
+ ItemPointer tid = &searchslot->tts_tid;
CheckCmdReplicaIdentity(rel, CMD_DELETE);
resultRelInfo->ri_TrigDesc->trig_delete_before_row)
{
skip_tuple = !ExecBRDeleteTriggers(estate, epqstate, resultRelInfo,
- &hsearchslot->tuple->t_self,
- NULL, NULL);
+ tid, NULL, NULL);
}
if (!skip_tuple)
{
- List *recheckIndexes = NIL;
-
/* OK, delete the tuple */
- simple_heap_delete(rel, &hsearchslot->tuple->t_self);
+ simple_table_delete(rel, tid, estate->es_snapshot);
/* AFTER ROW DELETE Triggers */
ExecARDeleteTriggers(estate, resultRelInfo,
- &hsearchslot->tuple->t_self, NULL, NULL);
-
- list_free(recheckIndexes);
+ tid, NULL, NULL);
}
}
#include "postgres.h"
-#include "access/heapam.h"
-#include "access/htup_details.h"
+#include "access/tableam.h"
#include "access/xact.h"
#include "executor/executor.h"
#include "executor/nodeLockRows.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
-#include "storage/bufmgr.h"
#include "utils/rel.h"
ExecRowMark *erm = aerm->rowmark;
Datum datum;
bool isNull;
- HeapTupleData tuple;
- Buffer buffer;
- HeapUpdateFailureData hufd;
+ ItemPointerData tid;
+ TM_FailureData tmfd;
LockTupleMode lockmode;
- HTSU_Result test;
+ int lockflags = 0;
+ TM_Result test;
TupleTableSlot *markSlot;
/* clear any leftover test tuple for this rel */
/* this child is inactive right now */
erm->ermActive = false;
ItemPointerSetInvalid(&(erm->curCtid));
+ ExecClearTuple(markSlot);
continue;
}
}
continue;
}
- /* okay, try to lock the tuple */
- tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
+ /* okay, try to lock (and fetch) the tuple */
+ tid = *((ItemPointer) DatumGetPointer(datum));
switch (erm->markType)
{
case ROW_MARK_EXCLUSIVE:
break;
}
- test = heap_lock_tuple(erm->relation, &tuple,
- estate->es_output_cid,
- lockmode, erm->waitPolicy, true,
- &buffer, &hufd);
- ReleaseBuffer(buffer);
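+ /*
+ * In READ COMMITTED we want table_lock_tuple() to chase the update
+ * chain to the latest row version; under transaction-snapshot
+ * isolation a concurrent update is a serialization failure instead,
+ * so the chain must not be followed.
+ */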
+ lockflags = TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS;
+ if (!IsolationUsesXactSnapshot())
+ lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
+
+ test = table_lock_tuple(erm->relation, &tid, estate->es_snapshot,
+ markSlot, estate->es_output_cid,
+ lockmode, erm->waitPolicy,
+ lockflags,
+ &tmfd);
+
switch (test)
{
- case HeapTupleWouldBlock:
+ case TM_WouldBlock:
/* couldn't lock tuple in SKIP LOCKED mode */
goto lnext;
- case HeapTupleSelfUpdated:
+ case TM_SelfModified:
/*
* The target tuple was already updated or deleted by the
* to fetch the updated tuple instead, but doing so would
* require changing heap_update and heap_delete to not
* complain about updating "invisible" tuples, which seems
- * pretty scary (heap_lock_tuple will not complain, but few
- * callers expect HeapTupleInvisible, and we're not one of
- * them). So for now, treat the tuple as deleted and do not
- * process.
+ * pretty scary (table_lock_tuple will not complain, but few
+ * callers expect TM_Invisible, and we're not one of them). So
+ * for now, treat the tuple as deleted and do not process.
*/
goto lnext;
- case HeapTupleMayBeUpdated:
- /* got the lock successfully */
+ case TM_Ok:
+
+ /*
+ * Got the lock successfully; the locked tuple is saved in
+ * markSlot for EvalPlanQual testing below, if needed.
+ */
+ if (tmfd.traversed)
+ epq_needed = true;
break;
- case HeapTupleUpdated:
+ case TM_Updated:
if (IsolationUsesXactSnapshot())
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("could not serialize access due to concurrent update")));
- if (ItemPointerIndicatesMovedPartitions(&hufd.ctid))
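+ /* not expected: with TUPLE_LOCK_FLAG_FIND_LAST_VERSION set, updates are followed */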
+ elog(ERROR, "unexpected table_lock_tuple status: %u",
+ test);
+ break;
+
+ case TM_Deleted:
+ if (IsolationUsesXactSnapshot())
ereport(ERROR,