Assert(prev_first_lsn < cur_txn->first_lsn);
/* known-as-subtxn txns must not be listed */
- Assert(!cur_txn->is_known_as_subxact);
+ Assert(!rbtxn_is_known_subxact(cur_txn));
prev_first_lsn = cur_txn->first_lsn;
}
Assert(prev_base_snap_lsn < cur_txn->base_snapshot_lsn);
/* known-as-subtxn txns must not be listed */
- Assert(!cur_txn->is_known_as_subxact);
+ Assert(!rbtxn_is_known_subxact(cur_txn));
prev_base_snap_lsn = cur_txn->base_snapshot_lsn;
}
txn = dlist_head_element(ReorderBufferTXN, node, &rb->toplevel_by_lsn);
- Assert(!txn->is_known_as_subxact);
+ Assert(!rbtxn_is_known_subxact(txn));
Assert(txn->first_lsn != InvalidXLogRecPtr);
return txn;
}
if (!new_sub)
{
- if (subtxn->is_known_as_subxact)
+ if (rbtxn_is_known_subxact(subtxn))
{
/* already associated, nothing to do */
return;
}
}
- subtxn->is_known_as_subxact = true;
+ subtxn->txn_flags |= RBTXN_IS_SUBXACT;
subtxn->toplevel_xid = xid;
Assert(subtxn->nsubtxns == 0);
{
ReorderBufferChange *cur_change;
- if (txn->serialized)
+ if (rbtxn_is_serialized(txn))
{
/* serialize remaining changes */
ReorderBufferSerializeTXN(rb, txn);
{
ReorderBufferChange *cur_change;
- if (cur_txn->serialized)
+ if (rbtxn_is_serialized(cur_txn))
{
/* serialize remaining changes */
ReorderBufferSerializeTXN(rb, cur_txn);
* they originally were happening inside another subtxn, so we won't
* ever recurse more than one level deep here.
*/
- Assert(subtxn->is_known_as_subxact);
+ Assert(rbtxn_is_known_subxact(subtxn));
Assert(subtxn->nsubtxns == 0);
ReorderBufferCleanupTXN(rb, subtxn);
/*
* Remove TXN from its containing list.
*
- * Note: if txn->is_known_as_subxact, we are deleting the TXN from its
+ * Note: if txn is known as subxact, we are deleting the TXN from its
* parent's list of known subxacts; this leaves the parent's nsubxacts
* count too high, but we don't care. Otherwise, we are deleting the TXN
* from the LSN-ordered list of toplevel TXNs.
Assert(found);
/* remove entries spilled to disk */
- if (txn->serialized)
+ if (rbtxn_is_serialized(txn))
ReorderBufferRestoreCleanup(rb, txn);
/* deallocate */
dlist_iter iter;
HASHCTL hash_ctl;
- if (!txn->has_catalog_changes || dlist_is_empty(&txn->tuplecids))
+ if (!rbtxn_has_catalog_changes(txn) || dlist_is_empty(&txn->tuplecids))
return;
memset(&hash_ctl, 0, sizeof(hash_ctl));
* final_lsn to that of their last change; this causes
* ReorderBufferRestoreCleanup to do the right thing.
*/
- if (txn->serialized && txn->final_lsn == 0)
+ if (rbtxn_is_serialized(txn) && txn->final_lsn == 0)
{
ReorderBufferChange *last =
dlist_tail_element(ReorderBufferChange, node, &txn->changes);
* operate on its top-level transaction instead.
*/
txn = ReorderBufferTXNByXid(rb, xid, true, &is_new, lsn, true);
- if (txn->is_known_as_subxact)
+ if (rbtxn_is_known_subxact(txn))
txn = ReorderBufferTXNByXid(rb, txn->toplevel_xid, false,
NULL, InvalidXLogRecPtr, false);
Assert(txn->base_snapshot == NULL);
txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);
- txn->has_catalog_changes = true;
+ txn->txn_flags |= RBTXN_HAS_CATALOG_CHANGES;
}
/*
if (txn == NULL)
return false;
- return txn->has_catalog_changes;
+ return rbtxn_has_catalog_changes(txn);
}
/*
return false;
/* a known subtxn? operate on top-level txn instead */
- if (txn->is_known_as_subxact)
+ if (rbtxn_is_known_subxact(txn))
txn = ReorderBufferTXNByXid(rb, txn->toplevel_xid, false,
NULL, InvalidXLogRecPtr, false);
rb->spillCount += 1;
rb->spillBytes += size;
- /* Don't consider already serialized transaction. */
- rb->spillTxns += txn->serialized ? 0 : 1;
+ /* Don't consider already serialized transactions. */
+ rb->spillTxns += rbtxn_is_serialized(txn) ? 0 : 1;
Assert(spilled == txn->nentries_mem);
Assert(dlist_is_empty(&txn->changes));
txn->nentries_mem = 0;
- txn->serialized = true;
+ txn->txn_flags |= RBTXN_IS_SERIALIZED;
if (fd != -1)
CloseTransientFile(fd);
dlist_node node;
} ReorderBufferChange;
+/* ReorderBufferTXN txn_flags */
+#define RBTXN_HAS_CATALOG_CHANGES 0x0001
+#define RBTXN_IS_SUBXACT 0x0002
+#define RBTXN_IS_SERIALIZED 0x0004
+
+/* Does the transaction have catalog changes? */
+#define rbtxn_has_catalog_changes(txn) \
+( \
+ ((txn)->txn_flags & RBTXN_HAS_CATALOG_CHANGES) != 0 \
+)
+
+/* Is the transaction known as a subxact? */
+#define rbtxn_is_known_subxact(txn) \
+( \
+ ((txn)->txn_flags & RBTXN_IS_SUBXACT) != 0 \
+)
+
+/* Has this transaction been spilled to disk? (Can't be deduced from nentries.) */
+#define rbtxn_is_serialized(txn) \
+( \
+ ((txn)->txn_flags & RBTXN_IS_SERIALIZED) != 0 \
+)
+
typedef struct ReorderBufferTXN
{
- /*
- * The transactions transaction id, can be a toplevel or sub xid.
- */
- TransactionId xid;
+ /* See the RBTXN_* txn_flags bits defined above */
+ bits32 txn_flags;
- /* did the TX have catalog changes */
- bool has_catalog_changes;
+ /* The transaction's transaction id, can be a toplevel or sub xid. */
+ TransactionId xid;
- /* Do we know this is a subxact? Xid of top-level txn if so */
- bool is_known_as_subxact;
+ /* Xid of top-level transaction, if known */
TransactionId toplevel_xid;
/*
*/
uint64 nentries_mem;
- /*
- * Has this transaction been spilled to disk? It's not always possible to
- * deduce that fact by comparing nentries with nentries_mem, because e.g.
- * subtransactions of a large transaction might get serialized together
- * with the parent - if they're restored to memory they'd have
- * nentries_mem == nentries.
- */
- bool serialized;
-
/*
* List of ReorderBufferChange structs, including new Snapshots and new
* CommandIds
* Size of this transaction (changes currently in memory, in bytes).
*/
Size size;
-
} ReorderBufferTXN;
/* so we can define the callbacks used inside struct ReorderBuffer itself */