/*
* Total shared-memory state for XLOG.
- *
- * This small structure is accessed by many backends, so we take care to
- * pad out the parts of the structure so they can be accessed by separate
- * CPUs without causing false sharing cache flushes. Padding is generous
- * to allow for a wide variety of CPU architectures.
*/
-#define XLOGCTL_BUFFER_SPACING 128
typedef struct XLogCtlData
{
/* Protected by WALInsertLock: */
XLogCtlInsert Insert;
- char InsertPadding[XLOGCTL_BUFFER_SPACING - sizeof(XLogCtlInsert)];
/* Protected by info_lck: */
XLogwrtRqst LogwrtRqst;
uint32 ckptXidEpoch; /* nextXID & epoch of latest checkpoint */
TransactionId ckptXid;
XLogRecPtr asyncCommitLSN; /* LSN of newest async commit */
- /* add data structure padding for above info_lck declarations */
- char InfoPadding[XLOGCTL_BUFFER_SPACING - sizeof(XLogwrtRqst)
- - sizeof(XLogwrtResult)
- - sizeof(uint32)
- - sizeof(TransactionId)
- - sizeof(XLogRecPtr)];
/* Protected by WALWriteLock: */
XLogCtlWrite Write;
- char WritePadding[XLOGCTL_BUFFER_SPACING - sizeof(XLogCtlWrite)];
/*
* These values do not change after startup, although the pointed-to pages
* always during Recovery Processing Mode. This allows us to identify
* code executed *during* Recovery Processing Mode but not necessarily
* by Startup process itself.
- *
- * Protected by mode_lck
*/
bool SharedRecoveryProcessingMode;
- slock_t mode_lck;
-
- char InfoLockPadding[XLOGCTL_BUFFER_SPACING];
slock_t info_lck; /* locks shared variables shown above */
} XLogCtlData;
XLogCtl->XLogCacheBlck = XLOGbuffers - 1;
XLogCtl->Insert.currpage = (XLogPageHeader) (XLogCtl->pages);
/* info_lck guards the shared variables in XLogCtlData; mode_lck is gone */
SpinLockInit(&XLogCtl->info_lck);
/*
* If we are not in bootstrap mode, pg_control should already exist. Read