Diffstat (limited to 'src/backend/access')
| -rw-r--r-- | src/backend/access/common/toast_internals.c | 6 |
| -rw-r--r-- | src/backend/access/heap/pruneheap.c | 12 |
| -rw-r--r-- | src/backend/access/heap/vacuumlazy.c | 14 |
| -rw-r--r-- | src/backend/access/rmgrdesc/xactdesc.c | 4 |
| -rw-r--r-- | src/backend/access/rmgrdesc/xlogdesc.c | 2 |
| -rw-r--r-- | src/backend/access/transam/rmgr.c | 6 |
| -rw-r--r-- | src/backend/access/transam/twophase.c | 6 |
| -rw-r--r-- | src/backend/access/transam/xlog.c | 28 |
| -rw-r--r-- | src/backend/access/transam/xlogarchive.c | 16 |
| -rw-r--r-- | src/backend/access/transam/xlogfuncs.c | 8 |
| -rw-r--r-- | src/backend/access/transam/xlogrecovery.c | 15 |
| -rw-r--r-- | src/backend/access/transam/xlogstats.c | 10 |
| -rw-r--r-- | src/backend/access/transam/xlogutils.c | 13 |
13 files changed, 69 insertions, 71 deletions
diff --git a/src/backend/access/common/toast_internals.c b/src/backend/access/common/toast_internals.c
index 7052ac99780..576e585a89f 100644
--- a/src/backend/access/common/toast_internals.c
+++ b/src/backend/access/common/toast_internals.c
@@ -663,9 +663,9 @@ init_toast_snapshot(Snapshot toast_snapshot)
 
 	/*
 	 * Catalog snapshots can be returned by GetOldestSnapshot() even if not
 	 * registered or active. That easily hides bugs around not having a
-	 * snapshot set up - most of the time there is a valid catalog
-	 * snapshot. So additionally insist that the current snapshot is
-	 * registered or active.
+	 * snapshot set up - most of the time there is a valid catalog snapshot.
+	 * So additionally insist that the current snapshot is registered or
+	 * active.
 	 */
 	Assert(HaveRegisteredOrActiveSnapshot());
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 98d31de0031..9f43bbe25f5 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -68,9 +68,9 @@ typedef struct
 
 	/*
 	 * Tuple visibility is only computed once for each tuple, for correctness
-	 * and efficiency reasons; see comment in heap_page_prune() for
-	 * details. This is of type int8[], instead of HTSV_Result[], so we can use
-	 * -1 to indicate no visibility has been computed, e.g. for LP_DEAD items.
+	 * and efficiency reasons; see comment in heap_page_prune() for details.
+	 * This is of type int8[], instead of HTSV_Result[], so we can use -1 to
+	 * indicate no visibility has been computed, e.g. for LP_DEAD items.
 	 *
 	 * Same indexing as ->marked.
 	 */
@@ -203,8 +203,8 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
 		 */
 		if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
 		{
-			int ndeleted,
-				nnewlpdead;
+			int			ndeleted,
+						nnewlpdead;
 
 			ndeleted = heap_page_prune(relation, buffer, vistest, limited_xmin,
 									   limited_ts, &nnewlpdead, NULL);
@@ -267,7 +267,7 @@ heap_page_prune(Relation relation, Buffer buffer,
 				GlobalVisState *vistest,
 				TransactionId old_snap_xmin,
 				TimestampTz old_snap_ts,
-				int *nnewlpdead,
+				int		   *nnewlpdead,
 				OffsetNumber *off_loc)
 {
 	int			ndeleted = 0;
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 9482f99e68b..b802ed247e7 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -326,7 +326,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
 	PGRUsage	ru0;
 	TimestampTz starttime = 0;
 	PgStat_Counter startreadtime = 0,
-		startwritetime = 0;
+				startwritetime = 0;
 	WalUsage	startwalusage = pgWalUsage;
 	int64		StartPageHit = VacuumPageHit,
 				StartPageMiss = VacuumPageMiss,
@@ -2232,12 +2232,12 @@ lazy_vacuum(LVRelState *vacrel)
 		 * dead_items space is not CPU cache resident.
 		 *
 		 * We don't take any special steps to remember the LP_DEAD items (such
-		 * as counting them in our final update to the stats system) when
-		 * the optimization is applied. Though the accounting used in
-		 * analyze.c's acquire_sample_rows() will recognize the same LP_DEAD
-		 * items as dead rows in its own stats report, that's okay.
-		 * The discrepancy should be negligible. If this optimization is ever
-		 * expanded to cover more cases then this may need to be reconsidered.
+		 * as counting them in our final update to the stats system) when the
+		 * optimization is applied. Though the accounting used in analyze.c's
+		 * acquire_sample_rows() will recognize the same LP_DEAD items as dead
+		 * rows in its own stats report, that's okay. The discrepancy should
+		 * be negligible. If this optimization is ever expanded to cover more
+		 * cases then this may need to be reconsidered.
 		 */
 		threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
 		bypass = (vacrel->lpdead_item_pages < threshold &&
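For context on the lazy_vacuum() hunk above: the rewrapped comment documents the index-vacuuming bypass, which kicks in when few pages have LP_DEAD items. Below is a minimal standalone sketch of that test. The function name and two-argument shape are simplifications of the real LVRelState bookkeeping (which also requires indexes to exist and caps the total dead-item count); BYPASS_THRESHOLD_PAGES is the real 0.02 constant from vacuumlazy.c.

    #include <stdbool.h>
    #include <stdint.h>

    #define BYPASS_THRESHOLD_PAGES	0.02	/* fraction of rel_pages, as in vacuumlazy.c */

    /*
     * Sketch of the bypass test: skip index vacuuming when the number of
     * pages with LP_DEAD items stays under 2% of the table.
     */
    static bool
    should_bypass_index_vacuum(int64_t rel_pages, int64_t lpdead_item_pages)
    {
    	double		threshold = (double) rel_pages * BYPASS_THRESHOLD_PAGES;

    	return (double) lpdead_item_pages < threshold;
    }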
diff --git a/src/backend/access/rmgrdesc/xactdesc.c b/src/backend/access/rmgrdesc/xactdesc.c
index e739c4a3bd9..90b6ac2884d 100644
--- a/src/backend/access/rmgrdesc/xactdesc.c
+++ b/src/backend/access/rmgrdesc/xactdesc.c
@@ -411,8 +411,8 @@ xact_desc_prepare(StringInfo buf, uint8 info, xl_xact_prepare *xlrec, RepOriginI
 					 parsed.tsId, xlrec->initfileinval);
 
 	/*
-	 * Check if the replication origin has been set in this record in the
-	 * same way as PrepareRedoAdd().
+	 * Check if the replication origin has been set in this record in the same
+	 * way as PrepareRedoAdd().
 	 */
 	if (origin_id != InvalidRepOriginId)
 		appendStringInfo(buf, "; origin: node %u, lsn %X/%X, at %s",
diff --git a/src/backend/access/rmgrdesc/xlogdesc.c b/src/backend/access/rmgrdesc/xlogdesc.c
index c0dfea40c70..fefc563323d 100644
--- a/src/backend/access/rmgrdesc/xlogdesc.c
+++ b/src/backend/access/rmgrdesc/xlogdesc.c
@@ -210,7 +210,7 @@ XLogRecGetBlockRefInfo(XLogReaderState *record, bool pretty,
 					   bool detailed_format, StringInfo buf,
 					   uint32 *fpi_len)
 {
-	int block_id;
+	int			block_id;
 
 	Assert(record != NULL);
 
diff --git a/src/backend/access/transam/rmgr.c b/src/backend/access/transam/rmgr.c
index e1d6ebbd3db..8ed69244e39 100644
--- a/src/backend/access/transam/rmgr.c
+++ b/src/backend/access/transam/rmgr.c
@@ -38,7 +38,7 @@
 #define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup,mask,decode) \
 	{ name, redo, desc, identify, startup, cleanup, mask, decode },
 
-RmgrData RmgrTable[RM_MAX_ID + 1] = {
+RmgrData	RmgrTable[RM_MAX_ID + 1] = {
 #include "access/rmgrlist.h"
 };
 
@@ -125,8 +125,8 @@ RegisterCustomRmgr(RmgrId rmid, RmgrData *rmgr)
 
 		if (!pg_strcasecmp(RmgrTable[existing_rmid].rm_name, rmgr->rm_name))
 			ereport(ERROR,
-					(errmsg("failed to register custom resource manager \"%s\" with ID %d", rmgr->rm_name, rmid),
-					 errdetail("Existing resource manager with ID %d has the same name.", existing_rmid)));
+					(errmsg("failed to register custom resource manager \"%s\" with ID %d", rmgr->rm_name, rmid),
+					 errdetail("Existing resource manager with ID %d has the same name.", existing_rmid)));
 	}
 
 	/* register it */
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index dc0266693e3..75551f60cbc 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -1119,7 +1119,7 @@ StartPrepare(GlobalTransaction gxact)
 	if (hdr.nabortstats > 0)
 	{
 		save_state_data(abortstats,
-						hdr.nabortstats * sizeof(xl_xact_stats_item));
+						hdr.nabortstats * sizeof(xl_xact_stats_item));
 		pfree(abortstats);
 	}
 	if (hdr.ninvalmsgs > 0)
@@ -1529,9 +1529,9 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
 	bufptr += MAXALIGN(hdr->ncommitrels * sizeof(RelFileNode));
 	abortrels = (RelFileNode *) bufptr;
 	bufptr += MAXALIGN(hdr->nabortrels * sizeof(RelFileNode));
-	commitstats = (xl_xact_stats_item*) bufptr;
+	commitstats = (xl_xact_stats_item *) bufptr;
 	bufptr += MAXALIGN(hdr->ncommitstats * sizeof(xl_xact_stats_item));
-	abortstats = (xl_xact_stats_item*) bufptr;
+	abortstats = (xl_xact_stats_item *) bufptr;
 	bufptr += MAXALIGN(hdr->nabortstats * sizeof(xl_xact_stats_item));
 	invalmsgs = (SharedInvalidationMessage *) bufptr;
 	bufptr += MAXALIGN(hdr->ninvalmsgs * sizeof(SharedInvalidationMessage));
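The FinishPreparedTransaction() hunk above walks the flat two-phase state blob: a header followed by several variable-length arrays, each beginning at a MAXALIGN'ed offset past the previous one. Here is a self-contained sketch of that layout walk; the struct and the 8-byte MAXALIGN are illustrative stand-ins for the real definitions in c.h and xact.h.

    #include <stdint.h>

    /* illustrative: assumes 8-byte maximum alignment, as on most platforms */
    #define MAXALIGN(LEN)	(((uintptr_t) (LEN) + 7) & ~(uintptr_t) 7)

    /* simplified stand-in for xl_xact_stats_item */
    typedef struct
    {
    	int			kind;
    	uint32_t	dboid;
    	uint32_t	objoid;
    } stats_item;

    /*
     * Advance bufptr over two consecutive arrays, the way the hunk above
     * advances over the commit and abort stats items.
     */
    static char *
    skip_stats_arrays(char *bufptr, int ncommitstats, int nabortstats,
    				  stats_item **commitstats, stats_item **abortstats)
    {
    	*commitstats = (stats_item *) bufptr;
    	bufptr += MAXALIGN(ncommitstats * sizeof(stats_item));
    	*abortstats = (stats_item *) bufptr;
    	bufptr += MAXALIGN(nabortstats * sizeof(stats_item));
    	return bufptr;
    }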
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 36852f23277..71136b11a2a 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -435,10 +435,10 @@ typedef struct XLogCtlInsert
 	bool		fullPageWrites;
 
 	/*
-	 * runningBackups is a counter indicating the number of backups currently in
-	 * progress. forcePageWrites is set to true when runningBackups is non-zero.
-	 * lastBackupStart is the latest checkpoint redo location used as a starting
-	 * point for an online backup.
+	 * runningBackups is a counter indicating the number of backups currently
+	 * in progress. forcePageWrites is set to true when runningBackups is
+	 * non-zero. lastBackupStart is the latest checkpoint redo location used
+	 * as a starting point for an online backup.
 	 */
 	int			runningBackups;
 	XLogRecPtr	lastBackupStart;
@@ -5307,14 +5307,14 @@ StartupXLOG(void)
 	 * When recovering from a backup (we are in recovery, and archive recovery
 	 * was requested), complain if we did not roll forward far enough to reach
 	 * the point where the database is consistent. For regular online
-	 * backup-from-primary, that means reaching the end-of-backup WAL record (at
-	 * which point we reset backupStartPoint to be Invalid), for
+	 * backup-from-primary, that means reaching the end-of-backup WAL record
+	 * (at which point we reset backupStartPoint to be Invalid), for
 	 * backup-from-replica (which can't inject records into the WAL stream),
 	 * that point is when we reach the minRecoveryPoint in pg_control (which
-	 * we purposfully copy last when backing up from a replica). For pg_rewind
-	 * (which creates a backup_label with a method of "pg_rewind") or
-	 * snapshot-style backups (which don't), backupEndRequired will be set to
-	 * false.
+	 * we purposefully copy last when backing up from a replica). For
+	 * pg_rewind (which creates a backup_label with a method of "pg_rewind")
+	 * or snapshot-style backups (which don't), backupEndRequired will be set
+	 * to false.
 	 *
 	 * Note: it is indeed okay to look at the local variable
 	 * LocalMinRecoveryPoint here, even though ControlFile->minRecoveryPoint
@@ -5328,8 +5328,8 @@ StartupXLOG(void)
 		/*
 		 * Ran off end of WAL before reaching end-of-backup WAL record, or
 		 * minRecoveryPoint. That's a bad sign, indicating that you tried to
-		 * recover from an online backup but never called pg_backup_stop(),
-		 * or you didn't archive all the WAL needed.
+		 * recover from an online backup but never called pg_backup_stop(), or
+		 * you didn't archive all the WAL needed.
 		 */
 		if (ArchiveRecoveryRequested || ControlFile->backupEndRequired)
 		{
@@ -8481,8 +8481,8 @@ do_pg_backup_stop(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
 	WALInsertLockAcquireExclusive();
 
 	/*
-	 * It is expected that each do_pg_backup_start() call is matched by exactly
-	 * one do_pg_backup_stop() call.
+	 * It is expected that each do_pg_backup_start() call is matched by
+	 * exactly one do_pg_backup_stop() call.
 	 */
 	Assert(XLogCtl->Insert.runningBackups > 0);
 	XLogCtl->Insert.runningBackups--;
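The do_pg_backup_stop() hunk above states the invariant behind runningBackups: starts and stops must pair up, and the counter must never go negative. A toy single-process sketch of that contract follows; in the real code the counter lives in XLogCtl->Insert, is protected by WALInsertLockAcquireExclusive(), and a non-zero value also drives forcePageWrites.

    #include <assert.h>

    static int	runningBackups = 0;

    static void
    backup_start(void)
    {
    	runningBackups++;			/* non-zero also implies forcePageWrites */
    }

    static void
    backup_stop(void)
    {
    	assert(runningBackups > 0);	/* every stop must match an earlier start */
    	runningBackups--;
    }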
diff --git a/src/backend/access/transam/xlogarchive.c b/src/backend/access/transam/xlogarchive.c
index a2657a20058..4101a30e374 100644
--- a/src/backend/access/transam/xlogarchive.c
+++ b/src/backend/access/transam/xlogarchive.c
@@ -497,15 +497,15 @@ XLogArchiveNotify(const char *xlog)
 	}
 
 	/*
-	 * Timeline history files are given the highest archival priority to
-	 * lower the chance that a promoted standby will choose a timeline that
-	 * is already in use. However, the archiver ordinarily tries to gather
+	 * Timeline history files are given the highest archival priority to lower
+	 * the chance that a promoted standby will choose a timeline that is
+	 * already in use. However, the archiver ordinarily tries to gather
 	 * multiple files to archive from each scan of the archive_status
-	 * directory, which means that newly created timeline history files
-	 * could be left unarchived for a while. To ensure that the archiver
-	 * picks up timeline history files as soon as possible, we force the
-	 * archiver to scan the archive_status directory the next time it looks
-	 * for a file to archive.
+	 * directory, which means that newly created timeline history files could
+	 * be left unarchived for a while. To ensure that the archiver picks up
+	 * timeline history files as soon as possible, we force the archiver to
+	 * scan the archive_status directory the next time it looks for a file to
+	 * archive.
 	 */
 	if (IsTLHistoryFileName(xlog))
 		PgArchForceDirScan();
diff --git a/src/backend/access/transam/xlogfuncs.c b/src/backend/access/transam/xlogfuncs.c
index b61ae6c0b4a..02bd919ff64 100644
--- a/src/backend/access/transam/xlogfuncs.c
+++ b/src/backend/access/transam/xlogfuncs.c
@@ -74,8 +74,8 @@ pg_backup_start(PG_FUNCTION_ARGS)
 				 errmsg("a backup is already in progress in this session")));
 
 	/*
-	 * Label file and tablespace map file need to be long-lived, since
-	 * they are read in pg_backup_stop.
+	 * Label file and tablespace map file need to be long-lived, since they
+	 * are read in pg_backup_stop.
 	 */
 	oldcontext = MemoryContextSwitchTo(TopMemoryContext);
 	label_file = makeStringInfo();
@@ -127,8 +127,8 @@ pg_backup_stop(PG_FUNCTION_ARGS)
 				 errhint("Did you call pg_backup_start()?")));
 
 	/*
-	 * Stop the backup. Return a copy of the backup label and tablespace map so
-	 * they can be written to disk by the caller.
+	 * Stop the backup. Return a copy of the backup label and tablespace map
+	 * so they can be written to disk by the caller.
 	 */
 	stoppoint = do_pg_backup_stop(label_file->data, waitforarchive, NULL);
 
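The pg_backup_start() hunk above touches a comment about memory lifetime: label_file must outlive the function call so pg_backup_stop() can read it later in the same session, hence the switch to TopMemoryContext. The pattern, sketched with real backend APIs (this assumes the PostgreSQL backend headers and is a simplified fragment, not the full function):

    #include "postgres.h"
    #include "lib/stringinfo.h"
    #include "utils/memutils.h"

    static StringInfo label_file = NULL;

    static void
    allocate_session_label(void)
    {
    	MemoryContext oldcontext;

    	/* allocate in TopMemoryContext so the data outlives this call */
    	oldcontext = MemoryContextSwitchTo(TopMemoryContext);
    	label_file = makeStringInfo();

    	/* restore the caller's context for any further allocations */
    	MemoryContextSwitchTo(oldcontext);
    }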
diff --git a/src/backend/access/transam/xlogrecovery.c b/src/backend/access/transam/xlogrecovery.c
index 39ef865ed92..6eba6264202 100644
--- a/src/backend/access/transam/xlogrecovery.c
+++ b/src/backend/access/transam/xlogrecovery.c
@@ -1205,9 +1205,9 @@ read_backup_label(XLogRecPtr *checkPointLoc, TimeLineID *backupLabelTLI,
 	 * method was used) or if this label came from somewhere else (the only
 	 * other option today being from pg_rewind). If this was a streamed
 	 * backup then we know that we need to play through until we get to the
-	 * end of the WAL which was generated during the backup (at which point
-	 * we will have reached consistency and backupEndRequired will be reset
-	 * to be false).
+	 * end of the WAL which was generated during the backup (at which point we
+	 * will have reached consistency and backupEndRequired will be reset to be
+	 * false).
 	 */
 	if (fscanf(lfp, "BACKUP METHOD: %19s\n", backuptype) == 1)
 	{
@@ -2055,10 +2055,9 @@ CheckRecoveryConsistency(void)
 
 	/*
 	 * Have we passed our safe starting point? Note that minRecoveryPoint is
-	 * known to be incorrectly set if recovering from a backup, until
-	 * the XLOG_BACKUP_END arrives to advise us of the correct
-	 * minRecoveryPoint. All we know prior to that is that we're not
-	 * consistent yet.
+	 * known to be incorrectly set if recovering from a backup, until the
+	 * XLOG_BACKUP_END arrives to advise us of the correct minRecoveryPoint.
+	 * All we know prior to that is that we're not consistent yet.
 	 */
 	if (!reachedConsistency && !backupEndRequired &&
 		minRecoveryPoint <= lastReplayedEndRecPtr)
@@ -3802,7 +3801,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
 		HandleStartupProcInterrupts();
 	}
 
-	return XLREAD_FAIL; /* not reached */
+	return XLREAD_FAIL;			/* not reached */
 }
diff --git a/src/backend/access/transam/xlogstats.c b/src/backend/access/transam/xlogstats.c
index 6524a1ad0b9..514181792dc 100644
--- a/src/backend/access/transam/xlogstats.c
+++ b/src/backend/access/transam/xlogstats.c
@@ -22,7 +22,7 @@ void
 XLogRecGetLen(XLogReaderState *record, uint32 *rec_len,
 			  uint32 *fpi_len)
 {
-	int block_id;
+	int			block_id;
 
 	/*
 	 * Calculate the amount of FPI data in the record.
@@ -53,10 +53,10 @@ XLogRecGetLen(XLogReaderState *record, uint32 *rec_len,
 void
 XLogRecStoreStats(XLogStats *stats, XLogReaderState *record)
 {
-	RmgrId rmid;
-	uint8 recid;
-	uint32 rec_len;
-	uint32 fpi_len;
+	RmgrId		rmid;
+	uint8		recid;
+	uint32		rec_len;
+	uint32		fpi_len;
 
 	Assert(stats != NULL && record != NULL);
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index 29419c10a88..48516694f08 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -80,10 +80,9 @@ typedef struct xl_invalid_page
 
 static HTAB *invalid_page_tab = NULL;
 
-static int
-read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr,
-						  int reqLen, XLogRecPtr targetRecPtr,
-						  char *cur_page, bool wait_for_wal);
+static int	read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr,
+									  int reqLen, XLogRecPtr targetRecPtr,
+									  char *cur_page, bool wait_for_wal);
 
 /* Report a reference to an invalid page */
 static void
@@ -940,8 +939,8 @@ read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr,
 			 * archive in the timeline will get renamed to .partial by
 			 * StartupXLOG().
 			 *
-			 * If that happens after our caller determined the TLI but before
-			 * we actually read the xlog page, we might still try to read from the
+			 * If that happens after our caller determined the TLI but before we
+			 * actually read the xlog page, we might still try to read from the
 			 * old (now renamed) segment and fail. There's not much we can do
 			 * about this, but it can only happen when we're a leaf of a cascading
 			 * standby whose primary gets promoted while we're decoding, so a
@@ -965,7 +964,7 @@ read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr,
 				 * end of WAL has been reached.
 				 */
 				private_data = (ReadLocalXLogPageNoWaitPrivate *)
-					state->private_data;
+				state->private_data;
 				private_data->end_of_wal = true;
 				break;
 			}
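For reference on the xlogstats.c declarations re-indented above: XLogRecGetLen() splits a record's total length into full-page-image bytes and ordinary record data, roughly as sketched below using the access/xlogreader.h macros (a paraphrase of the logic, not the verbatim function body):

    #include "postgres.h"
    #include "access/xlogreader.h"

    static void
    record_len_sketch(XLogReaderState *record, uint32 *rec_len, uint32 *fpi_len)
    {
    	int			block_id;

    	*fpi_len = 0;
    	for (block_id = 0; block_id <= XLogRecMaxBlockId(record); block_id++)
    	{
    		if (!XLogRecHasBlockRef(record, block_id))
    			continue;

    		/* count backup-image bytes separately from normal record data */
    		if (XLogRecHasBlockImage(record, block_id))
    			*fpi_len += XLogRecGetBlock(record, block_id)->bimg_len;
    	}

    	/* everything that is not FPI counts as record length */
    	*rec_len = XLogRecGetTotalLen(record) - *fpi_len;
    }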