pgindent run prior to branching
author     Andrew Dunstan <andrew@dunslane.net>
Sat, 30 Jun 2018 16:25:49 +0000 (12:25 -0400)
committer  Andrew Dunstan <andrew@dunslane.net>
Sat, 30 Jun 2018 16:25:49 +0000 (12:25 -0400)
18 files changed:
contrib/postgres_fdw/postgres_fdw.c
src/backend/access/gin/ginfast.c
src/backend/access/gin/ginget.c
src/backend/access/nbtree/nbtree.c
src/backend/access/nbtree/nbtutils.c
src/backend/access/transam/xlog.c
src/backend/access/transam/xlogreader.c
src/backend/commands/indexcmds.c
src/backend/executor/execMain.c
src/backend/replication/logical/reorderbuffer.c
src/backend/storage/ipc/procarray.c
src/backend/storage/ipc/standby.c
src/backend/tcop/utility.c
src/backend/utils/adt/jsonb.c
src/bin/pg_dump/pg_dump.h
src/bin/psql/tab-complete.c
src/include/access/xlogreader.h
src/include/replication/reorderbuffer.h

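All of the hunks below are mechanical re-indentation; no statements change. For readers unfamiliar with the tool, here is a rough sketch of the layout pgindent leaves behind (a hypothetical fragment, not taken from this commit; the function and variable names are invented for illustration): declaration names aligned on a common column, the pointer star kept with the variable name, and multi-line comments reflowed to fill the available line width.

    /*
     * Hypothetical example (not part of this commit): a small function laid
     * out the way pgindent leaves C code -- declaration names aligned on a
     * common column, the pointer star attached to the name, and block
     * comments like this one reflowed to fill the available line width.
     */
    static int
    example_sum(const int *values, int nvalues)
    {
        const int  *cur;
        int         total = 0;
        int         i;

        for (i = 0; i < nvalues; i++)
        {
            cur = &values[i];
            total += *cur;
        }
        return total;
    }
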
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index 78b0f43ca8376bb6d4435732cb83a7d7b2a718dd..9369cfc265639732a136d20609880061fb830cb7 100644
@@ -2020,12 +2020,11 @@ postgresBeginForeignInsert(ModifyTableState *mtstate,
        /*
         * If the foreign table is a partition, we need to create a new RTE
         * describing the foreign table for use by deparseInsertSql and
-        * create_foreign_modify() below, after first copying the parent's
-        * RTE and modifying some fields to describe the foreign partition to
-        * work on. However, if this is invoked by UPDATE, the existing RTE
-        * may already correspond to this partition if it is one of the
-        * UPDATE subplan target rels; in that case, we can just use the
-        * existing RTE as-is.
+        * create_foreign_modify() below, after first copying the parent's RTE and
+        * modifying some fields to describe the foreign partition to work on.
+        * However, if this is invoked by UPDATE, the existing RTE may already
+        * correspond to this partition if it is one of the UPDATE subplan target
+        * rels; in that case, we can just use the existing RTE as-is.
         */
        rte = list_nth(estate->es_range_table, resultRelation - 1);
        if (rte->relid != RelationGetRelid(rel))
@@ -2035,10 +2034,10 @@ postgresBeginForeignInsert(ModifyTableState *mtstate,
                rte->relkind = RELKIND_FOREIGN_TABLE;
 
                /*
-                * For UPDATE, we must use the RT index of the first subplan
-                * target rel's RTE, because the core code would have built
-                * expressions for the partition, such as RETURNING, using that
-                * RT index as varno of Vars contained in those expressions.
+                * For UPDATE, we must use the RT index of the first subplan target
+                * rel's RTE, because the core code would have built expressions for
+                * the partition, such as RETURNING, using that RT index as varno of
+                * Vars contained in those expressions.
                 */
                if (plan && plan->operation == CMD_UPDATE &&
                        resultRelation == plan->nominalRelation)
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index 5f624cf6facb12902d4919ac78f268b40c796dc6..e32807e62ac194a4c3dde6bd73bc4688414790bf 100644
@@ -247,9 +247,9 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
        metapage = BufferGetPage(metabuffer);
 
        /*
-        * An insertion to the pending list could logically belong anywhere in
-        * the tree, so it conflicts with all serializable scans.  All scans
-        * acquire a predicate lock on the metabuffer to represent that.
+        * An insertion to the pending list could logically belong anywhere in the
+        * tree, so it conflicts with all serializable scans.  All scans acquire a
+        * predicate lock on the metabuffer to represent that.
         */
        CheckForSerializableConflictIn(index, NULL, metabuffer);
 
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index ef3cd7dbe2ab32d5e4b1f2cd4f4eda03861abb46..8466d947eaba5a7d18e5664628e12bc30a8b0908 100644
@@ -235,8 +235,8 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
                        LockBuffer(stack->buffer, GIN_UNLOCK);
 
                        /*
-                        * Acquire predicate lock on the posting tree.  We already hold
-                        * lock on the entry page, but insertions to the posting tree
+                        * Acquire predicate lock on the posting tree.  We already hold a
+                        * lock on the entry page, but insertions to the posting tree
                         * don't check for conflicts on that level.
                         */
                        PredicateLockPage(btree->index, rootPostingTree, snapshot);
@@ -1766,8 +1766,8 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
        *ntids = 0;
 
        /*
-        * Acquire predicate lock on the metapage, to conflict with any
-        * fastupdate insertions.
+        * Acquire predicate lock on the metapage, to conflict with any fastupdate
+        * insertions.
         */
        PredicateLockPage(scan->indexRelation, GIN_METAPAGE_BLKNO, scan->xs_snapshot);
 
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index cdd0403e1d86e19cea90e0e584129135119b45a5..e8725fbbe1eec84fc9b8e03092b54d96b3bbd8cd 100644
@@ -820,10 +820,10 @@ _bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
 
                /*
                 * If table receives enough insertions and no cleanup was performed,
-                * then index would appear have stale statistics.  If scale factor
-                * is set, we avoid that by performing cleanup if the number of
-                * inserted tuples exceeds vacuum_cleanup_index_scale_factor fraction
-                * of original tuples count.
+                * then index would appear have stale statistics.  If scale factor is
+                * set, we avoid that by performing cleanup if the number of inserted
+                * tuples exceeds vacuum_cleanup_index_scale_factor fraction of
+                * original tuples count.
                 */
                relopts = (StdRdOptions *) info->index->rd_options;
                cleanup_scale_factor = (relopts &&
@@ -873,8 +873,8 @@ btbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
                                         &oldestBtpoXact);
 
                /*
-                * Update cleanup-related information in metapage. This information
-                * is used only for cleanup but keeping them up to date can avoid
+                * Update cleanup-related information in metapage. This information is
+                * used only for cleanup but keeping them up to date can avoid
                 * unnecessary cleanup even after bulkdelete.
                 */
                _bt_update_meta_cleanup_info(info->index, oldestBtpoXact,
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index acb944357a36ab0af2abf3a8371f7d4b571970cf..4528e87c8336b912e10dafbe69cc56147f7c1afd 100644
@@ -2196,8 +2196,8 @@ _bt_check_natts(Relation rel, Page page, OffsetNumber offnum)
                         * non-zero, or when there is no explicit representation and the
                         * tuple is evidently not a pre-pg_upgrade tuple.
                         *
-                        * Prior to v11, downlinks always had P_HIKEY as their offset.
-                        * Use that to decide if the tuple is a pre-v11 tuple.
+                        * Prior to v11, downlinks always had P_HIKEY as their offset. Use
+                        * that to decide if the tuple is a pre-v11 tuple.
                         */
                        return BTreeTupleGetNAtts(itup, rel) == 0 ||
                                ((itup->t_info & INDEX_ALT_TID_MASK) == 0 &&
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 1a419aa49bfee2d0b48ec9ffe2ef0914d940d46d..dcfef365916a4c85f7f41e18d2ee5f35351fc080 100644
@@ -4512,7 +4512,7 @@ ReadControlFile(void)
                                         errmsg("could not read from control file: %m")));
                else
                        ereport(PANIC,
-                                        (errmsg("could not read from control file: read %d bytes, expected %d", r, (int) sizeof(ControlFileData))));
+                                       (errmsg("could not read from control file: read %d bytes, expected %d", r, (int) sizeof(ControlFileData))));
        }
        pgstat_report_wait_end();
 
diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c
index 1b000a2ef1dbf01978ff0e5600992b1b2e4a191d..dd96cef8f0102f04cef20e7a08714f4d5265088d 100644
@@ -829,9 +829,9 @@ XLogReaderValidatePageHeader(XLogReaderState *state, XLogRecPtr recptr,
        }
 
        /*
-        * Check that the address on the page agrees with what we expected.
-        * This check typically fails when an old WAL segment is recycled,
-        * and hasn't yet been overwritten with new data yet.
+        * Check that the address on the page agrees with what we expected. This
+        * check typically fails when an old WAL segment is recycled, and hasn't
+        * yet been overwritten with new data yet.
         */
        if (hdr->xlp_pageaddr != recaddr)
        {
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 576c85f732d77f23426594c13e0204524839b543..0053832195796d5cfb348a4375a4186ba7e18773 100644
@@ -1002,7 +1002,7 @@ DefineIndex(Oid relationId,
                                         */
                                        foreach(lc, childStmt->indexParams)
                                        {
-                                               IndexElem *ielem = lfirst(lc);
+                                               IndexElem  *ielem = lfirst(lc);
 
                                                /*
                                                 * If the index parameter is an expression, we must
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 969944cc12ace12cfd70f58a6553f12be17d23fb..8026fe2438f89c9e9b073eb469d657fb7b22e2c8 100644
@@ -1865,7 +1865,7 @@ ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
                                   EState *estate, bool emitError)
 {
        ExprContext *econtext;
-       bool    success;
+       bool            success;
 
        /*
         * If first time through, build expression state tree for the partition
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 5f4aa07131011b16386bc24823a70912dfb877f3..5792cd14a09fc545c4438e77202aad35d2f38574 100644
@@ -754,9 +754,9 @@ ReorderBufferAssignChild(ReorderBuffer *rb, TransactionId xid,
                else
                {
                        /*
-                        * We already saw this transaction, but initially added it to the list
-                        * of top-level txns.  Now that we know it's not top-level, remove
-                        * it from there.
+                        * We already saw this transaction, but initially added it to the
+                        * list of top-level txns.  Now that we know it's not top-level,
+                        * remove it from there.
                         */
                        dlist_delete(&subtxn->node);
                }
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 7f293d989b5917ad2bce93fde2c904a4507f3aa0..bd20497d81aecfdac83f99757ef5a4faf605c178 100644
@@ -2014,8 +2014,8 @@ GetRunningTransactionData(void)
                /*
                 * If we wished to exclude xids this would be the right place for it.
                 * Procs with the PROC_IN_VACUUM flag set don't usually assign xids,
-                * but they do during truncation at the end when they get the lock
-                * and truncate, so it is not much of a problem to include them if they
+                * but they do during truncation at the end when they get the lock and
+                * truncate, so it is not much of a problem to include them if they
                 * are seen and it is cleaner to include them.
                 */
 
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index 147784c4b67d221d3d84c8616b5070777204d268..2e077028951282f2daa2b3bc531a5be79f916f3d 100644
@@ -53,8 +53,8 @@ static void LogAccessExclusiveLocks(int nlocks, xl_standby_lock *locks);
  */
 typedef struct RecoveryLockListsEntry
 {
-       TransactionId   xid;
-       List               *locks;
+       TransactionId xid;
+       List       *locks;
 } RecoveryLockListsEntry;
 
 /*
@@ -73,7 +73,7 @@ void
 InitRecoveryTransactionEnvironment(void)
 {
        VirtualTransactionId vxid;
-       HASHCTL                 hash_ctl;
+       HASHCTL         hash_ctl;
 
        /*
         * Initialize the hash table for tracking the list of locks held by each
@@ -671,6 +671,7 @@ StandbyReleaseLockList(List *locks)
        {
                xl_standby_lock *lock = (xl_standby_lock *) linitial(locks);
                LOCKTAG         locktag;
+
                elog(trace_recovery(DEBUG4),
                         "releasing recovery lock: xid %u db %u rel %u",
                         lock->xid, lock->dbOid, lock->relOid);
@@ -728,7 +729,7 @@ StandbyReleaseLockTree(TransactionId xid, int nsubxids, TransactionId *subxids)
 void
 StandbyReleaseAllLocks(void)
 {
-       HASH_SEQ_STATUS status;
+       HASH_SEQ_STATUS status;
        RecoveryLockListsEntry *entry;
 
        elog(trace_recovery(DEBUG2), "release all standby locks");
@@ -749,7 +750,7 @@ StandbyReleaseAllLocks(void)
 void
 StandbyReleaseOldLocks(TransactionId oldxid)
 {
-       HASH_SEQ_STATUS status;
+       HASH_SEQ_STATUS status;
        RecoveryLockListsEntry *entry;
 
        hash_seq_init(&status, RecoveryLockLists);
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index bdfb66fa74b365b6912fb16682f944df35739ba3..4e1c21298e80754248900a1b6a63dcb6e57286df 100644
@@ -1316,8 +1316,8 @@ ProcessUtilitySlow(ParseState *pstate,
                                         * acquire locks early to avoid deadlocks.
                                         *
                                         * We also take the opportunity to verify that all
-                                        * partitions are something we can put an index on,
-                                        * to avoid building some indexes only to fail later.
+                                        * partitions are something we can put an index on, to
+                                        * avoid building some indexes only to fail later.
                                         */
                                        if (stmt->relation->inh &&
                                                get_rel_relkind(relid) == RELKIND_PARTITIONED_TABLE)
@@ -1328,7 +1328,7 @@ ProcessUtilitySlow(ParseState *pstate,
                                                inheritors = find_all_inheritors(relid, lockmode, NULL);
                                                foreach(lc, inheritors)
                                                {
-                                                       char    relkind = get_rel_relkind(lfirst_oid(lc));
+                                                       char            relkind = get_rel_relkind(lfirst_oid(lc));
 
                                                        if (relkind != RELKIND_RELATION &&
                                                                relkind != RELKIND_MATVIEW &&
diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c
index 6940b11c29057e27182f40084b32163d3c689531..0ae9d7b9c54bbd5ffdc622d604832cd8c0632e50 100644
@@ -1902,29 +1902,29 @@ cannotCastJsonbValue(enum jbvType type, const char *sqltype)
 {
        static const struct
        {
-               enum jbvType    type;
-               const char         *msg;
+               enum jbvType type;
+               const char *msg;
        }
-               messages[] =
+                               messages[] =
        {
-               { jbvNull,              gettext_noop("cannot cast jsonb null to type %s") },
-               { jbvString,    gettext_noop("cannot cast jsonb string to type %s") },
-               { jbvNumeric,   gettext_noop("cannot cast jsonb numeric to type %s") },
-               { jbvBool,              gettext_noop("cannot cast jsonb boolean to type %s") },
-               { jbvArray,             gettext_noop("cannot cast jsonb array to type %s") },
-               { jbvObject,    gettext_noop("cannot cast jsonb object to type %s") },
-               { jbvBinary,    gettext_noop("cannot cast jsonb array or object to type %s") }
+               {jbvNull, gettext_noop("cannot cast jsonb null to type %s")},
+               {jbvString, gettext_noop("cannot cast jsonb string to type %s")},
+               {jbvNumeric, gettext_noop("cannot cast jsonb numeric to type %s")},
+               {jbvBool, gettext_noop("cannot cast jsonb boolean to type %s")},
+               {jbvArray, gettext_noop("cannot cast jsonb array to type %s")},
+               {jbvObject, gettext_noop("cannot cast jsonb object to type %s")},
+               {jbvBinary, gettext_noop("cannot cast jsonb array or object to type %s")}
        };
-       int i;
+       int                     i;
 
-       for(i=0; i<lengthof(messages); i++)
+       for (i = 0; i < lengthof(messages); i++)
                if (messages[i].type == type)
                        ereport(ERROR,
                                        (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                                         errmsg(messages[i].msg, sqltype)));
 
        /* should be unreachable */
-       elog(ERROR, "unknown jsonb type: %d", (int)type);
+       elog(ERROR, "unknown jsonb type: %d", (int) type);
 }
 
 Datum
diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h
index b9043799c7fb5019490249d6a3b444c76bc25934..1448005f303114a4c75542f6ba0542e65c9735cd 100644
@@ -316,7 +316,7 @@ typedef struct _tableInfo
        char      **attoptions;         /* per-attribute options */
        Oid                *attcollation;       /* per-attribute collation selection */
        char      **attfdwoptions;      /* per-attribute fdw options */
-       char      **attmissingval;  /* per attribute missing value */
+       char      **attmissingval;      /* per attribute missing value */
        bool       *notnull;            /* NOT NULL constraints on attributes */
        bool       *inhNotNull;         /* true if NOT NULL is inherited */
        struct _attrDefInfo **attrdefs; /* DEFAULT expressions */
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index 7bb47eadc6c169e62fabbc67267426adae5fccfe..bb696f8ee90119ffec45d4ebd0f79d20a383c61c 100644
@@ -1856,14 +1856,14 @@ psql_completion(const char *text, int start, int end)
        /* ALTER INDEX <foo> SET|RESET ( */
        else if (Matches5("ALTER", "INDEX", MatchAny, "RESET", "("))
                COMPLETE_WITH_LIST8("fillfactor", "recheck_on_update",
-                                                       "vacuum_cleanup_index_scale_factor", /* BTREE */
+                                                       "vacuum_cleanup_index_scale_factor",    /* BTREE */
                                                        "fastupdate", "gin_pending_list_limit", /* GIN */
                                                        "buffering",    /* GiST */
                                                        "pages_per_range", "autosummarize"      /* BRIN */
                        );
        else if (Matches5("ALTER", "INDEX", MatchAny, "SET", "("))
                COMPLETE_WITH_LIST8("fillfactor =", "recheck_on_update =",
-                                                       "vacuum_cleanup_index_scale_factor =", /* BTREE */
+                                                       "vacuum_cleanup_index_scale_factor =",  /* BTREE */
                                                        "fastupdate =", "gin_pending_list_limit =", /* GIN */
                                                        "buffering =",  /* GiST */
                                                        "pages_per_range =", "autosummarize ="  /* BRIN */
diff --git a/src/include/access/xlogreader.h b/src/include/access/xlogreader.h
index f307b6318ddd19b3065360f435d055829df67521..40116f8ecb4a813b82a8020004a4d97c16a381c4 100644
@@ -207,7 +207,7 @@ extern struct XLogRecord *XLogReadRecord(XLogReaderState *state,
 
 /* Validate a page */
 extern bool XLogReaderValidatePageHeader(XLogReaderState *state,
-                                       XLogRecPtr recptr, char *phdr);
+                                                        XLogRecPtr recptr, char *phdr);
 
 /* Invalidate read state */
 extern void XLogReaderInvalReadState(XLogReaderState *state);
diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h
index f8a295bddc895f10c396b705331ad6bb8e6dd23e..1f52f6bde73430f29bc7c15aac17ef159ae2e87d 100644
@@ -214,7 +214,7 @@ typedef struct ReorderBufferTXN
         */
        Snapshot        base_snapshot;
        XLogRecPtr      base_snapshot_lsn;
-       dlist_node      base_snapshot_node;     /* link in txns_by_base_snapshot_lsn */
+       dlist_node      base_snapshot_node; /* link in txns_by_base_snapshot_lsn */
 
        /*
         * How many ReorderBufferChange's do we have in this txn.