Manual cleanup of pgindent results.
author	Tom Lane <tgl@sss.pgh.pa.us>
Sun, 24 May 2015 19:04:10 +0000 (15:04 -0400)
committer	Tom Lane <tgl@sss.pgh.pa.us>
Sun, 24 May 2015 19:04:10 +0000 (15:04 -0400)
Fix some places where pgindent did silly stuff, often because project
style wasn't followed to begin with.  (I've not touched the atomics
headers, though.)

contrib/pg_audit/pg_audit.c
src/backend/access/tablesample/bernoulli.c
src/backend/access/tablesample/tablesample.c
src/backend/executor/execUtils.c
src/backend/executor/nodeAgg.c
src/backend/executor/nodeHash.c
src/backend/optimizer/plan/planner.c
src/backend/rewrite/rowsecurity.c
src/backend/utils/adt/jsonb.c
src/backend/utils/adt/ruleutils.c

index a664d0804bc6020a04fa71918e6d85977f325d17..ffe13eb6b397f19798be8eb42711176285f8e0f0 100644 (file)
@@ -212,19 +212,19 @@ typedef struct
        int64           statementId;    /* Simple counter */
        int64           substatementId; /* Simple counter */
 
-       LogStmtLevel logStmtLevel;      /* From GetCommandLogLevel when possible, */
-       /* generated when not. */
+       LogStmtLevel logStmtLevel;      /* From GetCommandLogLevel when possible,
+                                                                * generated when not. */
        NodeTag         commandTag;             /* same here */
        const char *command;            /* same here */
-       const char *objectType;         /* From event trigger when possible */
-       /* generated when not. */
+       const char *objectType;         /* From event trigger when possible, generated
+                                                                * when not. */
        char       *objectName;         /* Fully qualified object identification */
        const char *commandText;        /* sourceText / queryString */
        ParamListInfo paramList;        /* QueryDesc/ProcessUtility parameters */
 
        bool            granted;                /* Audit role has object permissions? */
-       bool            logged;                 /* Track if we have logged this event, used */
-       /* post-ProcessUtility to make sure we log */
+       bool            logged;                 /* Track if we have logged this event, used
+                                                                * post-ProcessUtility to make sure we log */
        bool            statementLogged;        /* Track if we have logged the statement */
 } AuditEvent;
 
@@ -467,7 +467,7 @@ log_audit_event(AuditEventStackItem *stackItem)
        /* Classify the statement using log stmt level and the command tag */
        switch (stackItem->auditEvent.logStmtLevel)
        {
-                       /* All mods go in WRITE class, execpt EXECUTE */
+                       /* All mods go in WRITE class, except EXECUTE */
                case LOGSTMT_MOD:
                        className = CLASS_WRITE;
                        class = LOG_WRITE;
@@ -553,13 +553,14 @@ log_audit_event(AuditEventStackItem *stackItem)
                        break;
        }
 
-       /*
+       /*----------
         * Only log the statement if:
         *
-        * 1. If object was selected for audit logging (granted) 2. The statement
-        * belongs to a class that is being logged
+        * 1. If object was selected for audit logging (granted), or
+        * 2. The statement belongs to a class that is being logged
         *
         * If neither of these is true, return.
+        *----------
         */
        if (!stackItem->auditEvent.granted && !(auditLogBitmap & class))
                return;
@@ -979,57 +980,39 @@ log_select_dml(Oid auditOid, List *rangeTabls)
                switch (rte->relkind)
                {
                        case RELKIND_RELATION:
-                               auditEventStack->auditEvent.objectType =
-                               OBJECT_TYPE_TABLE;
-
+                               auditEventStack->auditEvent.objectType = OBJECT_TYPE_TABLE;
                                break;
 
                        case RELKIND_INDEX:
-                               auditEventStack->auditEvent.objectType =
-                               OBJECT_TYPE_INDEX;
-
+                               auditEventStack->auditEvent.objectType = OBJECT_TYPE_INDEX;
                                break;
 
                        case RELKIND_SEQUENCE:
-                               auditEventStack->auditEvent.objectType =
-                               OBJECT_TYPE_SEQUENCE;
-
+                               auditEventStack->auditEvent.objectType = OBJECT_TYPE_SEQUENCE;
                                break;
 
                        case RELKIND_TOASTVALUE:
-                               auditEventStack->auditEvent.objectType =
-                               OBJECT_TYPE_TOASTVALUE;
-
+                               auditEventStack->auditEvent.objectType = OBJECT_TYPE_TOASTVALUE;
                                break;
 
                        case RELKIND_VIEW:
-                               auditEventStack->auditEvent.objectType =
-                               OBJECT_TYPE_VIEW;
-
+                               auditEventStack->auditEvent.objectType = OBJECT_TYPE_VIEW;
                                break;
 
                        case RELKIND_COMPOSITE_TYPE:
-                               auditEventStack->auditEvent.objectType =
-                               OBJECT_TYPE_COMPOSITE_TYPE;
-
+                               auditEventStack->auditEvent.objectType = OBJECT_TYPE_COMPOSITE_TYPE;
                                break;
 
                        case RELKIND_FOREIGN_TABLE:
-                               auditEventStack->auditEvent.objectType =
-                               OBJECT_TYPE_FOREIGN_TABLE;
-
+                               auditEventStack->auditEvent.objectType = OBJECT_TYPE_FOREIGN_TABLE;
                                break;
 
                        case RELKIND_MATVIEW:
-                               auditEventStack->auditEvent.objectType =
-                               OBJECT_TYPE_MATVIEW;
-
+                               auditEventStack->auditEvent.objectType = OBJECT_TYPE_MATVIEW;
                                break;
 
                        default:
-                               auditEventStack->auditEvent.objectType =
-                               OBJECT_TYPE_UNKNOWN;
-
+                               auditEventStack->auditEvent.objectType = OBJECT_TYPE_UNKNOWN;
                                break;
                }
 
@@ -1043,9 +1026,7 @@ log_select_dml(Oid auditOid, List *rangeTabls)
                /* Perform object auditing only if the audit role is valid */
                if (auditOid != InvalidOid)
                {
-                       AclMode         auditPerms =
-                       (ACL_SELECT | ACL_UPDATE | ACL_INSERT | ACL_DELETE) &
-                       rte->requiredPerms;
+                       AclMode         auditPerms = (ACL_SELECT | ACL_UPDATE | ACL_INSERT | ACL_DELETE) & rte->requiredPerms;
 
                        /*
                         * If any of the required permissions for the relation are granted
@@ -1166,7 +1147,6 @@ log_function_execute(Oid objectId)
        stackItem->auditEvent.commandTag = T_DoStmt;
        stackItem->auditEvent.command = COMMAND_EXECUTE;
        stackItem->auditEvent.objectType = OBJECT_TYPE_FUNCTION;
-
        stackItem->auditEvent.commandText = stackItem->next->auditEvent.commandText;
 
        log_audit_event(stackItem);
@@ -1459,8 +1439,7 @@ pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
 
                /* Supply object name and type for audit event */
                auditEventStack->auditEvent.objectType =
-               SPI_getvalue(spiTuple, spiTupDesc, 1);
-
+                       SPI_getvalue(spiTuple, spiTupDesc, 1);
                auditEventStack->auditEvent.objectName =
                        SPI_getvalue(spiTuple, spiTupDesc, 2);
 
@@ -1545,8 +1524,7 @@ pg_audit_sql_drop(PG_FUNCTION_ARGS)
                spiTuple = SPI_tuptable->vals[row];
 
                auditEventStack->auditEvent.objectType =
-               SPI_getvalue(spiTuple, spiTupDesc, 1);
-
+                       SPI_getvalue(spiTuple, spiTupDesc, 1);
                auditEventStack->auditEvent.objectName =
                        SPI_getvalue(spiTuple, spiTupDesc, 2);
 
@@ -1603,16 +1581,14 @@ check_pg_audit_log(char **newVal, void **extra, GucSource source)
 
        foreach(lt, flagRawList)
        {
+               char       *token = (char *) lfirst(lt);
                bool            subtract = false;
                int                     class;
 
-               /* Retrieve a token */
-               char       *token = (char *) lfirst(lt);
-
                /* If token is preceded by -, then the token is subtractive */
-               if (strstr(token, "-") == token)
+               if (token[0] == '-')
                {
-                       token = token + 1;
+                       token++;
                        subtract = true;
                }
 
index 563a9168f0f06a49f228bcc4b95d970b20e9a456..0a539008221a5592febbeb9cf1a652eb9da0a1d6 100644 (file)
@@ -80,8 +80,7 @@ Datum
 tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
 {
        TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-       BernoulliSamplerData *sampler =
-       (BernoulliSamplerData *) tsdesc->tsmdata;
+       BernoulliSamplerData *sampler = (BernoulliSamplerData *) tsdesc->tsmdata;
 
        /*
         * Bernoulli sampling scans all blocks on the table and supports syncscan
@@ -117,10 +116,10 @@ tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
  * tuples have same probability of being returned the visible and invisible
  * tuples will be returned in same ratio as they have in the actual table.
  * This means that there is no skew towards either visible or invisible tuples
- * and the     number returned visible tuples to from the executor node is the
- * fraction of visible tuples which was specified in input.
+ * and the number of visible tuples returned from the executor node should
+ * match the fraction of visible tuples which was specified by user.
  *
- * This is faster than doing the coinflip in the examinetuple because we don't
+ * This is faster than doing the coinflip in examinetuple because we don't
  * have to do visibility checks on uninteresting tuples.
  *
  * If we reach end of the block return InvalidOffsetNumber which tells
@@ -131,8 +130,7 @@ tsm_bernoulli_nexttuple(PG_FUNCTION_ARGS)
 {
        TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
        OffsetNumber maxoffset = PG_GETARG_UINT16(2);
-       BernoulliSamplerData *sampler =
-       (BernoulliSamplerData *) tsdesc->tsmdata;
+       BernoulliSamplerData *sampler = (BernoulliSamplerData *) tsdesc->tsmdata;
        OffsetNumber tupoffset = sampler->lt;
        float4          probability = sampler->probability;
 
@@ -185,8 +183,7 @@ Datum
 tsm_bernoulli_reset(PG_FUNCTION_ARGS)
 {
        TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-       BernoulliSamplerData *sampler =
-       (BernoulliSamplerData *) tsdesc->tsmdata;
+       BernoulliSamplerData *sampler = (BernoulliSamplerData *) tsdesc->tsmdata;
 
        sampler->blockno = InvalidBlockNumber;
        sampler->lt = InvalidOffsetNumber;
index 3398d02f854bfb48be7611028811fb4e6bc4ca44..44a24340f6b2d925e6cfbc9e54114f2d44ecb579 100644 (file)
@@ -78,9 +78,12 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
        fcinfo.argnull[0] = false;
 
        /*
-        * Second arg for init function is always REPEATABLE When
-        * tablesample->repeatable is NULL then REPEATABLE clause was not
-        * specified. When specified, the expression cannot evaluate to NULL.
+        * Second arg for init function is always REPEATABLE.
+        *
+        * If tablesample->repeatable is NULL then REPEATABLE clause was not
+        * specified, and we insert a random value as default.
+        *
+        * When specified, the expression cannot evaluate to NULL.
         */
        if (tablesample->repeatable)
        {
index 7e15b797a7e70b8dde15217d0711fe9afb01cf60..3c611b938bceaab7cff3a71995a9366c0ab5d748 100644 (file)
@@ -645,10 +645,12 @@ get_last_attnums(Node *node, ProjectionInfo *projInfo)
         * overall targetlist's econtext.  GroupingFunc arguments are never
         * evaluated at all.
         */
-       if (IsA(node, Aggref) ||IsA(node, GroupingFunc))
+       if (IsA(node, Aggref))
                return false;
        if (IsA(node, WindowFunc))
                return false;
+       if (IsA(node, GroupingFunc))
+               return false;
        return expression_tree_walker(node, get_last_attnums,
                                                                  (void *) projInfo);
 }
index 31d74e94778a229553e548d03d687a086aa34b6c..2bf48c54e3cd19badfb891e8ef7811dbed2a7c3a 100644 (file)
@@ -1519,8 +1519,9 @@ agg_retrieve_direct(AggState *aggstate)
        /*
         * get state info from node
         *
-        * econtext is the per-output-tuple expression context tmpcontext is the
-        * per-input-tuple expression context
+        * econtext is the per-output-tuple expression context
+        *
+        * tmpcontext is the per-input-tuple expression context
         */
        econtext = aggstate->ss.ps.ps_ExprContext;
        tmpcontext = aggstate->tmpcontext;
@@ -1609,7 +1610,7 @@ agg_retrieve_direct(AggState *aggstate)
                else
                        nextSetSize = 0;
 
-               /*-
+               /*----------
                 * If a subgroup for the current grouping set is present, project it.
                 *
                 * We have a new group if:
@@ -1624,6 +1625,7 @@ agg_retrieve_direct(AggState *aggstate)
                 *        AND
                 *        - the previous and pending rows differ on the grouping columns
                 *              of the next grouping set
+                *----------
                 */
                if (aggstate->input_done ||
                        (node->aggstrategy == AGG_SORTED &&
index 2a049240549490b6fb61f010a7a1d0206995fe30..906cb46b65892c7b0fb325c8fb0057589de6d981 100644 (file)
@@ -527,8 +527,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
                 * Buckets are simple pointers to hashjoin tuples, while tupsize
                 * includes the pointer, hash code, and MinimalTupleData.  So buckets
                 * should never really exceed 25% of work_mem (even for
-                * NTUP_PER_BUCKET=1); except maybe for work_mem values that are not
-                * 2^N bytes, where we might get more because of doubling. So let's
+                * NTUP_PER_BUCKET=1); except maybe for work_mem values that are not
+                * 2^N bytes, where we might get more because of doubling. So let's
                 * look for 50% here.
                 */
                Assert(bucket_bytes <= hash_table_bytes / 2);
@@ -691,9 +691,9 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
                        if (batchno == curbatch)
                        {
                                /* keep tuple in memory - copy it into the new chunk */
-                               HashJoinTuple copyTuple =
-                               (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
+                               HashJoinTuple copyTuple;
 
+                               copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
                                memcpy(copyTuple, hashTuple, hashTupleSize);
 
                                /* and add it back to the appropriate bucket */
index 920c2b77fffec8a625e4902e526e7c8b793af785..8afde2b7d5069707e346901f819bed888a2333ee 100644 (file)
@@ -1918,10 +1918,10 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
                                 * whether HAVING succeeds.  Furthermore, there cannot be any
                                 * variables in either HAVING or the targetlist, so we
                                 * actually do not need the FROM table at all!  We can just
-                                * throw away the plan-so-far and generate a Result node.
-                                * This is a sufficiently unusual corner case that it's not
-                                * worth contorting the structure of this routine to avoid
-                                * having to generate the plan in the first place.
+                                * throw away the plan-so-far and generate a Result node. This
+                                * is a sufficiently unusual corner case that it's not worth
+                                * contorting the structure of this routine to avoid having to
+                                * generate the plan in the first place.
                                 */
                                result_plan = (Plan *) make_result(root,
                                                                                                   tlist,
@@ -3157,22 +3157,23 @@ extract_rollup_sets(List *groupingSets)
        if (!lc1)
                return list_make1(groupingSets);
 
-       /*
+       /*----------
         * We don't strictly need to remove duplicate sets here, but if we don't,
         * they tend to become scattered through the result, which is a bit
-        * confusing (and irritating if we ever decide to optimize them out). So
-        * we remove them here and add them back after.
+        * confusing (and irritating if we ever decide to optimize them out).
+        * So we remove them here and add them back after.
         *
         * For each non-duplicate set, we fill in the following:
         *
-        * orig_sets[i] = list of the original set lists set_masks[i] = bitmapset
-        * for testing inclusion adjacency[i] = array [n, v1, v2, ... vn] of
-        * adjacency indices
+        * orig_sets[i] = list of the original set lists
+        * set_masks[i] = bitmapset for testing inclusion
+        * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
         *
         * chains[i] will be the result group this set is assigned to.
         *
-        * We index all of these from 1 rather than 0 because it is convenient to
-        * leave 0 free for the NIL node in the graph algorithm.
+        * We index all of these from 1 rather than 0 because it is convenient
+        * to leave 0 free for the NIL node in the graph algorithm.
+        *----------
         */
        orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
        set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
index 5a2f696934a8e5071c48babd3cacca5ab2728cfa..aaf0061164b2969f727ec9adad2272280f4f622a 100644 (file)
@@ -596,8 +596,8 @@ process_policies(Query *root, List *policies, int rt_index, Expr **qual_eval,
                *qual_eval = (Expr *) linitial(quals);
 
        /*
-        * Similairly, if more than one WITH CHECK qual is returned, then they
-        * need to be combined together.
+        * Similarly, if more than one WITH CHECK qual is returned, then they need
+        * to be combined together.
         *
         * with_check_quals is allowed to be NIL here since this might not be the
         * resultRelation (see above).
index c0959a0ee2a9bd2286f3d18037ebb2e390b1facd..e68972221ab7c6f5db2b64bcf6fac3188751beec 100644 (file)
@@ -584,7 +584,7 @@ add_indent(StringInfo out, bool indent, int level)
  *
  * Given the datatype OID, return its JsonbTypeCategory, as well as the type's
  * output function OID.  If the returned category is JSONBTYPE_JSONCAST,
- *     we return the OID of the relevant cast function instead.
+ * we return the OID of the relevant cast function instead.
  */
 static void
 jsonb_categorize_type(Oid typoid,
index c404ae5e4c8544b37cef98b6c80f42f0795a1676..5517113151871454a005f71540f00c0c63bc0ce6 100644 (file)
@@ -106,8 +106,8 @@ typedef struct
        int                     wrapColumn;             /* max line length, or -1 for no limit */
        int                     indentLevel;    /* current indent level for prettyprint */
        bool            varprefix;              /* TRUE to print prefixes on Vars */
-       ParseExprKind special_exprkind;         /* set only for exprkinds needing */
-       /* special handling */
+       ParseExprKind special_exprkind;         /* set only for exprkinds needing
+                                                                                * special handling */
 } deparse_context;
 
 /*