author     Bruce Momjian   2012-06-10 19:20:04 +0000
committer  Bruce Momjian   2012-06-10 19:20:04 +0000
commit     927d61eeff78363ea3938c818d07e511ebaf75cf
tree       2f0bcecf53327f76272a8ce690fa62505520fab9  /src/backend/optimizer
parent     60801944fa105252b48ea5688d47dfc05c695042
Run pgindent on 9.2 source tree in preparation for first 9.3
commit-fest.
Diffstat (limited to 'src/backend/optimizer')
-rw-r--r--  src/backend/optimizer/geqo/geqo_selection.c      4
-rw-r--r--  src/backend/optimizer/path/allpaths.c           40
-rw-r--r--  src/backend/optimizer/path/costsize.c           33
-rw-r--r--  src/backend/optimizer/path/equivclass.c         22
-rw-r--r--  src/backend/optimizer/path/indxpath.c          177
-rw-r--r--  src/backend/optimizer/path/joinpath.c           58
-rw-r--r--  src/backend/optimizer/path/joinrels.c            4
-rw-r--r--  src/backend/optimizer/path/orindxpath.c          6
-rw-r--r--  src/backend/optimizer/path/pathkeys.c            2
-rw-r--r--  src/backend/optimizer/plan/createplan.c         61
-rw-r--r--  src/backend/optimizer/plan/initsplan.c           6
-rw-r--r--  src/backend/optimizer/plan/planagg.c             4
-rw-r--r--  src/backend/optimizer/plan/planmain.c            4
-rw-r--r--  src/backend/optimizer/plan/planner.c            44
-rw-r--r--  src/backend/optimizer/plan/setrefs.c            12
-rw-r--r--  src/backend/optimizer/plan/subselect.c           4
-rw-r--r--  src/backend/optimizer/prep/prepjointree.c       22
-rw-r--r--  src/backend/optimizer/prep/prepunion.c           6
-rw-r--r--  src/backend/optimizer/util/clauses.c            56
-rw-r--r--  src/backend/optimizer/util/pathnode.c          139
-rw-r--r--  src/backend/optimizer/util/placeholder.c         4
-rw-r--r--  src/backend/optimizer/util/plancat.c             8
-rw-r--r--  src/backend/optimizer/util/predtest.c            2
-rw-r--r--  src/backend/optimizer/util/relnode.c            10
-rw-r--r--  src/backend/optimizer/util/var.c                 2
25 files changed, 371 insertions(+), 359 deletions(-)
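
The hunks that follow are mechanical re-indentation, not behavioral changes. As a hedged illustration (this fragment is invented for exposition, not taken from the PostgreSQL tree), the effects accounting for most of the diff are: block comments re-flowed so each line fills the 79-column limit, continuation lines of multi-line declarations re-aligned, and a blank line inserted where a comment directly follows a case label:

#include <stdio.h>

/*
 * pgindent re-flows a block comment like this one so that every line is
 * filled out to the 79-column limit before wrapping, instead of breaking
 * wherever the author happened to hit Enter.
 */
static void
example(int x)
{
	switch (x)
	{
		case 1:

			/* pgindent inserted the blank line above this comment */
			printf("one\n");
			break;
		default:
			printf("other\n");
			break;
	}
}

int
main(void)
{
	example(1);
	return 0;
}

Compare the RTE_SUBQUERY and RTE_CTE hunks in allpaths.c below, where the lone "+" lines are exactly such inserted blank lines.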
diff --git a/src/backend/optimizer/geqo/geqo_selection.c b/src/backend/optimizer/geqo/geqo_selection.c
index be64576c2fd..fbdcc5ff0c9 100644
--- a/src/backend/optimizer/geqo/geqo_selection.c
+++ b/src/backend/optimizer/geqo/geqo_selection.c
@@ -65,8 +65,8 @@ geqo_selection(PlannerInfo *root, Chromosome *momma, Chromosome *daddy,
* one, when we can't.
*
* This code was observed to hang up in an infinite loop when the
- * platform's implementation of erand48() was broken. We now always
- * use our own version.
+ * platform's implementation of erand48() was broken. We now always use
+ * our own version.
*/
if (pool->size > 1)
{
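
The comment re-wrapped above records a real portability hazard. A minimal sketch of why a broken platform erand48() made this selection loop hang (illustrative names only, not the actual geqo_selection() code): rejection-sampling two distinct parents never terminates if the random source is constant.

#include <stdio.h>

/* Stand-in for a broken platform erand48() that always returns the same value */
static double
broken_erand48(void)
{
	return 0.0;
}

/* Pick a pool index in [0, pool_size) from a uniform [0,1) random source */
static int
pick(int pool_size, double (*rng)(void))
{
	return (int) (rng() * pool_size);
}

int
main(void)
{
	int			first = pick(100, broken_erand48);
	int			second = first;
	long		tries = 0;

	/* geqo-style retry: insist on two different parents */
	while (second == first)
	{
		second = pick(100, broken_erand48);
		if (++tries > 1000000)	/* guard so this demo terminates */
		{
			printf("RNG never varied; the real loop would spin forever\n");
			break;
		}
	}
	return 0;
}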
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 0563cae1d7e..f02954982a7 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -50,19 +50,19 @@ join_search_hook_type join_search_hook = NULL;
static void set_base_rel_sizes(PlannerInfo *root);
static void set_base_rel_pathlists(PlannerInfo *root);
static void set_rel_size(PlannerInfo *root, RelOptInfo *rel,
- Index rti, RangeTblEntry *rte);
+ Index rti, RangeTblEntry *rte);
static void set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
Index rti, RangeTblEntry *rte);
static void set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel,
- RangeTblEntry *rte);
+ RangeTblEntry *rte);
static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
static void set_foreign_size(PlannerInfo *root, RelOptInfo *rel,
- RangeTblEntry *rte);
+ RangeTblEntry *rte);
static void set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
static void set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
- Index rti, RangeTblEntry *rte);
+ Index rti, RangeTblEntry *rte);
static void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
Index rti, RangeTblEntry *rte);
static void generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel,
@@ -118,7 +118,7 @@ make_one_rel(PlannerInfo *root, List *joinlist)
if (brel == NULL)
continue;
- Assert(brel->relid == rti); /* sanity check on array */
+ Assert(brel->relid == rti); /* sanity check on array */
/* ignore RTEs that are "other rels" */
if (brel->reloptkind != RELOPT_BASEREL)
@@ -211,7 +211,7 @@ set_base_rel_pathlists(PlannerInfo *root)
*/
static void
set_rel_size(PlannerInfo *root, RelOptInfo *rel,
- Index rti, RangeTblEntry *rte)
+ Index rti, RangeTblEntry *rte)
{
if (rel->reloptkind == RELOPT_BASEREL &&
relation_excluded_by_constraints(root, rel, rte))
@@ -251,6 +251,7 @@ set_rel_size(PlannerInfo *root, RelOptInfo *rel,
}
break;
case RTE_SUBQUERY:
+
/*
* Subqueries don't support parameterized paths, so just go
* ahead and build their paths immediately.
@@ -264,6 +265,7 @@ set_rel_size(PlannerInfo *root, RelOptInfo *rel,
set_values_size_estimates(root, rel);
break;
case RTE_CTE:
+
/*
* CTEs don't support parameterized paths, so just go ahead
* and build their paths immediately.
@@ -574,8 +576,8 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
/*
* It is possible that constraint exclusion detected a contradiction
- * within a child subquery, even though we didn't prove one above.
- * If so, we can skip this child.
+ * within a child subquery, even though we didn't prove one above. If
+ * so, we can skip this child.
*/
if (IS_DUMMY_REL(childrel))
continue;
@@ -590,7 +592,7 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
/*
* Accumulate per-column estimates too. We need not do anything
- * for PlaceHolderVars in the parent list. If child expression
+ * for PlaceHolderVars in the parent list. If child expression
* isn't a Var, or we didn't record a width estimate for it, we
* have to fall back on a datatype-based estimate.
*
@@ -609,7 +611,7 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
if (IsA(childvar, Var))
{
- int cndx = ((Var *) childvar)->varattno - childrel->min_attr;
+ int cndx = ((Var *) childvar)->varattno - childrel->min_attr;
child_width = childrel->attr_widths[cndx];
}
@@ -664,7 +666,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
/*
* Generate access paths for each member relation, and remember the
- * cheapest path for each one. Also, identify all pathkeys (orderings)
+ * cheapest path for each one. Also, identify all pathkeys (orderings)
* and parameterizations (required_outer sets) available for the member
* relations.
*/
@@ -708,7 +710,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
/*
* Collect lists of all the available path orderings and
- * parameterizations for all the children. We use these as a
+ * parameterizations for all the children. We use these as a
* heuristic to indicate which sort orderings and parameterizations we
* should build Append and MergeAppend paths for.
*/
@@ -753,7 +755,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
/* Have we already seen this param set? */
foreach(lco, all_child_outers)
{
- Relids existing_outers = (Relids) lfirst(lco);
+ Relids existing_outers = (Relids) lfirst(lco);
if (bms_equal(existing_outers, childouter))
{
@@ -791,7 +793,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
* so that not that many cases actually get considered here.)
*
* The Append node itself cannot enforce quals, so all qual checking must
- * be done in the child paths. This means that to have a parameterized
+ * be done in the child paths. This means that to have a parameterized
* Append path, we must have the exact same parameterization for each
* child path; otherwise some children might be failing to check the
* moved-down quals. To make them match up, we can try to increase the
@@ -799,7 +801,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
*/
foreach(l, all_child_outers)
{
- Relids required_outer = (Relids) lfirst(l);
+ Relids required_outer = (Relids) lfirst(l);
bool ok = true;
ListCell *lcr;
@@ -1115,9 +1117,9 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
rel->subroot = subroot;
/*
- * It's possible that constraint exclusion proved the subquery empty.
- * If so, it's convenient to turn it back into a dummy path so that we
- * will recognize appropriate optimizations at this level.
+ * It's possible that constraint exclusion proved the subquery empty. If
+ * so, it's convenient to turn it back into a dummy path so that we will
+ * recognize appropriate optimizations at this level.
*/
if (is_dummy_plan(rel->subplan))
{
@@ -1639,7 +1641,7 @@ qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
/*
* It would be unsafe to push down window function calls, but at least for
- * the moment we could never see any in a qual anyhow. (The same applies
+ * the moment we could never see any in a qual anyhow. (The same applies
* to aggregates, which we check for in pull_var_clause below.)
*/
Assert(!contain_window_function(qual));
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index e45bc121e4e..480c1b7425c 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -432,7 +432,7 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
* qual clauses that we have to evaluate as qpquals. We approximate that
* list as allclauses minus any clauses appearing in indexquals. (We
* assume that pointer equality is enough to recognize duplicate
- * RestrictInfos.) This method neglects some considerations such as
+ * RestrictInfos.) This method neglects some considerations such as
* clauses that needn't be checked because they are implied by a partial
* index's predicate. It does not seem worth the cycles to try to factor
* those things in at this stage, even though createplan.c will take pains
@@ -3135,7 +3135,7 @@ get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
* innerrel: inner relation under consideration
* jointype: must be JOIN_SEMI or JOIN_ANTI
* sjinfo: SpecialJoinInfo relevant to this join
- * restrictlist: join quals
+ * restrictlist: join quals
* Output parameters:
* *semifactors is filled in (see relation.h for field definitions)
*/
@@ -3221,8 +3221,8 @@ compute_semi_anti_join_factors(PlannerInfo *root,
*
* Note: it is correct to use the inner rel's "rows" count here, even
* though we might later be considering a parameterized inner path with
- * fewer rows. This is because we have included all the join clauses
- * in the selectivity estimate.
+ * fewer rows. This is because we have included all the join clauses in
+ * the selectivity estimate.
*/
if (jselec > 0) /* protect against zero divide */
{
@@ -3271,17 +3271,18 @@ has_indexed_join_quals(NestPath *joinpath)
indexclauses = ((IndexPath *) innerpath)->indexclauses;
break;
case T_BitmapHeapScan:
- {
- /* Accept only a simple bitmap scan, not AND/OR cases */
- Path *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
-
- if (IsA(bmqual, IndexPath))
- indexclauses = ((IndexPath *) bmqual)->indexclauses;
- else
- return false;
- break;
- }
+ {
+ /* Accept only a simple bitmap scan, not AND/OR cases */
+ Path *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
+
+ if (IsA(bmqual, IndexPath))
+ indexclauses = ((IndexPath *) bmqual)->indexclauses;
+ else
+ return false;
+ break;
+ }
default:
+
/*
* If it's not a simple indexscan, it probably doesn't run quickly
* for zero rows out, even if it's a parameterized path using all
@@ -3293,8 +3294,8 @@ has_indexed_join_quals(NestPath *joinpath)
/*
* Examine the inner path's param clauses. Any that are from the outer
* path must be found in the indexclauses list, either exactly or in an
- * equivalent form generated by equivclass.c. Also, we must find at
- * least one such clause, else it's a clauseless join which isn't fast.
+ * equivalent form generated by equivclass.c. Also, we must find at least
+ * one such clause, else it's a clauseless join which isn't fast.
*/
found_one = false;
foreach(lc, innerpath->param_info->ppi_clauses)
diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c
index bb196b8f2a4..e34b9553bd4 100644
--- a/src/backend/optimizer/path/equivclass.c
+++ b/src/backend/optimizer/path/equivclass.c
@@ -494,11 +494,11 @@ add_eq_member(EquivalenceClass *ec, Expr *expr, Relids relids,
*
* If rel is not NULL, it identifies a specific relation we're considering
* a path for, and indicates that child EC members for that relation can be
- * considered. Otherwise child members are ignored. (Note: since child EC
+ * considered. Otherwise child members are ignored. (Note: since child EC
* members aren't guaranteed unique, a non-NULL value means that there could
* be more than one EC that matches the expression; if so it's order-dependent
* which one you get. This is annoying but it only happens in corner cases,
- * so for now we live with just reporting the first match. See also
+ * so for now we live with just reporting the first match. See also
* generate_implied_equalities_for_indexcol and match_pathkeys_to_index.)
*
* If create_it is TRUE, we'll build a new EquivalenceClass when there is no
@@ -922,8 +922,8 @@ generate_base_implied_equalities_broken(PlannerInfo *root,
* built any join RelOptInfos.
*
* An annoying special case for parameterized scans is that the inner rel can
- * be an appendrel child (an "other rel"). In this case we must generate
- * appropriate clauses using child EC members. add_child_rel_equivalences
+ * be an appendrel child (an "other rel"). In this case we must generate
+ * appropriate clauses using child EC members. add_child_rel_equivalences
* must already have been done for the child rel.
*
* The results are sufficient for use in merge, hash, and plain nestloop join
@@ -1002,9 +1002,9 @@ generate_join_implied_equalities(PlannerInfo *root,
if (ec->ec_broken)
sublist = generate_join_implied_equalities_broken(root,
ec,
- nominal_join_relids,
+ nominal_join_relids,
outer_relids,
- nominal_inner_relids,
+ nominal_inner_relids,
inner_appinfo);
result = list_concat(result, sublist);
@@ -1217,9 +1217,9 @@ generate_join_implied_equalities_broken(PlannerInfo *root,
/*
* If we have to translate, just brute-force apply adjust_appendrel_attrs
* to all the RestrictInfos at once. This will result in returning
- * RestrictInfos that are not listed in ec_derives, but there shouldn't
- * be any duplication, and it's a sufficiently narrow corner case that
- * we shouldn't sweat too much over it anyway.
+ * RestrictInfos that are not listed in ec_derives, but there shouldn't be
+ * any duplication, and it's a sufficiently narrow corner case that we
+ * shouldn't sweat too much over it anyway.
*/
if (inner_appinfo)
result = (List *) adjust_appendrel_attrs(root, (Node *) result,
@@ -1966,7 +1966,7 @@ mutate_eclass_expressions(PlannerInfo *root,
* is a redundant list of clauses equating the index column to each of
* the other-relation values it is known to be equal to. Any one of
* these clauses can be used to create a parameterized indexscan, and there
- * is no value in using more than one. (But it *is* worthwhile to create
+ * is no value in using more than one. (But it *is* worthwhile to create
* a separate parameterized path for each one, since that leads to different
* join orders.)
*/
@@ -2014,7 +2014,7 @@ generate_implied_equalities_for_indexcol(PlannerInfo *root,
* the target relation. (Unlike regular members, the same expression
* could be a child member of more than one EC. Therefore, it's
* potentially order-dependent which EC a child relation's index
- * column gets matched to. This is annoying but it only happens in
+ * column gets matched to. This is annoying but it only happens in
* corner cases, so for now we live with just reporting the first
* match. See also get_eclass_for_sort_expr.)
*/
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index 05530054e13..2e8ccd05785 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -103,12 +103,12 @@ static List *build_paths_for_OR(PlannerInfo *root, RelOptInfo *rel,
List *clauses, List *other_clauses);
static List *drop_indexable_join_clauses(RelOptInfo *rel, List *clauses);
static Path *choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
- List *paths);
+ List *paths);
static int path_usage_comparator(const void *a, const void *b);
static Cost bitmap_scan_cost_est(PlannerInfo *root, RelOptInfo *rel,
- Path *ipath);
+ Path *ipath);
static Cost bitmap_and_cost_est(PlannerInfo *root, RelOptInfo *rel,
- List *paths);
+ List *paths);
static PathClauseUsage *classify_index_clause_usage(Path *path,
List **clauselist);
static Relids get_bitmap_tree_required_outer(Path *bitmapqual);
@@ -117,15 +117,15 @@ static int find_list_position(Node *node, List **nodelist);
static bool check_index_only(RelOptInfo *rel, IndexOptInfo *index);
static double get_loop_count(PlannerInfo *root, Relids outer_relids);
static void match_restriction_clauses_to_index(RelOptInfo *rel,
- IndexOptInfo *index,
- IndexClauseSet *clauseset);
+ IndexOptInfo *index,
+ IndexClauseSet *clauseset);
static void match_join_clauses_to_index(PlannerInfo *root,
RelOptInfo *rel, IndexOptInfo *index,
IndexClauseSet *clauseset,
List **joinorclauses);
static void match_eclass_clauses_to_index(PlannerInfo *root,
- IndexOptInfo *index,
- IndexClauseSet *clauseset);
+ IndexOptInfo *index,
+ IndexClauseSet *clauseset);
static void match_clauses_to_index(IndexOptInfo *index,
List *clauses,
IndexClauseSet *clauseset);
@@ -237,7 +237,7 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
match_restriction_clauses_to_index(rel, index, &rclauseset);
/*
- * Build index paths from the restriction clauses. These will be
+ * Build index paths from the restriction clauses. These will be
* non-parameterized paths. Plain paths go directly to add_path(),
* bitmap paths are added to bitindexpaths to be handled below.
*/
@@ -245,25 +245,25 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
&bitindexpaths);
/*
- * Identify the join clauses that can match the index. For the moment
- * we keep them separate from the restriction clauses. Note that
- * this finds only "loose" join clauses that have not been merged
- * into EquivalenceClasses. Also, collect join OR clauses for later.
+ * Identify the join clauses that can match the index. For the moment
+ * we keep them separate from the restriction clauses. Note that this
+ * finds only "loose" join clauses that have not been merged into
+ * EquivalenceClasses. Also, collect join OR clauses for later.
*/
MemSet(&jclauseset, 0, sizeof(jclauseset));
match_join_clauses_to_index(root, rel, index,
&jclauseset, &joinorclauses);
/*
- * Look for EquivalenceClasses that can generate joinclauses
- * matching the index.
+ * Look for EquivalenceClasses that can generate joinclauses matching
+ * the index.
*/
MemSet(&eclauseset, 0, sizeof(eclauseset));
match_eclass_clauses_to_index(root, index, &eclauseset);
/*
- * If we found any plain or eclass join clauses, decide what to
- * do with 'em.
+ * If we found any plain or eclass join clauses, decide what to do
+ * with 'em.
*/
if (jclauseset.nonempty || eclauseset.nonempty)
consider_index_join_clauses(root, rel, index,
@@ -287,7 +287,7 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
* the joinclause list. Add these to bitjoinpaths.
*/
indexpaths = generate_bitmap_or_paths(root, rel,
- joinorclauses, rel->baserestrictinfo,
+ joinorclauses, rel->baserestrictinfo,
false);
bitjoinpaths = list_concat(bitjoinpaths, indexpaths);
@@ -313,7 +313,7 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
* the most promising combination of join bitmap index paths. Note there
* will be only one such path no matter how many join clauses are
* available. (XXX is that good enough, or do we need to consider even
- * more paths for different subsets of possible join partners? Also,
+ * more paths for different subsets of possible join partners? Also,
* should we add in restriction bitmap paths as well?)
*/
if (bitjoinpaths != NIL)
@@ -366,19 +366,19 @@ consider_index_join_clauses(PlannerInfo *root, RelOptInfo *rel,
* We can always include any restriction clauses in the index clauses.
* However, it's not obvious which subsets of the join clauses are worth
* generating paths from, and it's unlikely that considering every
- * possible subset is worth the cycles. Our current heuristic is based
- * on the index columns, with the idea that later index columns are less
+ * possible subset is worth the cycles. Our current heuristic is based on
+ * the index columns, with the idea that later index columns are less
* useful than earlier ones; therefore it's unlikely to be worth trying
* combinations that would remove a clause from an earlier index column
- * while adding one to a later column. Also, we know that all the
- * eclass clauses for a particular column are redundant, so we should
- * use only one of them. However, eclass clauses will always represent
- * equality which is the strongest type of index constraint, so those
- * are high-value and we should try every available combination when we
- * have eclass clauses for more than one column. Furthermore, it's
- * unlikely to be useful to combine an eclass clause with non-eclass
- * clauses for the same index column. These considerations lead to the
- * following heuristics:
+ * while adding one to a later column. Also, we know that all the eclass
+ * clauses for a particular column are redundant, so we should use only
+ * one of them. However, eclass clauses will always represent equality
+ * which is the strongest type of index constraint, so those are
+ * high-value and we should try every available combination when we have
+ * eclass clauses for more than one column. Furthermore, it's unlikely to
+ * be useful to combine an eclass clause with non-eclass clauses for the
+ * same index column. These considerations lead to the following
+ * heuristics:
*
* First, start with the restriction clauses, and add on all simple join
* clauses for column 1. If there are any such join clauses, generate
@@ -387,7 +387,7 @@ consider_index_join_clauses(PlannerInfo *root, RelOptInfo *rel,
* any other clauses we have for column 1.
*
* Next, add on all simple join clauses for column 2. If there are any
- * such join clauses, generate paths with this collection. If there are
+ * such join clauses, generate paths with this collection. If there are
* eclass clauses for columns 1 or 2, generate paths with each such clause
* replacing other clauses for its index column, including cases where we
* use restriction or simple join clauses for one column and an eclass
@@ -519,7 +519,7 @@ expand_eclass_clause_combinations(PlannerInfo *root, RelOptInfo *rel,
* bitmap indexpaths are added to *bitindexpaths for later processing.
*
* This is a fairly simple frontend to build_index_paths(). Its reason for
- * existence is mainly to handle ScalarArrayOpExpr quals properly. If the
+ * existence is mainly to handle ScalarArrayOpExpr quals properly. If the
* index AM supports them natively, we should just include them in simple
* index paths. If not, we should exclude them while building simple index
* paths, and then make a separate attempt to include them in bitmap paths.
@@ -533,7 +533,7 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel,
ListCell *lc;
/*
- * Build simple index paths using the clauses. Allow ScalarArrayOpExpr
+ * Build simple index paths using the clauses. Allow ScalarArrayOpExpr
* clauses only if the index AM supports them natively.
*/
indexpaths = build_index_paths(root, rel,
@@ -542,17 +542,16 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel,
SAOP_PER_AM, ST_ANYSCAN);
/*
- * Submit all the ones that can form plain IndexScan plans to add_path.
- * (A plain IndexPath can represent either a plain IndexScan or an
+ * Submit all the ones that can form plain IndexScan plans to add_path. (A
+ * plain IndexPath can represent either a plain IndexScan or an
* IndexOnlyScan, but for our purposes here that distinction does not
- * matter. However, some of the indexes might support only bitmap scans,
+ * matter. However, some of the indexes might support only bitmap scans,
* and those we mustn't submit to add_path here.)
*
- * Also, pick out the ones that are usable as bitmap scans. For that,
- * we must discard indexes that don't support bitmap scans, and we
- * also are only interested in paths that have some selectivity; we
- * should discard anything that was generated solely for ordering
- * purposes.
+ * Also, pick out the ones that are usable as bitmap scans. For that, we
+ * must discard indexes that don't support bitmap scans, and we also are
+ * only interested in paths that have some selectivity; we should discard
+ * anything that was generated solely for ordering purposes.
*/
foreach(lc, indexpaths)
{
@@ -568,9 +567,9 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel,
}
/*
- * If the index doesn't handle ScalarArrayOpExpr clauses natively,
- * check to see if there are any such clauses, and if so generate
- * bitmap scan paths relying on executor-managed ScalarArrayOpExpr.
+ * If the index doesn't handle ScalarArrayOpExpr clauses natively, check
+ * to see if there are any such clauses, and if so generate bitmap scan
+ * paths relying on executor-managed ScalarArrayOpExpr.
*/
if (!index->amsearcharray)
{
@@ -590,7 +589,7 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel,
* We return a list of paths because (1) this routine checks some cases
* that should cause us to not generate any IndexPath, and (2) in some
* cases we want to consider both a forward and a backward scan, so as
- * to obtain both sort orders. Note that the paths are just returned
+ * to obtain both sort orders. Note that the paths are just returned
* to the caller and not immediately fed to add_path().
*
* At top level, useful_predicate should be exactly the index's predOK flag
@@ -658,19 +657,19 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel,
/*
* 1. Collect the index clauses into a single list.
*
- * We build a list of RestrictInfo nodes for clauses to be used with
- * this index, along with an integer list of the index column numbers
- * (zero based) that each clause should be used with. The clauses are
- * ordered by index key, so that the column numbers form a nondecreasing
- * sequence. (This order is depended on by btree and possibly other
- * places.) The lists can be empty, if the index AM allows that.
+ * We build a list of RestrictInfo nodes for clauses to be used with this
+ * index, along with an integer list of the index column numbers (zero
+ * based) that each clause should be used with. The clauses are ordered
+ * by index key, so that the column numbers form a nondecreasing sequence.
+ * (This order is depended on by btree and possibly other places.) The
+ * lists can be empty, if the index AM allows that.
*
- * found_clause is set true only if there's at least one index clause;
- * and if saop_control is SAOP_REQUIRE, it has to be a ScalarArrayOpExpr
+ * found_clause is set true only if there's at least one index clause; and
+ * if saop_control is SAOP_REQUIRE, it has to be a ScalarArrayOpExpr
* clause.
*
- * We also build a Relids set showing which outer rels are required
- * by the selected clauses.
+ * We also build a Relids set showing which outer rels are required by the
+ * selected clauses.
*/
index_clauses = NIL;
clause_columns = NIL;
@@ -706,8 +705,8 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel,
* If no clauses match the first index column, check for amoptionalkey
* restriction. We can't generate a scan over an index with
* amoptionalkey = false unless there's at least one index clause.
- * (When working on columns after the first, this test cannot fail.
- * It is always okay for columns after the first to not have any
+ * (When working on columns after the first, this test cannot fail. It
+ * is always okay for columns after the first to not have any
* clauses.)
*/
if (index_clauses == NIL && !index->amoptionalkey)
@@ -759,7 +758,7 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel,
}
/*
- * 3. Check if an index-only scan is possible. If we're not building
+ * 3. Check if an index-only scan is possible. If we're not building
* plain indexscans, this isn't relevant since bitmap scans don't support
* index data retrieval anyway.
*/
@@ -865,8 +864,8 @@ build_paths_for_OR(PlannerInfo *root, RelOptInfo *rel,
/*
* Ignore partial indexes that do not match the query. If a partial
- * index is marked predOK then we know it's OK. Otherwise, we have
- * to test whether the added clauses are sufficient to imply the
+ * index is marked predOK then we know it's OK. Otherwise, we have to
+ * test whether the added clauses are sufficient to imply the
* predicate. If so, we can use the index in the current context.
*
* We set useful_predicate to true iff the predicate was proven using
@@ -904,8 +903,8 @@ build_paths_for_OR(PlannerInfo *root, RelOptInfo *rel,
match_clauses_to_index(index, clauses, &clauseset);
/*
- * If no matches so far, and the index predicate isn't useful,
- * we don't want it.
+ * If no matches so far, and the index predicate isn't useful, we
+ * don't want it.
*/
if (!clauseset.nonempty && !useful_predicate)
continue;
@@ -997,7 +996,7 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
generate_bitmap_or_paths(root, rel,
andargs,
all_clauses,
- restriction_only));
+ restriction_only));
}
else
{
@@ -1053,7 +1052,7 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
*
* This is a helper for generate_bitmap_or_paths(). We leave OR clauses
* in the list whether they are joins or not, since we might be able to
- * extract a restriction item from an OR list. It's safe to leave such
+ * extract a restriction item from an OR list. It's safe to leave such
* clauses in the list because match_clauses_to_index() will ignore them,
* so there's no harm in passing such clauses to build_paths_for_OR().
*/
@@ -1361,7 +1360,7 @@ bitmap_and_cost_est(PlannerInfo *root, RelOptInfo *rel, List *paths)
apath.path.type = T_BitmapAndPath;
apath.path.pathtype = T_BitmapAnd;
apath.path.parent = rel;
- apath.path.param_info = NULL; /* not used in bitmap trees */
+ apath.path.param_info = NULL; /* not used in bitmap trees */
apath.path.pathkeys = NIL;
apath.bitmapquals = paths;
cost_bitmap_and_node(&apath, root);
@@ -1464,7 +1463,7 @@ get_bitmap_tree_required_outer(Path *bitmapqual)
foreach(lc, ((BitmapAndPath *) bitmapqual)->bitmapquals)
{
result = bms_join(result,
- get_bitmap_tree_required_outer((Path *) lfirst(lc)));
+ get_bitmap_tree_required_outer((Path *) lfirst(lc)));
}
}
else if (IsA(bitmapqual, BitmapOrPath))
@@ -1472,7 +1471,7 @@ get_bitmap_tree_required_outer(Path *bitmapqual)
foreach(lc, ((BitmapOrPath *) bitmapqual)->bitmapquals)
{
result = bms_join(result,
- get_bitmap_tree_required_outer((Path *) lfirst(lc)));
+ get_bitmap_tree_required_outer((Path *) lfirst(lc)));
}
}
else
@@ -1581,16 +1580,16 @@ check_index_only(RelOptInfo *rel, IndexOptInfo *index)
return false;
/*
- * Check that all needed attributes of the relation are available from
- * the index.
+ * Check that all needed attributes of the relation are available from the
+ * index.
*
* XXX this is overly conservative for partial indexes, since we will
* consider attributes involved in the index predicate as required even
- * though the predicate won't need to be checked at runtime. (The same
- * is true for attributes used only in index quals, if we are certain
- * that the index is not lossy.) However, it would be quite expensive
- * to determine that accurately at this point, so for now we take the
- * easy way out.
+ * though the predicate won't need to be checked at runtime. (The same is
+ * true for attributes used only in index quals, if we are certain that
+ * the index is not lossy.) However, it would be quite expensive to
+ * determine that accurately at this point, so for now we take the easy
+ * way out.
*/
/*
@@ -1603,7 +1602,7 @@ check_index_only(RelOptInfo *rel, IndexOptInfo *index)
/* Add all the attributes used by restriction clauses. */
foreach(lc, rel->baserestrictinfo)
{
- RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
pull_varattnos((Node *) rinfo->clause, rel->relid, &attrs_used);
}
@@ -1611,7 +1610,7 @@ check_index_only(RelOptInfo *rel, IndexOptInfo *index)
/* Construct a bitmapset of columns stored in the index. */
for (i = 0; i < index->ncolumns; i++)
{
- int attno = index->indexkeys[i];
+ int attno = index->indexkeys[i];
/*
* For the moment, we just ignore index expressions. It might be nice
@@ -1642,7 +1641,7 @@ check_index_only(RelOptInfo *rel, IndexOptInfo *index)
* Since we produce parameterized paths before we've begun to generate join
* relations, it's impossible to predict exactly how many times a parameterized
* path will be iterated; we don't know the size of the relation that will be
- * on the outside of the nestloop. However, we should try to account for
+ * on the outside of the nestloop. However, we should try to account for
* multiple iterations somehow in costing the path. The heuristic embodied
* here is to use the rowcount of the smallest other base relation needed in
* the join clauses used by the path. (We could alternatively consider the
@@ -1676,7 +1675,7 @@ get_loop_count(PlannerInfo *root, Relids outer_relids)
outer_rel = root->simple_rel_array[relid];
if (outer_rel == NULL)
continue;
- Assert(outer_rel->relid == relid); /* sanity check on array */
+ Assert(outer_rel->relid == relid); /* sanity check on array */
/* Other relation could be proven empty, if so ignore */
if (IS_DUMMY_REL(outer_rel))
@@ -1851,7 +1850,7 @@ match_clause_to_index(IndexOptInfo *index,
* doesn't involve a volatile function or a Var of the index's relation.
* In particular, Vars belonging to other relations of the query are
* accepted here, since a clause of that form can be used in a
- * parameterized indexscan. It's the responsibility of higher code levels
+ * parameterized indexscan. It's the responsibility of higher code levels
* to manage restriction and join clauses appropriately.
*
* Note: we do need to check for Vars of the index's relation on the
@@ -2149,7 +2148,7 @@ match_pathkeys_to_index(IndexOptInfo *index, List *pathkeys,
List *clause_columns = NIL;
ListCell *lc1;
- *orderby_clauses_p = NIL; /* set default results */
+ *orderby_clauses_p = NIL; /* set default results */
*clause_columns_p = NIL;
/* Only indexes with the amcanorderbyop property are interesting here */
@@ -2195,9 +2194,9 @@ match_pathkeys_to_index(IndexOptInfo *index, List *pathkeys,
/*
* We allow any column of the index to match each pathkey; they
- * don't have to match left-to-right as you might expect. This
- * is correct for GiST, which is the sole existing AM supporting
- * amcanorderbyop. We might need different logic in future for
+ * don't have to match left-to-right as you might expect. This is
+ * correct for GiST, which is the sole existing AM supporting
+ * amcanorderbyop. We might need different logic in future for
* other implementations.
*/
for (indexcol = 0; indexcol < index->ncolumns; indexcol++)
@@ -2393,8 +2392,8 @@ eclass_member_matches_indexcol(EquivalenceClass *ec, EquivalenceMember *em,
* If it's a btree index, we can reject it if its opfamily isn't
* compatible with the EC, since no clause generated from the EC could be
* used with the index. For non-btree indexes, we can't easily tell
- * whether clauses generated from the EC could be used with the index,
- * so don't check the opfamily. This might mean we return "true" for a
+ * whether clauses generated from the EC could be used with the index, so
+ * don't check the opfamily. This might mean we return "true" for a
* useless EC, so we have to recheck the results of
* generate_implied_equalities_for_indexcol; see
* match_eclass_clauses_to_index.
@@ -2425,7 +2424,7 @@ eclass_member_matches_indexcol(EquivalenceClass *ec, EquivalenceMember *em,
* if it is true.
* 2. A list of expressions in this relation, and a corresponding list of
* equality operators. The caller must have already checked that the operators
- * represent equality. (Note: the operators could be cross-type; the
+ * represent equality. (Note: the operators could be cross-type; the
* expressions should correspond to their RHS inputs.)
*
* The caller need only supply equality conditions arising from joins;
@@ -2571,7 +2570,7 @@ relation_has_unique_index_for(PlannerInfo *root, RelOptInfo *rel,
* notion of equality.
*/
- matched = true; /* column is unique */
+ matched = true; /* column is unique */
break;
}
@@ -3300,9 +3299,9 @@ adjust_rowcompare_for_index(RowCompareExpr *clause,
/*
* See how many of the remaining columns match some index column in the
- * same way. As in match_clause_to_indexcol(), the "other" side of
- * any potential index condition is OK as long as it doesn't use Vars from
- * the indexed relation.
+ * same way. As in match_clause_to_indexcol(), the "other" side of any
+ * potential index condition is OK as long as it doesn't use Vars from the
+ * indexed relation.
*/
matching_cols = 1;
largs_cell = lnext(list_head(clause->largs));
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index 446319d1356..65f86194e15 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -103,7 +103,7 @@ add_paths_to_joinrel(PlannerInfo *root,
/*
* If it's SEMI or ANTI join, compute correction factors for cost
- * estimation. These will be the same for all paths.
+ * estimation. These will be the same for all paths.
*/
if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
compute_semi_anti_join_factors(root, outerrel, innerrel,
@@ -118,7 +118,7 @@ add_paths_to_joinrel(PlannerInfo *root,
* to the parameter source rel instead of joining to the other input rel.
* This restriction reduces the number of parameterized paths we have to
* deal with at higher join levels, without compromising the quality of
- * the resulting plan. We express the restriction as a Relids set that
+ * the resulting plan. We express the restriction as a Relids set that
* must overlap the parameterization of any proposed join path.
*/
foreach(lc, root->join_info_list)
@@ -136,7 +136,7 @@ add_paths_to_joinrel(PlannerInfo *root,
!bms_overlap(joinrel->relids, sjinfo->min_lefthand))
param_source_rels = bms_join(param_source_rels,
bms_difference(root->all_baserels,
- sjinfo->min_righthand));
+ sjinfo->min_righthand));
/* full joins constrain both sides symmetrically */
if (sjinfo->jointype == JOIN_FULL &&
@@ -144,7 +144,7 @@ add_paths_to_joinrel(PlannerInfo *root,
!bms_overlap(joinrel->relids, sjinfo->min_righthand))
param_source_rels = bms_join(param_source_rels,
bms_difference(root->all_baserels,
- sjinfo->min_lefthand));
+ sjinfo->min_lefthand));
}
/*
@@ -216,11 +216,11 @@ try_nestloop_path(PlannerInfo *root,
List *pathkeys)
{
Relids required_outer;
- JoinCostWorkspace workspace;
+ JoinCostWorkspace workspace;
/*
- * Check to see if proposed path is still parameterized, and reject if
- * the parameterization wouldn't be sensible.
+ * Check to see if proposed path is still parameterized, and reject if the
+ * parameterization wouldn't be sensible.
*/
required_outer = calc_nestloop_required_outer(outer_path,
inner_path);
@@ -289,14 +289,14 @@ try_mergejoin_path(PlannerInfo *root,
List *innersortkeys)
{
Relids required_outer;
- JoinCostWorkspace workspace;
+ JoinCostWorkspace workspace;
/*
- * Check to see if proposed path is still parameterized, and reject if
- * the parameterization wouldn't be sensible.
+ * Check to see if proposed path is still parameterized, and reject if the
+ * parameterization wouldn't be sensible.
*/
required_outer = calc_non_nestloop_required_outer(outer_path,
- inner_path);
+ inner_path);
if (required_outer &&
!bms_overlap(required_outer, param_source_rels))
{
@@ -368,14 +368,14 @@ try_hashjoin_path(PlannerInfo *root,
List *hashclauses)
{
Relids required_outer;
- JoinCostWorkspace workspace;
+ JoinCostWorkspace workspace;
/*
- * Check to see if proposed path is still parameterized, and reject if
- * the parameterization wouldn't be sensible.
+ * Check to see if proposed path is still parameterized, and reject if the
+ * parameterization wouldn't be sensible.
*/
required_outer = calc_non_nestloop_required_outer(outer_path,
- inner_path);
+ inner_path);
if (required_outer &&
!bms_overlap(required_outer, param_source_rels))
{
@@ -487,7 +487,7 @@ sort_inner_and_outer(PlannerInfo *root,
*
* This function intentionally does not consider parameterized input paths
* (implicit in the fact that it only looks at cheapest_total_path, which
- * is always unparameterized). If we did so, we'd have a combinatorial
+ * is always unparameterized). If we did so, we'd have a combinatorial
* explosion of mergejoin paths of dubious value. This interacts with
* decisions elsewhere that also discriminate against mergejoins with
* parameterized inputs; see comments in src/backend/optimizer/README.
@@ -582,8 +582,8 @@ sort_inner_and_outer(PlannerInfo *root,
* And now we can make the path.
*
* Note: it's possible that the cheapest paths will already be sorted
- * properly. try_mergejoin_path will detect that case and suppress
- * an explicit sort step, so we needn't do so here.
+ * properly. try_mergejoin_path will detect that case and suppress an
+ * explicit sort step, so we needn't do so here.
*/
try_mergejoin_path(root,
joinrel,
@@ -733,8 +733,8 @@ match_unsorted_outer(PlannerInfo *root,
/*
* If we need to unique-ify the outer path, it's pointless to consider
- * any but the cheapest outer. (XXX we don't consider parameterized
- * outers, nor inners, for unique-ified cases. Should we?)
+ * any but the cheapest outer. (XXX we don't consider parameterized
+ * outers, nor inners, for unique-ified cases. Should we?)
*/
if (save_jointype == JOIN_UNIQUE_OUTER)
{
@@ -774,9 +774,9 @@ match_unsorted_outer(PlannerInfo *root,
{
/*
* Consider nestloop joins using this outer path and various
- * available paths for the inner relation. We consider the
- * cheapest-total paths for each available parameterization of
- * the inner relation, including the unparameterized case.
+ * available paths for the inner relation. We consider the
+ * cheapest-total paths for each available parameterization of the
+ * inner relation, including the unparameterized case.
*/
ListCell *lc2;
@@ -847,8 +847,8 @@ match_unsorted_outer(PlannerInfo *root,
/*
* Generate a mergejoin on the basis of sorting the cheapest inner.
* Since a sort will be needed, only cheapest total cost matters. (But
- * try_mergejoin_path will do the right thing if
- * inner_cheapest_total is already correctly sorted.)
+ * try_mergejoin_path will do the right thing if inner_cheapest_total
+ * is already correctly sorted.)
*/
try_mergejoin_path(root,
joinrel,
@@ -873,9 +873,9 @@ match_unsorted_outer(PlannerInfo *root,
* mergejoin using a subset of the merge clauses. Here, we consider
* both cheap startup cost and cheap total cost.
*
- * Currently we do not consider parameterized inner paths here.
- * This interacts with decisions elsewhere that also discriminate
- * against mergejoins with parameterized inputs; see comments in
+ * Currently we do not consider parameterized inner paths here. This
+ * interacts with decisions elsewhere that also discriminate against
+ * mergejoins with parameterized inputs; see comments in
* src/backend/optimizer/README.
*
* As we shorten the sortkey list, we should consider only paths that
@@ -1189,7 +1189,7 @@ hash_inner_and_outer(PlannerInfo *root,
if (outerpath == cheapest_startup_outer &&
innerpath == cheapest_total_inner)
- continue; /* already tried it */
+ continue; /* already tried it */
try_hashjoin_path(root,
joinrel,
diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c
index 24d46515070..e6a0f8dab6d 100644
--- a/src/backend/optimizer/path/joinrels.c
+++ b/src/backend/optimizer/path/joinrels.c
@@ -90,7 +90,7 @@ join_search_one_level(PlannerInfo *root, int level)
if (level == 2) /* consider remaining initial rels */
other_rels = lnext(r);
- else /* consider all initial rels */
+ else /* consider all initial rels */
other_rels = list_head(joinrels[1]);
make_rels_by_clause_joins(root,
@@ -180,7 +180,7 @@ join_search_one_level(PlannerInfo *root, int level)
/*----------
* Normally, we should always have made at least one join of the current
* level. However, when special joins are involved, there may be no legal
- * way to make an N-way join for some values of N. For example consider
+ * way to make an N-way join for some values of N. For example consider
*
* SELECT ... FROM t1 WHERE
* x IN (SELECT ... FROM t2,t3 WHERE ...) AND
diff --git a/src/backend/optimizer/path/orindxpath.c b/src/backend/optimizer/path/orindxpath.c
index cab79518919..c918c4e8da9 100644
--- a/src/backend/optimizer/path/orindxpath.c
+++ b/src/backend/optimizer/path/orindxpath.c
@@ -95,8 +95,8 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
/*
* Find potentially interesting OR joinclauses. We can use any joinclause
* that is considered safe to move to this rel by the parameterized-path
- * machinery, even though what we are going to do with it is not exactly
- * a parameterized path.
+ * machinery, even though what we are going to do with it is not exactly a
+ * parameterized path.
*/
foreach(i, rel->joininfo)
{
@@ -109,7 +109,7 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
* Use the generate_bitmap_or_paths() machinery to estimate the
* value of each OR clause. We can use regular restriction
* clauses along with the OR clause contents to generate
- * indexquals. We pass restriction_only = true so that any
+ * indexquals. We pass restriction_only = true so that any
* sub-clauses that are actually joins will be ignored.
*/
List *orpaths;
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index 0603a94e482..20a5644edd8 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -223,7 +223,7 @@ canonicalize_pathkeys(PlannerInfo *root, List *pathkeys)
*
* If rel is not NULL, it identifies a specific relation we're considering
* a path for, and indicates that child EC members for that relation can be
- * considered. Otherwise child members are ignored. (See the comments for
+ * considered. Otherwise child members are ignored. (See the comments for
* get_eclass_for_sort_expr.)
*
* create_it is TRUE if we should create any missing EquivalenceClass
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index c34b9b8c38e..65ad1694b07 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -1138,10 +1138,10 @@ create_indexscan_plan(PlannerInfo *root,
/*
* The qpqual list must contain all restrictions not automatically handled
* by the index, other than pseudoconstant clauses which will be handled
- * by a separate gating plan node. All the predicates in the indexquals
+ * by a separate gating plan node. All the predicates in the indexquals
* will be checked (either by the index itself, or by nodeIndexscan.c),
* but if there are any "special" operators involved then they must be
- * included in qpqual. The upshot is that qpqual must contain
+ * included in qpqual. The upshot is that qpqual must contain
* scan_clauses minus whatever appears in indexquals.
*
* In normal cases simple pointer equality checks will be enough to spot
@@ -1189,7 +1189,7 @@ create_indexscan_plan(PlannerInfo *root,
get_parse_rowmark(root->parse, baserelid) == NULL)
if (predicate_implied_by(clausel,
best_path->indexinfo->indpred))
- continue; /* implied by index predicate */
+ continue; /* implied by index predicate */
}
}
qpqual = lappend(qpqual, rinfo);
@@ -1228,7 +1228,7 @@ create_indexscan_plan(PlannerInfo *root,
indexoid,
fixed_indexquals,
fixed_indexorderbys,
- best_path->indexinfo->indextlist,
+ best_path->indexinfo->indextlist,
best_path->indexscandir);
else
scan_plan = (Scan *) make_indexscan(tlist,
@@ -1278,15 +1278,15 @@ create_bitmap_scan_plan(PlannerInfo *root,
/*
* The qpqual list must contain all restrictions not automatically handled
* by the index, other than pseudoconstant clauses which will be handled
- * by a separate gating plan node. All the predicates in the indexquals
+ * by a separate gating plan node. All the predicates in the indexquals
* will be checked (either by the index itself, or by
* nodeBitmapHeapscan.c), but if there are any "special" operators
- * involved then they must be added to qpqual. The upshot is that qpqual
+ * involved then they must be added to qpqual. The upshot is that qpqual
* must contain scan_clauses minus whatever appears in indexquals.
*
* This loop is similar to the comparable code in create_indexscan_plan(),
* but with some differences because it has to compare the scan clauses to
- * stripped (no RestrictInfos) indexquals. See comments there for more
+ * stripped (no RestrictInfos) indexquals. See comments there for more
* info.
*
* In normal cases simple equal() checks will be enough to spot duplicate
@@ -1880,14 +1880,14 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
Assert(rte->rtekind == RTE_RELATION);
/*
- * Sort clauses into best execution order. We do this first since the
- * FDW might have more info than we do and wish to adjust the ordering.
+ * Sort clauses into best execution order. We do this first since the FDW
+ * might have more info than we do and wish to adjust the ordering.
*/
scan_clauses = order_qual_clauses(root, scan_clauses);
/*
* Let the FDW perform its processing on the restriction clauses and
- * generate the plan node. Note that the FDW might remove restriction
+ * generate the plan node. Note that the FDW might remove restriction
* clauses that it intends to execute remotely, or even add more (if it
* has selected some join clauses for remote use but also wants them
* rechecked locally).
@@ -2005,7 +2005,7 @@ create_nestloop_plan(PlannerInfo *root,
bms_overlap(((PlaceHolderVar *) nlp->paramval)->phrels,
outerrelids) &&
bms_is_subset(find_placeholder_info(root,
- (PlaceHolderVar *) nlp->paramval,
+ (PlaceHolderVar *) nlp->paramval,
false)->ph_eval_at,
outerrelids))
{
@@ -2523,9 +2523,9 @@ replace_nestloop_params_mutator(Node *node, PlannerInfo *root)
/*
* If not to be replaced, just return the PlaceHolderVar unmodified.
- * We use bms_overlap as a cheap/quick test to see if the PHV might
- * be evaluated in the outer rels, and then grab its PlaceHolderInfo
- * to tell for sure.
+ * We use bms_overlap as a cheap/quick test to see if the PHV might be
+ * evaluated in the outer rels, and then grab its PlaceHolderInfo to
+ * tell for sure.
*/
if (!bms_overlap(phv->phrels, root->curOuterRels))
return node;
@@ -2612,7 +2612,7 @@ fix_indexqual_references(PlannerInfo *root, IndexPath *index_path)
/*
* Check to see if the indexkey is on the right; if so, commute
- * the clause. The indexkey should be the side that refers to
+ * the clause. The indexkey should be the side that refers to
* (only) the base relation.
*/
if (!bms_equal(rinfo->left_relids, index->rel->relids))
@@ -3690,13 +3690,12 @@ prepare_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
{
/*
* If we are given a sort column number to match, only consider
- * the single TLE at that position. It's possible that there
- * is no such TLE, in which case fall through and generate a
- * resjunk targetentry (we assume this must have happened in the
- * parent plan as well). If there is a TLE but it doesn't match
- * the pathkey's EC, we do the same, which is probably the wrong
- * thing but we'll leave it to caller to complain about the
- * mismatch.
+ * the single TLE at that position. It's possible that there is
+ * no such TLE, in which case fall through and generate a resjunk
+ * targetentry (we assume this must have happened in the parent
+ * plan as well). If there is a TLE but it doesn't match the
+ * pathkey's EC, we do the same, which is probably the wrong thing
+ * but we'll leave it to caller to complain about the mismatch.
*/
tle = get_tle_by_resno(tlist, reqColIdx[numsortkeys]);
if (tle)
@@ -3746,11 +3745,11 @@ prepare_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
if (!tle)
{
/*
- * No matching tlist item; look for a computable expression.
- * Note that we treat Aggrefs as if they were variables; this
- * is necessary when attempting to sort the output from an Agg
- * node for use in a WindowFunc (since grouping_planner will
- * have treated the Aggrefs as variables, too).
+ * No matching tlist item; look for a computable expression. Note
+ * that we treat Aggrefs as if they were variables; this is
+ * necessary when attempting to sort the output from an Agg node
+ * for use in a WindowFunc (since grouping_planner will have
+ * treated the Aggrefs as variables, too).
*/
Expr *sortexpr = NULL;
@@ -3769,7 +3768,8 @@ prepare_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
continue;
/*
- * Ignore child members unless they match the rel being sorted.
+ * Ignore child members unless they match the rel being
+ * sorted.
*/
if (em->em_is_child &&
!bms_equal(em->em_relids, relids))
@@ -3817,7 +3817,7 @@ prepare_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
NULL,
true);
tlist = lappend(tlist, tle);
- lefttree->targetlist = tlist; /* just in case NIL before */
+ lefttree->targetlist = tlist; /* just in case NIL before */
}
/*
@@ -3877,8 +3877,7 @@ find_ec_member_for_tle(EquivalenceClass *ec,
/*
* We shouldn't be trying to sort by an equivalence class that
- * contains a constant, so no need to consider such cases any
- * further.
+ * contains a constant, so no need to consider such cases any further.
*/
if (em->em_is_const)
continue;
diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c
index 231e8353733..3c7fa632b8e 100644
--- a/src/backend/optimizer/plan/initsplan.c
+++ b/src/backend/optimizer/plan/initsplan.c
@@ -192,9 +192,9 @@ add_vars_to_targetlist(PlannerInfo *root, List *vars,
where_needed);
/*
- * If we are creating PlaceHolderInfos, mark them with the
- * correct maybe-needed locations. Otherwise, it's too late to
- * change that.
+ * If we are creating PlaceHolderInfos, mark them with the correct
+ * maybe-needed locations. Otherwise, it's too late to change
+ * that.
*/
if (create_new_ph)
mark_placeholder_maybe_needed(root, phinfo, where_needed);
diff --git a/src/backend/optimizer/plan/planagg.c b/src/backend/optimizer/plan/planagg.c
index 7e2c6d2c31f..be52d16ff06 100644
--- a/src/backend/optimizer/plan/planagg.c
+++ b/src/backend/optimizer/plan/planagg.c
@@ -116,9 +116,9 @@ preprocess_minmax_aggregates(PlannerInfo *root, List *tlist)
rtr = (RangeTblRef *) jtnode;
rte = planner_rt_fetch(rtr->rtindex, root);
if (rte->rtekind == RTE_RELATION)
- /* ordinary relation, ok */ ;
+ /* ordinary relation, ok */ ;
else if (rte->rtekind == RTE_SUBQUERY && rte->inh)
- /* flattened UNION ALL subquery, ok */ ;
+ /* flattened UNION ALL subquery, ok */ ;
else
return;
diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c
index c439e9652c1..9838dc45d5e 100644
--- a/src/backend/optimizer/plan/planmain.c
+++ b/src/backend/optimizer/plan/planmain.c
@@ -146,8 +146,8 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* Make a flattened version of the rangetable for faster access (this is
- * OK because the rangetable won't change any more), and set up an
- * empty array for indexing base relations.
+ * OK because the rangetable won't change any more), and set up an empty
+ * array for indexing base relations.
*/
setup_simple_rel_arrays(root);
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 0b1ee971df1..df76341c0a3 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -766,9 +766,9 @@ inheritance_planner(PlannerInfo *root)
/*
* The rowMarks list might contain references to subquery RTEs, so
- * make a copy that we can apply ChangeVarNodes to. (Fortunately,
- * the executor doesn't need to see the modified copies --- we can
- * just pass it the original rowMarks list.)
+ * make a copy that we can apply ChangeVarNodes to. (Fortunately, the
+ * executor doesn't need to see the modified copies --- we can just
+ * pass it the original rowMarks list.)
*/
subroot.rowMarks = (List *) copyObject(root->rowMarks);
@@ -784,10 +784,11 @@ inheritance_planner(PlannerInfo *root)
/*
* If this isn't the first child Query, generate duplicates of all
- * subquery RTEs, and adjust Var numbering to reference the duplicates.
- * To simplify the loop logic, we scan the original rtable not the
- * copy just made by adjust_appendrel_attrs; that should be OK since
- * subquery RTEs couldn't contain any references to the target rel.
+ * subquery RTEs, and adjust Var numbering to reference the
+ * duplicates. To simplify the loop logic, we scan the original rtable
+ * not the copy just made by adjust_appendrel_attrs; that should be OK
+ * since subquery RTEs couldn't contain any references to the target
+ * rel.
*/
if (final_rtable != NIL)
{
@@ -800,7 +801,7 @@ inheritance_planner(PlannerInfo *root)
if (rte->rtekind == RTE_SUBQUERY)
{
- Index newrti;
+ Index newrti;
/*
* The RTE can't contain any references to its own RT
@@ -849,7 +850,7 @@ inheritance_planner(PlannerInfo *root)
else
final_rtable = list_concat(final_rtable,
list_copy_tail(subroot.parse->rtable,
- list_length(final_rtable)));
+ list_length(final_rtable)));
/*
* We need to collect all the RelOptInfos from all child plans into
@@ -1317,18 +1318,17 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
need_sort_for_grouping = true;
/*
- * Always override create_plan's tlist, so that we don't
- * sort useless data from a "physical" tlist.
+ * Always override create_plan's tlist, so that we don't sort
+ * useless data from a "physical" tlist.
*/
need_tlist_eval = true;
}
/*
- * create_plan returns a plan with just a "flat" tlist of
- * required Vars. Usually we need to insert the sub_tlist as the
- * tlist of the top plan node. However, we can skip that if we
- * determined that whatever create_plan chose to return will be
- * good enough.
+ * create_plan returns a plan with just a "flat" tlist of required
+ * Vars. Usually we need to insert the sub_tlist as the tlist of
+ * the top plan node. However, we can skip that if we determined
+ * that whatever create_plan chose to return will be good enough.
*/
if (need_tlist_eval)
{
@@ -1546,7 +1546,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
*
* Note: it's essential here to use PVC_INCLUDE_AGGREGATES so that
* Vars mentioned only in aggregate expressions aren't pulled out
- * as separate targetlist entries. Otherwise we could be putting
+ * as separate targetlist entries. Otherwise we could be putting
* ungrouped Vars directly into an Agg node's tlist, resulting in
* undefined behavior.
*/
@@ -2653,8 +2653,8 @@ make_subplanTargetList(PlannerInfo *root,
}
/*
- * Otherwise, we must build a tlist containing all grouping columns,
- * plus any other Vars mentioned in the targetlist and HAVING qual.
+ * Otherwise, we must build a tlist containing all grouping columns, plus
+ * any other Vars mentioned in the targetlist and HAVING qual.
*/
sub_tlist = NIL;
non_group_cols = NIL;
@@ -2705,8 +2705,8 @@ make_subplanTargetList(PlannerInfo *root,
else
{
/*
- * Non-grouping column, so just remember the expression
- * for later call to pull_var_clause. There's no need for
+ * Non-grouping column, so just remember the expression for
+ * later call to pull_var_clause. There's no need for
* pull_var_clause to examine the TargetEntry node itself.
*/
non_group_cols = lappend(non_group_cols, tle->expr);
@@ -2733,7 +2733,7 @@ make_subplanTargetList(PlannerInfo *root,
* add them to the result tlist if not already present. (A Var used
* directly as a GROUP BY item will be present already.) Note this
* includes Vars used in resjunk items, so we are covering the needs of
- * ORDER BY and window specifications. Vars used within Aggrefs will be
+ * ORDER BY and window specifications. Vars used within Aggrefs will be
* pulled out here, too.
*/
non_group_vars = pull_var_clause((Node *) non_group_cols,
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index db301e6c595..f375b5f76d4 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -327,7 +327,7 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
break;
case T_IndexOnlyScan:
{
- IndexOnlyScan *splan = (IndexOnlyScan *) plan;
+ IndexOnlyScan *splan = (IndexOnlyScan *) plan;
return set_indexonlyscan_references(root, splan, rtoffset);
}
@@ -573,9 +573,9 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
lcrr, splan->resultRelations,
lcp, splan->plans)
{
- List *rlist = (List *) lfirst(lcrl);
- Index resultrel = lfirst_int(lcrr);
- Plan *subplan = (Plan *) lfirst(lcp);
+ List *rlist = (List *) lfirst(lcrl);
+ Index resultrel = lfirst_int(lcrr);
+ Plan *subplan = (Plan *) lfirst(lcp);
rlist = set_returning_clause_references(root,
rlist,
@@ -590,7 +590,7 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
* Set up the visible plan targetlist as being the same as
* the first RETURNING list. This is for the use of
* EXPLAIN; the executor won't pay any attention to the
- * targetlist. We postpone this step until here so that
+ * targetlist. We postpone this step until here so that
* we don't have to do set_returning_clause_references()
* twice on identical targetlists.
*/
@@ -1885,7 +1885,7 @@ record_plan_function_dependency(PlannerInfo *root, Oid funcid)
*/
inval_item->cacheId = PROCOID;
inval_item->hashValue = GetSysCacheHashValue1(PROCOID,
- ObjectIdGetDatum(funcid));
+ ObjectIdGetDatum(funcid));
root->glob->invalItems = lappend(root->glob->invalItems, inval_item);
}
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index fb6c7045484..8ce6bee8561 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -1822,8 +1822,8 @@ process_sublinks_mutator(Node *node, process_sublinks_context *context)
}
/*
- * Don't recurse into the arguments of an outer PHV or aggregate here.
- * Any SubLinks in the arguments have to be dealt with at the outer query
+ * Don't recurse into the arguments of an outer PHV or aggregate here. Any
+ * SubLinks in the arguments have to be dealt with at the outer query
* level; they'll be handled when build_subplan collects the PHV or Aggref
* into the arguments to be passed down to the current subplan.
*/
diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c
index 47ddae6992f..be1219eb3d1 100644
--- a/src/backend/optimizer/prep/prepjointree.c
+++ b/src/backend/optimizer/prep/prepjointree.c
@@ -332,6 +332,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);
+
/*
* Now recursively process the pulled-up quals. Any inserted
* joins can get stacked onto either j->larg or j->rarg,
@@ -357,6 +358,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);
+
/*
* Now recursively process the pulled-up quals. Any inserted
* joins can get stacked onto either j->larg or j->rarg,
@@ -384,6 +386,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);
+
/*
* Now recursively process the pulled-up quals. Any inserted
* joins can get stacked onto either j->larg or j->rarg,
@@ -409,6 +412,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);
+
/*
* Now recursively process the pulled-up quals. Any inserted
* joins can get stacked onto either j->larg or j->rarg,
@@ -439,7 +443,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
if (sublink->subLinkType == EXISTS_SUBLINK)
{
if ((j = convert_EXISTS_sublink_to_join(root, sublink, true,
- available_rels1)) != NULL)
+ available_rels1)) != NULL)
{
/* Yes; insert the new join node into the join tree */
j->larg = *jtlink1;
@@ -448,11 +452,12 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);
+
/*
* Now recursively process the pulled-up quals. Because
- * we are underneath a NOT, we can't pull up sublinks
- * that reference the left-hand stuff, but it's still
- * okay to pull up sublinks referencing j->rarg.
+ * we are underneath a NOT, we can't pull up sublinks that
+ * reference the left-hand stuff, but it's still okay to
+ * pull up sublinks referencing j->rarg.
*/
j->quals = pull_up_sublinks_qual_recurse(root,
j->quals,
@@ -464,7 +469,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
}
if (available_rels2 != NULL &&
(j = convert_EXISTS_sublink_to_join(root, sublink, true,
- available_rels2)) != NULL)
+ available_rels2)) != NULL)
{
/* Yes; insert the new join node into the join tree */
j->larg = *jtlink2;
@@ -473,11 +478,12 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);
+
/*
* Now recursively process the pulled-up quals. Because
- * we are underneath a NOT, we can't pull up sublinks
- * that reference the left-hand stuff, but it's still
- * okay to pull up sublinks referencing j->rarg.
+ * we are underneath a NOT, we can't pull up sublinks that
+ * reference the left-hand stuff, but it's still okay to
+ * pull up sublinks referencing j->rarg.
*/
j->quals = pull_up_sublinks_qual_recurse(root,
j->quals,
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index 7b6d12de38a..6475633ae7d 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -150,9 +150,9 @@ plan_set_operations(PlannerInfo *root, double tuple_fraction,
Assert(parse->distinctClause == NIL);
/*
- * We'll need to build RelOptInfos for each of the leaf subqueries,
- * which are RTE_SUBQUERY rangetable entries in this Query. Prepare the
- * index arrays for that.
+ * We'll need to build RelOptInfos for each of the leaf subqueries, which
+ * are RTE_SUBQUERY rangetable entries in this Query. Prepare the index
+ * arrays for that.
*/
setup_simple_rel_arrays(root);
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index 344ebb79891..73f5e11abef 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -113,7 +113,7 @@ static Expr *simplify_function(Oid funcid,
bool process_args, bool allow_non_const,
eval_const_expressions_context *context);
static List *expand_function_arguments(List *args, Oid result_type,
- HeapTuple func_tuple);
+ HeapTuple func_tuple);
static List *reorder_function_arguments(List *args, HeapTuple func_tuple);
static List *add_function_defaults(List *args, HeapTuple func_tuple);
static List *fetch_function_defaults(HeapTuple func_tuple);
@@ -181,7 +181,7 @@ make_opclause(Oid opno, Oid opresulttype, bool opretset,
Node *
get_leftop(const Expr *clause)
{
- const OpExpr *expr = (const OpExpr *) clause;
+ const OpExpr *expr = (const OpExpr *) clause;
if (expr->args != NIL)
return linitial(expr->args);
@@ -198,7 +198,7 @@ get_leftop(const Expr *clause)
Node *
get_rightop(const Expr *clause)
{
- const OpExpr *expr = (const OpExpr *) clause;
+ const OpExpr *expr = (const OpExpr *) clause;
if (list_length(expr->args) >= 2)
return lsecond(expr->args);
@@ -1128,15 +1128,15 @@ contain_nonstrict_functions_walker(Node *node, void *context)
}
/*****************************************************************************
- * Check clauses for non-leakproof functions
+ * Check clauses for non-leakproof functions
*****************************************************************************/
/*
* contain_leaky_functions
- * Recursively search for leaky functions within a clause.
+ * Recursively search for leaky functions within a clause.
*
 * Returns true if any function call with side effects may be present in the
- * clause. Qualifiers from outside the a security_barrier view should not
+ * clause.  Qualifiers from outside a security_barrier view should not
* be pushed down into the view, lest the contents of tuples intended to be
* filtered out be revealed via side effects.
*/
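
The rule stated above (report true as soon as anything that is not marked leakproof might run) is a short-circuiting recursive walk. A minimal standalone sketch with invented toy node types, not PostgreSQL's; the real contain_leaky_functions_walker, in the hunks below, dispatches on many node tags and treats unrecognized ones as leaky:

    #include <stdio.h>
    #include <stddef.h>

    typedef enum { N_VAR, N_CONST, N_FUNC } ToyTag;

    typedef struct ToyNode
    {
        ToyTag      tag;
        int         func_is_leakproof; /* meaningful only for N_FUNC */
        struct ToyNode *args[2];       /* up to two children */
    } ToyNode;

    /* Return 1 if the tree might call a non-leakproof function.
     * Short-circuits on the first hit, like the real walker. */
    static int
    contains_leaky(const ToyNode *node)
    {
        int     i;

        if (node == NULL)
            return 0;
        if (node->tag == N_FUNC && !node->func_is_leakproof)
            return 1;
        for (i = 0; i < 2; i++)
            if (contains_leaky(node->args[i]))
                return 1;
        return 0;
    }

    int
    main(void)
    {
        ToyNode var = { N_VAR, 0, {NULL, NULL} };
        ToyNode fn = { N_FUNC, 0, {&var, NULL} }; /* not leakproof */

        printf("%d\n", contains_leaky(&fn)); /* 1: keep quals above it */
        return 0;
    }
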
@@ -1155,8 +1155,8 @@ contain_leaky_functions_walker(Node *node, void *context)
switch (nodeTag(node))
{
case T_Var:
- case T_Const:
- case T_Param:
+ case T_Const:
+ case T_Param:
case T_ArrayExpr:
case T_NamedArgExpr:
case T_BoolExpr:
@@ -1168,6 +1168,7 @@ contain_leaky_functions_walker(Node *node, void *context)
case T_NullTest:
case T_BooleanTest:
case T_List:
+
/*
* We know these node types don't contain function calls; but
* something further down in the node tree might.
@@ -1176,7 +1177,7 @@ contain_leaky_functions_walker(Node *node, void *context)
case T_FuncExpr:
{
- FuncExpr *expr = (FuncExpr *) node;
+ FuncExpr *expr = (FuncExpr *) node;
if (!get_func_leakproof(expr->funcid))
return true;
@@ -1187,7 +1188,7 @@ contain_leaky_functions_walker(Node *node, void *context)
case T_DistinctExpr: /* struct-equivalent to OpExpr */
case T_NullIfExpr: /* struct-equivalent to OpExpr */
{
- OpExpr *expr = (OpExpr *) node;
+ OpExpr *expr = (OpExpr *) node;
set_opfuncid(expr);
if (!get_func_leakproof(expr->opfuncid))
@@ -1208,11 +1209,11 @@ contain_leaky_functions_walker(Node *node, void *context)
case T_CoerceViaIO:
{
CoerceViaIO *expr = (CoerceViaIO *) node;
- Oid funcid;
- Oid ioparam;
- bool varlena;
+ Oid funcid;
+ Oid ioparam;
+ bool varlena;
- getTypeInputInfo(exprType((Node *)expr->arg),
+ getTypeInputInfo(exprType((Node *) expr->arg),
&funcid, &ioparam);
if (!get_func_leakproof(funcid))
return true;
@@ -1226,11 +1227,11 @@ contain_leaky_functions_walker(Node *node, void *context)
case T_ArrayCoerceExpr:
{
ArrayCoerceExpr *expr = (ArrayCoerceExpr *) node;
- Oid funcid;
- Oid ioparam;
- bool varlena;
+ Oid funcid;
+ Oid ioparam;
+ bool varlena;
- getTypeInputInfo(exprType((Node *)expr->arg),
+ getTypeInputInfo(exprType((Node *) expr->arg),
&funcid, &ioparam);
if (!get_func_leakproof(funcid))
return true;
@@ -1247,7 +1248,7 @@ contain_leaky_functions_walker(Node *node, void *context)
foreach(opid, rcexpr->opnos)
{
- Oid funcid = get_opcode(lfirst_oid(opid));
+ Oid funcid = get_opcode(lfirst_oid(opid));
if (!get_func_leakproof(funcid))
return true;
@@ -1256,6 +1257,7 @@ contain_leaky_functions_walker(Node *node, void *context)
break;
default:
+
/*
* If we don't recognize the node tag, assume it might be leaky.
* This prevents an unexpected security hole if someone adds a new
@@ -2683,7 +2685,7 @@ eval_const_expressions_mutator(Node *node,
-1,
InvalidOid,
sizeof(Oid),
- ObjectIdGetDatum(intypioparam),
+ ObjectIdGetDatum(intypioparam),
false,
true),
makeConst(INT4OID,
@@ -2812,13 +2814,13 @@ eval_const_expressions_mutator(Node *node,
* TRUE: drop all remaining alternatives
* If the first non-FALSE alternative is a constant TRUE,
* we can simplify the entire CASE to that alternative's
- * expression. If there are no non-FALSE alternatives,
+ * expression. If there are no non-FALSE alternatives,
* we simplify the entire CASE to the default result (ELSE).
*
* If we have a simple-form CASE with constant test
* expression, we substitute the constant value for contained
* CaseTestExpr placeholder nodes, so that we have the
- * opportunity to reduce constant test conditions. For
+ * opportunity to reduce constant test conditions. For
* example this allows
* CASE 0 WHEN 0 THEN 1 ELSE 1/0 END
* to reduce to 1 rather than drawing a divide-by-0 error.
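
The folding rules in the comment above are easy to demonstrate outside the planner. A minimal sketch, assuming a toy encoding in which each WHEN arm's condition has already been constant-folded to FALSE, TRUE, or unknown; none of these types exist in PostgreSQL, where the real logic walks CaseExpr trees inside eval_const_expressions_mutator:

    #include <stdio.h>

    /* Tri-state for a WHEN condition after constant-folding. */
    typedef enum { W_FALSE, W_TRUE, W_UNKNOWN } WhenConst;

    typedef struct { WhenConst cond; int result; } ToyWhen;

    /* Apply the rules from the comment above: drop constant-FALSE arms;
     * if the first surviving arm is constant TRUE, the CASE becomes that
     * arm's result; if no arms survive, it becomes the ELSE default.
     * Returns 1 if the whole CASE folded to a constant. */
    static int
    fold_case(const ToyWhen *whens, int nwhens, int elseval, int *folded)
    {
        int     i;

        for (i = 0; i < nwhens; i++)
        {
            if (whens[i].cond == W_FALSE)
                continue;       /* FALSE: drop this alternative */
            if (whens[i].cond == W_TRUE)
            {
                *folded = whens[i].result;  /* TRUE: CASE is this arm */
                return 1;
            }
            return 0;           /* non-constant arm: cannot fold fully */
        }
        *folded = elseval;      /* no non-FALSE arms: use the default */
        return 1;
    }

    int
    main(void)
    {
        /* CASE 0 WHEN 0 THEN 1 ELSE 1/0 END: the arm folds to TRUE, so
         * the CASE reduces to 1 and the 1/0 default is never reached. */
        ToyWhen whens[] = {{W_TRUE, 1}};
        int     v;

        if (fold_case(whens, 1, -1, &v))
            printf("folds to %d\n", v); /* prints "folds to 1" */
        return 0;
    }
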
@@ -3581,12 +3583,12 @@ simplify_function(Oid funcid, Oid result_type, int32 result_typmod,
* deliver a constant result, use a transform function to generate a
* substitute node tree, or expand in-line the body of the function
* definition (which only works for simple SQL-language functions, but
- * that is a common case). Each case needs access to the function's
+ * that is a common case). Each case needs access to the function's
* pg_proc tuple, so fetch it just once.
*
* Note: the allow_non_const flag suppresses both the second and third
- * strategies; so if !allow_non_const, simplify_function can only return
- * a Const or NULL. Argument-list rewriting happens anyway, though.
+ * strategies; so if !allow_non_const, simplify_function can only return a
+ * Const or NULL. Argument-list rewriting happens anyway, though.
*/
func_tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
if (!HeapTupleIsValid(func_tuple))
@@ -3603,7 +3605,7 @@ simplify_function(Oid funcid, Oid result_type, int32 result_typmod,
{
args = expand_function_arguments(args, result_type, func_tuple);
args = (List *) expression_tree_mutator((Node *) args,
- eval_const_expressions_mutator,
+ eval_const_expressions_mutator,
(void *) context);
/* Argument processing done, give it back to the caller */
*args_p = args;
@@ -3618,7 +3620,7 @@ simplify_function(Oid funcid, Oid result_type, int32 result_typmod,
if (!newexpr && allow_non_const && OidIsValid(func_form->protransform))
{
/*
- * Build a dummy FuncExpr node containing the simplified arg list. We
+ * Build a dummy FuncExpr node containing the simplified arg list. We
* use this approach to present a uniform interface to the transform
* function regardless of how the function is actually being invoked.
*/
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 61502aa6425..00052f5c846 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -128,11 +128,11 @@ compare_fractional_path_costs(Path *path1, Path *path2,
*
* The fuzz_factor argument must be 1.0 plus delta, where delta is the
* fraction of the smaller cost that is considered to be a significant
- * difference. For example, fuzz_factor = 1.01 makes the fuzziness limit
+ * difference. For example, fuzz_factor = 1.01 makes the fuzziness limit
* be 1% of the smaller cost.
*
* The two paths are said to have "equal" costs if both startup and total
- * costs are fuzzily the same. Path1 is said to be better than path2 if
+ * costs are fuzzily the same. Path1 is said to be better than path2 if
* it has fuzzily better startup cost and fuzzily no worse total cost,
* or if it has fuzzily better total cost and fuzzily no worse startup cost.
* Path2 is better than path1 if the reverse holds. Finally, if one path
@@ -190,9 +190,9 @@ compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor)
* and save them in the rel's cheapest-path fields.
*
* Only unparameterized paths are considered candidates for cheapest_startup
- * and cheapest_total. The cheapest_parameterized_paths list collects paths
+ * and cheapest_total. The cheapest_parameterized_paths list collects paths
* that are cheapest-total for their parameterization (i.e., there is no
- * cheaper path with the same or weaker parameterization). This list always
+ * cheaper path with the same or weaker parameterization). This list always
* includes the unparameterized cheapest-total path, too.
*
* This is normally called only after we've finished constructing the path
@@ -294,8 +294,8 @@ set_cheapest(RelOptInfo *parent_rel)
*
* There is one policy decision embedded in this function, along with its
* sibling add_path_precheck: we treat all parameterized paths as having
- * NIL pathkeys, so that they compete only on cost. This is to reduce
- * the number of parameterized paths that are kept. See discussion in
+ * NIL pathkeys, so that they compete only on cost. This is to reduce
+ * the number of parameterized paths that are kept. See discussion in
* src/backend/optimizer/README.
*
* The pathlist is kept sorted by total_cost, with cheaper paths
@@ -358,7 +358,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
p1_next = lnext(p1);
/*
- * Do a fuzzy cost comparison with 1% fuzziness limit. (XXX does this
+ * Do a fuzzy cost comparison with 1% fuzziness limit. (XXX does this
* percentage need to be user-configurable?)
*/
costcmp = compare_path_costs_fuzzily(new_path, old_path, 1.01);
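
A note on the 1.01 above: a fuzz_factor of 1.0 + delta treats two costs as equal unless one exceeds the other by more than delta. A minimal standalone sketch of the comparison contract documented earlier; the COSTS_* names mirror the enum constants used in these hunks, but the struct and everything else here are invented stand-ins:

    #include <stdio.h>

    typedef enum { COSTS_EQUAL, COSTS_BETTER1, COSTS_BETTER2,
                   COSTS_DIFFERENT } ToyCostComparison;

    typedef struct { double startup_cost; double total_cost; } ToyPath;

    /* A cost counts as worse only if it exceeds the other by more than
     * the fuzz factor; otherwise the two are "fuzzily the same". */
    static ToyCostComparison
    toy_compare_costs_fuzzily(const ToyPath *p1, const ToyPath *p2, double fuzz)
    {
        int     better1 = 0, better2 = 0;

        if (p1->total_cost > p2->total_cost * fuzz)
            better2++;
        else if (p2->total_cost > p1->total_cost * fuzz)
            better1++;
        if (p1->startup_cost > p2->startup_cost * fuzz)
            better2++;
        else if (p2->startup_cost > p1->startup_cost * fuzz)
            better1++;

        if (better1 && better2)
            return COSTS_DIFFERENT; /* each path wins on one metric */
        if (better1)
            return COSTS_BETTER1;
        if (better2)
            return COSTS_BETTER2;
        return COSTS_EQUAL;
    }

    int
    main(void)
    {
        ToyPath a = {100.0, 1000.0};
        ToyPath b = {100.5, 1005.0};    /* within 1% on both metrics */

        printf("%d\n", toy_compare_costs_fuzzily(&a, &b, 1.01)); /* 0 = equal */
        return 0;
    }
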
@@ -388,20 +388,20 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
{
case COSTS_EQUAL:
outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
- PATH_REQ_OUTER(old_path));
+ PATH_REQ_OUTER(old_path));
if (keyscmp == PATHKEYS_BETTER1)
{
if ((outercmp == BMS_EQUAL ||
outercmp == BMS_SUBSET1) &&
new_path->rows <= old_path->rows)
- remove_old = true; /* new dominates old */
+ remove_old = true; /* new dominates old */
}
else if (keyscmp == PATHKEYS_BETTER2)
{
if ((outercmp == BMS_EQUAL ||
outercmp == BMS_SUBSET2) &&
new_path->rows >= old_path->rows)
- accept_new = false; /* old dominates new */
+ accept_new = false; /* old dominates new */
}
else /* keyscmp == PATHKEYS_EQUAL */
{
@@ -425,19 +425,20 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
if (new_path->rows < old_path->rows)
remove_old = true; /* new dominates old */
else if (new_path->rows > old_path->rows)
- accept_new = false; /* old dominates new */
+ accept_new = false; /* old dominates new */
else if (compare_path_costs_fuzzily(new_path, old_path,
- 1.0000000001) == COSTS_BETTER1)
+ 1.0000000001) == COSTS_BETTER1)
remove_old = true; /* new dominates old */
else
- accept_new = false; /* old equals or dominates new */
+ accept_new = false; /* old equals or
+ * dominates new */
}
else if (outercmp == BMS_SUBSET1 &&
new_path->rows <= old_path->rows)
- remove_old = true; /* new dominates old */
+ remove_old = true; /* new dominates old */
else if (outercmp == BMS_SUBSET2 &&
new_path->rows >= old_path->rows)
- accept_new = false; /* old dominates new */
+ accept_new = false; /* old dominates new */
/* else different parameterizations, keep both */
}
break;
@@ -445,25 +446,26 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
if (keyscmp != PATHKEYS_BETTER2)
{
outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
- PATH_REQ_OUTER(old_path));
+ PATH_REQ_OUTER(old_path));
if ((outercmp == BMS_EQUAL ||
outercmp == BMS_SUBSET1) &&
new_path->rows <= old_path->rows)
- remove_old = true; /* new dominates old */
+ remove_old = true; /* new dominates old */
}
break;
case COSTS_BETTER2:
if (keyscmp != PATHKEYS_BETTER1)
{
outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
- PATH_REQ_OUTER(old_path));
+ PATH_REQ_OUTER(old_path));
if ((outercmp == BMS_EQUAL ||
outercmp == BMS_SUBSET2) &&
new_path->rows >= old_path->rows)
- accept_new = false; /* old dominates new */
+ accept_new = false; /* old dominates new */
}
break;
case COSTS_DIFFERENT:
+
/*
* can't get here, but keep this case to keep compiler
* quiet
@@ -529,7 +531,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
* and have lower bounds for its costs.
*
* Note that we do not know the path's rowcount, since getting an estimate for
- * that is too expensive to do before prechecking. We assume here that paths
+ * that is too expensive to do before prechecking. We assume here that paths
* of a superset parameterization will generate fewer rows; if that holds,
* then paths with different parameterizations cannot dominate each other
* and so we can simply ignore existing paths of another parameterization.
@@ -561,9 +563,9 @@ add_path_precheck(RelOptInfo *parent_rel,
* pathkeys as well as both cost metrics. If we find one, we can
* reject the new path.
*
- * For speed, we make exact rather than fuzzy cost comparisons.
- * If an old path dominates the new path exactly on both costs, it
- * will surely do so fuzzily.
+ * For speed, we make exact rather than fuzzy cost comparisons. If an
+ * old path dominates the new path exactly on both costs, it will
+ * surely do so fuzzily.
*/
if (total_cost >= old_path->total_cost)
{
@@ -588,9 +590,9 @@ add_path_precheck(RelOptInfo *parent_rel,
else
{
/*
- * Since the pathlist is sorted by total_cost, we can stop
- * looking once we reach a path with a total_cost larger
- * than the new path's.
+ * Since the pathlist is sorted by total_cost, we can stop looking
+ * once we reach a path with a total_cost larger than the new
+ * path's.
*/
break;
}
@@ -652,26 +654,26 @@ add_parameterized_path(RelOptInfo *parent_rel, Path *new_path)
{
if (outercmp != BMS_SUBSET2 &&
new_path->rows <= old_path->rows)
- remove_old = true; /* new dominates old */
+ remove_old = true; /* new dominates old */
}
else if (costcmp > 0)
{
if (outercmp != BMS_SUBSET1 &&
new_path->rows >= old_path->rows)
- accept_new = false; /* old dominates new */
+ accept_new = false; /* old dominates new */
}
else if (outercmp == BMS_SUBSET1 &&
new_path->rows <= old_path->rows)
- remove_old = true; /* new dominates old */
+ remove_old = true; /* new dominates old */
else if (outercmp == BMS_SUBSET2 &&
new_path->rows >= old_path->rows)
- accept_new = false; /* old dominates new */
+ accept_new = false; /* old dominates new */
else if (new_path->rows < old_path->rows)
- remove_old = true; /* new dominates old */
+ remove_old = true; /* new dominates old */
else
{
/* Same cost, rows, and param rels; arbitrarily keep old */
- accept_new = false; /* old equals or dominates new */
+ accept_new = false; /* old equals or dominates new */
}
}
@@ -697,8 +699,8 @@ add_parameterized_path(RelOptInfo *parent_rel, Path *new_path)
/*
* If we found an old path that dominates new_path, we can quit
- * scanning the list; we will not add new_path, and we assume
- * new_path cannot dominate any other elements of the list.
+ * scanning the list; we will not add new_path, and we assume new_path
+ * cannot dominate any other elements of the list.
*/
if (!accept_new)
break;
@@ -940,7 +942,7 @@ create_append_path(RelOptInfo *rel, List *subpaths, Relids required_outer)
* Compute rows and costs as sums of subplan rows and costs. We charge
* nothing extra for the Append itself, which perhaps is too optimistic,
* but since it doesn't do any selection or projection, it is a pretty
- * cheap node. If you change this, see also make_append().
+ * cheap node. If you change this, see also make_append().
*/
pathnode->path.rows = 0;
pathnode->path.startup_cost = 0;
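
The costing rule in the comment above fits in a dozen lines. A standalone sketch with toy types; taking startup cost from the first child (so the Append can return its first row as soon as its first child can) is this sketch's assumption about behavior the hunk does not show:

    #include <stdio.h>

    typedef struct { double rows; double startup_cost; double total_cost; } ToyPath;

    /* Rows and total cost are plain sums, and nothing extra is charged
     * for the Append itself, per the comment above. */
    static ToyPath
    toy_append_costs(const ToyPath *children, int nchildren)
    {
        ToyPath result = {0.0, 0.0, 0.0};
        int     i;

        for (i = 0; i < nchildren; i++)
        {
            result.rows += children[i].rows;
            result.total_cost += children[i].total_cost;
            if (i == 0)
                result.startup_cost = children[i].startup_cost;
        }
        return result;
    }

    int
    main(void)
    {
        ToyPath kids[] = {{100, 1.0, 50.0}, {200, 2.0, 80.0}};
        ToyPath ap = toy_append_costs(kids, 2);

        printf("rows=%.0f startup=%.1f total=%.1f\n",
               ap.rows, ap.startup_cost, ap.total_cost); /* 300, 1.0, 130.0 */
        return 0;
    }
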
@@ -1772,9 +1774,9 @@ create_foreignscan_path(PlannerInfo *root, RelOptInfo *rel,
Relids
calc_nestloop_required_outer(Path *outer_path, Path *inner_path)
{
- Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
- Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
- Relids required_outer;
+ Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
+ Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
+ Relids required_outer;
/* inner_path can require rels from outer path, but not vice versa */
Assert(!bms_overlap(outer_paramrels, inner_path->parent->relids));
@@ -1804,9 +1806,9 @@ calc_nestloop_required_outer(Path *outer_path, Path *inner_path)
Relids
calc_non_nestloop_required_outer(Path *outer_path, Path *inner_path)
{
- Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
- Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
- Relids required_outer;
+ Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
+ Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
+ Relids required_outer;
/* neither path can require rels from the other */
Assert(!bms_overlap(outer_paramrels, inner_path->parent->relids));
@@ -1853,9 +1855,9 @@ create_nestloop_path(PlannerInfo *root,
/*
* If the inner path is parameterized by the outer, we must drop any
- * restrict_clauses that are due to be moved into the inner path. We
- * have to do this now, rather than postpone the work till createplan
- * time, because the restrict_clauses list can affect the size and cost
+ * restrict_clauses that are due to be moved into the inner path. We have
+ * to do this now, rather than postpone the work till createplan time,
+ * because the restrict_clauses list can affect the size and cost
* estimates for this path.
*/
if (bms_overlap(inner_req_outer, outer_path->parent->relids))
@@ -2033,7 +2035,7 @@ create_hashjoin_path(PlannerInfo *root,
* same parameterization level, ensuring that they all enforce the same set
* of join quals (and thus that that parameterization can be attributed to
* an append path built from such paths). Currently, only a few path types
- * are supported here, though more could be added at need. We return NULL
+ * are supported here, though more could be added at need. We return NULL
* if we can't reparameterize the given path.
*
* Note: we intentionally do not pass created paths to add_path(); it would
@@ -2058,32 +2060,33 @@ reparameterize_path(PlannerInfo *root, Path *path,
return create_seqscan_path(root, rel, required_outer);
case T_IndexScan:
case T_IndexOnlyScan:
- {
- IndexPath *ipath = (IndexPath *) path;
- IndexPath *newpath = makeNode(IndexPath);
+ {
+ IndexPath *ipath = (IndexPath *) path;
+ IndexPath *newpath = makeNode(IndexPath);
- /*
- * We can't use create_index_path directly, and would not want to
- * because it would re-compute the indexqual conditions which is
- * wasted effort. Instead we hack things a bit: flat-copy the
- * path node, revise its param_info, and redo the cost estimate.
- */
- memcpy(newpath, ipath, sizeof(IndexPath));
- newpath->path.param_info =
- get_baserel_parampathinfo(root, rel, required_outer);
- cost_index(newpath, root, loop_count);
- return (Path *) newpath;
- }
+ /*
+ * We can't use create_index_path directly, and would not want
+ * to because it would re-compute the indexqual conditions
+ * which is wasted effort. Instead we hack things a bit:
+ * flat-copy the path node, revise its param_info, and redo
+ * the cost estimate.
+ */
+ memcpy(newpath, ipath, sizeof(IndexPath));
+ newpath->path.param_info =
+ get_baserel_parampathinfo(root, rel, required_outer);
+ cost_index(newpath, root, loop_count);
+ return (Path *) newpath;
+ }
case T_BitmapHeapScan:
- {
- BitmapHeapPath *bpath = (BitmapHeapPath *) path;
+ {
+ BitmapHeapPath *bpath = (BitmapHeapPath *) path;
- return (Path *) create_bitmap_heap_path(root,
- rel,
- bpath->bitmapqual,
- required_outer,
- loop_count);
- }
+ return (Path *) create_bitmap_heap_path(root,
+ rel,
+ bpath->bitmapqual,
+ required_outer,
+ loop_count);
+ }
case T_SubqueryScan:
return create_subqueryscan_path(root, rel, path->pathkeys,
required_outer);
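
The flat-copy hack described in the reindented comment above is an ordinary C pattern: memcpy the node, patch the one field that changes, recompute what depends on it. A schematic with invented types; nothing below is the real IndexPath, get_baserel_parampathinfo, or cost_index:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct { int dummy; } ToyParamInfo;

    typedef struct
    {
        double      total_cost;
        ToyParamInfo *param_info;
        int         indexquals;   /* stands in for expensive-to-rebuild state */
    } ToyIndexPath;

    static void
    toy_cost_index(ToyIndexPath *p)
    {
        p->total_cost = 42.0;     /* placeholder recosting */
    }

    /* Flat-copy, revise the parameterization, redo the cost estimate.
     * Rebuilding from scratch would redo the indexqual work the original
     * path already paid for; copying keeps it. */
    static ToyIndexPath *
    toy_reparameterize(const ToyIndexPath *ipath, ToyParamInfo *required_outer)
    {
        ToyIndexPath *newpath = malloc(sizeof(ToyIndexPath));

        memcpy(newpath, ipath, sizeof(ToyIndexPath));
        newpath->param_info = required_outer;
        toy_cost_index(newpath);
        return newpath;
    }

    int
    main(void)
    {
        ToyParamInfo pi = {1};
        ToyIndexPath orig = {10.0, NULL, 7};
        ToyIndexPath *np = toy_reparameterize(&orig, &pi);

        printf("%.1f\n", np->total_cost); /* 42.0, quals carried over */
        free(np);
        return 0;
    }
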
diff --git a/src/backend/optimizer/util/placeholder.c b/src/backend/optimizer/util/placeholder.c
index 93f1c2cdfa4..e05c8ddef1b 100644
--- a/src/backend/optimizer/util/placeholder.c
+++ b/src/backend/optimizer/util/placeholder.c
@@ -61,7 +61,7 @@ make_placeholder_expr(PlannerInfo *root, Expr *expr, Relids phrels)
* We build PlaceHolderInfos only for PHVs that are still present in the
* simplified query passed to query_planner().
*
- * Note: this should only be called after query_planner() has started. Also,
+ * Note: this should only be called after query_planner() has started. Also,
* create_new_ph must not be TRUE after deconstruct_jointree begins, because
* make_outerjoininfo assumes that we already know about all placeholders.
*/
@@ -259,7 +259,7 @@ mark_placeholder_maybe_needed(PlannerInfo *root, PlaceHolderInfo *phinfo,
* but they aren't going to be needed where the outer PHV is referenced.
* Rather, they'll be needed where the outer PHV is evaluated. We can
* estimate that (conservatively) as the syntactic location of the PHV's
- * expression. Recurse to take care of any such PHVs.
+ * expression. Recurse to take care of any such PHVs.
*/
mark_placeholders_in_expr(root, (Node *) phinfo->ph_var->phexpr,
phinfo->ph_var->phrels);
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index aaf288a50e9..38b81a05ff7 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -341,7 +341,7 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
}
else
{
- double allvisfrac; /* dummy */
+ double allvisfrac; /* dummy */
estimate_rel_size(indexRelation, NULL,
&info->pages, &info->tuples, &allvisfrac);
@@ -403,12 +403,12 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
* minimum size estimate of 10 pages. The idea here is to avoid
* assuming a newly-created table is really small, even if it
* currently is, because that may not be true once some data gets
- * loaded into it. Once a vacuum or analyze cycle has been done
+ * loaded into it. Once a vacuum or analyze cycle has been done
* on it, it's more reasonable to believe the size is somewhat
* stable.
*
* (Note that this is only an issue if the plan gets cached and
- * used again after the table has been filled. What we're trying
+ * used again after the table has been filled. What we're trying
* to avoid is using a nestloop-type plan on a table that has
* grown substantially since the plan was made. Normally,
* autovacuum/autoanalyze will occur once enough inserts have
@@ -965,7 +965,7 @@ build_index_tlist(PlannerInfo *root, IndexOptInfo *index,
if (indexkey < 0)
att_tup = SystemAttributeDefinition(indexkey,
- heapRelation->rd_rel->relhasoids);
+ heapRelation->rd_rel->relhasoids);
else
att_tup = heapRelation->rd_att->attrs[indexkey - 1];
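
The ten-page floor described in the estimate_rel_size hunk above amounts to a one-line clamp. A sketch under the assumption that "relpages still zero" is a usable proxy for "no vacuum or analyze cycle yet"; the real test consults more relation state than this:

    #include <stdio.h>

    typedef unsigned int BlockNumber;

    /* Never believe a never-vacuumed table is smaller than 10 pages, so
     * a plan cached against an empty table is not catastrophic once data
     * gets loaded into it. */
    static BlockNumber
    toy_clamp_rel_pages(BlockNumber curpages, BlockNumber relpages_from_stats)
    {
        if (relpages_from_stats == 0 && curpages < 10)
            return 10;
        return curpages;
    }

    int
    main(void)
    {
        printf("%u\n", toy_clamp_rel_pages(0, 0)); /* 10: assume growth */
        printf("%u\n", toy_clamp_rel_pages(3, 3)); /* 3: stats exist, trust them */
        return 0;
    }
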
diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c
index c3161c5293b..65d191e5d3c 100644
--- a/src/backend/optimizer/util/predtest.c
+++ b/src/backend/optimizer/util/predtest.c
@@ -1624,7 +1624,7 @@ get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it)
clause_op_infos = get_op_btree_interpretation(clause_op);
if (clause_op_infos)
pred_op_infos = get_op_btree_interpretation(pred_op);
- else /* no point in looking */
+ else /* no point in looking */
pred_op_infos = NIL;
foreach(lcp, pred_op_infos)
diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c
index bfdd9ff222c..8d4ab03d20e 100644
--- a/src/backend/optimizer/util/relnode.c
+++ b/src/backend/optimizer/util/relnode.c
@@ -840,12 +840,12 @@ get_joinrel_parampathinfo(PlannerInfo *root, RelOptInfo *joinrel,
outer_and_req = bms_union(outer_path->parent->relids,
PATH_REQ_OUTER(outer_path));
else
- outer_and_req = NULL; /* outer path does not accept parameters */
+ outer_and_req = NULL; /* outer path does not accept parameters */
if (inner_path->param_info)
inner_and_req = bms_union(inner_path->parent->relids,
PATH_REQ_OUTER(inner_path));
else
- inner_and_req = NULL; /* inner path does not accept parameters */
+ inner_and_req = NULL; /* inner path does not accept parameters */
pclauses = NIL;
foreach(lc, joinrel->joininfo)
@@ -909,7 +909,7 @@ get_joinrel_parampathinfo(PlannerInfo *root, RelOptInfo *joinrel,
*restrict_clauses);
/*
- * And now we can build the ParamPathInfo. No point in saving the
+ * And now we can build the ParamPathInfo. No point in saving the
* input-pair-dependent clause list, though.
*
* Note: in GEQO mode, we'll be called in a temporary memory context, but
@@ -929,8 +929,8 @@ get_joinrel_parampathinfo(PlannerInfo *root, RelOptInfo *joinrel,
* Get the ParamPathInfo for a parameterized path for an append relation.
*
* For an append relation, the rowcount estimate will just be the sum of
- * the estimates for its children. However, we still need a ParamPathInfo
- * to flag the fact that the path requires parameters. So this just creates
+ * the estimates for its children. However, we still need a ParamPathInfo
+ * to flag the fact that the path requires parameters. So this just creates
* a suitable struct with zero ppi_rows (and no ppi_clauses either, since
* the Append node isn't responsible for checking quals).
*/
diff --git a/src/backend/optimizer/util/var.c b/src/backend/optimizer/util/var.c
index 2bffb0a651e..9bc90c25313 100644
--- a/src/backend/optimizer/util/var.c
+++ b/src/backend/optimizer/util/var.c
@@ -218,7 +218,7 @@ pull_varattnos_walker(Node *node, pull_varattnos_context *context)
if (var->varno == context->varno && var->varlevelsup == 0)
context->varattnos =
bms_add_member(context->varattnos,
- var->varattno - FirstLowInvalidHeapAttributeNumber);
+ var->varattno - FirstLowInvalidHeapAttributeNumber);
return false;
}
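
The subtraction in this last hunk is the standard trick for fitting system attributes, whose attribute numbers are negative, into a bitmapset that holds only non-negative members: subtracting the negative constant shifts every attno into range. A sketch assuming -8 as the offset; the real FirstLowInvalidHeapAttributeNumber is defined in the catalog headers and its value is version-dependent:

    #include <stdio.h>

    /* Assumed stand-in for FirstLowInvalidHeapAttributeNumber. */
    #define TOY_FIRST_LOW_INVALID (-8)

    /* Map an attribute number (system attrs negative, user attrs from 1)
     * to a non-negative bitmapset index. */
    static int
    attno_to_bitmap_index(int varattno)
    {
        return varattno - TOY_FIRST_LOW_INVALID;
    }

    int
    main(void)
    {
        printf("%d\n", attno_to_bitmap_index(-7)); /* system column -> 1 */
        printf("%d\n", attno_to_bitmap_index(1));  /* first user column -> 9 */
        return 0;
    }
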