* of partial_pathlist because of the way add_partial_path works.
*/
cheapest_partial_path = linitial(rel->partial_pathlist);
- rows =
- cheapest_partial_path->rows * cheapest_partial_path->parallel_workers;
+ rows = compute_gather_rows(cheapest_partial_path);
simple_gather_path = (Path *)
create_gather_path(root, rel, cheapest_partial_path, rel->reltarget,
NULL, rowsp);
@@ ... @@ generate_gather_paths
if (subpath->pathkeys == NIL)
continue;
- rows = subpath->rows * subpath->parallel_workers;
+ rows = compute_gather_rows(subpath);
path = create_gather_merge_path(root, rel, subpath, rel->reltarget,
subpath->pathkeys, NULL, rowsp);
add_path(rel, &path->path);
@@ ... @@ generate_useful_gather_paths
subpath,
useful_pathkeys,
-1.0);
- rows = subpath->rows * subpath->parallel_workers;
}
else
subpath = (Path *) create_incremental_sort_path(root,
rel,
subpath,
useful_pathkeys,
presorted_keys,
-1);
+ rows = compute_gather_rows(subpath);
path = create_gather_merge_path(root, rel,
subpath,
rel->reltarget,
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ ... @@ compute_bitmap_pages
return pages_fetched;
}
+
+/*
+ * compute_gather_rows
+ * Estimate number of rows for gather (merge) nodes.
+ *
+ * In a parallel plan, each worker's row estimate is determined by dividing the
+ * total number of rows by parallel_divisor, which accounts for the leader's
+ * contribution in addition to the number of workers. Accordingly, when
+ * estimating the number of rows for gather (merge) nodes, we multiply the rows
+ * per worker by the same parallel_divisor to undo the division.
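+ *
+ * For example, with parallel_leader_participation enabled and the
+ * leader-contribution model currently used by get_parallel_divisor(), a path
+ * with parallel_workers = 1 gets a divisor of 1 + (1.0 - 0.3 * 1) = 1.7.  A
+ * per-worker estimate of 1000 rows therefore yields
+ * clamp_row_est(1000 * 1.7) = 1700 rows at the Gather node, where the old
+ * "rows * parallel_workers" arithmetic would have produced only 1000.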
+ */
+double
+compute_gather_rows(Path *path)
+{
+ Assert(path->parallel_workers > 0);
+
+ return clamp_row_est(path->rows * get_parallel_divisor(path));
+}
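For reference, the divisor multiplied back in above comes from get_parallel_divisor() in costsize.c, which credits the leader with a contribution that shrinks as workers are added. A condensed sketch of that function (the real one carries a longer explanatory comment):

static double
get_parallel_divisor(Path *path)
{
	double		parallel_divisor = path->parallel_workers;

	if (parallel_leader_participation)
	{
		/* The leader contributes less as more workers are added. */
		double		leader_contribution = 1.0 - (0.3 * path->parallel_workers);

		if (leader_contribution > 0)
			parallel_divisor += leader_contribution;
	}

	return parallel_divisor;
}

With four or more workers the leader-contribution term goes nonpositive and the divisor equals parallel_workers, so the old and new estimates only diverge for plans with one to three workers.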
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ ... @@ create_ordered_paths
root->sort_pathkeys,
presorted_keys,
limit_tuples);
- total_groups = input_path->rows *
- input_path->parallel_workers;
+ total_groups = compute_gather_rows(sorted_path);
sorted_path = (Path *)
create_gather_merge_path(root, ordered_rel,
sorted_path,
@@ ... @@ gather_grouping_paths
if (path != cheapest_partial_path &&
(presorted_keys == 0 || !enable_incremental_sort))
continue;
- total_groups = path->rows * path->parallel_workers;
-
/*
* We've no need to consider both a sort and incremental sort. We'll
* just do a sort if there are no presorted keys and an incremental
@@ ... @@ gather_grouping_paths
groupby_pathkeys,
presorted_keys,
-1.0);
-
+ total_groups = compute_gather_rows(path);
path = (Path *)
create_gather_merge_path(root,
rel,
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ ... @@ create_gather_merge_path
pathnode->num_workers = subpath->parallel_workers;
pathnode->path.pathkeys = pathkeys;
pathnode->path.pathtarget = target ? target : rel->reltarget;
- pathnode->path.rows += subpath->rows;
if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
{
--- a/src/include/optimizer/cost.h
+++ b/src/include/optimizer/cost.h
@@ ... @@
extern double compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel,
Path *bitmapqual, double loop_count,
Cost *cost_p, double *tuples_p);
+extern double compute_gather_rows(Path *path);
#endif /* COST_H */
--- a/src/test/regress/expected/join_hash.out
+++ b/src/test/regress/expected/join_hash.out
@@ ... @@
set local enable_parallel_hash = on;
explain (costs off)
select count(*) from simple r join extremely_skewed s using (id);
-                              QUERY PLAN
------------------------------------------------------------------------
- Finalize Aggregate
+                            QUERY PLAN
+-----------------------------------------------------------------
+ Aggregate
    ->  Gather
          Workers Planned: 1
-         ->  Partial Aggregate
-               ->  Parallel Hash Join
-                     Hash Cond: (r.id = s.id)
-                     ->  Parallel Seq Scan on simple r
-                     ->  Parallel Hash
-                           ->  Parallel Seq Scan on extremely_skewed s
-(9 rows)
+         ->  Parallel Hash Join
+               Hash Cond: (r.id = s.id)
+               ->  Parallel Seq Scan on simple r
+               ->  Parallel Hash
+                     ->  Parallel Seq Scan on extremely_skewed s
+(8 rows)
select count(*) from simple r join extremely_skewed s using (id);
count