author	Tom Lane	2015-02-21 20:13:06 +0000
committer	Tom Lane	2015-02-21 20:13:06 +0000
commit	e1a11d93111ff3fba7a91f3f2ac0b0aca16909a8 (patch)
tree	522fdb9a6b2ed8208bdf692579399eac73c69184 /src/backend/optimizer
parent	3d9b6f31eec150b5a6000e0814e81e36d9eb069a (diff)
Use FLEXIBLE_ARRAY_MEMBER for HeapTupleHeaderData.t_bits[].
This requires changing quite a few places that were depending on
sizeof(HeapTupleHeaderData), but it seems for the best.

Michael Paquier, some adjustments by me
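For context, the idiom this commit standardizes on is the C99 flexible array member: once t_bits[] is declared with FLEXIBLE_ARRAY_MEMBER, sizeof(HeapTupleHeaderData) no longer measures the fixed header (and still includes trailing padding), so callers switch to an offsetof()-based constant such as SizeofHeapTupleHeader. A minimal standalone sketch of the pattern (Demo* names are hypothetical; PostgreSQL's real definitions live in src/include/access/htup_details.h):

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for HeapTupleHeaderData: with a flexible array
 * member, sizeof() includes trailing struct padding but says nothing
 * about the fixed header's true end, so the header size is taken with
 * offsetof() instead. */
typedef struct DemoTupleHeader
{
    unsigned short t_infomask;  /* stand-in for the fixed header fields */
    unsigned char  t_hoff;
    unsigned char  t_bits[];    /* flexible array member; was t_bits[1] */
} DemoTupleHeader;

/* Same shape as SizeofHeapTupleHeader: header size up to t_bits[]. */
#define SizeofDemoTupleHeader offsetof(DemoTupleHeader, t_bits)

int main(void)
{
    /* On typical ABIs this prints sizeof = 4, offsetof = 3: the two
     * diverge, which is why callers below apply MAXALIGN() explicitly
     * to the offsetof-based constant. */
    printf("sizeof = %zu, offsetof = %zu\n",
           sizeof(DemoTupleHeader), (size_t) SizeofDemoTupleHeader);
    return 0;
}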
Diffstat (limited to 'src/backend/optimizer')
-rw-r--r--	src/backend/optimizer/path/costsize.c	| 6 +++---
-rw-r--r--	src/backend/optimizer/plan/planner.c	| 4 ++--
-rw-r--r--	src/backend/optimizer/plan/subselect.c	| 8 ++++----
-rw-r--r--	src/backend/optimizer/prep/prepunion.c	| 2 +-
-rw-r--r--	src/backend/optimizer/util/plancat.c	| 2 +-
5 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 020558b430a..78ef22949a6 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -4036,11 +4036,11 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
/*
* If we have a whole-row reference, estimate its width as the sum of
- * per-column widths plus sizeof(HeapTupleHeaderData).
+ * per-column widths plus heap tuple header overhead.
*/
if (have_wholerow_var)
{
- int32 wholerow_width = sizeof(HeapTupleHeaderData);
+ int32 wholerow_width = MAXALIGN(SizeofHeapTupleHeader);
if (reloid != InvalidOid)
{
@@ -4078,7 +4078,7 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
static double
relation_byte_size(double tuples, int width)
{
- return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
+ return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
}
/*
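The relation_byte_size() change above is easy to sanity-check in isolation. A sketch under stated assumptions (8-byte MAXALIGN and a 23-byte heap tuple header are typical but platform-dependent; Demo* names are hypothetical):

#include <stdio.h>

#define DEMO_MAXALIGN(x)  (((x) + 7) & ~7)   /* assumes 8-byte max alignment */
#define DEMO_HEAP_HDR     23                 /* typical SizeofHeapTupleHeader */

/* Same shape as relation_byte_size(): aligned payload width plus aligned
 * header overhead, multiplied by the estimated row count. */
static double demo_relation_byte_size(double tuples, int width)
{
    return tuples * (DEMO_MAXALIGN(width) + DEMO_MAXALIGN(DEMO_HEAP_HDR));
}

int main(void)
{
    /* 1000 rows of width 37: 1000 * (40 + 24) = 64000 bytes */
    printf("%.0f\n", demo_relation_byte_size(1000, 37));
    return 0;
}

Note that the estimate is unchanged on platforms where MAXALIGN(sizeof(HeapTupleHeaderData)) and MAXALIGN(SizeofHeapTupleHeader) land on the same boundary; the point of the patch is that sizeof() is no longer a meaningful measure of the header.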
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 5c4884f46b9..b02a1079ae4 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -2755,7 +2755,7 @@ choose_hashed_grouping(PlannerInfo *root,
*/
/* Estimate per-hash-entry space at tuple width... */
- hashentrysize = MAXALIGN(path_width) + MAXALIGN(sizeof(MinimalTupleData));
+ hashentrysize = MAXALIGN(path_width) + MAXALIGN(SizeofMinimalTupleHeader);
/* plus space for pass-by-ref transition values... */
hashentrysize += agg_costs->transitionSpace;
/* plus the per-hash-entry overhead */
@@ -2923,7 +2923,7 @@ choose_hashed_distinct(PlannerInfo *root,
*/
/* Estimate per-hash-entry space at tuple width... */
- hashentrysize = MAXALIGN(path_width) + MAXALIGN(sizeof(MinimalTupleData));
+ hashentrysize = MAXALIGN(path_width) + MAXALIGN(SizeofMinimalTupleHeader);
/* plus the per-hash-entry overhead */
hashentrysize += hash_agg_entry_size(0);
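To see what the two planner.c hunks compute, here is the per-hash-entry estimate rebuilt as a standalone sketch (all numeric values are assumed placeholders; SizeofMinimalTupleHeader and hash_agg_entry_size() are stubbed with constants, not PostgreSQL's real values):

#include <stddef.h>
#include <stdio.h>

#define DEMO_MAXALIGN(x)  (((x) + 7) & ~((size_t) 7))  /* assumes 8-byte alignment */

int main(void)
{
    size_t path_width = 48;         /* estimated tuple payload width */
    size_t minimal_hdr = 11;        /* stand-in for SizeofMinimalTupleHeader */
    size_t transition_space = 16;   /* pass-by-ref aggregate state, assumed */
    size_t per_entry_overhead = 56; /* stand-in for hash_agg_entry_size(0) */

    /* Mirrors choose_hashed_grouping(): tuple width plus minimal tuple
     * header, plus aggregate transition values, plus per-entry hashtable
     * bookkeeping. */
    size_t hashentrysize = DEMO_MAXALIGN(path_width)
                         + DEMO_MAXALIGN(minimal_hdr)
                         + transition_space
                         + per_entry_overhead;

    /* 48 + 16 + 16 + 56 = 136 */
    printf("hashentrysize = %zu bytes\n", hashentrysize);
    return 0;
}

choose_hashed_distinct() differs only in dropping the transition-space term, as the second hunk shows.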
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index 78fb6b199ca..5a1d539e8de 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -974,12 +974,12 @@ subplan_is_hashable(Plan *plan)
/*
* The estimated size of the subquery result must fit in work_mem. (Note:
- * we use sizeof(HeapTupleHeaderData) here even though the tuples will
- * actually be stored as MinimalTuples; this provides some fudge factor
- * for hashtable overhead.)
+ * we use heap tuple overhead here even though the tuples will actually be
+ * stored as MinimalTuples; this provides some fudge factor for hashtable
+ * overhead.)
*/
subquery_size = plan->plan_rows *
- (MAXALIGN(plan->plan_width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
+ (MAXALIGN(plan->plan_width) + MAXALIGN(SizeofHeapTupleHeader));
if (subquery_size > work_mem * 1024L)
return false;
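The surrounding test is the interesting part: as the revised comment says, the heap tuple header is charged deliberately even though the hashtable stores MinimalTuples, and the surplus serves as a fudge factor. A hedged sketch of the same guard (demo_* names are hypothetical; work_mem is in kilobytes, hence the * 1024L):

#include <stdbool.h>
#include <stdio.h>

#define DEMO_MAXALIGN(x)  (((x) + 7) & ~7)   /* assumes 8-byte alignment */

/* Mirrors the subplan_is_hashable() test: charge the full heap tuple
 * header (23 bytes assumed) rather than the smaller MinimalTuple header,
 * leaving slack for hashtable overhead. */
static bool demo_fits_in_work_mem(double plan_rows, int plan_width,
                                  int work_mem_kb)
{
    double subquery_size = plan_rows *
        (DEMO_MAXALIGN(plan_width) + DEMO_MAXALIGN(23));

    return subquery_size <= work_mem_kb * 1024L;
}

int main(void)
{
    /* 100000 rows of width 64 against 4 MB: 8.8 MB estimate, too big */
    printf("%s\n", demo_fits_in_work_mem(100000, 64, 4096)
                   ? "hashable" : "too big");
    return 0;
}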
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index 05f601ec2f0..b90fee387b4 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -832,7 +832,7 @@ choose_hashed_setop(PlannerInfo *root, List *groupClauses,
* Don't do it if it doesn't look like the hashtable will fit into
* work_mem.
*/
- hashentrysize = MAXALIGN(input_plan->plan_width) + MAXALIGN(sizeof(MinimalTupleData));
+ hashentrysize = MAXALIGN(input_plan->plan_width) + MAXALIGN(SizeofMinimalTupleHeader);
if (hashentrysize * dNumGroups > work_mem * 1024L)
return false;
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index fb7db6d9599..5cbd6a98f20 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -508,7 +508,7 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
int32 tuple_width;
tuple_width = get_rel_data_width(rel, attr_widths);
- tuple_width += sizeof(HeapTupleHeaderData);
+ tuple_width += MAXALIGN(SizeofHeapTupleHeader);
tuple_width += sizeof(ItemIdData);
/* note: integer division is intentional here */
density = (BLCKSZ - SizeOfPageHeaderData) / tuple_width;
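Finally, the plancat.c hunk feeds the tuples-per-page density estimate in estimate_rel_size(). A standalone sketch of that arithmetic (all constants are assumptions for illustration: 8 kB blocks, 24-byte page header, 4-byte line pointers, 23-byte tuple header):

#include <stdio.h>

#define DEMO_MAXALIGN(x)  (((x) + 7) & ~7)   /* assumes 8-byte alignment */

int main(void)
{
    int blcksz = 8192;     /* stand-in for BLCKSZ */
    int page_hdr = 24;     /* stand-in for SizeOfPageHeaderData */
    int item_id = 4;       /* sizeof(ItemIdData) */
    int data_width = 37;   /* assumed get_rel_data_width() result */

    /* Mirrors estimate_rel_size(): per-tuple footprint on a heap page,
     * now using the MAXALIGN'd offsetof-based header size. */
    int tuple_width = data_width + DEMO_MAXALIGN(23) + item_id;

    /* Integer division is intentional: whole tuples per page. */
    int density = (blcksz - page_hdr) / tuple_width;

    double curpages = 1000.0;  /* measured relation size in pages */
    /* 8168 / 65 = 125 tuples/page; rows ~= 125000 */
    printf("density = %d tuples/page, rows ~= %.0f\n",
           density, density * curpages);
    return 0;
}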