*/
lowersizelimit = skey->heapkeyspace &&
(P_ISLEAF(topaque) || BTreeTupleGetHeapTID(itup) == NULL);
- if (tupsize > (lowersizelimit ? BTMaxItemSize(state->target) :
- BTMaxItemSizeNoHeapTid(state->target)))
+ if (tupsize > (lowersizelimit ? BTMaxItemSize : BTMaxItemSizeNoHeapTid))
{
ItemPointer tid = BTreeTupleGetPointsToTID(itup);
char *itid,
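
For reference, the limit selection encoded by the check above can be read as the
following sketch. The helper name bt_item_size_limit is hypothetical and not part
of the patch; BTMaxItemSize and BTMaxItemSizeNoHeapTid are the nbtree.h macros
redefined at the end of this excerpt.

    /* Hypothetical helper, illustrative only: which 1/3-of-a-page limit applies */
    static inline Size
    bt_item_size_limit(bool heapkeyspace, bool is_leaf, bool has_heap_tid)
    {
        /* v4+ leaf tuples, and pivot tuples without a heap TID, get the lower limit */
        if (heapkeyspace && (is_leaf || !has_heap_tid))
            return BTMaxItemSize;
        /* v2/v3 indexes, and pivot tuples carrying a heap TID, get the higher limit */
        return BTMaxItemSizeNoHeapTid;
    }
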
state = (BTDedupState) palloc(sizeof(BTDedupStateData));
state->deduplicate = true;
state->nmaxitems = 0;
- state->maxpostingsize = Min(BTMaxItemSize(page) / 2, INDEX_SIZE_MASK);
+ state->maxpostingsize = Min(BTMaxItemSize / 2, INDEX_SIZE_MASK);
/* Metadata about base tuple of current pending posting list */
state->base = NULL;
state->baseoff = InvalidOffsetNumber;
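
Assuming the default 8192-byte BLCKSZ and 8-byte MAXALIGN (the 2704-byte figure is
derived under the nbtree.h hunk at the end of this excerpt), the cap set here works
out as follows; the arithmetic is illustrative only.

    /* Under the assumptions above:
     *   BTMaxItemSize = 2704, INDEX_SIZE_MASK = 0x1FFF = 8191
     *   maxpostingsize = Min(2704 / 2, 8191) = 1352 bytes per posting list tuple
     */
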
/* Use original, unchanged base tuple */
tuplesz = IndexTupleSize(state->base);
Assert(tuplesz == MAXALIGN(IndexTupleSize(state->base)));
- Assert(tuplesz <= BTMaxItemSize(newpage));
+ Assert(tuplesz <= BTMaxItemSize);
if (PageAddItem(newpage, (Item) state->base, tuplesz, tupoff,
false, false) == InvalidOffsetNumber)
elog(ERROR, "deduplication failed to add tuple to page");
state->intervals[state->nintervals].nitems = state->nitems;
Assert(tuplesz == MAXALIGN(IndexTupleSize(final)));
- Assert(tuplesz <= BTMaxItemSize(newpage));
+ Assert(tuplesz <= BTMaxItemSize);
if (PageAddItem(newpage, (Item) final, tuplesz, tupoff, false,
false) == InvalidOffsetNumber)
elog(ERROR, "deduplication failed to add tuple to page");
opaque = BTPageGetOpaque(page);
/* Check 1/3 of a page restriction */
- if (unlikely(insertstate->itemsz > BTMaxItemSize(page)))
+ if (unlikely(insertstate->itemsz > BTMaxItemSize))
_bt_check_third_page(rel, heapRel, itup_key->heapkeyspace, page,
insertstate->itup);
* make use of the reserved space. This should never fail on internal
* pages.
*/
- if (unlikely(itupsz > BTMaxItemSize(npage)))
+ if (unlikely(itupsz > BTMaxItemSize))
_bt_check_third_page(wstate->index, wstate->heap, isleaf, npage,
itup);
*/
dstate->maxpostingsize = MAXALIGN_DOWN((BLCKSZ * 10 / 100)) -
sizeof(ItemIdData);
- Assert(dstate->maxpostingsize <= BTMaxItemSize((Page) state->btps_buf) &&
+ Assert(dstate->maxpostingsize <= BTMaxItemSize &&
dstate->maxpostingsize <= INDEX_SIZE_MASK);
dstate->htids = palloc(dstate->maxpostingsize);
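
With the same default assumptions (BLCKSZ = 8192, MAXIMUM_ALIGNOF = 8), the
index-build cap above evaluates comfortably below both bounds checked by the
Assert; the arithmetic is illustrative only.

    /* Under the assumptions above:
     *   BLCKSZ * 10 / 100                = 819
     *   MAXALIGN_DOWN(819)               = 816
     *   816 - sizeof(ItemIdData), i.e. 4 = 812 bytes
     * and 812 <= BTMaxItemSize (2704) as well as 812 <= INDEX_SIZE_MASK (8191).
     */
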
itemsz = MAXALIGN(IndexTupleSize(newtup));
/* Double check item size against limit */
- if (itemsz <= BTMaxItemSize(page))
+ if (itemsz <= BTMaxItemSize)
return;
/*
* index uses version 2 or version 3, or that page is an internal page, in
* which case a slightly higher limit applies.
*/
- if (!needheaptidspace && itemsz <= BTMaxItemSizeNoHeapTid(page))
+ if (!needheaptidspace && itemsz <= BTMaxItemSizeNoHeapTid)
return;
/*
errmsg("index row size %zu exceeds btree version %u maximum %zu for index \"%s\"",
itemsz,
needheaptidspace ? BTREE_VERSION : BTREE_NOVAC_VERSION,
- needheaptidspace ? BTMaxItemSize(page) :
- BTMaxItemSizeNoHeapTid(page),
+ needheaptidspace ? BTMaxItemSize : BTMaxItemSizeNoHeapTid,
RelationGetRelationName(rel)),
errdetail("Index row references tuple (%u,%u) in relation \"%s\".",
ItemPointerGetBlockNumber(BTreeTupleGetHeapTID(newtup)),
state->deduplicate = true; /* unused */
state->nmaxitems = 0; /* unused */
/* Conservatively use larger maxpostingsize than primary */
- state->maxpostingsize = BTMaxItemSize(page);
+ state->maxpostingsize = BTMaxItemSize;
state->base = NULL;
state->baseoff = InvalidOffsetNumber;
state->basetupsize = 0;
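
The larger-than-primary cap above can be made concrete under the same default
assumptions; the figures are illustrative only.

    /* Assuming BLCKSZ = 8192: the primary's deduplication pass caps posting
     * list tuples at BTMaxItemSize / 2 = 1352 bytes (nbtdedup.c hunk above),
     * while REDO accepts up to the full BTMaxItemSize of 2704 bytes, so replay
     * is always at least as permissive as the pass it is replaying.
     */
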
* a heap index tuple to make space for a tiebreaker heap TID
* attribute, which we account for here.
*/
-#define BTMaxItemSize(page) \
- (MAXALIGN_DOWN((PageGetPageSize(page) - \
+#define BTMaxItemSize \
+ (MAXALIGN_DOWN((BLCKSZ - \
MAXALIGN(SizeOfPageHeaderData + 3*sizeof(ItemIdData)) - \
MAXALIGN(sizeof(BTPageOpaqueData))) / 3) - \
MAXALIGN(sizeof(ItemPointerData)))
-#define BTMaxItemSizeNoHeapTid(page) \
- MAXALIGN_DOWN((PageGetPageSize(page) - \
+#define BTMaxItemSizeNoHeapTid \
+ MAXALIGN_DOWN((BLCKSZ - \
MAXALIGN(SizeOfPageHeaderData + 3*sizeof(ItemIdData)) - \
MAXALIGN(sizeof(BTPageOpaqueData))) / 3)
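
Under the default configuration (BLCKSZ = 8192, MAXIMUM_ALIGNOF = 8) the two
constants evaluate to the familiar limits reported by _bt_check_third_page()'s
error message above. The worked arithmetic and the StaticAssertDecl below are
illustrative sketches, not part of the patch.

    /* Assuming BLCKSZ = 8192 and MAXIMUM_ALIGNOF = 8:
     *   MAXALIGN(SizeOfPageHeaderData + 3*sizeof(ItemIdData)) = MAXALIGN(24 + 12) = 40
     *   MAXALIGN(sizeof(BTPageOpaqueData))                    = MAXALIGN(16)      = 16
     *   (8192 - 40 - 16) / 3 = 2712 (already a multiple of 8)
     *   BTMaxItemSizeNoHeapTid = 2712                      (btree version 3 limit)
     *   BTMaxItemSize          = 2712 - MAXALIGN(6) = 2704 (btree version 4 limit)
     *
     * Now that both macros are compile-time constants, their relationship can
     * also be checked statically, e.g.:
     */
    StaticAssertDecl(BTMaxItemSizeNoHeapTid - BTMaxItemSize ==
                     MAXALIGN(sizeof(ItemPointerData)),
                     "BTMaxItemSize must reserve room for one heap TID");
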