* tuples to return? */
bool boundUsed; /* true if we made use of a bounded heap */
int bound; /* if bounded, the maximum number of tuples */
+ int64 tupleMem; /* memory consumed by individual tuples.
+ * Storing this separately from what we track
+ * in availMem allows us to subtract the
+ * memory consumed by all tuples when dumping
+ * tuples to tape */
int64 availMem; /* remaining memory available, in bytes */
int64 allowedMem; /* total memory allowed, in bytes */
int maxTapes; /* max number of input tapes to merge in each
* in the parent context, not this context, because there is no need to
* free memtuples early. For bounded sorts, tuples may be pfreed in any
* order, so we use a regular aset.c context so that it can make use of
- * free'd memory. When the sort is not bounded, we make use of a
- * generation.c context as this keeps allocations more compact with less
- * wastage. Allocations are also slightly more CPU efficient.
- */
- if (state->base.sortopt & TUPLESORT_ALLOWBOUNDED)
+ * free'd memory. When the sort is not bounded, we make use of a bump.c
+ * context as this keeps allocations more compact with less wastage.
+ * Allocations are also slightly more CPU efficient.
+ */
+ if (TupleSortUseBumpTupleCxt(state->base.sortopt))
+ state->base.tuplecontext = BumpContextCreate(state->base.sortcontext,
+ "Caller tuples",
+ ALLOCSET_DEFAULT_SIZES);
+ else
state->base.tuplecontext = AllocSetContextCreate(state->base.sortcontext,
"Caller tuples",
ALLOCSET_DEFAULT_SIZES);
- else
- state->base.tuplecontext = GenerationContextCreate(state->base.sortcontext,
- "Caller tuples",
- ALLOCSET_DEFAULT_SIZES);
state->status = TSS_INITIAL;
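For intuition about the tradeoff the comment above describes, here is a toy sketch of a bump-style allocator (illustration only, not bump.c itself): allocation just advances a pointer through a block, which is why it is compact and CPU-cheap, and also why an individual chunk can never be pfree'd, making it unusable for bounded sorts.

#include <stddef.h>

/* Toy bump allocator, for illustration only. */
typedef struct ToyBump
{
	char	   *cur;			/* next unallocated byte in the block */
	char	   *end;			/* first byte past the end of the block */
} ToyBump;

static void *
toy_bump_alloc(ToyBump *cxt, size_t size)
{
	/* round the request up to 8-byte alignment, imitating MAXALIGN */
	size_t		alloc = (size + 7) & ~(size_t) 7;
	void	   *ret;

	if ((size_t) (cxt->end - cxt->cur) < alloc)
		return NULL;			/* the real bump.c would grab a new block */

	ret = cxt->cur;
	cxt->cur += alloc;			/* no per-chunk header and no free list:
								 * individual chunks cannot be freed, only
								 * the whole block can be reset */
	return ret;
}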
* Shared code for tuple and datum cases.
*/
void
-tuplesort_puttuple_common(Tuplesortstate *state, SortTuple *tuple, bool useAbbrev)
+tuplesort_puttuple_common(Tuplesortstate *state, SortTuple *tuple,
+ bool useAbbrev, Size tuplen)
{
MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
Assert(!LEADER(state));
- /* Count the size of the out-of-line data */
- if (tuple->tuple != NULL)
- USEMEM(state, GetMemoryChunkSpace(tuple->tuple));
+ /* account for the memory used for this tuple */
+ USEMEM(state, tuplen);
+ state->tupleMem += tuplen;
if (!useAbbrev)
{
SortTuple *stup = &state->memtuples[i];
WRITETUP(state, state->destTape, stup);
-
- /*
- * Account for freeing the tuple, but no need to do the actual pfree
- * since the tuplecontext is being reset after the loop.
- */
- if (stup->tuple != NULL)
- FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
}
state->memtupcount = 0;
/*
* Reset tuple memory. We've freed all of the tuples that we previously
* allocated. It's important to avoid fragmentation when there is a stark
- * change in the sizes of incoming tuples. Fragmentation due to
- * AllocSetFree's bucketing by size class might be particularly bad if
- * this step wasn't taken.
+ * change in the sizes of incoming tuples. In bounded sorts,
+ * fragmentation due to AllocSetFree's bucketing by size class might be
+ * particularly bad if this step wasn't taken.
*/
MemoryContextReset(state->base.tuplecontext);
+ /*
+ * Now update the memory accounting to subtract the memory used by the
+ * tuples.
+ */
+ FREEMEM(state, state->tupleMem);
+ state->tupleMem = 0;
+
markrunend(state->destTape);
#ifdef TRACE_SORT
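Condensing the accounting flow from the hunks above (a recap of this patch's own code, abridged for review): every incoming tuple is charged once, and the whole charge is reversed in a single step after a run has been dumped.

/* tuplesort_puttuple_common(): charge each incoming tuple */
USEMEM(state, tuplen);
state->tupleMem += tuplen;

/* dumptuples(): after the run is written to tape */
MemoryContextReset(state->base.tuplecontext);	/* frees every tuple at once */
FREEMEM(state, state->tupleMem);				/* reverse the whole charge */
state->tupleMem = 0;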
SortTuple stup;
MinimalTuple tuple;
HeapTupleData htup;
+ Size tuplen;
/* copy the tuple into sort storage */
tuple = ExecCopySlotMinimalTuple(slot);
tupDesc,
&stup.isnull1);
+ /*
+ * GetMemoryChunkSpace is not supported for bump contexts.  Instead,
+ * estimate the space consumed directly from the tuple length: bump
+ * chunks ordinarily carry no per-chunk header and sizes are MAXALIGNed.
+ */
+ if (TupleSortUseBumpTupleCxt(base->sortopt))
+ tuplen = MAXALIGN(tuple->t_len);
+ else
+ tuplen = GetMemoryChunkSpace(tuple);
+
tuplesort_puttuple_common(state, &stup,
base->sortKeys->abbrev_converter &&
- !stup.isnull1);
+ !stup.isnull1, tuplen);
MemoryContextSwitchTo(oldcontext);
}
TuplesortPublic *base = TuplesortstateGetPublic(state);
MemoryContext oldcontext = MemoryContextSwitchTo(base->tuplecontext);
TuplesortClusterArg *arg = (TuplesortClusterArg *) base->arg;
+ Size tuplen;
/* copy the tuple into sort storage */
tup = heap_copytuple(tup);
&stup.isnull1);
}
+ /* GetMemoryChunkSpace is not supported for bump contexts */
+ if (TupleSortUseBumpTupleCxt(base->sortopt))
+ tuplen = MAXALIGN(HEAPTUPLESIZE + tup->t_len);
+ else
+ tuplen = GetMemoryChunkSpace(tup);
+
tuplesort_puttuple_common(state, &stup,
base->haveDatum1 &&
base->sortKeys->abbrev_converter &&
- !stup.isnull1);
+ !stup.isnull1, tuplen);
MemoryContextSwitchTo(oldcontext);
}
IndexTuple tuple;
TuplesortPublic *base = TuplesortstateGetPublic(state);
TuplesortIndexArg *arg = (TuplesortIndexArg *) base->arg;
+ Size tuplen;
stup.tuple = index_form_tuple_context(RelationGetDescr(rel), values,
isnull, base->tuplecontext);
RelationGetDescr(arg->indexRel),
&stup.isnull1);
+ /* GetMemoryChunkSpace is not supported for bump contexts */
+ if (TupleSortUseBumpTupleCxt(base->sortopt))
+ tuplen = MAXALIGN(tuple->t_info & INDEX_SIZE_MASK);
+ else
+ tuplen = GetMemoryChunkSpace(tuple);
+
tuplesort_puttuple_common(state, &stup,
base->sortKeys &&
base->sortKeys->abbrev_converter &&
- !stup.isnull1);
+ !stup.isnull1, tuplen);
}
/*
BrinSortTuple *bstup;
TuplesortPublic *base = TuplesortstateGetPublic(state);
MemoryContext oldcontext = MemoryContextSwitchTo(base->tuplecontext);
+ Size tuplen;
/* allocate space for the whole BRIN sort tuple */
bstup = palloc(BRINSORTTUPLE_SIZE(size));
stup.datum1 = tuple->bt_blkno;
stup.isnull1 = false;
+ /* GetMemoryChunkSpace is not supported for bump contexts */
+ if (TupleSortUseBumpTupleCxt(base->sortopt))
+ tuplen = MAXALIGN(BRINSORTTUPLE_SIZE(size));
+ else
+ tuplen = GetMemoryChunkSpace(bstup);
+
tuplesort_puttuple_common(state, &stup,
base->sortKeys &&
base->sortKeys->abbrev_converter &&
- !stup.isnull1);
+ !stup.isnull1, tuplen);
MemoryContextSwitchTo(oldcontext);
}
tuplesort_puttuple_common(state, &stup,
base->tuples &&
- base->sortKeys->abbrev_converter && !isNull);
+ base->sortKeys->abbrev_converter && !isNull, 0);
MemoryContextSwitchTo(oldcontext);
}
/* specifies if the tuplesort is able to support bounded sorts */
#define TUPLESORT_ALLOWBOUNDED (1 << 1)
+/*
+ * For bounded sorts, tuples get pfree'd when they fall outside the bound.
+ * When bounded sorts are not required, we can use a bump context for tuple
+ * allocation as there's no risk that pfree will ever be called for a tuple.
+ * Define a macro to make it easier for code to figure out if we're using a
+ * bump allocator.
+ */
+#define TupleSortUseBumpTupleCxt(opt) (((opt) & TUPLESORT_ALLOWBOUNDED) == 0)
+
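Spelled out, the macro behaves as follows (illustrative assertions only; TUPLESORT_NONE and TUPLESORT_RANDOMACCESS are the pre-existing flags from this header):

/* bounded sorts must keep an aset tuple context so that pfree works */
Assert(!TupleSortUseBumpTupleCxt(TUPLESORT_ALLOWBOUNDED));

/* any sortopt without TUPLESORT_ALLOWBOUNDED can use the bump context */
Assert(TupleSortUseBumpTupleCxt(TUPLESORT_NONE));
Assert(TupleSortUseBumpTupleCxt(TUPLESORT_RANDOMACCESS));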
typedef struct TuplesortInstrumentation
{
TuplesortMethod sortMethod; /* sort algorithm used */
* The objects we actually sort are SortTuple structs. These contain
* a pointer to the tuple proper (might be a MinimalTuple or IndexTuple),
* which is a separate palloc chunk --- we assume it is just one chunk and
- * can be freed by a simple pfree() (except during merge, when we use a
- * simple slab allocator). SortTuples also contain the tuple's first key
- * column in Datum/nullflag format, and a source/input tape number that
- * tracks which tape each heap element/slot belongs to during merging.
+ * can be freed by a simple pfree() (except during merge, where we use a
+ * simple slab allocator, and during a non-bounded sort, where we use a bump
+ * allocator). SortTuples also contain the tuple's first key column in
+ * Datum/nullflag format, and a source/input tape number that tracks which
+ * tape each heap element/slot belongs to during merging.
*
* Storing the first key column lets us save heap_getattr or index_getattr
* calls during tuple comparisons. We could extract and save all the key
extern void tuplesort_set_bound(Tuplesortstate *state, int64 bound);
extern bool tuplesort_used_bound(Tuplesortstate *state);
extern void tuplesort_puttuple_common(Tuplesortstate *state,
- SortTuple *tuple, bool useAbbrev);
+ SortTuple *tuple, bool useAbbrev,
+ Size tuplen);
extern void tuplesort_performsort(Tuplesortstate *state);
extern bool tuplesort_gettuple_common(Tuplesortstate *state, bool forward,
SortTuple *stup);