20 | 0 | AAA020
(10 rows)
-SET enable_resultcache TO off;
+SET enable_memoize TO off;
-- right outer join + left outer join
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
20 | 0 | AAA020
(10 rows)
-RESET enable_resultcache;
+RESET enable_memoize;
-- left outer join + right outer join
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
Output: t1."C 1"
-> Index Scan using t1_pkey on "S 1"."T 1" t1
Output: t1."C 1", t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
- -> Result Cache
+ -> Memoize
Cache Key: t1.c2
-> Subquery Scan on q
-> HashAggregate
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
-SET enable_resultcache TO off;
+SET enable_memoize TO off;
-- right outer join + left outer join
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
-RESET enable_resultcache;
+RESET enable_memoize;
-- left outer join + right outer join
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
</listitem>
</varlistentry>
- <varlistentry id="guc-enable-resultcache" xreflabel="enable_resultcache">
- <term><varname>enable_resultcache</varname> (<type>boolean</type>)
+ <varlistentry id="guc-enable-memoize" xreflabel="enable_memoize">
+ <term><varname>enable_memoize</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>enable_resultcache</varname> configuration parameter</primary>
+ <primary><varname>enable_memoize</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- Enables or disables the query planner's use of result cache plans for
+ Enables or disables the query planner's use of memoize plans for
caching results from parameterized scans inside nested-loop joins.
This plan type allows scans to the underlying plans to be skipped when
the results for the current parameters are already in the cache. Less
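As a rough, hypothetical illustration (the tables big/small, column ref_id, and index small_pkey are invented here, and whether the planner actually picks this shape depends on statistics and costs), a Memoize node typically sits on the inner side of a nested loop above a parameterized index scan, caching the inner rows per distinct cache key value:

EXPLAIN (COSTS OFF)
SELECT * FROM big JOIN small ON small.id = big.ref_id;
                  QUERY PLAN
-----------------------------------------------
 Nested Loop
   ->  Seq Scan on big
   ->  Memoize
         Cache Key: big.ref_id
         ->  Index Scan using small_pkey on small
               Index Cond: (id = big.ref_id)

SET enable_memoize TO off;  -- removes Memoize nodes from planner consideration
RESET enable_memoize;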
static void show_incremental_sort_info(IncrementalSortState *incrsortstate,
ExplainState *es);
static void show_hash_info(HashState *hashstate, ExplainState *es);
-static void show_resultcache_info(ResultCacheState *rcstate, List *ancestors,
- ExplainState *es);
+static void show_memoize_info(MemoizeState *mstate, List *ancestors,
+ ExplainState *es);
static void show_hashagg_info(AggState *hashstate, ExplainState *es);
static void show_tidbitmap_info(BitmapHeapScanState *planstate,
ExplainState *es);
case T_Material:
pname = sname = "Materialize";
break;
- case T_ResultCache:
- pname = sname = "Result Cache";
+ case T_Memoize:
+ pname = sname = "Memoize";
break;
case T_Sort:
pname = sname = "Sort";
case T_Hash:
show_hash_info(castNode(HashState, planstate), es);
break;
- case T_ResultCache:
- show_resultcache_info(castNode(ResultCacheState, planstate),
- ancestors, es);
+ case T_Memoize:
+ show_memoize_info(castNode(MemoizeState, planstate), ancestors,
+ es);
break;
default:
break;
}
/*
- * Show information on result cache hits/misses/evictions and memory usage.
+ * Show information on memoize hits/misses/evictions and memory usage.
*/
static void
-show_resultcache_info(ResultCacheState *rcstate, List *ancestors,
- ExplainState *es)
+show_memoize_info(MemoizeState *mstate, List *ancestors, ExplainState *es)
{
- Plan *plan = ((PlanState *) rcstate)->plan;
+ Plan *plan = ((PlanState *) mstate)->plan;
ListCell *lc;
List *context;
StringInfoData keystr;
initStringInfo(&keystr);
/*
- * It's hard to imagine having a result cache with fewer than 2 RTEs, but
+ * It's hard to imagine having a memoize node with fewer than 2 RTEs, but
* let's just keep the same useprefix logic as elsewhere in this file.
*/
useprefix = list_length(es->rtable) > 1 || es->verbose;
plan,
ancestors);
- foreach(lc, ((ResultCache *) plan)->param_exprs)
+ foreach(lc, ((Memoize *) plan)->param_exprs)
{
Node *expr = (Node *) lfirst(lc);
if (!es->analyze)
return;
- if (rcstate->stats.cache_misses > 0)
+ if (mstate->stats.cache_misses > 0)
{
/*
* mem_peak is only set when we freed memory, so we must use mem_used
* when mem_peak is 0.
*/
- if (rcstate->stats.mem_peak > 0)
- memPeakKb = (rcstate->stats.mem_peak + 1023) / 1024;
+ if (mstate->stats.mem_peak > 0)
+ memPeakKb = (mstate->stats.mem_peak + 1023) / 1024;
else
- memPeakKb = (rcstate->mem_used + 1023) / 1024;
+ memPeakKb = (mstate->mem_used + 1023) / 1024;
if (es->format != EXPLAIN_FORMAT_TEXT)
{
- ExplainPropertyInteger("Cache Hits", NULL, rcstate->stats.cache_hits, es);
- ExplainPropertyInteger("Cache Misses", NULL, rcstate->stats.cache_misses, es);
- ExplainPropertyInteger("Cache Evictions", NULL, rcstate->stats.cache_evictions, es);
- ExplainPropertyInteger("Cache Overflows", NULL, rcstate->stats.cache_overflows, es);
+ ExplainPropertyInteger("Cache Hits", NULL, mstate->stats.cache_hits, es);
+ ExplainPropertyInteger("Cache Misses", NULL, mstate->stats.cache_misses, es);
+ ExplainPropertyInteger("Cache Evictions", NULL, mstate->stats.cache_evictions, es);
+ ExplainPropertyInteger("Cache Overflows", NULL, mstate->stats.cache_overflows, es);
ExplainPropertyInteger("Peak Memory Usage", "kB", memPeakKb, es);
}
else
ExplainIndentText(es);
appendStringInfo(es->str,
"Hits: " UINT64_FORMAT " Misses: " UINT64_FORMAT " Evictions: " UINT64_FORMAT " Overflows: " UINT64_FORMAT " Memory Usage: " INT64_FORMAT "kB\n",
- rcstate->stats.cache_hits,
- rcstate->stats.cache_misses,
- rcstate->stats.cache_evictions,
- rcstate->stats.cache_overflows,
+ mstate->stats.cache_hits,
+ mstate->stats.cache_misses,
+ mstate->stats.cache_evictions,
+ mstate->stats.cache_overflows,
memPeakKb);
}
}
- if (rcstate->shared_info == NULL)
+ if (mstate->shared_info == NULL)
return;
/* Show details from parallel workers */
- for (int n = 0; n < rcstate->shared_info->num_workers; n++)
+ for (int n = 0; n < mstate->shared_info->num_workers; n++)
{
- ResultCacheInstrumentation *si;
+ MemoizeInstrumentation *si;
- si = &rcstate->shared_info->sinstrument[n];
+ si = &mstate->shared_info->sinstrument[n];
/*
* Skip workers that didn't do any work. We needn't bother checking
ExplainOpenWorker(n, es);
/*
- * Since the worker's ResultCacheState.mem_used field is unavailable
- * to us, ExecEndResultCache will have set the
- * ResultCacheInstrumentation.mem_peak field for us. No need to do
- * the zero checks like we did for the serial case above.
+ * Since the worker's MemoizeState.mem_used field is unavailable to
+ * us, ExecEndMemoize will have set the
+ * MemoizeInstrumentation.mem_peak field for us. No need to do the
+ * zero checks like we did for the serial case above.
*/
memPeakKb = (si->mem_peak + 1023) / 1024;
nodeLimit.o \
nodeLockRows.o \
nodeMaterial.o \
+ nodeMemoize.o \
nodeMergeAppend.o \
nodeMergejoin.o \
nodeModifyTable.o \
nodeProjectSet.o \
nodeRecursiveunion.o \
nodeResult.o \
- nodeResultCache.o \
nodeSamplescan.o \
nodeSeqscan.o \
nodeSetOp.o \
#include "executor/nodeLimit.h"
#include "executor/nodeLockRows.h"
#include "executor/nodeMaterial.h"
+#include "executor/nodeMemoize.h"
#include "executor/nodeMergeAppend.h"
#include "executor/nodeMergejoin.h"
#include "executor/nodeModifyTable.h"
#include "executor/nodeProjectSet.h"
#include "executor/nodeRecursiveunion.h"
#include "executor/nodeResult.h"
-#include "executor/nodeResultCache.h"
#include "executor/nodeSamplescan.h"
#include "executor/nodeSeqscan.h"
#include "executor/nodeSetOp.h"
ExecReScanMaterial((MaterialState *) node);
break;
- case T_ResultCacheState:
- ExecReScanResultCache((ResultCacheState *) node);
+ case T_MemoizeState:
+ ExecReScanMemoize((MemoizeState *) node);
break;
case T_SortState:
#include "executor/nodeIncrementalSort.h"
#include "executor/nodeIndexonlyscan.h"
#include "executor/nodeIndexscan.h"
-#include "executor/nodeResultCache.h"
+#include "executor/nodeMemoize.h"
#include "executor/nodeSeqscan.h"
#include "executor/nodeSort.h"
#include "executor/nodeSubplan.h"
/* even when not parallel-aware, for EXPLAIN ANALYZE */
ExecAggEstimate((AggState *) planstate, e->pcxt);
break;
- case T_ResultCacheState:
+ case T_MemoizeState:
/* even when not parallel-aware, for EXPLAIN ANALYZE */
- ExecResultCacheEstimate((ResultCacheState *) planstate, e->pcxt);
+ ExecMemoizeEstimate((MemoizeState *) planstate, e->pcxt);
break;
default:
break;
/* even when not parallel-aware, for EXPLAIN ANALYZE */
ExecAggInitializeDSM((AggState *) planstate, d->pcxt);
break;
- case T_ResultCacheState:
+ case T_MemoizeState:
/* even when not parallel-aware, for EXPLAIN ANALYZE */
- ExecResultCacheInitializeDSM((ResultCacheState *) planstate, d->pcxt);
+ ExecMemoizeInitializeDSM((MemoizeState *) planstate, d->pcxt);
break;
default:
break;
case T_HashState:
case T_SortState:
case T_IncrementalSortState:
- case T_ResultCacheState:
+ case T_MemoizeState:
/* these nodes have DSM state, but no reinitialization is required */
break;
case T_AggState:
ExecAggRetrieveInstrumentation((AggState *) planstate);
break;
- case T_ResultCacheState:
- ExecResultCacheRetrieveInstrumentation((ResultCacheState *) planstate);
+ case T_MemoizeState:
+ ExecMemoizeRetrieveInstrumentation((MemoizeState *) planstate);
break;
default:
break;
/* even when not parallel-aware, for EXPLAIN ANALYZE */
ExecAggInitializeWorker((AggState *) planstate, pwcxt);
break;
- case T_ResultCacheState:
+ case T_MemoizeState:
/* even when not parallel-aware, for EXPLAIN ANALYZE */
- ExecResultCacheInitializeWorker((ResultCacheState *) planstate,
- pwcxt);
+ ExecMemoizeInitializeWorker((MemoizeState *) planstate, pwcxt);
break;
default:
break;
#include "executor/nodeLimit.h"
#include "executor/nodeLockRows.h"
#include "executor/nodeMaterial.h"
+#include "executor/nodeMemoize.h"
#include "executor/nodeMergeAppend.h"
#include "executor/nodeMergejoin.h"
#include "executor/nodeModifyTable.h"
#include "executor/nodeProjectSet.h"
#include "executor/nodeRecursiveunion.h"
#include "executor/nodeResult.h"
-#include "executor/nodeResultCache.h"
#include "executor/nodeSamplescan.h"
#include "executor/nodeSeqscan.h"
#include "executor/nodeSetOp.h"
estate, eflags);
break;
- case T_ResultCache:
- result = (PlanState *) ExecInitResultCache((ResultCache *) node,
- estate, eflags);
+ case T_Memoize:
+ result = (PlanState *) ExecInitMemoize((Memoize *) node, estate,
+ eflags);
break;
case T_Group:
ExecEndIncrementalSort((IncrementalSortState *) node);
break;
- case T_ResultCacheState:
- ExecEndResultCache((ResultCacheState *) node);
+ case T_MemoizeState:
+ ExecEndMemoize((MemoizeState *) node);
break;
case T_GroupState:
/*-------------------------------------------------------------------------
*
- * nodeResultCache.c
+ * nodeMemoize.c
* Routines to handle caching of results from parameterized nodes
*
* Portions Copyright (c) 2021, PostgreSQL Global Development Group
*
*
* IDENTIFICATION
- * src/backend/executor/nodeResultCache.c
+ * src/backend/executor/nodeMemoize.c
*
- * ResultCache nodes are intended to sit above parameterized nodes in the plan
+ * Memoize nodes are intended to sit above parameterized nodes in the plan
* tree in order to cache results from them. The intention here is that a
* repeat scan with a parameter value that has already been seen by the node
* can fetch tuples from the cache rather than having to re-scan the outer
* happens then we'll have already evicted all other cache entries. When
* caching another tuple would cause us to exceed our memory budget, we must
* free the entry that we're currently populating and move the state machine
- * into RC_CACHE_BYPASS_MODE. This means that we'll not attempt to cache any
- * further tuples for this particular scan. We don't have the memory for it.
- * The state machine will be reset again on the next rescan. If the memory
- * requirements to cache the next parameter's tuples are less demanding, then
- * that may allow us to start putting useful entries back into the cache
- * again.
+ * into MEMO_CACHE_BYPASS_MODE. This means that we'll not attempt to cache
+ * any further tuples for this particular scan. We don't have the memory for
+ * it. The state machine will be reset again on the next rescan. If the
+ * memory requirements to cache the next parameter's tuples are less
+ * demanding, then that may allow us to start putting useful entries back into
+ * the cache again.
*
*
* INTERFACE ROUTINES
- * ExecResultCache - lookup cache, exec subplan when not found
- * ExecInitResultCache - initialize node and subnodes
- * ExecEndResultCache - shutdown node and subnodes
- * ExecReScanResultCache - rescan the result cache
+ * ExecMemoize - lookup cache, exec subplan when not found
+ * ExecInitMemoize - initialize node and subnodes
+ * ExecEndMemoize - shutdown node and subnodes
+ * ExecReScanMemoize - rescan the memoize node
*
- * ExecResultCacheEstimate estimates DSM space needed for parallel plan
- * ExecResultCacheInitializeDSM initialize DSM for parallel plan
- * ExecResultCacheInitializeWorker attach to DSM info in parallel worker
- * ExecResultCacheRetrieveInstrumentation get instrumentation from worker
+ * ExecMemoizeEstimate estimates DSM space needed for parallel plan
+ * ExecMemoizeInitializeDSM initialize DSM for parallel plan
+ * ExecMemoizeInitializeWorker attach to DSM info in parallel worker
+ * ExecMemoizeRetrieveInstrumentation get instrumentation from worker
*-------------------------------------------------------------------------
*/
#include "common/hashfn.h"
#include "executor/executor.h"
-#include "executor/nodeResultCache.h"
+#include "executor/nodeMemoize.h"
#include "lib/ilist.h"
#include "miscadmin.h"
#include "utils/lsyscache.h"
-/* States of the ExecResultCache state machine */
-#define RC_CACHE_LOOKUP 1 /* Attempt to perform a cache lookup */
-#define RC_CACHE_FETCH_NEXT_TUPLE 2 /* Get another tuple from the cache */
-#define RC_FILLING_CACHE 3 /* Read outer node to fill cache */
-#define RC_CACHE_BYPASS_MODE 4 /* Bypass mode. Just read from our
+/* States of the ExecMemoize state machine */
+#define MEMO_CACHE_LOOKUP 1 /* Attempt to perform a cache lookup */
+#define MEMO_CACHE_FETCH_NEXT_TUPLE 2 /* Get another tuple from the cache */
+#define MEMO_FILLING_CACHE 3 /* Read outer node to fill cache */
+#define MEMO_CACHE_BYPASS_MODE 4 /* Bypass mode. Just read from our
* subplan without caching anything */
-#define RC_END_OF_SCAN 5 /* Ready for rescan */
+#define MEMO_END_OF_SCAN 5 /* Ready for rescan */
/* Helper macros for memory accounting */
-#define EMPTY_ENTRY_MEMORY_BYTES(e) (sizeof(ResultCacheEntry) + \
- sizeof(ResultCacheKey) + \
+#define EMPTY_ENTRY_MEMORY_BYTES(e) (sizeof(MemoizeEntry) + \
+ sizeof(MemoizeKey) + \
(e)->key->params->t_len);
-#define CACHE_TUPLE_BYTES(t) (sizeof(ResultCacheTuple) + \
+#define CACHE_TUPLE_BYTES(t) (sizeof(MemoizeTuple) + \
(t)->mintuple->t_len)
- /* ResultCacheTuple Stores an individually cached tuple */
-typedef struct ResultCacheTuple
+ /* MemoizeTuple Stores an individually cached tuple */
+typedef struct MemoizeTuple
{
MinimalTuple mintuple; /* Cached tuple */
- struct ResultCacheTuple *next; /* The next tuple with the same parameter
- * values or NULL if it's the last one */
-} ResultCacheTuple;
+ struct MemoizeTuple *next; /* The next tuple with the same parameter
+ * values or NULL if it's the last one */
+} MemoizeTuple;
/*
- * ResultCacheKey
+ * MemoizeKey
* The hash table key for cached entries plus the LRU list link
*/
-typedef struct ResultCacheKey
+typedef struct MemoizeKey
{
MinimalTuple params;
dlist_node lru_node; /* Pointer to next/prev key in LRU list */
-} ResultCacheKey;
+} MemoizeKey;
/*
- * ResultCacheEntry
+ * MemoizeEntry
* The data struct that the cache hash table stores
*/
-typedef struct ResultCacheEntry
+typedef struct MemoizeEntry
{
- ResultCacheKey *key; /* Hash key for hash table lookups */
- ResultCacheTuple *tuplehead; /* Pointer to the first tuple or NULL if
- * no tuples are cached for this entry */
+ MemoizeKey *key; /* Hash key for hash table lookups */
+ MemoizeTuple *tuplehead; /* Pointer to the first tuple or NULL if no
+ * tuples are cached for this entry */
uint32 hash; /* Hash value (cached) */
char status; /* Hash status */
bool complete; /* Did we read the outer plan to completion? */
-} ResultCacheEntry;
+} MemoizeEntry;
-#define SH_PREFIX resultcache
-#define SH_ELEMENT_TYPE ResultCacheEntry
-#define SH_KEY_TYPE ResultCacheKey *
+#define SH_PREFIX memoize
+#define SH_ELEMENT_TYPE MemoizeEntry
+#define SH_KEY_TYPE MemoizeKey *
#define SH_SCOPE static inline
#define SH_DECLARE
#include "lib/simplehash.h"
-static uint32 ResultCacheHash_hash(struct resultcache_hash *tb,
- const ResultCacheKey *key);
-static int ResultCacheHash_equal(struct resultcache_hash *tb,
- const ResultCacheKey *params1,
- const ResultCacheKey *params2);
+static uint32 MemoizeHash_hash(struct memoize_hash *tb,
+ const MemoizeKey *key);
+static int MemoizeHash_equal(struct memoize_hash *tb,
+ const MemoizeKey *params1,
+ const MemoizeKey *params2);
-#define SH_PREFIX resultcache
-#define SH_ELEMENT_TYPE ResultCacheEntry
-#define SH_KEY_TYPE ResultCacheKey *
+#define SH_PREFIX memoize
+#define SH_ELEMENT_TYPE MemoizeEntry
+#define SH_KEY_TYPE MemoizeKey *
#define SH_KEY key
-#define SH_HASH_KEY(tb, key) ResultCacheHash_hash(tb, key)
-#define SH_EQUAL(tb, a, b) (ResultCacheHash_equal(tb, a, b) == 0)
+#define SH_HASH_KEY(tb, key) MemoizeHash_hash(tb, key)
+#define SH_EQUAL(tb, a, b) (MemoizeHash_equal(tb, a, b) == 0)
#define SH_SCOPE static inline
#define SH_STORE_HASH
#define SH_GET_HASH(tb, a) a->hash
#include "lib/simplehash.h"
/*
- * ResultCacheHash_hash
+ * MemoizeHash_hash
* Hash function for simplehash hashtable. 'key' is unused here as we
- * require that all table lookups first populate the ResultCacheState's
+ * require that all table lookups first populate the MemoizeState's
* probeslot with the key values to be looked up.
*/
static uint32
-ResultCacheHash_hash(struct resultcache_hash *tb, const ResultCacheKey *key)
+MemoizeHash_hash(struct memoize_hash *tb, const MemoizeKey *key)
{
- ResultCacheState *rcstate = (ResultCacheState *) tb->private_data;
- TupleTableSlot *pslot = rcstate->probeslot;
+ MemoizeState *mstate = (MemoizeState *) tb->private_data;
+ TupleTableSlot *pslot = mstate->probeslot;
uint32 hashkey = 0;
- int numkeys = rcstate->nkeys;
- FmgrInfo *hashfunctions = rcstate->hashfunctions;
- Oid *collations = rcstate->collations;
+ int numkeys = mstate->nkeys;
+ FmgrInfo *hashfunctions = mstate->hashfunctions;
+ Oid *collations = mstate->collations;
for (int i = 0; i < numkeys; i++)
{
}
/*
- * ResultCacheHash_equal
+ * MemoizeHash_equal
* Equality function for confirming hash value matches during a hash
- * table lookup. 'key2' is never used. Instead the ResultCacheState's
+ * table lookup. 'key2' is never used. Instead the MemoizeState's
* probeslot is always populated with details of what's being looked up.
*/
static int
-ResultCacheHash_equal(struct resultcache_hash *tb, const ResultCacheKey *key1,
- const ResultCacheKey *key2)
+MemoizeHash_equal(struct memoize_hash *tb, const MemoizeKey *key1,
+ const MemoizeKey *key2)
{
- ResultCacheState *rcstate = (ResultCacheState *) tb->private_data;
- ExprContext *econtext = rcstate->ss.ps.ps_ExprContext;
- TupleTableSlot *tslot = rcstate->tableslot;
- TupleTableSlot *pslot = rcstate->probeslot;
+ MemoizeState *mstate = (MemoizeState *) tb->private_data;
+ ExprContext *econtext = mstate->ss.ps.ps_ExprContext;
+ TupleTableSlot *tslot = mstate->tableslot;
+ TupleTableSlot *pslot = mstate->probeslot;
/* probeslot should have already been prepared by prepare_probe_slot() */
-
ExecStoreMinimalTuple(key1->params, tslot, false);
econtext->ecxt_innertuple = tslot;
econtext->ecxt_outertuple = pslot;
- return !ExecQualAndReset(rcstate->cache_eq_expr, econtext);
+ return !ExecQualAndReset(mstate->cache_eq_expr, econtext);
}
/*
* Initialize the hash table to empty.
*/
static void
-build_hash_table(ResultCacheState *rcstate, uint32 size)
+build_hash_table(MemoizeState *mstate, uint32 size)
{
/* Make a guess at a good size when we're not given a valid size. */
if (size == 0)
size = 1024;
- /* resultcache_create will convert the size to a power of 2 */
- rcstate->hashtable = resultcache_create(rcstate->tableContext, size,
- rcstate);
+ /* memoize_create will convert the size to a power of 2 */
+ mstate->hashtable = memoize_create(mstate->tableContext, size, mstate);
}
/*
* prepare_probe_slot
- * Populate rcstate's probeslot with the values from the tuple stored
+ * Populate mstate's probeslot with the values from the tuple stored
* in 'key'. If 'key' is NULL, then perform the population by evaluating
- * rcstate's param_exprs.
+ * mstate's param_exprs.
*/
static inline void
-prepare_probe_slot(ResultCacheState *rcstate, ResultCacheKey *key)
+prepare_probe_slot(MemoizeState *mstate, MemoizeKey *key)
{
- TupleTableSlot *pslot = rcstate->probeslot;
- TupleTableSlot *tslot = rcstate->tableslot;
- int numKeys = rcstate->nkeys;
+ TupleTableSlot *pslot = mstate->probeslot;
+ TupleTableSlot *tslot = mstate->tableslot;
+ int numKeys = mstate->nkeys;
ExecClearTuple(pslot);
{
/* Set the probeslot's values based on the current parameter values */
for (int i = 0; i < numKeys; i++)
- pslot->tts_values[i] = ExecEvalExpr(rcstate->param_exprs[i],
- rcstate->ss.ps.ps_ExprContext,
+ pslot->tts_values[i] = ExecEvalExpr(mstate->param_exprs[i],
+ mstate->ss.ps.ps_ExprContext,
&pslot->tts_isnull[i]);
}
else
* reflect the removal of the tuples.
*/
static inline void
-entry_purge_tuples(ResultCacheState *rcstate, ResultCacheEntry *entry)
+entry_purge_tuples(MemoizeState *mstate, MemoizeEntry *entry)
{
- ResultCacheTuple *tuple = entry->tuplehead;
+ MemoizeTuple *tuple = entry->tuplehead;
uint64 freed_mem = 0;
while (tuple != NULL)
{
- ResultCacheTuple *next = tuple->next;
+ MemoizeTuple *next = tuple->next;
freed_mem += CACHE_TUPLE_BYTES(tuple);
entry->tuplehead = NULL;
/* Update the memory accounting */
- rcstate->mem_used -= freed_mem;
+ mstate->mem_used -= freed_mem;
}
/*
* Remove 'entry' from the cache and free memory used by it.
*/
static void
-remove_cache_entry(ResultCacheState *rcstate, ResultCacheEntry *entry)
+remove_cache_entry(MemoizeState *mstate, MemoizeEntry *entry)
{
- ResultCacheKey *key = entry->key;
+ MemoizeKey *key = entry->key;
dlist_delete(&entry->key->lru_node);
/* Remove all of the tuples from this entry */
- entry_purge_tuples(rcstate, entry);
+ entry_purge_tuples(mstate, entry);
/*
* Update memory accounting. entry_purge_tuples should have already
* subtracted the memory used for each cached tuple. Here we just update
* the amount used by the entry itself.
*/
- rcstate->mem_used -= EMPTY_ENTRY_MEMORY_BYTES(entry);
+ mstate->mem_used -= EMPTY_ENTRY_MEMORY_BYTES(entry);
/* Remove the entry from the cache */
- resultcache_delete_item(rcstate->hashtable, entry);
+ memoize_delete_item(mstate->hashtable, entry);
pfree(key->params);
pfree(key);
* cache_reduce_memory
* Evict older and less recently used items from the cache in order to
* reduce the memory consumption back to something below the
- * ResultCacheState's mem_limit.
+ * MemoizeState's mem_limit.
*
* 'specialkey', if not NULL, causes the function to return false if the entry
* which the key belongs to is removed from the cache.
*/
static bool
-cache_reduce_memory(ResultCacheState *rcstate, ResultCacheKey *specialkey)
+cache_reduce_memory(MemoizeState *mstate, MemoizeKey *specialkey)
{
bool specialkey_intact = true; /* for now */
dlist_mutable_iter iter;
uint64 evictions = 0;
/* Update peak memory usage */
- if (rcstate->mem_used > rcstate->stats.mem_peak)
- rcstate->stats.mem_peak = rcstate->mem_used;
+ if (mstate->mem_used > mstate->stats.mem_peak)
+ mstate->stats.mem_peak = mstate->mem_used;
/* We expect only to be called when we've gone over budget on memory */
- Assert(rcstate->mem_used > rcstate->mem_limit);
+ Assert(mstate->mem_used > mstate->mem_limit);
/* Start the eviction process starting at the head of the LRU list. */
- dlist_foreach_modify(iter, &rcstate->lru_list)
+ dlist_foreach_modify(iter, &mstate->lru_list)
{
- ResultCacheKey *key = dlist_container(ResultCacheKey, lru_node,
- iter.cur);
- ResultCacheEntry *entry;
+ MemoizeKey *key = dlist_container(MemoizeKey, lru_node, iter.cur);
+ MemoizeEntry *entry;
/*
* Populate the hash probe slot in preparation for looking up this LRU
* entry.
*/
- prepare_probe_slot(rcstate, key);
+ prepare_probe_slot(mstate, key);
/*
* Ideally the LRU list pointers would be stored in the entry itself
* pointer to the key here, we must perform a hash table lookup to
* find the entry that the key belongs to.
*/
- entry = resultcache_lookup(rcstate->hashtable, NULL);
+ entry = memoize_lookup(mstate->hashtable, NULL);
/* A good spot to check for corruption of the table and LRU list. */
Assert(entry != NULL);
/*
* Finally remove the entry. This will remove from the LRU list too.
*/
- remove_cache_entry(rcstate, entry);
+ remove_cache_entry(mstate, entry);
evictions++;
/* Exit if we've freed enough memory */
- if (rcstate->mem_used <= rcstate->mem_limit)
+ if (mstate->mem_used <= mstate->mem_limit)
break;
}
- rcstate->stats.cache_evictions += evictions; /* Update Stats */
+ mstate->stats.cache_evictions += evictions; /* Update Stats */
return specialkey_intact;
}
/*
* cache_lookup
- * Perform a lookup to see if we've already cached results based on the
+ * Perform a lookup to see if we've already cached tuples based on the
* scan's current parameters. If we find an existing entry we move it to
* the end of the LRU list, set *found to true then return it. If we
* don't find an entry then we create a new one and add it to the end of
*
* Callers can assume we'll never return NULL when *found is true.
*/
-static ResultCacheEntry *
-cache_lookup(ResultCacheState *rcstate, bool *found)
+static MemoizeEntry *
+cache_lookup(MemoizeState *mstate, bool *found)
{
- ResultCacheKey *key;
- ResultCacheEntry *entry;
+ MemoizeKey *key;
+ MemoizeEntry *entry;
MemoryContext oldcontext;
/* prepare the probe slot with the current scan parameters */
- prepare_probe_slot(rcstate, NULL);
+ prepare_probe_slot(mstate, NULL);
/*
* Add the new entry to the cache. No need to pass a valid key since the
- * hash function uses rcstate's probeslot, which we populated above.
+ * hash function uses mstate's probeslot, which we populated above.
*/
- entry = resultcache_insert(rcstate->hashtable, NULL, found);
+ entry = memoize_insert(mstate->hashtable, NULL, found);
if (*found)
{
* Move existing entry to the tail of the LRU list to mark it as the
* most recently used item.
*/
- dlist_move_tail(&rcstate->lru_list, &entry->key->lru_node);
+ dlist_move_tail(&mstate->lru_list, &entry->key->lru_node);
return entry;
}
- oldcontext = MemoryContextSwitchTo(rcstate->tableContext);
+ oldcontext = MemoryContextSwitchTo(mstate->tableContext);
/* Allocate a new key */
- entry->key = key = (ResultCacheKey *) palloc(sizeof(ResultCacheKey));
- key->params = ExecCopySlotMinimalTuple(rcstate->probeslot);
+ entry->key = key = (MemoizeKey *) palloc(sizeof(MemoizeKey));
+ key->params = ExecCopySlotMinimalTuple(mstate->probeslot);
/* Update the total cache memory utilization */
- rcstate->mem_used += EMPTY_ENTRY_MEMORY_BYTES(entry);
+ mstate->mem_used += EMPTY_ENTRY_MEMORY_BYTES(entry);
/* Initialize this entry */
entry->complete = false;
* Since this is the most recently used entry, push this entry onto the
* end of the LRU list.
*/
- dlist_push_tail(&rcstate->lru_list, &entry->key->lru_node);
+ dlist_push_tail(&mstate->lru_list, &entry->key->lru_node);
- rcstate->last_tuple = NULL;
+ mstate->last_tuple = NULL;
MemoryContextSwitchTo(oldcontext);
* If we've gone over our memory budget, then we'll free up some space in
* the cache.
*/
- if (rcstate->mem_used > rcstate->mem_limit)
+ if (mstate->mem_used > mstate->mem_limit)
{
/*
* Try to free up some memory. It's highly unlikely that we'll fail
* any tuples and we're able to remove any other entry to reduce the
* memory consumption.
*/
- if (unlikely(!cache_reduce_memory(rcstate, key)))
+ if (unlikely(!cache_reduce_memory(mstate, key)))
return NULL;
/*
* happened by seeing if the entry is still in use and that the key
* pointer matches our expected key.
*/
- if (entry->status != resultcache_SH_IN_USE || entry->key != key)
+ if (entry->status != memoize_SH_IN_USE || entry->key != key)
{
/*
* We need to repopulate the probeslot as lookups performed during
* the cache evictions above will have stored some other key.
*/
- prepare_probe_slot(rcstate, key);
+ prepare_probe_slot(mstate, key);
/* Re-find the newly added entry */
- entry = resultcache_lookup(rcstate->hashtable, NULL);
+ entry = memoize_lookup(mstate->hashtable, NULL);
Assert(entry != NULL);
}
}
/*
* cache_store_tuple
- * Add the tuple stored in 'slot' to the rcstate's current cache entry.
+ * Add the tuple stored in 'slot' to the mstate's current cache entry.
* The cache entry must have already been made with cache_lookup().
- * rcstate's last_tuple field must point to the tail of rcstate->entry's
+ * mstate's last_tuple field must point to the tail of mstate->entry's
* list of tuples.
*/
static bool
-cache_store_tuple(ResultCacheState *rcstate, TupleTableSlot *slot)
+cache_store_tuple(MemoizeState *mstate, TupleTableSlot *slot)
{
- ResultCacheTuple *tuple;
- ResultCacheEntry *entry = rcstate->entry;
+ MemoizeTuple *tuple;
+ MemoizeEntry *entry = mstate->entry;
MemoryContext oldcontext;
Assert(slot != NULL);
Assert(entry != NULL);
- oldcontext = MemoryContextSwitchTo(rcstate->tableContext);
+ oldcontext = MemoryContextSwitchTo(mstate->tableContext);
- tuple = (ResultCacheTuple *) palloc(sizeof(ResultCacheTuple));
+ tuple = (MemoizeTuple *) palloc(sizeof(MemoizeTuple));
tuple->mintuple = ExecCopySlotMinimalTuple(slot);
tuple->next = NULL;
/* Account for the memory we just consumed */
- rcstate->mem_used += CACHE_TUPLE_BYTES(tuple);
+ mstate->mem_used += CACHE_TUPLE_BYTES(tuple);
if (entry->tuplehead == NULL)
{
else
{
/* push this tuple onto the tail of the list */
- rcstate->last_tuple->next = tuple;
+ mstate->last_tuple->next = tuple;
}
- rcstate->last_tuple = tuple;
+ mstate->last_tuple = tuple;
MemoryContextSwitchTo(oldcontext);
/*
* If we've gone over our memory budget then free up some space in the
* cache.
*/
- if (rcstate->mem_used > rcstate->mem_limit)
+ if (mstate->mem_used > mstate->mem_limit)
{
- ResultCacheKey *key = entry->key;
+ MemoizeKey *key = entry->key;
- if (!cache_reduce_memory(rcstate, key))
+ if (!cache_reduce_memory(mstate, key))
return false;
/*
* happened by seeing if the entry is still in use and that the key
* pointer matches our expected key.
*/
- if (entry->status != resultcache_SH_IN_USE || entry->key != key)
+ if (entry->status != memoize_SH_IN_USE || entry->key != key)
{
/*
* We need to repopulate the probeslot as lookups performed during
* the cache evictions above will have stored some other key.
*/
- prepare_probe_slot(rcstate, key);
+ prepare_probe_slot(mstate, key);
/* Re-find the entry */
- rcstate->entry = entry = resultcache_lookup(rcstate->hashtable,
- NULL);
+ mstate->entry = entry = memoize_lookup(mstate->hashtable, NULL);
Assert(entry != NULL);
}
}
}
static TupleTableSlot *
-ExecResultCache(PlanState *pstate)
+ExecMemoize(PlanState *pstate)
{
- ResultCacheState *node = castNode(ResultCacheState, pstate);
+ MemoizeState *node = castNode(MemoizeState, pstate);
PlanState *outerNode;
TupleTableSlot *slot;
- switch (node->rc_status)
+ switch (node->mstatus)
{
- case RC_CACHE_LOOKUP:
+ case MEMO_CACHE_LOOKUP:
{
- ResultCacheEntry *entry;
+ MemoizeEntry *entry;
TupleTableSlot *outerslot;
bool found;
/*
* Set last_tuple and entry so that the state
- * RC_CACHE_FETCH_NEXT_TUPLE can easily find the next
+ * MEMO_CACHE_FETCH_NEXT_TUPLE can easily find the next
* tuple for these parameters.
*/
node->last_tuple = entry->tuplehead;
/* Fetch the first cached tuple, if there is one */
if (entry->tuplehead)
{
- node->rc_status = RC_CACHE_FETCH_NEXT_TUPLE;
+ node->mstatus = MEMO_CACHE_FETCH_NEXT_TUPLE;
slot = node->ss.ps.ps_ResultTupleSlot;
ExecStoreMinimalTuple(entry->tuplehead->mintuple,
}
/* The cache entry is void of any tuples. */
- node->rc_status = RC_END_OF_SCAN;
+ node->mstatus = MEMO_END_OF_SCAN;
return NULL;
}
* cache_lookup may have returned NULL due to failure to
* free enough cache space, so ensure we don't do anything
* here that assumes it worked. There's no need to go into
- * bypass mode here as we're setting rc_status to end of
+ * bypass mode here as we're setting mstatus to end of
* scan.
*/
if (likely(entry))
entry->complete = true;
- node->rc_status = RC_END_OF_SCAN;
+ node->mstatus = MEMO_END_OF_SCAN;
return NULL;
}
{
node->stats.cache_overflows += 1; /* stats update */
- node->rc_status = RC_CACHE_BYPASS_MODE;
+ node->mstatus = MEMO_CACHE_BYPASS_MODE;
/*
* No need to clear out last_tuple as we'll stay in bypass
* executed to completion.
*/
entry->complete = node->singlerow;
- node->rc_status = RC_FILLING_CACHE;
+ node->mstatus = MEMO_FILLING_CACHE;
}
slot = node->ss.ps.ps_ResultTupleSlot;
return slot;
}
- case RC_CACHE_FETCH_NEXT_TUPLE:
+ case MEMO_CACHE_FETCH_NEXT_TUPLE:
{
/* We shouldn't be in this state if these are not set */
Assert(node->entry != NULL);
/* No more tuples in the cache */
if (node->last_tuple == NULL)
{
- node->rc_status = RC_END_OF_SCAN;
+ node->mstatus = MEMO_END_OF_SCAN;
return NULL;
}
return slot;
}
- case RC_FILLING_CACHE:
+ case MEMO_FILLING_CACHE:
{
TupleTableSlot *outerslot;
- ResultCacheEntry *entry = node->entry;
+ MemoizeEntry *entry = node->entry;
- /* entry should already have been set by RC_CACHE_LOOKUP */
+ /* entry should already have been set by MEMO_CACHE_LOOKUP */
Assert(entry != NULL);
/*
- * When in the RC_FILLING_CACHE state, we've just had a cache
- * miss and are populating the cache with the current scan
- * tuples.
+ * When in the MEMO_FILLING_CACHE state, we've just had a
+ * cache miss and are populating the cache with the current
+ * scan tuples.
*/
outerNode = outerPlanState(node);
outerslot = ExecProcNode(outerNode);
{
/* No more tuples. Mark it as complete */
entry->complete = true;
- node->rc_status = RC_END_OF_SCAN;
+ node->mstatus = MEMO_END_OF_SCAN;
return NULL;
}
/* Couldn't store it? Handle overflow */
node->stats.cache_overflows += 1; /* stats update */
- node->rc_status = RC_CACHE_BYPASS_MODE;
+ node->mstatus = MEMO_CACHE_BYPASS_MODE;
/*
* No need to clear out entry or last_tuple as we'll stay
return slot;
}
- case RC_CACHE_BYPASS_MODE:
+ case MEMO_CACHE_BYPASS_MODE:
{
TupleTableSlot *outerslot;
outerslot = ExecProcNode(outerNode);
if (TupIsNull(outerslot))
{
- node->rc_status = RC_END_OF_SCAN;
+ node->mstatus = MEMO_END_OF_SCAN;
return NULL;
}
return slot;
}
- case RC_END_OF_SCAN:
+ case MEMO_END_OF_SCAN:
/*
* We've already returned NULL for this scan, but just in case
return NULL;
default:
- elog(ERROR, "unrecognized resultcache state: %d",
- (int) node->rc_status);
+ elog(ERROR, "unrecognized memoize state: %d",
+ (int) node->mstatus);
return NULL;
} /* switch */
}
-ResultCacheState *
-ExecInitResultCache(ResultCache *node, EState *estate, int eflags)
+MemoizeState *
+ExecInitMemoize(Memoize *node, EState *estate, int eflags)
{
- ResultCacheState *rcstate = makeNode(ResultCacheState);
+ MemoizeState *mstate = makeNode(MemoizeState);
Plan *outerNode;
int i;
int nkeys;
/* check for unsupported flags */
Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
- rcstate->ss.ps.plan = (Plan *) node;
- rcstate->ss.ps.state = estate;
- rcstate->ss.ps.ExecProcNode = ExecResultCache;
+ mstate->ss.ps.plan = (Plan *) node;
+ mstate->ss.ps.state = estate;
+ mstate->ss.ps.ExecProcNode = ExecMemoize;
/*
* Miscellaneous initialization
*
* create expression context for node
*/
- ExecAssignExprContext(estate, &rcstate->ss.ps);
+ ExecAssignExprContext(estate, &mstate->ss.ps);
outerNode = outerPlan(node);
- outerPlanState(rcstate) = ExecInitNode(outerNode, estate, eflags);
+ outerPlanState(mstate) = ExecInitNode(outerNode, estate, eflags);
/*
* Initialize return slot and type. No need to initialize projection info
* because this node doesn't do projections.
*/
- ExecInitResultTupleSlotTL(&rcstate->ss.ps, &TTSOpsMinimalTuple);
- rcstate->ss.ps.ps_ProjInfo = NULL;
+ ExecInitResultTupleSlotTL(&mstate->ss.ps, &TTSOpsMinimalTuple);
+ mstate->ss.ps.ps_ProjInfo = NULL;
/*
* Initialize scan slot and type.
*/
- ExecCreateScanSlotFromOuterPlan(estate, &rcstate->ss, &TTSOpsMinimalTuple);
+ ExecCreateScanSlotFromOuterPlan(estate, &mstate->ss, &TTSOpsMinimalTuple);
/*
* Set the state machine to lookup the cache. We won't find anything
* until we cache something, but this saves a special case to create the
* first entry.
*/
- rcstate->rc_status = RC_CACHE_LOOKUP;
+ mstate->mstatus = MEMO_CACHE_LOOKUP;
- rcstate->nkeys = nkeys = node->numKeys;
- rcstate->hashkeydesc = ExecTypeFromExprList(node->param_exprs);
- rcstate->tableslot = MakeSingleTupleTableSlot(rcstate->hashkeydesc,
- &TTSOpsMinimalTuple);
- rcstate->probeslot = MakeSingleTupleTableSlot(rcstate->hashkeydesc,
- &TTSOpsVirtual);
+ mstate->nkeys = nkeys = node->numKeys;
+ mstate->hashkeydesc = ExecTypeFromExprList(node->param_exprs);
+ mstate->tableslot = MakeSingleTupleTableSlot(mstate->hashkeydesc,
+ &TTSOpsMinimalTuple);
+ mstate->probeslot = MakeSingleTupleTableSlot(mstate->hashkeydesc,
+ &TTSOpsVirtual);
- rcstate->param_exprs = (ExprState **) palloc(nkeys * sizeof(ExprState *));
- rcstate->collations = node->collations; /* Just point directly to the plan
+ mstate->param_exprs = (ExprState **) palloc(nkeys * sizeof(ExprState *));
+ mstate->collations = node->collations; /* Just point directly to the plan
* data */
- rcstate->hashfunctions = (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
+ mstate->hashfunctions = (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
eqfuncoids = palloc(nkeys * sizeof(Oid));
elog(ERROR, "could not find hash function for hash operator %u",
hashop);
- fmgr_info(left_hashfn, &rcstate->hashfunctions[i]);
+ fmgr_info(left_hashfn, &mstate->hashfunctions[i]);
- rcstate->param_exprs[i] = ExecInitExpr(param_expr, (PlanState *) rcstate);
+ mstate->param_exprs[i] = ExecInitExpr(param_expr, (PlanState *) mstate);
eqfuncoids[i] = get_opcode(hashop);
}
- rcstate->cache_eq_expr = ExecBuildParamSetEqual(rcstate->hashkeydesc,
- &TTSOpsMinimalTuple,
- &TTSOpsVirtual,
- eqfuncoids,
- node->collations,
- node->param_exprs,
- (PlanState *) rcstate);
+ mstate->cache_eq_expr = ExecBuildParamSetEqual(mstate->hashkeydesc,
+ &TTSOpsMinimalTuple,
+ &TTSOpsVirtual,
+ eqfuncoids,
+ node->collations,
+ node->param_exprs,
+ (PlanState *) mstate);
pfree(eqfuncoids);
- rcstate->mem_used = 0;
+ mstate->mem_used = 0;
/* Limit the total memory consumed by the cache to this */
- rcstate->mem_limit = get_hash_mem() * 1024L;
+ mstate->mem_limit = get_hash_mem() * 1024L;
/* A memory context dedicated for the cache */
- rcstate->tableContext = AllocSetContextCreate(CurrentMemoryContext,
- "ResultCacheHashTable",
- ALLOCSET_DEFAULT_SIZES);
+ mstate->tableContext = AllocSetContextCreate(CurrentMemoryContext,
+ "MemoizeHashTable",
+ ALLOCSET_DEFAULT_SIZES);
- dlist_init(&rcstate->lru_list);
- rcstate->last_tuple = NULL;
- rcstate->entry = NULL;
+ dlist_init(&mstate->lru_list);
+ mstate->last_tuple = NULL;
+ mstate->entry = NULL;
/*
* Mark if we can assume the cache entry is completed after we get the
* matching inner tuple. In this case, the cache entry is complete after
* getting the first tuple. This allows us to mark it as so.
*/
- rcstate->singlerow = node->singlerow;
+ mstate->singlerow = node->singlerow;
/* Zero the statistics counters */
- memset(&rcstate->stats, 0, sizeof(ResultCacheInstrumentation));
+ memset(&mstate->stats, 0, sizeof(MemoizeInstrumentation));
/* Allocate and set up the actual cache */
- build_hash_table(rcstate, node->est_entries);
+ build_hash_table(mstate, node->est_entries);
- return rcstate;
+ return mstate;
}
void
-ExecEndResultCache(ResultCacheState *node)
+ExecEndMemoize(MemoizeState *node)
{
#ifdef USE_ASSERT_CHECKING
/* Validate the memory accounting code is correct in assert builds. */
{
int count;
uint64 mem = 0;
- resultcache_iterator i;
- ResultCacheEntry *entry;
+ memoize_iterator i;
+ MemoizeEntry *entry;
- resultcache_start_iterate(node->hashtable, &i);
+ memoize_start_iterate(node->hashtable, &i);
count = 0;
- while ((entry = resultcache_iterate(node->hashtable, &i)) != NULL)
+ while ((entry = memoize_iterate(node->hashtable, &i)) != NULL)
{
- ResultCacheTuple *tuple = entry->tuplehead;
+ MemoizeTuple *tuple = entry->tuplehead;
mem += EMPTY_ENTRY_MEMORY_BYTES(entry);
while (tuple != NULL)
*/
if (node->shared_info != NULL && IsParallelWorker())
{
- ResultCacheInstrumentation *si;
+ MemoizeInstrumentation *si;
/* Make mem_peak available for EXPLAIN */
if (node->stats.mem_peak == 0)
Assert(ParallelWorkerNumber <= node->shared_info->num_workers);
si = &node->shared_info->sinstrument[ParallelWorkerNumber];
- memcpy(si, &node->stats, sizeof(ResultCacheInstrumentation));
+ memcpy(si, &node->stats, sizeof(MemoizeInstrumentation));
}
/* Remove the cache context */
}
void
-ExecReScanResultCache(ResultCacheState *node)
+ExecReScanMemoize(MemoizeState *node)
{
PlanState *outerPlan = outerPlanState(node);
/* Mark that we must lookup the cache for a new set of parameters */
- node->rc_status = RC_CACHE_LOOKUP;
+ node->mstatus = MEMO_CACHE_LOOKUP;
/* nullify pointers used for the last scan */
node->entry = NULL;
double
ExecEstimateCacheEntryOverheadBytes(double ntuples)
{
- return sizeof(ResultCacheEntry) + sizeof(ResultCacheKey) +
- sizeof(ResultCacheTuple) * ntuples;
+ return sizeof(MemoizeEntry) + sizeof(MemoizeKey) + sizeof(MemoizeTuple) *
+ ntuples;
}
/* ----------------------------------------------------------------
*/
/* ----------------------------------------------------------------
- * ExecResultCacheEstimate
+ * ExecMemoizeEstimate
*
- * Estimate space required to propagate result cache statistics.
+ * Estimate space required to propagate memoize statistics.
* ----------------------------------------------------------------
*/
void
-ExecResultCacheEstimate(ResultCacheState *node, ParallelContext *pcxt)
+ExecMemoizeEstimate(MemoizeState *node, ParallelContext *pcxt)
{
Size size;
if (!node->ss.ps.instrument || pcxt->nworkers == 0)
return;
- size = mul_size(pcxt->nworkers, sizeof(ResultCacheInstrumentation));
- size = add_size(size, offsetof(SharedResultCacheInfo, sinstrument));
+ size = mul_size(pcxt->nworkers, sizeof(MemoizeInstrumentation));
+ size = add_size(size, offsetof(SharedMemoizeInfo, sinstrument));
shm_toc_estimate_chunk(&pcxt->estimator, size);
shm_toc_estimate_keys(&pcxt->estimator, 1);
}
/* ----------------------------------------------------------------
- * ExecResultCacheInitializeDSM
+ * ExecMemoizeInitializeDSM
*
- * Initialize DSM space for result cache statistics.
+ * Initialize DSM space for memoize statistics.
* ----------------------------------------------------------------
*/
void
-ExecResultCacheInitializeDSM(ResultCacheState *node, ParallelContext *pcxt)
+ExecMemoizeInitializeDSM(MemoizeState *node, ParallelContext *pcxt)
{
Size size;
if (!node->ss.ps.instrument || pcxt->nworkers == 0)
return;
- size = offsetof(SharedResultCacheInfo, sinstrument)
- + pcxt->nworkers * sizeof(ResultCacheInstrumentation);
+ size = offsetof(SharedMemoizeInfo, sinstrument)
+ + pcxt->nworkers * sizeof(MemoizeInstrumentation);
node->shared_info = shm_toc_allocate(pcxt->toc, size);
/* ensure any unfilled slots will contain zeroes */
memset(node->shared_info, 0, size);
}
/* ----------------------------------------------------------------
- * ExecResultCacheInitializeWorker
+ * ExecMemoizeInitializeWorker
*
- * Attach worker to DSM space for result cache statistics.
+ * Attach worker to DSM space for memoize statistics.
* ----------------------------------------------------------------
*/
void
-ExecResultCacheInitializeWorker(ResultCacheState *node, ParallelWorkerContext *pwcxt)
+ExecMemoizeInitializeWorker(MemoizeState *node, ParallelWorkerContext *pwcxt)
{
node->shared_info =
shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, true);
}
/* ----------------------------------------------------------------
- * ExecResultCacheRetrieveInstrumentation
+ * ExecMemoizeRetrieveInstrumentation
*
- * Transfer result cache statistics from DSM to private memory.
+ * Transfer memoize statistics from DSM to private memory.
* ----------------------------------------------------------------
*/
void
-ExecResultCacheRetrieveInstrumentation(ResultCacheState *node)
+ExecMemoizeRetrieveInstrumentation(MemoizeState *node)
{
Size size;
- SharedResultCacheInfo *si;
+ SharedMemoizeInfo *si;
if (node->shared_info == NULL)
return;
- size = offsetof(SharedResultCacheInfo, sinstrument)
- + node->shared_info->num_workers * sizeof(ResultCacheInstrumentation);
+ size = offsetof(SharedMemoizeInfo, sinstrument)
+ + node->shared_info->num_workers * sizeof(MemoizeInstrumentation);
si = palloc(size);
memcpy(si, node->shared_info, size);
node->shared_info = si;
/*
- * _copyResultCache
+ * _copyMemoize
*/
-static ResultCache *
-_copyResultCache(const ResultCache *from)
+static Memoize *
+_copyMemoize(const Memoize *from)
{
- ResultCache *newnode = makeNode(ResultCache);
+ Memoize *newnode = makeNode(Memoize);
/*
* copy node superclass fields
case T_Material:
retval = _copyMaterial(from);
break;
- case T_ResultCache:
- retval = _copyResultCache(from);
+ case T_Memoize:
+ retval = _copyMemoize(from);
break;
case T_Sort:
retval = _copySort(from);
}
static void
-_outResultCache(StringInfo str, const ResultCache *node)
+_outMemoize(StringInfo str, const Memoize *node)
{
- WRITE_NODE_TYPE("RESULTCACHE");
+ WRITE_NODE_TYPE("MEMOIZE");
_outPlanInfo(str, (const Plan *) node);
}
static void
-_outResultCachePath(StringInfo str, const ResultCachePath *node)
+_outMemoizePath(StringInfo str, const MemoizePath *node)
{
- WRITE_NODE_TYPE("RESULTCACHEPATH");
+ WRITE_NODE_TYPE("MEMOIZEPATH");
_outPathInfo(str, (const Path *) node);
case T_Material:
_outMaterial(str, obj);
break;
- case T_ResultCache:
- _outResultCache(str, obj);
+ case T_Memoize:
+ _outMemoize(str, obj);
break;
case T_Sort:
_outSort(str, obj);
case T_MaterialPath:
_outMaterialPath(str, obj);
break;
- case T_ResultCachePath:
- _outResultCachePath(str, obj);
+ case T_MemoizePath:
+ _outMemoizePath(str, obj);
break;
case T_UniquePath:
_outUniquePath(str, obj);
}
/*
- * _readResultCache
+ * _readMemoize
*/
-static ResultCache *
-_readResultCache(void)
+static Memoize *
+_readMemoize(void)
{
- READ_LOCALS(ResultCache);
+ READ_LOCALS(Memoize);
ReadCommonPlan(&local_node->plan);
return_value = _readHashJoin();
else if (MATCH("MATERIAL", 8))
return_value = _readMaterial();
- else if (MATCH("RESULTCACHE", 11))
- return_value = _readResultCache();
+ else if (MATCH("MEMOIZE", 7))
+ return_value = _readMemoize();
else if (MATCH("SORT", 4))
return_value = _readSort();
else if (MATCH("INCREMENTALSORT", 15))
MergeAppendPath - merge multiple subpaths, preserving their common sort order
GroupResultPath - childless Result plan node (used for degenerate grouping)
MaterialPath - a Material plan node
- ResultCachePath - a result cache plan node for caching tuples from sub-paths
+ MemoizePath - a Memoize plan node for caching tuples from sub-paths
UniquePath - remove duplicate rows (either by hashing or sorting)
GatherPath - collect the results of parallel workers
GatherMergePath - collect parallel results, preserving their common sort order
ptype = "Material";
subpath = ((MaterialPath *) path)->subpath;
break;
- case T_ResultCachePath:
- ptype = "ResultCache";
- subpath = ((ResultCachePath *) path)->subpath;
+ case T_MemoizePath:
+ ptype = "Memoize";
+ subpath = ((MemoizePath *) path)->subpath;
break;
case T_UniquePath:
ptype = "Unique";
#include "executor/executor.h"
#include "executor/nodeAgg.h"
#include "executor/nodeHash.h"
-#include "executor/nodeResultCache.h"
+#include "executor/nodeMemoize.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
bool enable_hashagg = true;
bool enable_nestloop = true;
bool enable_material = true;
-bool enable_resultcache = true;
+bool enable_memoize = true;
bool enable_mergejoin = true;
bool enable_hashjoin = true;
bool enable_gathermerge = true;
}
/*
- * cost_resultcache_rescan
- * Determines the estimated cost of rescanning a ResultCache node.
+ * cost_memoize_rescan
+ * Determines the estimated cost of rescanning a Memoize node.
*
* In order to estimate this, we must gain knowledge of how often we expect to
* be called and how many distinct sets of parameters we are likely to be
* hit and caching would be a complete waste of effort.
*/
static void
-cost_resultcache_rescan(PlannerInfo *root, ResultCachePath *rcpath,
- Cost *rescan_startup_cost, Cost *rescan_total_cost)
+cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
+ Cost *rescan_startup_cost, Cost *rescan_total_cost)
{
EstimationInfo estinfo;
- Cost input_startup_cost = rcpath->subpath->startup_cost;
- Cost input_total_cost = rcpath->subpath->total_cost;
- double tuples = rcpath->subpath->rows;
- double calls = rcpath->calls;
- int width = rcpath->subpath->pathtarget->width;
+ Cost input_startup_cost = mpath->subpath->startup_cost;
+ Cost input_total_cost = mpath->subpath->total_cost;
+ double tuples = mpath->subpath->rows;
+ double calls = mpath->calls;
+ int width = mpath->subpath->pathtarget->width;
double hash_mem_bytes;
double est_entry_bytes;
est_cache_entries = floor(hash_mem_bytes / est_entry_bytes);
/* estimate on the distinct number of parameter values */
- ndistinct = estimate_num_groups(root, rcpath->param_exprs, calls, NULL,
+ ndistinct = estimate_num_groups(root, mpath->param_exprs, calls, NULL,
&estinfo);
/*
* When the estimation fell back on using a default value, it's a bit too
- * risky to assume that it's ok to use a Result Cache. The use of a
- * default could cause us to use a Result Cache when it's really
+ * risky to assume that it's ok to use a Memoize node. The use of a
+ * default could cause us to use a Memoize node when it's really
* inappropriate to do so. If we see that this has been done, then we'll
* assume that every call will have unique parameters, which will almost
- * certainly mean a ResultCachePath will never survive add_path().
+ * certainly mean a MemoizePath will never survive add_path().
*/
if ((estinfo.flags & SELFLAG_USED_DEFAULT) != 0)
ndistinct = calls;
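As a rough worked illustration of what this estimate feeds into (numbers invented; the actual arithmetic lives in the rest of cost_memoize_rescan, which is not shown in this hunk): if the node expects calls = 1000 rescans spread over ndistinct = 100 distinct parameter sets, and all 100 entries fit under the hash_mem budget (est_cache_entries >= ndistinct), then only about 100 rescans must run the subplan, so roughly (1000 - 100) / 1000 = 90% of rescans can be costed as cache hits. The fallback above, ndistinct = calls, deliberately drives that expected hit fraction to zero so the resulting MemoizePath loses out in add_path.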
* size itself. Really this is not the right place to do this, but it's
* convenient since everything is already calculated.
*/
- rcpath->est_entries = Min(Min(ndistinct, est_cache_entries),
- PG_UINT32_MAX);
+ mpath->est_entries = Min(Min(ndistinct, est_cache_entries),
+ PG_UINT32_MAX);
/*
* When the number of distinct parameter values is above the amount we can
*rescan_total_cost = run_cost;
}
break;
- case T_ResultCache:
- /* All the hard work is done by cost_resultcache_rescan */
- cost_resultcache_rescan(root, (ResultCachePath *) path,
- rescan_startup_cost, rescan_total_cost);
+ case T_Memoize:
+ /* All the hard work is done by cost_memoize_rescan */
+ cost_memoize_rescan(root, (MemoizePath *) path,
+ rescan_startup_cost, rescan_total_cost);
break;
default:
*rescan_startup_cost = path->startup_cost;
case JOIN_ANTI:
/*
- * XXX it may be worth proving this to allow a ResultCache to be
+ * XXX it may be worth proving this to allow a Memoize to be
* considered for Nested Loop Semi/Anti Joins.
*/
extra.inner_unique = false; /* well, unproven */
OpExpr *opexpr;
Node *expr;
- /* can't use result cache without a valid hash equals operator */
+ /* can't use a memoize node without a valid hash equals operator */
if (!OidIsValid(rinfo->hasheqoperator) ||
!clause_sides_match_join(rinfo, outerrel, innerrel))
{
typentry = lookup_type_cache(exprType(expr),
TYPECACHE_HASH_PROC | TYPECACHE_EQ_OPR);
- /* can't use result cache without a valid hash equals operator */
+ /* can't use a memoize node without a valid hash equals operator */
if (!OidIsValid(typentry->hash_proc) || !OidIsValid(typentry->eq_opr))
{
list_free(*operators);
*param_exprs = lappend(*param_exprs, expr);
}
- /* We're okay to use result cache */
+ /* We're okay to use memoize */
return true;
}
/*
- * get_resultcache_path
- * If possible, make and return a Result Cache path atop of 'inner_path'.
+ * get_memoize_path
+ * If possible, make and return a Memoize path atop of 'inner_path'.
* Otherwise return NULL.
*/
static Path *
-get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
- RelOptInfo *outerrel, Path *inner_path,
- Path *outer_path, JoinType jointype,
- JoinPathExtraData *extra)
+get_memoize_path(PlannerInfo *root, RelOptInfo *innerrel,
+ RelOptInfo *outerrel, Path *inner_path,
+ Path *outer_path, JoinType jointype,
+ JoinPathExtraData *extra)
{
List *param_exprs;
List *hash_operators;
ListCell *lc;
/* Obviously not if it's disabled */
- if (!enable_resultcache)
+ if (!enable_memoize)
return NULL;
/*
return NULL;
/*
- * We can only have a result cache when there's some kind of cache key,
+ * We can only have a memoize node when there's some kind of cache key,
* either parameterized path clauses or lateral Vars. No cache key sounds
* more like something a Materialize node might be more useful for.
*/
/*
* Currently we don't do this for SEMI and ANTI joins unless they're
* marked as inner_unique. This is because nested loop SEMI/ANTI joins
- * don't scan the inner node to completion, which will mean result cache
- * cannot mark the cache entry as complete.
+ * don't scan the inner node to completion, which will mean memoize cannot
+ * mark the cache entry as complete.
*
* XXX Currently we don't attempt to mark SEMI/ANTI joins as inner_unique
* = true. Should we? See add_paths_to_joinrel()
return NULL;
/*
- * Result Cache normally marks cache entries as complete when it runs out
- * of tuples to read from its subplan. However, with unique joins, Nested
+ * Memoize normally marks cache entries as complete when it runs out of
+ * tuples to read from its subplan. However, with unique joins, Nested
* Loop will skip to the next outer tuple after finding the first matching
* inner tuple. This means that we may not read the inner side of the
* join to completion which leaves no opportunity to mark the cache entry
* condition, we can't be sure which part of it causes the join to be
* unique. This means there are no guarantees that only 1 tuple will be
* read. We cannot mark the cache entry as complete after reading the
- * first tuple without that guarantee. This means the scope of Result
- * Cache's usefulness is limited to only outer rows that have no join
+ * first tuple without that guarantee. This means the scope of Memoize
+ * node's usefulness is limited to only outer rows that have no join
* partner as this is the only case where Nested Loop would exhaust the
* inner scan of a unique join. Since the scope is limited to that, we
- * just don't bother making a result cache path in this case.
+ * just don't bother making a memoize path in this case.
*
* Lateral vars needn't be considered here as they're not considered when
* determining if the join is unique.
return NULL;
/*
- * We can't use a result cache if there are volatile functions in the
+ * We can't use a memoize node if there are volatile functions in the
* inner rel's target list or restrict list. A cache hit could reduce the
* number of calls to these functions.
*/
¶m_exprs,
&hash_operators))
{
- return (Path *) create_resultcache_path(root,
- innerrel,
- inner_path,
- param_exprs,
- hash_operators,
- extra->inner_unique,
- outer_path->parent->rows);
+ return (Path *) create_memoize_path(root,
+ innerrel,
+ inner_path,
+ param_exprs,
+ hash_operators,
+ extra->inner_unique,
+ outer_path->parent->rows);
}
return NULL;
foreach(lc2, innerrel->cheapest_parameterized_paths)
{
Path *innerpath = (Path *) lfirst(lc2);
- Path *rcpath;
+ Path *mpath;
try_nestloop_path(root,
joinrel,
extra);
/*
- * Try generating a result cache path and see if that makes
- * the nested loop any cheaper.
+ * Try generating a memoize path and see if that makes the
+ * nested loop any cheaper.
*/
- rcpath = get_resultcache_path(root, innerrel, outerrel,
- innerpath, outerpath, jointype,
- extra);
- if (rcpath != NULL)
+ mpath = get_memoize_path(root, innerrel, outerrel,
+ innerpath, outerpath, jointype,
+ extra);
+ if (mpath != NULL)
try_nestloop_path(root,
joinrel,
outerpath,
- rcpath,
+ mpath,
merge_pathkeys,
jointype,
extra);
foreach(lc2, innerrel->cheapest_parameterized_paths)
{
Path *innerpath = (Path *) lfirst(lc2);
- Path *rcpath;
+ Path *mpath;
/* Can't join to an inner path that is not parallel-safe */
if (!innerpath->parallel_safe)
pathkeys, jointype, extra);
/*
- * Try generating a result cache path and see if that makes the
- * nested loop any cheaper.
+ * Try generating a memoize path and see if that makes the nested
+ * loop any cheaper.
*/
- rcpath = get_resultcache_path(root, innerrel, outerrel,
- innerpath, outerpath, jointype,
- extra);
- if (rcpath != NULL)
- try_partial_nestloop_path(root, joinrel, outerpath, rcpath,
+ mpath = get_memoize_path(root, innerrel, outerrel,
+ innerpath, outerpath, jointype,
+ extra);
+ if (mpath != NULL)
+ try_partial_nestloop_path(root, joinrel, outerpath, mpath,
pathkeys, jointype, extra);
}
}
static ProjectSet *create_project_set_plan(PlannerInfo *root, ProjectSetPath *best_path);
static Material *create_material_plan(PlannerInfo *root, MaterialPath *best_path,
int flags);
-static ResultCache *create_resultcache_plan(PlannerInfo *root,
- ResultCachePath *best_path,
- int flags);
+static Memoize *create_memoize_plan(PlannerInfo *root, MemoizePath *best_path,
+ int flags);
static Plan *create_unique_plan(PlannerInfo *root, UniquePath *best_path,
int flags);
static Gather *create_gather_plan(PlannerInfo *root, GatherPath *best_path);
AttrNumber *grpColIdx,
Plan *lefttree);
static Material *make_material(Plan *lefttree);
-static ResultCache *make_resultcache(Plan *lefttree, Oid *hashoperators,
- Oid *collations,
- List *param_exprs,
- bool singlerow,
- uint32 est_entries);
+static Memoize *make_memoize(Plan *lefttree, Oid *hashoperators,
+ Oid *collations, List *param_exprs,
+ bool singlerow, uint32 est_entries);
static WindowAgg *make_windowagg(List *tlist, Index winref,
int partNumCols, AttrNumber *partColIdx, Oid *partOperators, Oid *partCollations,
int ordNumCols, AttrNumber *ordColIdx, Oid *ordOperators, Oid *ordCollations,
(MaterialPath *) best_path,
flags);
break;
- case T_ResultCache:
- plan = (Plan *) create_resultcache_plan(root,
- (ResultCachePath *) best_path,
- flags);
+ case T_Memoize:
+ plan = (Plan *) create_memoize_plan(root,
+ (MemoizePath *) best_path,
+ flags);
break;
case T_Unique:
if (IsA(best_path, UpperUniquePath))
}
/*
- * create_resultcache_plan
- * Create a ResultCache plan for 'best_path' and (recursively) plans
- * for its subpaths.
+ * create_memoize_plan
+ * Create a Memoize plan for 'best_path' and (recursively) plans for its
+ * subpaths.
*
* Returns a Plan node.
*/
-static ResultCache *
-create_resultcache_plan(PlannerInfo *root, ResultCachePath *best_path, int flags)
+static Memoize *
+create_memoize_plan(PlannerInfo *root, MemoizePath *best_path, int flags)
{
- ResultCache *plan;
+ Memoize *plan;
Plan *subplan;
Oid *operators;
Oid *collations;
i++;
}
- plan = make_resultcache(subplan, operators, collations, param_exprs,
- best_path->singlerow, best_path->est_entries);
+ plan = make_memoize(subplan, operators, collations, param_exprs,
+ best_path->singlerow, best_path->est_entries);
copy_generic_path_info(&plan->plan, (Path *) best_path);
return matplan;
}
-static ResultCache *
-make_resultcache(Plan *lefttree, Oid *hashoperators, Oid *collations,
- List *param_exprs, bool singlerow, uint32 est_entries)
+static Memoize *
+make_memoize(Plan *lefttree, Oid *hashoperators, Oid *collations,
+ List *param_exprs, bool singlerow, uint32 est_entries)
{
- ResultCache *node = makeNode(ResultCache);
+ Memoize *node = makeNode(Memoize);
Plan *plan = &node->plan;
plan->targetlist = lefttree->targetlist;
{
case T_Hash:
case T_Material:
- case T_ResultCache:
+ case T_Memoize:
case T_Sort:
case T_IncrementalSort:
case T_Unique:
{
case T_Hash:
case T_Material:
- case T_ResultCache:
+ case T_Memoize:
case T_Sort:
case T_Unique:
case T_SetOp:
static bool check_redundant_nullability_qual(PlannerInfo *root, Node *clause);
static void check_mergejoinable(RestrictInfo *restrictinfo);
static void check_hashjoinable(RestrictInfo *restrictinfo);
-static void check_resultcacheable(RestrictInfo *restrictinfo);
+static void check_memoizable(RestrictInfo *restrictinfo);
/*****************************************************************************
/*
* Likewise, check if the clause is suitable to be used with a
- * Result Cache node to cache inner tuples during a parameterized
+ * Memoize node to cache inner tuples during a parameterized
* nested loop.
*/
- check_resultcacheable(restrictinfo);
+ check_memoizable(restrictinfo);
/*
* Add clause to the join lists of all the relevant relations.
/* Set mergejoinability/hashjoinability flags */
check_mergejoinable(restrictinfo);
check_hashjoinable(restrictinfo);
- check_resultcacheable(restrictinfo);
+ check_memoizable(restrictinfo);
return restrictinfo;
}
}
/*
- * check_resultcacheable
- * If the restrictinfo's clause is suitable to be used for a Result Cache
- * node, set the hasheqoperator to the hash equality operator that will be
- * needed during caching.
+ * check_memoizable
+ * If the restrictinfo's clause is suitable to be used for a Memoize node,
+ * set the hasheqoperator to the hash equality operator that will be needed
+ * during caching.
*/
static void
-check_resultcacheable(RestrictInfo *restrictinfo)
+check_memoizable(RestrictInfo *restrictinfo)
{
TypeCacheEntry *typentry;
Expr *clause = restrictinfo->clause;
set_hash_references(root, plan, rtoffset);
break;
- case T_ResultCache:
+ case T_Memoize:
{
- ResultCache *rcplan = (ResultCache *) plan;
+ Memoize *mplan = (Memoize *) plan;
/*
- * Result Cache does not evaluate its targetlist. It just
- * uses the same targetlist from its outer subnode.
+ * Memoize does not evaluate its targetlist. It just uses the
+ * same targetlist from its outer subnode.
*/
set_dummy_tlist_references(plan, rtoffset);
- rcplan->param_exprs = fix_scan_list(root, rcplan->param_exprs,
- rtoffset,
- NUM_EXEC_TLIST(plan));
+ mplan->param_exprs = fix_scan_list(root, mplan->param_exprs,
+ rtoffset,
+ NUM_EXEC_TLIST(plan));
break;
}
/* rescan_param does *not* get added to scan_params */
break;
- case T_ResultCache:
- finalize_primnode((Node *) ((ResultCache *) plan)->param_exprs,
+ case T_Memoize:
+ finalize_primnode((Node *) ((Memoize *) plan)->param_exprs,
&context);
break;
}
/*
- * create_resultcache_path
- * Creates a path corresponding to a ResultCache plan, returning the
- * pathnode.
+ * create_memoize_path
+ * Creates a path corresponding to a Memoize plan, returning the pathnode.
*/
-ResultCachePath *
-create_resultcache_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
- List *param_exprs, List *hash_operators,
- bool singlerow, double calls)
+MemoizePath *
+create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
+ List *param_exprs, List *hash_operators,
+ bool singlerow, double calls)
{
- ResultCachePath *pathnode = makeNode(ResultCachePath);
+ MemoizePath *pathnode = makeNode(MemoizePath);
Assert(subpath->parent == rel);
- pathnode->path.pathtype = T_ResultCache;
+ pathnode->path.pathtype = T_Memoize;
pathnode->path.parent = rel;
pathnode->path.pathtarget = rel->reltarget;
pathnode->path.param_info = subpath->param_info;
pathnode->calls = calls;
/*
- * For now we set est_entries to 0. cost_resultcache_rescan() does all
- * the hard work to determine how many cache entries there are likely to
- * be, so it seems best to leave it up to that function to fill this field
- * in. If left at 0, the executor will make a guess at a good value.
+ * For now we set est_entries to 0. cost_memoize_rescan() does all the
+ * hard work to determine how many cache entries there are likely to be,
+ * so it seems best to leave it up to that function to fill this field in.
+ * If left at 0, the executor will make a guess at a good value.
*/
pathnode->est_entries = 0;
/*
* Add a small additional charge for caching the first entry. All the
- * harder calculations for rescans are performed in
- * cost_resultcache_rescan().
+ * harder calculations for rescans are performed in cost_memoize_rescan().
*/
pathnode->path.startup_cost = subpath->startup_cost + cpu_tuple_cost;
pathnode->path.total_cost = subpath->total_cost + cpu_tuple_cost;
apath->path.parallel_aware,
-1);
}
- case T_ResultCache:
+ case T_Memoize:
{
- ResultCachePath *rcpath = (ResultCachePath *) path;
-
- return (Path *) create_resultcache_path(root, rel,
- rcpath->subpath,
- rcpath->param_exprs,
- rcpath->hash_operators,
- rcpath->singlerow,
- rcpath->calls);
+ MemoizePath *mpath = (MemoizePath *) path;
+
+ return (Path *) create_memoize_path(root, rel,
+ mpath->subpath,
+ mpath->param_exprs,
+ mpath->hash_operators,
+ mpath->singlerow,
+ mpath->calls);
}
default:
break;
}
break;
- case T_ResultCachePath:
+ case T_MemoizePath:
{
- ResultCachePath *rcpath;
+ MemoizePath *mpath;
- FLAT_COPY_PATH(rcpath, path, ResultCachePath);
- REPARAMETERIZE_CHILD_PATH(rcpath->subpath);
- new_path = (Path *) rcpath;
+ FLAT_COPY_PATH(mpath, path, MemoizePath);
+ REPARAMETERIZE_CHILD_PATH(mpath->subpath);
+ new_path = (Path *) mpath;
}
break;
NULL, NULL, NULL
},
{
- {"enable_resultcache", PGC_USERSET, QUERY_TUNING_METHOD,
- gettext_noop("Enables the planner's use of result caching."),
+ {"enable_memoize", PGC_USERSET, QUERY_TUNING_METHOD,
+ gettext_noop("Enables the planner's use of memoization."),
NULL,
GUC_EXPLAIN
},
- &enable_resultcache,
+ &enable_memoize,
true,
NULL, NULL, NULL
},
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
-#enable_resultcache = on
+#enable_memoize = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
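Like the other enable_* planner settings, the renamed GUC can also be changed per session rather than in postgresql.conf; a minimal sketch (the regression tests below toggle it the same way):

    -- stop the planner from considering Memoize paths in this session
    SET enable_memoize TO off;
    -- ... run EXPLAIN on the query of interest; the Memoize node should no longer appear ...
    RESET enable_memoize;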
--- /dev/null
+/*-------------------------------------------------------------------------
+ *
+ * nodeMemoize.h
+ *
+ *
+ *
+ * Portions Copyright (c) 2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/executor/nodeMemoize.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef NODEMEMOIZE_H
+#define NODEMEMOIZE_H
+
+#include "access/parallel.h"
+#include "nodes/execnodes.h"
+
+extern MemoizeState *ExecInitMemoize(Memoize *node, EState *estate, int eflags);
+extern void ExecEndMemoize(MemoizeState *node);
+extern void ExecReScanMemoize(MemoizeState *node);
+extern double ExecEstimateCacheEntryOverheadBytes(double ntuples);
+extern void ExecMemoizeEstimate(MemoizeState *node,
+ ParallelContext *pcxt);
+extern void ExecMemoizeInitializeDSM(MemoizeState *node,
+ ParallelContext *pcxt);
+extern void ExecMemoizeInitializeWorker(MemoizeState *node,
+ ParallelWorkerContext *pwcxt);
+extern void ExecMemoizeRetrieveInstrumentation(MemoizeState *node);
+
+#endif /* NODEMEMOIZE_H */
+++ /dev/null
-/*-------------------------------------------------------------------------
- *
- * nodeResultCache.h
- *
- *
- *
- * Portions Copyright (c) 2021, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- * src/include/executor/nodeResultCache.h
- *
- *-------------------------------------------------------------------------
- */
-#ifndef NODERESULTCACHE_H
-#define NODERESULTCACHE_H
-
-#include "access/parallel.h"
-#include "nodes/execnodes.h"
-
-extern ResultCacheState *ExecInitResultCache(ResultCache *node, EState *estate, int eflags);
-extern void ExecEndResultCache(ResultCacheState *node);
-extern void ExecReScanResultCache(ResultCacheState *node);
-extern double ExecEstimateCacheEntryOverheadBytes(double ntuples);
-extern void ExecResultCacheEstimate(ResultCacheState *node,
- ParallelContext *pcxt);
-extern void ExecResultCacheInitializeDSM(ResultCacheState *node,
- ParallelContext *pcxt);
-extern void ExecResultCacheInitializeWorker(ResultCacheState *node,
- ParallelWorkerContext *pwcxt);
-extern void ExecResultCacheRetrieveInstrumentation(ResultCacheState *node);
-
-#endif /* NODERESULTCACHE_H */
Tuplestorestate *tuplestorestate;
} MaterialState;
-struct ResultCacheEntry;
-struct ResultCacheTuple;
-struct ResultCacheKey;
+struct MemoizeEntry;
+struct MemoizeTuple;
+struct MemoizeKey;
-typedef struct ResultCacheInstrumentation
+typedef struct MemoizeInstrumentation
{
uint64 cache_hits; /* number of rescans where we've found the
* scan parameter values to be cached */
* able to free enough space to store the
* current scan's tuples. */
uint64 mem_peak; /* peak memory usage in bytes */
-} ResultCacheInstrumentation;
+} MemoizeInstrumentation;
/* ----------------
- * Shared memory container for per-worker resultcache information
+ * Shared memory container for per-worker memoize information
* ----------------
*/
-typedef struct SharedResultCacheInfo
+typedef struct SharedMemoizeInfo
{
int num_workers;
- ResultCacheInstrumentation sinstrument[FLEXIBLE_ARRAY_MEMBER];
-} SharedResultCacheInfo;
+ MemoizeInstrumentation sinstrument[FLEXIBLE_ARRAY_MEMBER];
+} SharedMemoizeInfo;
/* ----------------
- * ResultCacheState information
+ * MemoizeState information
*
- * resultcache nodes are used to cache recent and commonly seen results
- * from a parameterized scan.
+ * memoize nodes are used to cache recent and commonly seen results from
+ * a parameterized scan.
* ----------------
*/
-typedef struct ResultCacheState
+typedef struct MemoizeState
{
ScanState ss; /* its first field is NodeTag */
- int rc_status; /* value of ExecResultCache state machine */
+ int mstatus; /* value of ExecMemoize state machine */
int nkeys; /* number of cache keys */
- struct resultcache_hash *hashtable; /* hash table for cache entries */
+ struct memoize_hash *hashtable; /* hash table for cache entries */
TupleDesc hashkeydesc; /* tuple descriptor for cache keys */
TupleTableSlot *tableslot; /* min tuple slot for existing cache entries */
TupleTableSlot *probeslot; /* virtual slot used for hash lookups */
uint64 mem_limit; /* memory limit in bytes for the cache */
MemoryContext tableContext; /* memory context to store cache data */
dlist_head lru_list; /* least recently used entry list */
- struct ResultCacheTuple *last_tuple; /* Used to point to the last tuple
- * returned during a cache hit and
- * the tuple we last stored when
- * populating the cache. */
- struct ResultCacheEntry *entry; /* the entry that 'last_tuple' belongs to
- * or NULL if 'last_tuple' is NULL. */
+ struct MemoizeTuple *last_tuple; /* Used to point to the last tuple
+ * returned during a cache hit and the
+ * tuple we last stored when
+ * populating the cache. */
+ struct MemoizeEntry *entry; /* the entry that 'last_tuple' belongs to or
+ * NULL if 'last_tuple' is NULL. */
bool singlerow; /* true if the cache entry is to be marked as
* complete after caching the first tuple. */
- ResultCacheInstrumentation stats; /* execution statistics */
- SharedResultCacheInfo *shared_info; /* statistics for parallel workers */
-} ResultCacheState;
+ MemoizeInstrumentation stats; /* execution statistics */
+ SharedMemoizeInfo *shared_info; /* statistics for parallel workers */
+} MemoizeState;
/* ----------------
* When performing sorting by multiple keys, it's possible that the input
T_MergeJoin,
T_HashJoin,
T_Material,
- T_ResultCache,
+ T_Memoize,
T_Sort,
T_IncrementalSort,
T_Group,
T_MergeJoinState,
T_HashJoinState,
T_MaterialState,
- T_ResultCacheState,
+ T_MemoizeState,
T_SortState,
T_IncrementalSortState,
T_GroupState,
T_MergeAppendPath,
T_GroupResultPath,
T_MaterialPath,
- T_ResultCachePath,
+ T_MemoizePath,
T_UniquePath,
T_GatherPath,
T_GatherMergePath,
} MaterialPath;
/*
- * ResultCachePath represents a ResultCache plan node, i.e., a cache that
- * caches tuples from parameterized paths to save the underlying node from
- * having to be rescanned for parameter values which are already cached.
+ * MemoizePath represents a Memoize plan node, i.e., a cache that caches
+ * tuples from parameterized paths to save the underlying node from having to
+ * be rescanned for parameter values which are already cached.
*/
-typedef struct ResultCachePath
+typedef struct MemoizePath
{
Path path;
Path *subpath; /* outerpath to cache tuples from */
uint32 est_entries; /* The maximum number of entries that the
* planner expects will fit in the cache, or 0
* if unknown */
-} ResultCachePath;
+} MemoizePath;
/*
* UniquePath represents elimination of distinct rows from the output of
Selectivity left_mcvfreq; /* left side's most common val's freq */
Selectivity right_mcvfreq; /* right side's most common val's freq */
- /* hash equality operator used for result cache, else InvalidOid */
+ /* hash equality operator used for memoize nodes, else InvalidOid */
Oid hasheqoperator;
} RestrictInfo;
} Material;
/* ----------------
- * result cache node
+ * memoize node
* ----------------
*/
-typedef struct ResultCache
+typedef struct Memoize
{
Plan plan;
uint32 est_entries; /* The maximum number of entries that the
* planner expects will fit in the cache, or 0
* if unknown */
-} ResultCache;
+} Memoize;
/* ----------------
* sort node
extern PGDLLIMPORT bool enable_hashagg;
extern PGDLLIMPORT bool enable_nestloop;
extern PGDLLIMPORT bool enable_material;
-extern PGDLLIMPORT bool enable_resultcache;
+extern PGDLLIMPORT bool enable_memoize;
extern PGDLLIMPORT bool enable_mergejoin;
extern PGDLLIMPORT bool enable_hashjoin;
extern PGDLLIMPORT bool enable_gathermerge;
PathTarget *target,
List *havingqual);
extern MaterialPath *create_material_path(RelOptInfo *rel, Path *subpath);
-extern ResultCachePath *create_resultcache_path(PlannerInfo *root,
- RelOptInfo *rel,
- Path *subpath,
- List *param_exprs,
- List *hash_operators,
- bool singlerow,
- double calls);
+extern MemoizePath *create_memoize_path(PlannerInfo *root,
+ RelOptInfo *rel,
+ Path *subpath,
+ List *param_exprs,
+ List *hash_operators,
+ bool singlerow,
+ double calls);
extern UniquePath *create_unique_path(PlannerInfo *root, RelOptInfo *rel,
Path *subpath, SpecialJoinInfo *sjinfo);
extern GatherPath *create_gather_path(PlannerInfo *root,
-- Make sure that generation of HashAggregate for uniqification purposes
-- does not lead to array overflow due to unexpected duplicate hash keys
-- see CAFeeJoKKu0u+A_A9R9316djW-YW3-+Gtgvy3ju655qRHR3jtdA@mail.gmail.com
-set enable_resultcache to off;
+set enable_memoize to off;
explain (costs off)
select 1 from tenk1
where (hundred, thousand) in (select twothousand, twothousand from onek);
-> Seq Scan on onek
(8 rows)
-reset enable_resultcache;
+reset enable_memoize;
--
-- Hash Aggregation Spill tests
--
--
set work_mem to '64kB';
set enable_mergejoin to off;
-set enable_resultcache to off;
+set enable_memoize to off;
explain (costs off)
select count(*) from tenk1 a, tenk1 b
where a.hundred = b.thousand and (b.fivethous % 10) < 10;
reset work_mem;
reset enable_mergejoin;
-reset enable_resultcache;
+reset enable_memoize;
--
-- regression test for 8.2 bug with improper re-ordering of left joins
--
Recheck Cond: (t1.hundred = hundred)
-> Bitmap Index Scan on tenk1_hundred
Index Cond: (hundred = t1.hundred)
- -> Result Cache
+ -> Memoize
Cache Key: t2.thousand
-> Index Scan using tenk1_unique2 on tenk1 t3
Index Cond: (unique2 = t2.thousand)
Recheck Cond: (t1.hundred = hundred)
-> Bitmap Index Scan on tenk1_hundred
Index Cond: (hundred = t1.hundred)
- -> Result Cache
+ -> Memoize
Cache Key: t2.thousand
-> Index Scan using tenk1_unique2 on tenk1 t3
Index Cond: (unique2 = t2.thousand)
-> Seq Scan on public.int8_tbl i8
Output: i8.q1, i8.q2
Filter: (i8.q2 = 123)
- -> Result Cache
+ -> Memoize
Output: (i8.q1), t2.f1
Cache Key: i8.q1
-> Limit
-> Seq Scan on public.int8_tbl i8
Output: i8.q1, i8.q2
Filter: (i8.q2 = 123)
- -> Result Cache
+ -> Memoize
Output: (i8.q1), t2.f1
Cache Key: i8.q1
-> Limit
Output: (i8.q1), t2.f1
-> Seq Scan on public.text_tbl t2
Output: i8.q1, t2.f1
- -> Result Cache
+ -> Memoize
Output: ((i8.q1)), (t2.f1)
Cache Key: (i8.q1), t2.f1
-> Limit
-> Seq Scan on public.text_tbl tt4
Output: tt4.f1
Filter: (tt4.f1 = 'foo'::text)
- -> Result Cache
+ -> Memoize
Output: ss1.c0
Cache Key: tt4.f1
-> Subquery Scan on ss1
Aggregate
-> Nested Loop
-> Seq Scan on tenk1 a
- -> Result Cache
+ -> Memoize
Cache Key: a.two
-> Function Scan on generate_series g
(6 rows)
Aggregate
-> Nested Loop
-> Seq Scan on tenk1 a
- -> Result Cache
+ -> Memoize
Cache Key: a.two
-> Function Scan on generate_series g
(6 rows)
Aggregate
-> Nested Loop
-> Seq Scan on tenk1 a
- -> Result Cache
+ -> Memoize
Cache Key: a.two
-> Function Scan on generate_series g
(6 rows)
-> Nested Loop
-> Index Only Scan using tenk1_unique1 on tenk1 a
-> Values Scan on "*VALUES*"
- -> Result Cache
+ -> Memoize
Cache Key: "*VALUES*".column1
-> Index Only Scan using tenk1_unique2 on tenk1 b
Index Cond: (unique2 = "*VALUES*".column1)
--- Perform tests on the Result Cache node.
--- The cache hits/misses/evictions from the Result Cache node can vary between
+-- Perform tests on the Memoize node.
+-- The cache hits/misses/evictions from the Memoize node can vary between
-- machines. Let's just replace the number with an 'N'. In order to allow us
-- to perform validation when the measure was zero, we replace a zero value
-- with "Zero". All other numbers are replaced with 'N'.
-create function explain_resultcache(query text, hide_hitmiss bool) returns setof text
+create function explain_memoize(query text, hide_hitmiss bool) returns setof text
language plpgsql as
$$
declare
end loop;
end;
$$;
--- Ensure we get a result cache on the inner side of the nested loop
+-- Ensure we get a memoize node on the inner side of the nested loop
SET enable_hashjoin TO off;
SET enable_bitmapscan TO off;
-SELECT explain_resultcache('
+SELECT explain_memoize('
SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
WHERE t2.unique1 < 1000;', false);
- explain_resultcache
+ explain_memoize
-------------------------------------------------------------------------------------------
Aggregate (actual rows=1 loops=N)
-> Nested Loop (actual rows=1000 loops=N)
-> Seq Scan on tenk1 t2 (actual rows=1000 loops=N)
Filter: (unique1 < 1000)
Rows Removed by Filter: 9000
- -> Result Cache (actual rows=1 loops=N)
+ -> Memoize (actual rows=1 loops=N)
Cache Key: t2.twenty
Hits: 980 Misses: 20 Evictions: Zero Overflows: 0 Memory Usage: NkB
-> Index Only Scan using tenk1_unique1 on tenk1 t1 (actual rows=1 loops=N)
(1 row)
-- Try with LATERAL joins
-SELECT explain_resultcache('
+SELECT explain_memoize('
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;', false);
- explain_resultcache
+ explain_memoize
-------------------------------------------------------------------------------------------
Aggregate (actual rows=1 loops=N)
-> Nested Loop (actual rows=1000 loops=N)
-> Seq Scan on tenk1 t1 (actual rows=1000 loops=N)
Filter: (unique1 < 1000)
Rows Removed by Filter: 9000
- -> Result Cache (actual rows=1 loops=N)
+ -> Memoize (actual rows=1 loops=N)
Cache Key: t1.twenty
Hits: 980 Misses: 20 Evictions: Zero Overflows: 0 Memory Usage: NkB
-> Index Only Scan using tenk1_unique1 on tenk1 t2 (actual rows=1 loops=N)
-- Ensure we get some evictions. We're unable to validate the hits and misses
-- here as the number of entries that fit in the cache at once will vary
-- between different machines.
-SELECT explain_resultcache('
+SELECT explain_memoize('
SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.thousand
WHERE t2.unique1 < 1200;', true);
- explain_resultcache
+ explain_memoize
-------------------------------------------------------------------------------------------
Aggregate (actual rows=1 loops=N)
-> Nested Loop (actual rows=1200 loops=N)
-> Seq Scan on tenk1 t2 (actual rows=1200 loops=N)
Filter: (unique1 < 1200)
Rows Removed by Filter: 8800
- -> Result Cache (actual rows=1 loops=N)
+ -> Memoize (actual rows=1 loops=N)
Cache Key: t2.thousand
Hits: N Misses: N Evictions: N Overflows: 0 Memory Usage: NkB
-> Index Only Scan using tenk1_unique1 on tenk1 t1 (actual rows=1 loops=N)
RESET work_mem;
RESET enable_bitmapscan;
RESET enable_hashjoin;
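The Hits/Misses/Evictions/Overflows/Memory Usage line shown above is EXPLAIN ANALYZE instrumentation that the explain_memoize wrapper normalizes for stable output. Outside the wrapper it can be inspected directly; a sketch, with figures that will differ per run and machine:

    EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF)
    SELECT COUNT(*), AVG(t1.unique1) FROM tenk1 t1
    INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
    WHERE t2.unique1 < 1000;
    -- The Memoize node reports a cache statistics line such as:
    --   Hits: 980  Misses: 20  Evictions: 0  Overflows: 0  Memory Usage: ...kB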
--- Test parallel plans with Result Cache.
+-- Test parallel plans with Memoize
SET min_parallel_table_scan_size TO 0;
SET parallel_setup_cost TO 0;
SET parallel_tuple_cost TO 0;
Recheck Cond: (unique1 < 1000)
-> Bitmap Index Scan on tenk1_unique1
Index Cond: (unique1 < 1000)
- -> Result Cache
+ -> Memoize
Cache Key: t1.twenty
-> Index Only Scan using tenk1_unique1 on tenk1 t2
Index Cond: (unique1 = t1.twenty)
create index ab_a3_b3_a_idx on ab_a3_b3 (a);
set enable_hashjoin = 0;
set enable_mergejoin = 0;
-set enable_resultcache = 0;
+set enable_memoize = 0;
select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(0, 0, 1)');
explain_parallel_append
--------------------------------------------------------------------------------------------------------
reset enable_hashjoin;
reset enable_mergejoin;
-reset enable_resultcache;
+reset enable_memoize;
reset parallel_setup_cost;
reset parallel_tuple_cost;
reset min_parallel_table_scan_size;
-> Nested Loop
-> Seq Scan on onek o
Filter: (ten = 1)
- -> Result Cache
+ -> Memoize
Cache Key: o.four
-> CTE Scan on x
CTE x
enable_indexonlyscan | on
enable_indexscan | on
enable_material | on
+ enable_memoize | on
enable_mergejoin | on
enable_nestloop | on
enable_parallel_append | on
enable_partition_pruning | on
enable_partitionwise_aggregate | off
enable_partitionwise_join | off
- enable_resultcache | on
enable_seqscan | on
enable_sort | on
enable_tidscan | on
# ----------
# Another group of parallel tests
# ----------
-test: partition_join partition_prune reloptions hash_part indexing partition_aggregate partition_info tuplesort explain compression resultcache
+test: partition_join partition_prune reloptions hash_part indexing partition_aggregate partition_info tuplesort explain compression memoize
# event triggers cannot run concurrently with any test that runs DDL
# oidjoins is read-only, though, and should run late for best coverage
-- Make sure that generation of HashAggregate for uniqification purposes
-- does not lead to array overflow due to unexpected duplicate hash keys
-- see CAFeeJoKKu0u+A_A9R9316djW-YW3-+Gtgvy3ju655qRHR3jtdA@mail.gmail.com
-set enable_resultcache to off;
+set enable_memoize to off;
explain (costs off)
select 1 from tenk1
where (hundred, thousand) in (select twothousand, twothousand from onek);
-reset enable_resultcache;
+reset enable_memoize;
--
-- Hash Aggregation Spill tests
set work_mem to '64kB';
set enable_mergejoin to off;
-set enable_resultcache to off;
+set enable_memoize to off;
explain (costs off)
select count(*) from tenk1 a, tenk1 b
reset work_mem;
reset enable_mergejoin;
-reset enable_resultcache;
+reset enable_memoize;
--
-- regression test for 8.2 bug with improper re-ordering of left joins
--- Perform tests on the Result Cache node.
+-- Perform tests on the Memoize node.
--- The cache hits/misses/evictions from the Result Cache node can vary between
+-- The cache hits/misses/evictions from the Memoize node can vary between
-- machines. Let's just replace the number with an 'N'. In order to allow us
-- to perform validation when the measure was zero, we replace a zero value
-- with "Zero". All other numbers are replaced with 'N'.
-create function explain_resultcache(query text, hide_hitmiss bool) returns setof text
+create function explain_memoize(query text, hide_hitmiss bool) returns setof text
language plpgsql as
$$
declare
end;
$$;
--- Ensure we get a result cache on the inner side of the nested loop
+-- Ensure we get a memoize node on the inner side of the nested loop
SET enable_hashjoin TO off;
SET enable_bitmapscan TO off;
-SELECT explain_resultcache('
+SELECT explain_memoize('
SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
WHERE t2.unique1 < 1000;', false);
WHERE t2.unique1 < 1000;
-- Try with LATERAL joins
-SELECT explain_resultcache('
+SELECT explain_memoize('
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;', false);
-- Ensure we get some evictions. We're unable to validate the hits and misses
-- here as the number of entries that fit in the cache at once will vary
-- between different machines.
-SELECT explain_resultcache('
+SELECT explain_memoize('
SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.thousand
WHERE t2.unique1 < 1200;', true);
RESET enable_bitmapscan;
RESET enable_hashjoin;
--- Test parallel plans with Result Cache.
+-- Test parallel plans with Memoize
SET min_parallel_table_scan_size TO 0;
SET parallel_setup_cost TO 0;
SET parallel_tuple_cost TO 0;
set enable_hashjoin = 0;
set enable_mergejoin = 0;
-set enable_resultcache = 0;
+set enable_memoize = 0;
select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(0, 0, 1)');
reset enable_hashjoin;
reset enable_mergejoin;
-reset enable_resultcache;
+reset enable_memoize;
reset parallel_setup_cost;
reset parallel_tuple_cost;
reset min_parallel_table_scan_size;
MaterialPath
MaterialState
MdfdVec
+Memoize
+MemoizeEntry
+MemoizeInstrumentation
+MemoizeKey
+MemoizePath
+MemoizeState
+MemoizeTuple
MemoryContext
MemoryContextCallback
MemoryContextCallbackFunction
RestorePass
RestrictInfo
Result
-ResultCache
-ResultCacheEntry
-ResultCacheInstrumentation
-ResultCacheKey
-ResultCachePath
-ResultCacheState
-ResultCacheTuple
ResultRelInfo
ResultState
ReturnSetInfo
SharedInvalSnapshotMsg
SharedInvalidationMessage
SharedJitInstrumentation
+SharedMemoizeInfo
SharedRecordTableEntry
SharedRecordTableKey
SharedRecordTypmodRegistry
-SharedResultCacheInfo
SharedSortInfo
SharedTuplestore
SharedTuplestoreAccessor
mbdisplaylen_converter
mblen_converter
mbstr_verifier
+memoize_hash
+memoize_iterator
metastring
mix_data_t
mixedStruct
rendezvousHashEntry
replace_rte_variables_callback
replace_rte_variables_context
-resultcache_hash
-resultcache_iterator
ret_type
rewind_source
rewrite_event