| author | David Rowley | 2021-07-14 00:43:58 +0000 |
|---|---|---|
| committer | David Rowley | 2021-07-14 00:43:58 +0000 |
| commit | 83f4fcc65503c5d4e5d5eefc8e7a70d3c9a6496f | |
| tree | 23c0962d1c255e8e6ca5cc29a0d1fe68e2d1223d /src/test/regress | |
| parent | d68a00391214be2020e49be4b55f761d47a5c229 | |
Change the name of the Result Cache node to Memoize
"Result Cache" was never a great name for this node, but nobody managed
to come up with another name that anyone liked enough. That was until
David Johnston mentioned "Node Memoization", which Tom Lane revised to
just "Memoize". People seem to like "Memoize", so let's do the rename.
Reviewed-by: Justin Pryzby
Discussion: https://postgr.es/m/20210708165145.GG1176@momjian.us
Backpatch-through: 14, where Result Cache was introduced
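In user-visible terms the rename touches two names: the planner GUC `enable_resultcache` becomes `enable_memoize`, and EXPLAIN now labels the node "Memoize" instead of "Result Cache". A minimal sketch of both, assuming a database with the regression suite's tenk1 table loaded (the exact plan shape varies with costs and settings):

```sql
-- The renamed planner GUC (formerly enable_resultcache); on by default.
SET enable_memoize TO on;

-- With memoization enabled, the planner may cache the inner side of a
-- parameterized nested loop; the node now shows as "Memoize".
EXPLAIN (COSTS OFF)
SELECT COUNT(*)
FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
WHERE t2.unique1 < 1000;

-- Expected shape (abridged), per the regression output below:
--   ->  Memoize                -- previously: ->  Result Cache
--         Cache Key: t2.twenty
--         ->  Index Only Scan using tenk1_unique1 on tenk1 t1
```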
Diffstat (limited to 'src/test/regress')
| -rw-r--r-- | src/test/regress/expected/aggregates.out | 4 |
|---|---|---|
| -rw-r--r-- | src/test/regress/expected/join.out | 24 |
| -rw-r--r-- | src/test/regress/expected/memoize.out (renamed from src/test/regress/expected/resultcache.out) | 30 |
| -rw-r--r-- | src/test/regress/expected/partition_prune.out | 4 |
| -rw-r--r-- | src/test/regress/expected/subselect.out | 2 |
| -rw-r--r-- | src/test/regress/expected/sysviews.out | 2 |
| -rw-r--r-- | src/test/regress/parallel_schedule | 2 |
| -rw-r--r-- | src/test/regress/sql/aggregates.sql | 4 |
| -rw-r--r-- | src/test/regress/sql/join.sql | 4 |
| -rw-r--r-- | src/test/regress/sql/memoize.sql (renamed from src/test/regress/sql/resultcache.sql) | 16 |
| -rw-r--r-- | src/test/regress/sql/partition_prune.sql | 4 |
11 files changed, 48 insertions, 48 deletions
diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out
index ca06d41dd04..23b112b2af8 100644
--- a/src/test/regress/expected/aggregates.out
+++ b/src/test/regress/expected/aggregates.out
@@ -2584,7 +2584,7 @@ select v||'a', case when v||'a' = 'aa' then 1 else 0 end, count(*)
 -- Make sure that generation of HashAggregate for uniqification purposes
 -- does not lead to array overflow due to unexpected duplicate hash keys
 -- see CAFeeJoKKu0u+A_A9R9316djW-YW3-+Gtgvy3ju655qRHR3jtdA@mail.gmail.com
-set enable_resultcache to off;
+set enable_memoize to off;
 explain (costs off)
   select 1 from tenk1
    where (hundred, thousand) in (select twothousand, twothousand from onek);
@@ -2600,7 +2600,7 @@ explain (costs off)
          ->  Seq Scan on onek
 (8 rows)
 
-reset enable_resultcache;
+reset enable_memoize;
 --
 -- Hash Aggregation Spill tests
 --
diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out
index 19cd0569876..f3589d0dbb0 100644
--- a/src/test/regress/expected/join.out
+++ b/src/test/regress/expected/join.out
@@ -2536,7 +2536,7 @@ reset enable_nestloop;
 --
 set work_mem to '64kB';
 set enable_mergejoin to off;
-set enable_resultcache to off;
+set enable_memoize to off;
 explain (costs off)
 select count(*) from tenk1 a, tenk1 b
   where a.hundred = b.thousand and (b.fivethous % 10) < 10;
@@ -2560,7 +2560,7 @@ select count(*) from tenk1 a, tenk1 b
 
 reset work_mem;
 reset enable_mergejoin;
-reset enable_resultcache;
+reset enable_memoize;
 --
 -- regression test for 8.2 bug with improper re-ordering of left joins
 --
@@ -3684,7 +3684,7 @@ where t1.unique1 = 1;
                Recheck Cond: (t1.hundred = hundred)
                ->  Bitmap Index Scan on tenk1_hundred
                      Index Cond: (hundred = t1.hundred)
-         ->  Result Cache
+         ->  Memoize
                Cache Key: t2.thousand
                ->  Index Scan using tenk1_unique2 on tenk1 t3
                      Index Cond: (unique2 = t2.thousand)
@@ -3706,7 +3706,7 @@ where t1.unique1 = 1;
                Recheck Cond: (t1.hundred = hundred)
                ->  Bitmap Index Scan on tenk1_hundred
                      Index Cond: (hundred = t1.hundred)
-         ->  Result Cache
+         ->  Memoize
                Cache Key: t2.thousand
                ->  Index Scan using tenk1_unique2 on tenk1 t3
                      Index Cond: (unique2 = t2.thousand)
@@ -4235,7 +4235,7 @@ where t1.f1 = ss.f1;
               ->  Seq Scan on public.int8_tbl i8
                     Output: i8.q1, i8.q2
                     Filter: (i8.q2 = 123)
-              ->  Result Cache
+              ->  Memoize
                     Output: (i8.q1), t2.f1
                     Cache Key: i8.q1
                     ->  Limit
@@ -4279,14 +4279,14 @@ where t1.f1 = ss2.f1;
               ->  Seq Scan on public.int8_tbl i8
                     Output: i8.q1, i8.q2
                     Filter: (i8.q2 = 123)
-              ->  Result Cache
+              ->  Memoize
                     Output: (i8.q1), t2.f1
                     Cache Key: i8.q1
                     ->  Limit
                           Output: (i8.q1), t2.f1
                           ->  Seq Scan on public.text_tbl t2
                                 Output: i8.q1, t2.f1
-        ->  Result Cache
+        ->  Memoize
               Output: ((i8.q1)), (t2.f1)
               Cache Key: (i8.q1), t2.f1
              ->  Limit
@@ -4339,7 +4339,7 @@ where tt1.f1 = ss1.c0;
         ->  Seq Scan on public.text_tbl tt4
               Output: tt4.f1
               Filter: (tt4.f1 = 'foo'::text)
-        ->  Result Cache
+        ->  Memoize
               Output: ss1.c0
               Cache Key: tt4.f1
               ->  Subquery Scan on ss1
@@ -5028,7 +5028,7 @@ explain (costs off)
  Aggregate
    ->  Nested Loop
          ->  Seq Scan on tenk1 a
-         ->  Result Cache
+         ->  Memoize
                Cache Key: a.two
                ->  Function Scan on generate_series g
 (6 rows)
@@ -5040,7 +5040,7 @@ explain (costs off)
  Aggregate
    ->  Nested Loop
          ->  Seq Scan on tenk1 a
-         ->  Result Cache
+         ->  Memoize
                Cache Key: a.two
                ->  Function Scan on generate_series g
 (6 rows)
@@ -5053,7 +5053,7 @@ explain (costs off)
  Aggregate
    ->  Nested Loop
          ->  Seq Scan on tenk1 a
-         ->  Result Cache
+         ->  Memoize
                Cache Key: a.two
                ->  Function Scan on generate_series g
 (6 rows)
@@ -5115,7 +5115,7 @@ explain (costs off)
    ->  Nested Loop
          ->  Index Only Scan using tenk1_unique1 on tenk1 a
          ->  Values Scan on "*VALUES*"
-   ->  Result Cache
+   ->  Memoize
          Cache Key: "*VALUES*".column1
          ->  Index Only Scan using tenk1_unique2 on tenk1 b
                Index Cond: (unique2 = "*VALUES*".column1)
diff --git a/src/test/regress/expected/resultcache.out b/src/test/regress/expected/memoize.out
index 5b5dd6838e0..9a025c4a7ab 100644
--- a/src/test/regress/expected/resultcache.out
+++ b/src/test/regress/expected/memoize.out
@@ -1,9 +1,9 @@
--- Perform tests on the Result Cache node.
--- The cache hits/misses/evictions from the Result Cache node can vary between
+-- Perform tests on the Memoize node.
+-- The cache hits/misses/evictions from the Memoize node can vary between
 -- machines.  Let's just replace the number with an 'N'.  In order to allow us
 -- to perform validation when the measure was zero, we replace a zero value
 -- with "Zero".  All other numbers are replaced with 'N'.
-create function explain_resultcache(query text, hide_hitmiss bool) returns setof text
+create function explain_memoize(query text, hide_hitmiss bool) returns setof text
 language plpgsql as
 $$
 declare
@@ -28,21 +28,21 @@ begin
     end loop;
 end;
 $$;
--- Ensure we get a result cache on the inner side of the nested loop
+-- Ensure we get a memoize node on the inner side of the nested loop
 SET enable_hashjoin TO off;
 SET enable_bitmapscan TO off;
-SELECT explain_resultcache('
+SELECT explain_memoize('
 SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
 INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
 WHERE t2.unique1 < 1000;', false);
-                                    explain_resultcache
+                                      explain_memoize
 -------------------------------------------------------------------------------------------
  Aggregate (actual rows=1 loops=N)
    ->  Nested Loop (actual rows=1000 loops=N)
          ->  Seq Scan on tenk1 t2 (actual rows=1000 loops=N)
                Filter: (unique1 < 1000)
                Rows Removed by Filter: 9000
-         ->  Result Cache (actual rows=1 loops=N)
+         ->  Memoize (actual rows=1 loops=N)
                Cache Key: t2.twenty
                Hits: 980  Misses: 20  Evictions: Zero  Overflows: 0  Memory Usage: NkB
                ->  Index Only Scan using tenk1_unique1 on tenk1 t1 (actual rows=1 loops=N)
@@ -60,18 +60,18 @@ WHERE t2.unique1 < 1000;
 (1 row)
 
 -- Try with LATERAL joins
-SELECT explain_resultcache('
+SELECT explain_memoize('
 SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
 LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
 WHERE t1.unique1 < 1000;', false);
-                                    explain_resultcache
+                                      explain_memoize
 -------------------------------------------------------------------------------------------
  Aggregate (actual rows=1 loops=N)
    ->  Nested Loop (actual rows=1000 loops=N)
          ->  Seq Scan on tenk1 t1 (actual rows=1000 loops=N)
                Filter: (unique1 < 1000)
                Rows Removed by Filter: 9000
-         ->  Result Cache (actual rows=1 loops=N)
+         ->  Memoize (actual rows=1 loops=N)
                Cache Key: t1.twenty
                Hits: 980  Misses: 20  Evictions: Zero  Overflows: 0  Memory Usage: NkB
                ->  Index Only Scan using tenk1_unique1 on tenk1 t2 (actual rows=1 loops=N)
@@ -94,18 +94,18 @@ SET enable_mergejoin TO off;
 -- Ensure we get some evictions.  We're unable to validate the hits and misses
 -- here as the number of entries that fit in the cache at once will vary
 -- between different machines.
-SELECT explain_resultcache('
+SELECT explain_memoize('
 SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
 INNER JOIN tenk1 t2 ON t1.unique1 = t2.thousand
 WHERE t2.unique1 < 1200;', true);
-                                    explain_resultcache
+                                      explain_memoize
 -------------------------------------------------------------------------------------------
  Aggregate (actual rows=1 loops=N)
    ->  Nested Loop (actual rows=1200 loops=N)
          ->  Seq Scan on tenk1 t2 (actual rows=1200 loops=N)
                Filter: (unique1 < 1200)
                Rows Removed by Filter: 8800
-         ->  Result Cache (actual rows=1 loops=N)
+         ->  Memoize (actual rows=1 loops=N)
                Cache Key: t2.thousand
                Hits: N Misses: N Evictions: N Overflows: 0 Memory Usage: NkB
                ->  Index Only Scan using tenk1_unique1 on tenk1 t1 (actual rows=1 loops=N)
@@ -117,7 +117,7 @@ RESET enable_mergejoin;
 RESET work_mem;
 RESET enable_bitmapscan;
 RESET enable_hashjoin;
--- Test parallel plans with Result Cache.
+-- Test parallel plans with Memoize
 SET min_parallel_table_scan_size TO 0;
 SET parallel_setup_cost TO 0;
 SET parallel_tuple_cost TO 0;
@@ -138,7 +138,7 @@ WHERE t1.unique1 < 1000;
                Recheck Cond: (unique1 < 1000)
                ->  Bitmap Index Scan on tenk1_unique1
                      Index Cond: (unique1 < 1000)
-         ->  Result Cache
+         ->  Memoize
               Cache Key: t1.twenty
               ->  Index Only Scan using tenk1_unique1 on tenk1 t2
                     Index Cond: (unique1 = t1.twenty)
diff --git a/src/test/regress/expected/partition_prune.out b/src/test/regress/expected/partition_prune.out
index 2c62e4a7a60..7555764c779 100644
--- a/src/test/regress/expected/partition_prune.out
+++ b/src/test/regress/expected/partition_prune.out
@@ -2085,7 +2085,7 @@ create index ab_a3_b2_a_idx on ab_a3_b2 (a);
 create index ab_a3_b3_a_idx on ab_a3_b3 (a);
 set enable_hashjoin = 0;
 set enable_mergejoin = 0;
-set enable_resultcache = 0;
+set enable_memoize = 0;
 select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(0, 0, 1)');
                                         explain_parallel_append
 --------------------------------------------------------------------------------------------------------
@@ -2254,7 +2254,7 @@ select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on
 
 reset enable_hashjoin;
 reset enable_mergejoin;
-reset enable_resultcache;
+reset enable_memoize;
 reset parallel_setup_cost;
 reset parallel_tuple_cost;
 reset min_parallel_table_scan_size;
diff --git a/src/test/regress/expected/subselect.out b/src/test/regress/expected/subselect.out
index c7986fb7fcc..30615dd6bc7 100644
--- a/src/test/regress/expected/subselect.out
+++ b/src/test/regress/expected/subselect.out
@@ -1097,7 +1097,7 @@ where o.ten = 1;
    ->  Nested Loop
          ->  Seq Scan on onek o
                Filter: (ten = 1)
-         ->  Result Cache
+         ->  Memoize
               Cache Key: o.four
               ->  CTE Scan on x
                     CTE x
diff --git a/src/test/regress/expected/sysviews.out b/src/test/regress/expected/sysviews.out
index 0bb558d93c9..6e54f3e15e2 100644
--- a/src/test/regress/expected/sysviews.out
+++ b/src/test/regress/expected/sysviews.out
@@ -104,6 +104,7 @@ select name, setting from pg_settings where name like 'enable%';
  enable_indexonlyscan           | on
  enable_indexscan               | on
  enable_material                | on
+ enable_memoize                 | on
  enable_mergejoin               | on
  enable_nestloop                | on
  enable_parallel_append         | on
@@ -111,7 +112,6 @@ select name, setting from pg_settings where name like 'enable%';
  enable_partition_pruning       | on
  enable_partitionwise_aggregate | off
  enable_partitionwise_join      | off
- enable_resultcache             | on
  enable_seqscan                 | on
  enable_sort                    | on
  enable_tidscan                 | on
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index 22b0d3584da..7be89178f0f 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -120,7 +120,7 @@ test: plancache limit plpgsql copy2 temp domain rangefuncs prepare conversion tr
 # ----------
 # Another group of parallel tests
 # ----------
-test: partition_join partition_prune reloptions hash_part indexing partition_aggregate partition_info tuplesort explain compression resultcache
+test: partition_join partition_prune reloptions hash_part indexing partition_aggregate partition_info tuplesort explain compression memoize
 
 # event triggers cannot run concurrently with any test that runs DDL
 # oidjoins is read-only, though, and should run late for best coverage
diff --git a/src/test/regress/sql/aggregates.sql b/src/test/regress/sql/aggregates.sql
index eb80a2fe063..ed2d6b3bdfc 100644
--- a/src/test/regress/sql/aggregates.sql
+++ b/src/test/regress/sql/aggregates.sql
@@ -1098,11 +1098,11 @@ select v||'a', case when v||'a' = 'aa' then 1 else 0 end, count(*)
 -- Make sure that generation of HashAggregate for uniqification purposes
 -- does not lead to array overflow due to unexpected duplicate hash keys
 -- see CAFeeJoKKu0u+A_A9R9316djW-YW3-+Gtgvy3ju655qRHR3jtdA@mail.gmail.com
-set enable_resultcache to off;
+set enable_memoize to off;
 explain (costs off)
   select 1 from tenk1
    where (hundred, thousand) in (select twothousand, twothousand from onek);
-reset enable_resultcache;
+reset enable_memoize;
 
 --
 -- Hash Aggregation Spill tests
diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql
index 2a0e2d12d83..cb1c2309140 100644
--- a/src/test/regress/sql/join.sql
+++ b/src/test/regress/sql/join.sql
@@ -550,7 +550,7 @@ reset enable_nestloop;
 
 set work_mem to '64kB';
 set enable_mergejoin to off;
-set enable_resultcache to off;
+set enable_memoize to off;
 
 explain (costs off)
 select count(*) from tenk1 a, tenk1 b
@@ -560,7 +560,7 @@ select count(*) from tenk1 a, tenk1 b
 
 reset work_mem;
 reset enable_mergejoin;
-reset enable_resultcache;
+reset enable_memoize;
 
 --
 -- regression test for 8.2 bug with improper re-ordering of left joins
diff --git a/src/test/regress/sql/resultcache.sql b/src/test/regress/sql/memoize.sql
index 43a70d56a51..548cc3eee30 100644
--- a/src/test/regress/sql/resultcache.sql
+++ b/src/test/regress/sql/memoize.sql
@@ -1,10 +1,10 @@
--- Perform tests on the Result Cache node.
+-- Perform tests on the Memoize node.
 
--- The cache hits/misses/evictions from the Result Cache node can vary between
+-- The cache hits/misses/evictions from the Memoize node can vary between
 -- machines.  Let's just replace the number with an 'N'.  In order to allow us
 -- to perform validation when the measure was zero, we replace a zero value
 -- with "Zero".  All other numbers are replaced with 'N'.
-create function explain_resultcache(query text, hide_hitmiss bool) returns setof text
+create function explain_memoize(query text, hide_hitmiss bool) returns setof text
 language plpgsql as
 $$
 declare
@@ -30,11 +30,11 @@ begin
 end;
 $$;
 
--- Ensure we get a result cache on the inner side of the nested loop
+-- Ensure we get a memoize node on the inner side of the nested loop
 SET enable_hashjoin TO off;
 SET enable_bitmapscan TO off;
 
-SELECT explain_resultcache('
+SELECT explain_memoize('
 SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
 INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
 WHERE t2.unique1 < 1000;', false);
@@ -45,7 +45,7 @@ INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
 WHERE t2.unique1 < 1000;
 
 -- Try with LATERAL joins
-SELECT explain_resultcache('
+SELECT explain_memoize('
 SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
 LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
 WHERE t1.unique1 < 1000;', false);
@@ -61,7 +61,7 @@ SET enable_mergejoin TO off;
 -- Ensure we get some evictions.  We're unable to validate the hits and misses
 -- here as the number of entries that fit in the cache at once will vary
 -- between different machines.
-SELECT explain_resultcache('
+SELECT explain_memoize('
 SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
 INNER JOIN tenk1 t2 ON t1.unique1 = t2.thousand
 WHERE t2.unique1 < 1200;', true);
@@ -70,7 +70,7 @@ RESET work_mem;
 RESET enable_bitmapscan;
 RESET enable_hashjoin;
 
--- Test parallel plans with Result Cache.
+-- Test parallel plans with Memoize
 SET min_parallel_table_scan_size TO 0;
 SET parallel_setup_cost TO 0;
 SET parallel_tuple_cost TO 0;
diff --git a/src/test/regress/sql/partition_prune.sql b/src/test/regress/sql/partition_prune.sql
index 16c8dc5f1fa..d70bd8610cb 100644
--- a/src/test/regress/sql/partition_prune.sql
+++ b/src/test/regress/sql/partition_prune.sql
@@ -515,7 +515,7 @@ create index ab_a3_b3_a_idx on ab_a3_b3 (a);
 
 set enable_hashjoin = 0;
 set enable_mergejoin = 0;
-set enable_resultcache = 0;
+set enable_memoize = 0;
 
 select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(0, 0, 1)');
 
@@ -534,7 +534,7 @@ select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on
 
 reset enable_hashjoin;
 reset enable_mergejoin;
-reset enable_resultcache;
+reset enable_memoize;
 reset parallel_setup_cost;
 reset parallel_tuple_cost;
 reset min_parallel_table_scan_size;
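As an aside on test technique: the `explain_memoize()` helper renamed above stabilizes `EXPLAIN (ANALYZE)` output by masking machine-dependent counters, so the expected files compare equal across machines. Below is an illustrative re-implementation of that masking idea, not the suite's exact function; the name `explain_memoize_sketch` and its single-argument signature are inventions for this sketch:

```sql
-- Illustrative sketch of the output-masking approach: run EXPLAIN ANALYZE
-- and rewrite unstable counters so the text is machine-independent.
create function explain_memoize_sketch(query text) returns setof text
language plpgsql as
$$
declare
    ln text;
begin
    for ln in
        execute format('explain (analyze, costs off, summary off, timing off) %s',
                       query)
    loop
        -- loop counts vary with parallelism: "loops=123" -> "loops=N"
        ln := regexp_replace(ln, 'loops=\d+', 'loops=N', 'g');
        -- keep zero distinguishable, as the suite's comments describe
        ln := regexp_replace(ln, '(Hits|Misses|Evictions|Overflows): 0\M',
                             '\1: Zero', 'g');
        ln := regexp_replace(ln, '(Hits|Misses|Evictions|Overflows): \d+',
                             '\1: N', 'g');
        -- cache size varies too: "Memory Usage: 17kB" -> "Memory Usage: NkB"
        ln := regexp_replace(ln, 'Memory Usage: \d+kB', 'Memory Usage: NkB');
        return next ln;
    end loop;
end;
$$;

-- Usage: SELECT * FROM explain_memoize_sketch('SELECT ... your join ...');
```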
