mirror of https://github.com/postgres/postgres
Revert b6002a796
This removes "Add Result Cache executor node". It seems that something weird is going on with the tracking of cache hits and misses as highlighted by many buildfarm animals. It's not yet clear what the problem is, as other parts of the plan indicate that the cache did work correctly; it's just the hits and misses that were being reported as 0. This is an especially bad time to have the buildfarm so broken, so reverting before too many more animals go red. Discussion: https://postgr.es/m/CAApHDvq_hydhfovm4=izgWs+C5HqEeRScjMbOgbpC-jRAeK3Yw@mail.gmail.com
pull/64/head
parent
b6002a796d
commit
28b3e3905c
File diff suppressed because it is too large
Load Diff
@@ -1,31 +0,0 @@ src/include/executor/nodeResultCache.h (deleted)
/*-------------------------------------------------------------------------
 *
 * nodeResultCache.h
 *	  Prototypes for the Result Cache executor node.
 *
 * Portions Copyright (c) 2021, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/executor/nodeResultCache.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef NODERESULTCACHE_H
#define NODERESULTCACHE_H

#include "nodes/execnodes.h"

/* Plan-tree setup/teardown and rescan entry points. */
extern ResultCacheState *ExecInitResultCache(ResultCache *node, EState *estate, int eflags);
extern void ExecEndResultCache(ResultCacheState *node);
extern void ExecReScanResultCache(ResultCacheState *node);

/* Planner helper: per-tuple cache-entry memory overhead estimate. */
extern double ExecEstimateCacheEntryOverheadBytes(double ntuples);

/* Parallel-query instrumentation support (DSM setup and collection). */
extern void ExecResultCacheEstimate(ResultCacheState *node,
									ParallelContext *pcxt);
extern void ExecResultCacheInitializeDSM(ResultCacheState *node,
										 ParallelContext *pcxt);
extern void ExecResultCacheInitializeWorker(ResultCacheState *node,
											ParallelWorkerContext *pwcxt);
extern void ExecResultCacheRetrieveInstrumentation(ResultCacheState *node);

#endif							/* NODERESULTCACHE_H */
@@ -1,159 +0,0 @@ src/test/regress/expected/resultcache.out (deleted)
-- Perform tests on the Result Cache node.
-- The cache hits/misses/evictions from the Result Cache node can vary between
-- machines.  Let's just replace the number with an 'N'.  In order to allow us
-- to perform validation when the measure was zero, we replace a zero value
-- with "Zero".  All other numbers are replaced with 'N'.
create function explain_resultcache(query text, hide_hitmiss bool) returns setof text
language plpgsql as
$$
declare
    ln text;
begin
    for ln in
        execute format('explain (analyze, costs off, summary off, timing off) %s',
            query)
    loop
        if hide_hitmiss = true then
            ln := regexp_replace(ln, 'Hits: 0', 'Hits: Zero');
            ln := regexp_replace(ln, 'Hits: \d+', 'Hits: N');
            ln := regexp_replace(ln, 'Misses: 0', 'Misses: Zero');
            ln := regexp_replace(ln, 'Misses: \d+', 'Misses: N');
        end if;
        ln := regexp_replace(ln, 'Evictions: 0', 'Evictions: Zero');
        ln := regexp_replace(ln, 'Evictions: \d+', 'Evictions: N');
        ln := regexp_replace(ln, 'Memory Usage: \d+', 'Memory Usage: N');
        return next ln;
    end loop;
end;
$$;
-- Ensure we get a result cache on the inner side of the nested loop
SET enable_hashjoin TO off;
SELECT explain_resultcache('
SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
WHERE t2.unique1 < 1000;', false);
                                    explain_resultcache
--------------------------------------------------------------------------------------------
 Aggregate (actual rows=1 loops=1)
   ->  Nested Loop (actual rows=1000 loops=1)
         ->  Bitmap Heap Scan on tenk1 t2 (actual rows=1000 loops=1)
               Recheck Cond: (unique1 < 1000)
               Heap Blocks: exact=333
               ->  Bitmap Index Scan on tenk1_unique1 (actual rows=1000 loops=1)
                     Index Cond: (unique1 < 1000)
         ->  Result Cache (actual rows=1 loops=1000)
               Cache Key: t2.twenty
               Hits: 980 Misses: 20 Evictions: Zero Overflows: 0 Memory Usage: NkB
               ->  Index Only Scan using tenk1_unique1 on tenk1 t1 (actual rows=1 loops=20)
                     Index Cond: (unique1 = t2.twenty)
                     Heap Fetches: 0
(13 rows)

-- And check we get the expected results.
SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
WHERE t2.unique1 < 1000;
 count |        avg
-------+--------------------
  1000 | 9.5000000000000000
(1 row)

-- Try with LATERAL joins
SELECT explain_resultcache('
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;', false);
                                    explain_resultcache
--------------------------------------------------------------------------------------------
 Aggregate (actual rows=1 loops=1)
   ->  Nested Loop (actual rows=1000 loops=1)
         ->  Bitmap Heap Scan on tenk1 t1 (actual rows=1000 loops=1)
               Recheck Cond: (unique1 < 1000)
               Heap Blocks: exact=333
               ->  Bitmap Index Scan on tenk1_unique1 (actual rows=1000 loops=1)
                     Index Cond: (unique1 < 1000)
         ->  Result Cache (actual rows=1 loops=1000)
               Cache Key: t1.twenty
               Hits: 980 Misses: 20 Evictions: Zero Overflows: 0 Memory Usage: NkB
               ->  Index Only Scan using tenk1_unique1 on tenk1 t2 (actual rows=1 loops=20)
                     Index Cond: (unique1 = t1.twenty)
                     Heap Fetches: 0
(13 rows)

-- And check we get the expected results.
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;
 count |        avg
-------+--------------------
  1000 | 9.5000000000000000
(1 row)

-- Reduce work_mem so that we see some cache evictions
SET work_mem TO '64kB';
SET enable_mergejoin TO off;
-- Ensure we get some evictions.  We're unable to validate the hits and misses
-- here as the number of entries that fit in the cache at once will vary
-- between different machines.
SELECT explain_resultcache('
SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.thousand
WHERE t2.unique1 < 800;', true);
                                     explain_resultcache
---------------------------------------------------------------------------------------------
 Aggregate (actual rows=1 loops=1)
   ->  Nested Loop (actual rows=800 loops=1)
         ->  Bitmap Heap Scan on tenk1 t2 (actual rows=800 loops=1)
               Recheck Cond: (unique1 < 800)
               Heap Blocks: exact=318
               ->  Bitmap Index Scan on tenk1_unique1 (actual rows=800 loops=1)
                     Index Cond: (unique1 < 800)
         ->  Result Cache (actual rows=1 loops=800)
               Cache Key: t2.thousand
               Hits: Zero Misses: N Evictions: N Overflows: 0 Memory Usage: NkB
               ->  Index Only Scan using tenk1_unique1 on tenk1 t1 (actual rows=1 loops=800)
                     Index Cond: (unique1 = t2.thousand)
                     Heap Fetches: 0
(13 rows)

RESET enable_mergejoin;
RESET work_mem;
RESET enable_hashjoin;
-- Test parallel plans with Result Cache.
SET min_parallel_table_scan_size TO 0;
SET parallel_setup_cost TO 0;
SET parallel_tuple_cost TO 0;
-- Ensure we get a parallel plan.
EXPLAIN (COSTS OFF)
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;
                                   QUERY PLAN
-------------------------------------------------------------------------------
 Finalize Aggregate
   ->  Gather
         Workers Planned: 2
         ->  Partial Aggregate
               ->  Nested Loop
                     ->  Parallel Bitmap Heap Scan on tenk1 t1
                           Recheck Cond: (unique1 < 1000)
                           ->  Bitmap Index Scan on tenk1_unique1
                                 Index Cond: (unique1 < 1000)
                     ->  Result Cache
                           Cache Key: t1.twenty
                           ->  Index Only Scan using tenk1_unique1 on tenk1 t2
                                 Index Cond: (unique1 = t1.twenty)
(13 rows)

-- And ensure the parallel plan gives us the correct results.
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;
 count |        avg
-------+--------------------
  1000 | 9.5000000000000000
(1 row)

RESET parallel_tuple_cost;
RESET parallel_setup_cost;
RESET min_parallel_table_scan_size;
@@ -1,85 +0,0 @@ src/test/regress/sql/resultcache.sql (deleted)
-- Perform tests on the Result Cache node.

-- The cache hits/misses/evictions from the Result Cache node can vary between
-- machines.  Let's just replace the number with an 'N'.  In order to allow us
-- to perform validation when the measure was zero, we replace a zero value
-- with "Zero".  All other numbers are replaced with 'N'.
-- Note: "Overflows" is deliberately left unmasked; it is expected to be stable.
create function explain_resultcache(query text, hide_hitmiss bool) returns setof text
language plpgsql as
$$
declare
    ln text;
begin
    for ln in
        execute format('explain (analyze, costs off, summary off, timing off) %s',
                       query)
    loop
        -- Only hide hit/miss counts when the caller asks for it; the
        -- zero-valued case is replaced first so it stays distinguishable.
        if hide_hitmiss = true then
            ln := regexp_replace(ln, 'Hits: 0', 'Hits: Zero');
            ln := regexp_replace(ln, 'Hits: \d+', 'Hits: N');
            ln := regexp_replace(ln, 'Misses: 0', 'Misses: Zero');
            ln := regexp_replace(ln, 'Misses: \d+', 'Misses: N');
        end if;
        -- Evictions and memory usage always vary between machines; mask them.
        ln := regexp_replace(ln, 'Evictions: 0', 'Evictions: Zero');
        ln := regexp_replace(ln, 'Evictions: \d+', 'Evictions: N');
        ln := regexp_replace(ln, 'Memory Usage: \d+', 'Memory Usage: N');
        return next ln;
    end loop;
end;
$$;
-- Ensure we get a result cache on the inner side of the nested loop
SET enable_hashjoin TO off;
SELECT explain_resultcache('
SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
WHERE t2.unique1 < 1000;', false);

-- And check we get the expected results.
SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
WHERE t2.unique1 < 1000;

-- Try with LATERAL joins
SELECT explain_resultcache('
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;', false);

-- And check we get the expected results.
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;

-- Reduce work_mem so that we see some cache evictions
SET work_mem TO '64kB';
SET enable_mergejoin TO off;
-- Ensure we get some evictions.  We're unable to validate the hits and misses
-- here as the number of entries that fit in the cache at once will vary
-- between different machines.
SELECT explain_resultcache('
SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.thousand
WHERE t2.unique1 < 800;', true);
RESET enable_mergejoin;
RESET work_mem;
RESET enable_hashjoin;

-- Test parallel plans with Result Cache.
SET min_parallel_table_scan_size TO 0;
SET parallel_setup_cost TO 0;
SET parallel_tuple_cost TO 0;

-- Ensure we get a parallel plan.
EXPLAIN (COSTS OFF)
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;

-- And ensure the parallel plan gives us the correct results.
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;
RESET parallel_tuple_cost;
RESET parallel_setup_cost;
RESET min_parallel_table_scan_size;
Loading…
Reference in new issue