@@ -171,7 +171,7 @@ add_paths_to_joinrel(PlannerInfo *root,
 		case JOIN_ANTI:
 
 			/*
-			 * XXX it may be worth proving this to allow a ResultCache to be
+			 * XXX it may be worth proving this to allow a Memoize to be
 			 * considered for Nested Loop Semi/Anti Joins.
 			 */
 			extra.inner_unique = false; /* well, unproven */
@@ -395,7 +395,7 @@ paraminfo_get_equal_hashops(PlannerInfo *root, ParamPathInfo *param_info,
 			OpExpr	   *opexpr;
 			Node	   *expr;
 
-			/* can't use result cache without a valid hash equals operator */
+			/* can't use a memoize node without a valid hash equals operator */
 			if (!OidIsValid(rinfo->hasheqoperator) ||
 				!clause_sides_match_join(rinfo, outerrel, innerrel))
 			{
@@ -436,7 +436,7 @@ paraminfo_get_equal_hashops(PlannerInfo *root, ParamPathInfo *param_info,
 			typentry = lookup_type_cache(exprType(expr),
 										 TYPECACHE_HASH_PROC | TYPECACHE_EQ_OPR);
 
-			/* can't use result cache without a valid hash equals operator */
+			/* can't use a memoize node without a valid hash equals operator */
 			if (!OidIsValid(typentry->hash_proc) || !OidIsValid(typentry->eq_opr))
 			{
 				list_free(*operators);
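Both rejections above guard the same requirement: every cache key expression must come with a usable hash function and equality operator. A minimal sketch of why (plain C with invented names, nothing from the PostgreSQL tree): the hash only narrows the probe to a bucket, and because distinct keys can land in the same bucket, the equality check is what confirms a hit. Losing either piece makes the cache unusable, so the planner gives up early.

/*
 * Illustration only, not PostgreSQL code: a parameter-keyed cache
 * needs both of the pieces the planner checks for above.
 */
#include <stdbool.h>
#include <stdio.h>

#define NBUCKETS 64

typedef struct CacheEntry
{
	bool		used;
	int			key;
	int			value;
} CacheEntry;

static CacheEntry buckets[NBUCKETS];

static unsigned
hash_key(int key)
{
	return (unsigned) key * 2654435761u % NBUCKETS;
}

/* Look up 'key'; both hashing and equality participate. */
static bool
cache_lookup(int key, int *value)
{
	CacheEntry *e = &buckets[hash_key(key)];

	if (e->used && e->key == key)	/* equality confirms the hash match */
	{
		*value = e->value;
		return true;
	}
	return false;
}

int
main(void)
{
	CacheEntry *e = &buckets[hash_key(42)];
	int			v;

	e->used = true;
	e->key = 42;
	e->value = 7;
	printf("key 42: %d\n", cache_lookup(42, &v) ? v : -1);	/* hit: 7 */
	printf("key 43: %d\n", cache_lookup(43, &v) ? v : -1);	/* miss: -1 */
	return 0;
}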
@@ -448,27 +448,27 @@ paraminfo_get_equal_hashops(PlannerInfo *root, ParamPathInfo *param_info,
 		*param_exprs = lappend(*param_exprs, expr);
 	}
 
-	/* We're okay to use result cache */
+	/* We're okay to use memoize */
 	return true;
 }
 
 /*
- * get_resultcache_path
- *		If possible, make and return a Result Cache path atop of 'inner_path'.
+ * get_memoize_path
+ *		If possible, make and return a Memoize path atop of 'inner_path'.
  *		Otherwise return NULL.
  */
 static Path *
-get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
-					 RelOptInfo *outerrel, Path *inner_path,
-					 Path *outer_path, JoinType jointype,
-					 JoinPathExtraData *extra)
+get_memoize_path(PlannerInfo *root, RelOptInfo *innerrel,
+				 RelOptInfo *outerrel, Path *inner_path,
+				 Path *outer_path, JoinType jointype,
+				 JoinPathExtraData *extra)
 {
 	List	   *param_exprs;
 	List	   *hash_operators;
 	ListCell   *lc;
 
 	/* Obviously not if it's disabled */
-	if (!enable_resultcache)
+	if (!enable_memoize)
 		return NULL;
 
 	/*
@@ -481,7 +481,7 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
 		return NULL;
 
 	/*
-	 * We can only have a result cache when there's some kind of cache key,
+	 * We can only have a memoize node when there's some kind of cache key,
 	 * either parameterized path clauses or lateral Vars.  No cache key sounds
 	 * more like something a Materialize node might be more useful for.
 	 */
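The distinction this comment draws can be read as a data-structure choice. A hedged sketch with invented types (not the real node layouts): a cache key means one stored result set per distinct parameter value, while no key means there is only ever one result set, and a single keyless buffer, which is Materialize's job, already covers that case.

/*
 * Illustration only: the two storage shapes being contrasted above.
 */
typedef struct ResultSet ResultSet;	/* stand-in for a set of tuples */

/* Memoize-like shape: results are looked up by parameter value. */
typedef struct KeyedCache
{
	int			nkeys;			/* distinct parameter values seen */
	int		   *keys;
	ResultSet **results;		/* one cached result set per key */
} KeyedCache;

/* Materialize-like shape: keyless, one stored result set suffices. */
typedef struct PlainBuffer
{
	ResultSet  *result;
} PlainBuffer;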
@@ -493,8 +493,8 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
 	/*
 	 * Currently we don't do this for SEMI and ANTI joins unless they're
 	 * marked as inner_unique.  This is because nested loop SEMI/ANTI joins
-	 * don't scan the inner node to completion, which will mean result cache
-	 * cannot mark the cache entry as complete.
+	 * don't scan the inner node to completion, which will mean memoize
+	 * cannot mark the cache entry as complete.
 	 *
 	 * XXX Currently we don't attempt to mark SEMI/ANTI joins as inner_unique
 	 * = true.  Should we?  See add_paths_to_joinrel()
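The completion rule in this comment, and in the unique-join discussion in the hunks that follow, can be shown with a toy cache entry (plain C, invented names): only an inner scan that runs to the end may set the complete flag, and an early exit of the semi/anti-join kind leaves it unset, so the entry may be missing tuples and can never satisfy a later probe on its own.

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustration only: a cache entry is trustworthy for later probes of
 * the same key only if the scan that filled it ran to completion.
 */
typedef struct Entry
{
	int			ntuples;		/* inner tuples cached so far */
	bool		complete;		/* whole inner side was read? */
} Entry;

static void
scan_inner(Entry *entry, int inner_size, bool stop_at_first_match)
{
	for (int i = 0; i < inner_size; i++)
	{
		entry->ntuples++;		/* cache this tuple */
		if (stop_at_first_match)
			return;				/* semi/anti-style early exit: 'complete'
								 * is never set */
	}
	entry->complete = true;		/* safe only after reading everything */
}

int
main(void)
{
	Entry		full = {0};
	Entry		partial = {0};

	scan_inner(&full, 10, false);
	scan_inner(&partial, 10, true);
	printf("full: %d tuples, complete=%d\n", full.ntuples, full.complete);
	printf("partial: %d tuples, complete=%d\n", partial.ntuples, partial.complete);
	return 0;
}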
@@ -504,8 +504,8 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
 		return NULL;
 
 	/*
-	 * Result Cache normally marks cache entries as complete when it runs out
-	 * of tuples to read from its subplan.  However, with unique joins, Nested
+	 * Memoize normally marks cache entries as complete when it runs out of
+	 * tuples to read from its subplan.  However, with unique joins, Nested
 	 * Loop will skip to the next outer tuple after finding the first matching
 	 * inner tuple.  This means that we may not read the inner side of the
 	 * join to completion which leaves no opportunity to mark the cache entry
@@ -516,11 +516,11 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
 	 * condition, we can't be sure which part of it causes the join to be
 	 * unique.  This means there are no guarantees that only 1 tuple will be
 	 * read.  We cannot mark the cache entry as complete after reading the
-	 * first tuple without that guarantee.  This means the scope of Result
-	 * Cache's usefulness is limited to only outer rows that have no join
+	 * first tuple without that guarantee.  This means the scope of Memoize
+	 * node's usefulness is limited to only outer rows that have no join
 	 * partner as this is the only case where Nested Loop would exhaust the
 	 * inner scan of a unique join.  Since the scope is limited to that, we
-	 * just don't bother making a result cache path in this case.
+	 * just don't bother making a memoize path in this case.
 	 *
 	 * Lateral vars needn't be considered here as they're not considered when
 	 * determining if the join is unique.
@@ -536,7 +536,7 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
 		return NULL;
 
 	/*
-	 * We can't use a result cache if there are volatile functions in the
+	 * We can't use a memoize node if there are volatile functions in the
 	 * inner rel's target list or restrict list.  A cache hit could reduce the
 	 * number of calls to these functions.
 	 */
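The volatility test exists because a cache hit skips re-running the inner scan, and with it any volatile function calls inside. A toy version of the hazard (plain C; rand() standing in for an SQL volatile function such as random()):

#include <stdio.h>
#include <stdlib.h>

static int	ncalls = 0;

/* Stand-in for a volatile function: result may differ per call. */
static int
volatile_fn(void)
{
	ncalls++;
	return rand();
}

int
main(void)
{
	int			cached = volatile_fn();	/* evaluated once, on the miss */

	/* Three probes with the same key reuse the cached value... */
	for (int i = 0; i < 3; i++)
		printf("probe %d -> %d\n", i, cached);

	/* ...so the function ran once where the uncached plan would have
	 * run it three times -- observably different behavior, which is
	 * why the planner refuses to memoize here. */
	printf("volatile_fn calls: %d\n", ncalls);
	return 0;
}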
@@ -559,13 +559,13 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
 								   &param_exprs,
 								   &hash_operators))
 	{
-		return (Path *) create_resultcache_path(root,
-												innerrel,
-												inner_path,
-												param_exprs,
-												hash_operators,
-												extra->inner_unique,
-												outer_path->parent->rows);
+		return (Path *) create_memoize_path(root,
+											innerrel,
+											inner_path,
+											param_exprs,
+											hash_operators,
+											extra->inner_unique,
+											outer_path->parent->rows);
 	}
 
 	return NULL;
@@ -1688,7 +1688,7 @@ match_unsorted_outer(PlannerInfo *root,
 			foreach(lc2, innerrel->cheapest_parameterized_paths)
 			{
 				Path	   *innerpath = (Path *) lfirst(lc2);
-				Path	   *rcpath;
+				Path	   *mpath;
 
 				try_nestloop_path(root,
 								  joinrel,
@@ -1699,17 +1699,17 @@ match_unsorted_outer(PlannerInfo *root,
 								  extra);
 
 				/*
-				 * Try generating a result cache path and see if that makes
-				 * the nested loop any cheaper.
+				 * Try generating a memoize path and see if that makes the
+				 * nested loop any cheaper.
 				 */
-				rcpath = get_resultcache_path(root, innerrel, outerrel,
-											  innerpath, outerpath, jointype,
-											  extra);
-				if (rcpath != NULL)
+				mpath = get_memoize_path(root, innerrel, outerrel,
+										 innerpath, outerpath, jointype,
+										 extra);
+				if (mpath != NULL)
 					try_nestloop_path(root,
 									  joinrel,
 									  outerpath,
-									  rcpath,
+									  mpath,
 									  merge_pathkeys,
 									  jointype,
 									  extra);
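The shape of this code path, in miniature: the memoize path is only ever an extra candidate, and cost comparison decides whether it displaces the plain parameterized scan. A hedged sketch (plain C, invented types; in PostgreSQL the real decision happens inside add_path(), not in the caller):

#include <stdio.h>

/* Illustration only: a pared-down stand-in for path cost comparison. */
typedef struct PathSketch
{
	const char *label;
	double		total_cost;
} PathSketch;

/*
 * Mimics the effect of offering both paths: the cheaper survives, and
 * a NULL candidate (no memoize path possible) changes nothing.
 */
static const PathSketch *
cheaper(const PathSketch *plain, const PathSketch *memoized)
{
	if (memoized != NULL && memoized->total_cost < plain->total_cost)
		return memoized;
	return plain;
}

int
main(void)
{
	PathSketch	plain = {"plain inner scan", 1000.0};
	PathSketch	memo = {"memoized inner scan", 250.0};

	printf("kept: %s\n", cheaper(&plain, &memo)->label);
	printf("kept: %s\n", cheaper(&plain, NULL)->label);
	return 0;
}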
@@ -1867,7 +1867,7 @@ consider_parallel_nestloop(PlannerInfo *root,
 		foreach(lc2, innerrel->cheapest_parameterized_paths)
 		{
 			Path	   *innerpath = (Path *) lfirst(lc2);
-			Path	   *rcpath;
+			Path	   *mpath;
 
 			/* Can't join to an inner path that is not parallel-safe */
 			if (!innerpath->parallel_safe)
@@ -1894,14 +1894,14 @@ consider_parallel_nestloop(PlannerInfo *root,
 									  pathkeys, jointype, extra);
 
 			/*
-			 * Try generating a result cache path and see if that makes the
-			 * nested loop any cheaper.
+			 * Try generating a memoize path and see if that makes the nested
+			 * loop any cheaper.
 			 */
-			rcpath = get_resultcache_path(root, innerrel, outerrel,
-										  innerpath, outerpath, jointype,
-										  extra);
-			if (rcpath != NULL)
-				try_partial_nestloop_path(root, joinrel, outerpath, rcpath,
+			mpath = get_memoize_path(root, innerrel, outerrel,
+									 innerpath, outerpath, jointype,
+									 extra);
+			if (mpath != NULL)
+				try_partial_nestloop_path(root, joinrel, outerpath, mpath,
 										  pathkeys, jointype, extra);
 		}
 	}