@@ -5968,7 +5968,7 @@ get_stats_slot_range(AttStatsSlot *sslot, Oid opfuncoid, FmgrInfo *opproc,
  * and fetching its low and/or high values.
  * If successful, store values in *min and *max, and return true.
  * (Either pointer can be NULL if that endpoint isn't needed.)
- * If no data available, return false.
+ * If unsuccessful, return false.
  *
  * sortop is the "<" comparison operator to use.
  * collation is the required collation.
@@ -6097,11 +6097,11 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
 		}
 		else
 		{
-			/* If min not requested, assume index is nonempty */
+			/* If min not requested, still want to fetch max */
 			have_data = true;
 		}
 
-		/* If max is requested, and we didn't find the index is empty */
+		/* If max is requested, and we didn't already fail ... */
 		if (max && have_data)
 		{
 			/* scan in the opposite direction; all else is the same */
@@ -6135,7 +6135,7 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
 
 /*
  * Get one endpoint datum (min or max depending on indexscandir) from the
- * specified index.  Return true if successful, false if index is empty.
+ * specified index.  Return true if successful, false if not.
  * On success, endpoint value is stored to *endpointDatum (and copied into
  * outercontext).
  *
@@ -6145,6 +6145,9 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
  * to probe the heap.
  * (We could compute these values locally, but that would mean computing them
  * twice when get_actual_variable_range needs both the min and the max.)
+ *
+ * Failure occurs either when the index is empty, or we decide that it's
+ * taking too long to find a suitable tuple.
  */
 static bool
 get_actual_variable_endpoint(Relation heapRel,
@@ -6161,6 +6164,8 @@ get_actual_variable_endpoint(Relation heapRel,
 	SnapshotData SnapshotNonVacuumable;
 	IndexScanDesc index_scan;
 	Buffer		vmbuffer = InvalidBuffer;
+	BlockNumber last_heap_block = InvalidBlockNumber;
+	int			n_visited_heap_pages = 0;
 	ItemPointer tid;
 	Datum		values[INDEX_MAX_KEYS];
 	bool		isnull[INDEX_MAX_KEYS];
@@ -6203,6 +6208,12 @@ get_actual_variable_endpoint(Relation heapRel,
 	 * might get a bogus answer that's not close to the index extremal value,
 	 * or could even be NULL.  We avoid this hazard because we take the data
 	 * from the index entry not the heap.
+	 *
+	 * Despite all this care, there are situations where we might find many
+	 * non-visible tuples near the end of the index.  We don't want to expend
+	 * a huge amount of time here, so we give up once we've read too many heap
+	 * pages.  When we fail for that reason, the caller will end up using
+	 * whatever extremal value is recorded in pg_statistic.
 	 */
 	InitNonVacuumableSnapshot(SnapshotNonVacuumable,
 							  GlobalVisTestFor(heapRel));
@@ -6217,13 +6228,37 @@ get_actual_variable_endpoint(Relation heapRel,
 	/* Fetch first/next tuple in specified direction */
 	while ((tid = index_getnext_tid(index_scan, indexscandir)) != NULL)
 	{
+		BlockNumber block = ItemPointerGetBlockNumber(tid);
+
 		if (!VM_ALL_VISIBLE(heapRel,
-							ItemPointerGetBlockNumber(tid),
+							block,
 							&vmbuffer))
 		{
 			/* Rats, we have to visit the heap to check visibility */
 			if (!index_fetch_heap(index_scan, tableslot))
+			{
+				/*
+				 * No visible tuple for this index entry, so we need to
+				 * advance to the next entry.  Before doing so, count heap
+				 * page fetches and give up if we've done too many.
+				 *
+				 * We don't charge a page fetch if this is the same heap page
+				 * as the previous tuple.  This is on the conservative side,
+				 * since other recently-accessed pages are probably still in
+				 * buffers too; but it's good enough for this heuristic.
+				 */
+#define VISITED_PAGES_LIMIT		100
+
+				if (block != last_heap_block)
+				{
+					last_heap_block = block;
+					n_visited_heap_pages++;
+					if (n_visited_heap_pages > VISITED_PAGES_LIMIT)
+						break;
+				}
+
 				continue;		/* no visible tuple, try next index entry */
+			}
 
 			/* We don't actually need the heap tuple for anything */
 			ExecClearTuple(tableslot);