@@ -813,7 +813,6 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
 	Cost		startup_cost = 0;
 	Cost		run_cost = 0;
 	Cost		indexTotalCost;
-	Selectivity indexSelectivity;
 	QualCost	qpqual_cost;
 	Cost		cpu_per_tuple;
 	Cost		cost_per_page;
@@ -837,54 +836,18 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
 	if (!enable_bitmapscan)
 		startup_cost += disable_cost;
 
-	/*
-	 * Fetch total cost of obtaining the bitmap, as well as its total
-	 * selectivity.
-	 */
-	cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
+	pages_fetched = compute_bitmap_pages(root, baserel, bitmapqual,
+										 loop_count, &indexTotalCost,
+										 &tuples_fetched);
 
 	startup_cost += indexTotalCost;
+	T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
 
 	/* Fetch estimated page costs for tablespace containing table. */
 	get_tablespace_page_costs(baserel->reltablespace,
 							  &spc_random_page_cost,
 							  &spc_seq_page_cost);
 
-	/*
-	 * Estimate number of main-table pages fetched.
-	 */
-	tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
-
-	T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
-	if (loop_count > 1)
-	{
-		/*
-		 * For repeated bitmap scans, scale up the number of tuples fetched in
-		 * the Mackert and Lohman formula by the number of scans, so that we
-		 * estimate the number of pages fetched by all the scans. Then
-		 * pro-rate for one scan.
-		 */
-		pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
-											baserel->pages,
-											get_indexpath_pages(bitmapqual),
-											root);
-		pages_fetched /= loop_count;
-	}
-	else
-	{
-		/*
-		 * For a single scan, the number of heap pages that need to be fetched
-		 * is the same as the Mackert and Lohman formula for the case T <= b
-		 * (ie, no re-reads needed).
-		 */
-		pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
-	}
-
-	if (pages_fetched >= T)
-		pages_fetched = T;
-	else
-		pages_fetched = ceil(pages_fetched);
-
 	/*
 	 * For small numbers of pages we should charge spc_random_page_cost
 	 * apiece, while if nearly all the table's pages are being read, it's more
@@ -4820,3 +4783,69 @@ get_parallel_divisor(Path *path)
 
 	return parallel_divisor;
 }
+
+/*
+ * compute_bitmap_pages
+ *
+ * compute number of pages fetched from heap in bitmap heap scan.
+ */
+double
+compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel, Path *bitmapqual,
+					 int loop_count, Cost *cost, double *tuple)
+{
+	Cost		indexTotalCost;
+	Selectivity indexSelectivity;
+	double		T;
+	double		pages_fetched;
+	double		tuples_fetched;
+
+	/*
+	 * Fetch total cost of obtaining the bitmap, as well as its total
+	 * selectivity.
+	 */
+	cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
+
+	/*
+	 * Estimate number of main-table pages fetched.
+	 */
+	tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
+
+	T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
+	if (loop_count > 1)
+	{
+		/*
+		 * For repeated bitmap scans, scale up the number of tuples fetched in
+		 * the Mackert and Lohman formula by the number of scans, so that we
+		 * estimate the number of pages fetched by all the scans. Then
+		 * pro-rate for one scan.
+		 */
+		pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
+											baserel->pages,
+											get_indexpath_pages(bitmapqual),
+											root);
+		pages_fetched /= loop_count;
+	}
+	else
+	{
+		/*
+		 * For a single scan, the number of heap pages that need to be fetched
+		 * is the same as the Mackert and Lohman formula for the case T <= b
+		 * (ie, no re-reads needed).
+		 */
+		pages_fetched =
+			(2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
+	}
+
+	if (pages_fetched >= T)
+		pages_fetched = T;
+	else
+		pages_fetched = ceil(pages_fetched);
+
+	if (cost)
+		*cost = indexTotalCost;
+	if (tuple)
+		*tuple = tuples_fetched;
+
+	return pages_fetched;
+}
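
For reference, the single-scan branch above is the Mackert-Lohman estimate for the no-reread case: with T heap pages and t tuples fetched, pages = 2*T*t / (2*T + t), capped at T and rounded up to whole pages. The following is a minimal standalone sketch of just that arithmetic, not PostgreSQL source; the file and function names are made up for illustration.

/* mackert_lohman.c -- illustrative sketch only, not part of PostgreSQL */
#include <math.h>
#include <stdio.h>

/*
 * Single-scan heap page estimate: with T heap pages and t tuples fetched,
 * pages = 2*T*t / (2*T + t), clamped to at most T and rounded up.
 */
static double
single_scan_pages(double T, double tuples_fetched)
{
	double		pages_fetched;

	if (T < 1.0)
		T = 1.0;				/* treat a relation as at least one page */
	pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
	if (pages_fetched >= T)
		return T;				/* cannot fetch more pages than exist */
	return ceil(pages_fetched); /* charge for whole pages */
}

int
main(void)
{
	/* 10000-page table: a selective scan costs about one page per tuple... */
	printf("%g\n", single_scan_pages(10000.0, 100.0));		/* prints 100 */
	/* ...while a huge tuple count saturates at the table size */
	printf("%g\n", single_scan_pages(10000.0, 1000000.0));	/* prints 10000 */
	return 0;
}

For loop_count > 1 the patch takes the other branch instead: it scales tuples_fetched by the number of scans, runs the full Mackert-Lohman model in index_pages_fetched() (which does account for re-reads), and then divides the result back down to pro-rate for one scan.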