@@ -217,7 +217,6 @@ typedef struct LVRelState
*/
typedef struct LVPagePruneState
{
bool hastup ; /* Page prevents rel truncation? */
bool has_lpdead_items ; /* includes existing LP_DEAD items */
/*
@@ -253,7 +252,7 @@ static void lazy_scan_prune(LVRelState *vacrel, Buffer buf,
LVPagePruneState * prunestate ) ;
static bool lazy_scan_noprune ( LVRelState * vacrel , Buffer buf ,
BlockNumber blkno , Page page ,
bool * hastup , bool * recordfreespace ) ;
bool * recordfreespace ) ;
static void lazy_vacuum ( LVRelState * vacrel ) ;
static bool lazy_vacuum_all_indexes ( LVRelState * vacrel ) ;
static void lazy_vacuum_heap_rel ( LVRelState * vacrel ) ;
@@ -959,8 +958,7 @@ lazy_scan_heap(LVRelState *vacrel)
page = BufferGetPage ( buf ) ;
if ( ! ConditionalLockBufferForCleanup ( buf ) )
{
bool hastup ,
recordfreespace ;
bool recordfreespace ;
LockBuffer ( buf , BUFFER_LOCK_SHARE ) ;
@@ -972,20 +970,21 @@ lazy_scan_heap(LVRelState *vacrel)
continue ;
}
/* Collect LP_DEAD items in dead_items array, count tuples */
if ( lazy_scan_noprune ( vacrel , buf , blkno , page , & hastup ,
/*
* Collect LP_DEAD items in dead_items array , count tuples ,
* determine if rel truncation is safe
*/
if ( lazy_scan_noprune ( vacrel , buf , blkno , page ,
& recordfreespace ) )
{
Size freespace = 0 ;
/*
* Processed page successfully ( without cleanup lock ) - - just
* need to perform rel truncation and FSM steps , much like the
* lazy_scan_prune case . Don ' t bother trying to match its
* visibility map setting steps , though .
* need to update the FSM , much like the lazy_scan_prune case .
* Don ' t bother trying to match its visibility map setting
* steps , though .
*/
if ( hastup )
vacrel - > nonempty_pages = blkno + 1 ;
if ( recordfreespace )
freespace = PageGetHeapFreeSpace ( page ) ;
UnlockReleaseBuffer ( buf ) ;
@@ -1017,16 +1016,13 @@ lazy_scan_heap(LVRelState *vacrel)
* dead_items array . This includes LP_DEAD line pointers that we
* pruned ourselves , as well as existing LP_DEAD line pointers that
* were pruned some time earlier . Also considers freezing XIDs in the
* tuple headers of remaining items with storage .
* tuple headers of remaining items with storage . It also determines
* if truncating this block is safe .
*/
lazy_scan_prune ( vacrel , buf , blkno , page , & prunestate ) ;
Assert ( ! prunestate . all_visible | | ! prunestate . has_lpdead_items ) ;
/* Remember the location of the last page with nonremovable tuples */
if ( prunestate . hastup )
vacrel - > nonempty_pages = blkno + 1 ;
if ( vacrel - > nindexes = = 0 )
{
/*
@@ -1555,6 +1551,7 @@ lazy_scan_prune(LVRelState *vacrel,
live_tuples ,
recently_dead_tuples ;
HeapPageFreeze pagefrz ;
bool hastup = false ;
int64 fpi_before = pgWalUsage . wal_fpi ;
OffsetNumber deadoffsets [ MaxHeapTuplesPerPage ] ;
HeapTupleFreeze frozen [ MaxHeapTuplesPerPage ] ;
@@ -1593,7 +1590,6 @@ lazy_scan_prune(LVRelState *vacrel,
* Now scan the page to collect LP_DEAD items and check for tuples
* requiring freezing among remaining tuples with storage
*/
prunestate - > hastup = false ;
prunestate - > has_lpdead_items = false ;
prunestate - > all_visible = true ;
prunestate - > all_frozen = true ;
@@ -1620,7 +1616,7 @@ lazy_scan_prune(LVRelState *vacrel,
if ( ItemIdIsRedirected ( itemid ) )
{
/* page makes rel truncation unsafe */
prunestate - > hastup = true ;
hastup = true ;
continue ;
}
@@ -1750,7 +1746,7 @@ lazy_scan_prune(LVRelState *vacrel,
break ;
}
prunestate - > hastup = true ; /* page makes rel truncation unsafe */
hastup = true ; /* page makes rel truncation unsafe */
/* Tuple with storage -- consider need to freeze */
if ( heap_prepare_freeze_tuple ( htup , & vacrel - > cutoffs , & pagefrz ,
@@ -1918,6 +1914,10 @@ lazy_scan_prune(LVRelState *vacrel,
vacrel - > lpdead_items + = lpdead_items ;
vacrel - > live_tuples + = live_tuples ;
vacrel - > recently_dead_tuples + = recently_dead_tuples ;
/* Can't truncate this page */
if ( hastup )
vacrel - > nonempty_pages = blkno + 1 ;
}
/*
@@ -1935,7 +1935,6 @@ lazy_scan_prune(LVRelState *vacrel,
* one or more tuples on the page . We always return true for non - aggressive
* callers .
*
* See lazy_scan_prune for an explanation of hastup return flag .
* recordfreespace flag instructs caller on whether or not it should do
* generic FSM processing for page .
*/
@@ -1944,7 +1943,6 @@ lazy_scan_noprune(LVRelState *vacrel,
Buffer buf ,
BlockNumber blkno ,
Page page ,
bool * hastup ,
bool * recordfreespace )
{
OffsetNumber offnum ,
@@ -1953,6 +1951,7 @@ lazy_scan_noprune(LVRelState *vacrel,
live_tuples ,
recently_dead_tuples ,
missed_dead_tuples ;
bool hastup ;
HeapTupleHeader tupleheader ;
TransactionId NoFreezePageRelfrozenXid = vacrel - > NewRelfrozenXid ;
MultiXactId NoFreezePageRelminMxid = vacrel - > NewRelminMxid ;
@@ -1960,7 +1959,7 @@ lazy_scan_noprune(LVRelState *vacrel,
Assert ( BufferGetBlockNumber ( buf ) = = blkno ) ;
* hastup = false ; /* for now */
hastup = false ; /* for now */
* recordfreespace = false ; /* for now */
lpdead_items = 0 ;
@@ -1984,7 +1983,7 @@ lazy_scan_noprune(LVRelState *vacrel,
if ( ItemIdIsRedirected ( itemid ) )
{
* hastup = true ;
hastup = true ;
continue ;
}
@@ -1998,7 +1997,7 @@ lazy_scan_noprune(LVRelState *vacrel,
continue ;
}
* hastup = true ; /* page prevents rel truncation */
hastup = true ; /* page prevents rel truncation */
tupleheader = ( HeapTupleHeader ) PageGetItem ( page , itemid ) ;
if ( heap_tuple_should_freeze ( tupleheader , & vacrel - > cutoffs ,
& NoFreezePageRelfrozenXid ,
@@ -2100,7 +2099,7 @@ lazy_scan_noprune(LVRelState *vacrel,
* but it beats having to maintain specialized heap vacuuming code
* forever , for vanishingly little benefit . )
*/
* hastup = true ;
hastup = true ;
missed_dead_tuples + = lpdead_items ;
}
@@ -2156,6 +2155,10 @@ lazy_scan_noprune(LVRelState *vacrel,
if ( missed_dead_tuples > 0 )
vacrel - > missed_dead_pages + + ;
/* Can't truncate this page */
if ( hastup )
vacrel - > nonempty_pages = blkno + 1 ;
/* Caller won't need to call lazy_scan_prune with same page */
return true ;
}