@@ -6792,7 +6792,7 @@ heap_freeze_execute_prepared(Relation rel, Buffer buffer,
     Page        page = BufferGetPage(buffer);
 
     Assert(ntuples > 0);
-    Assert(TransactionIdIsValid(FreezeLimit));
+    Assert(TransactionIdIsNormal(FreezeLimit));
 
     START_CRIT_SECTION();
@@ -6815,21 +6815,20 @@ heap_freeze_execute_prepared(Relation rel, Buffer buffer,
         int         nplans;
         xl_heap_freeze_page xlrec;
         XLogRecPtr  recptr;
-        TransactionId latestRemovedXid;
+        TransactionId snapshotConflictHorizon;
 
         /* Prepare deduplicated representation for use in WAL record */
         nplans = heap_xlog_freeze_plan(tuples, ntuples, plans, offsets);
 
         /*
-         * latestRemovedXid describes the latest processed XID, whereas
          * FreezeLimit is (approximately) the first XID not frozen by VACUUM.
          * Back up caller's FreezeLimit to avoid false conflicts when
          * FreezeLimit is precisely equal to VACUUM's OldestXmin cutoff.
         */
-        latestRemovedXid = FreezeLimit;
-        TransactionIdRetreat(latestRemovedXid);
+        snapshotConflictHorizon = FreezeLimit;
+        TransactionIdRetreat(snapshotConflictHorizon);
 
-        xlrec.latestRemovedXid = latestRemovedXid;
+        xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
         xlrec.nplans = nplans;
 
         XLogBeginInsert();
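/*
 * A minimal standalone sketch (not part of this patch) of the "back up
 * FreezeLimit by one" step above.  The Xid type, FIRST_NORMAL_XID, the
 * retreat loop, and the wraparound-aware comparison are simplified
 * stand-ins for the definitions in transam.h; the conflict test mirrors
 * the rule that a standby snapshot conflicts when its xmin is <= the
 * horizon carried in the WAL record.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t Xid;
#define FIRST_NORMAL_XID 3

/* Wraparound-aware "a precedes or equals b", as for normal XIDs */
static bool
xid_precedes_or_equals(Xid a, Xid b)
{
    return (int32_t) (a - b) <= 0;
}

/* Decrement, skipping the special XIDs below FIRST_NORMAL_XID */
static Xid
retreat_xid(Xid xid)
{
    do
    {
        xid--;
    } while (xid < FIRST_NORMAL_XID);
    return xid;
}

int
main(void)
{
    Xid         oldest_xmin = 1000;     /* VACUUM's OldestXmin cutoff */
    Xid         freeze_limit = oldest_xmin; /* FreezeLimit may equal it */
    Xid         standby_xmin = 1000;    /* a standby snapshot's xmin */

    /* Naive horizon: the standby snapshot would conflict for no reason */
    printf("naive:   conflict=%d\n",
           xid_precedes_or_equals(standby_xmin, freeze_limit));

    /* Backed-up horizon, as computed above before building the record */
    Xid         horizon = retreat_xid(freeze_limit);

    printf("retreat: conflict=%d\n",
           xid_precedes_or_equals(standby_xmin, horizon));
    return 0;
}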
@@ -7401,15 +7400,21 @@ heap_tuple_would_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
 }
 
 /*
- * If 'tuple' contains any visible XID greater than latestRemovedXid,
- * ratchet forwards latestRemovedXid to the greatest one found.
- * This is used as the basis for generating Hot Standby conflicts, so
- * if a tuple was never visible then removing it should not conflict
- * with queries.
+ * Maintain snapshotConflictHorizon for caller by ratcheting forward its value
+ * using any committed XIDs contained in 'tuple', an obsolescent heap tuple
+ * that caller is in the process of physically removing, e.g. via HOT pruning
+ * or index deletion.
+ *
+ * Caller must initialize its value to InvalidTransactionId, which is
+ * generally interpreted as "definitely no need for a recovery conflict".
+ * Final value must reflect all heap tuples that caller will physically remove
+ * (or remove TID references to) via its ongoing pruning/deletion operation.
+ * ResolveRecoveryConflictWithSnapshot() is passed the final value (taken from
+ * caller's WAL record) by REDO routine when it replays caller's operation.
 */
 void
-HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
-                                       TransactionId *latestRemovedXid)
+HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple,
+                                      TransactionId *snapshotConflictHorizon)
 {
     TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
     TransactionId xmax = HeapTupleHeaderGetUpdateXid(tuple);
@@ -7417,8 +7422,8 @@ HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
     if (tuple->t_infomask & HEAP_MOVED)
     {
-        if (TransactionIdPrecedes(*latestRemovedXid, xvac))
-            *latestRemovedXid = xvac;
+        if (TransactionIdPrecedes(*snapshotConflictHorizon, xvac))
+            *snapshotConflictHorizon = xvac;
     }
 
     /*
@@ -7431,11 +7436,9 @@ HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
         (!HeapTupleHeaderXminInvalid(tuple) && TransactionIdDidCommit(xmin)))
     {
         if (xmax != xmin &&
-            TransactionIdFollows(xmax, *latestRemovedXid))
-            *latestRemovedXid = xmax;
+            TransactionIdFollows(xmax, *snapshotConflictHorizon))
+            *snapshotConflictHorizon = xmax;
     }
-
-    /* *latestRemovedXid may still be invalid at end */
 }
 
 #ifdef USE_PREFETCH
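/*
 * A minimal standalone sketch (not part of this patch) of the caller
 * contract described in the comment above: start the horizon at
 * InvalidTransactionId, ratchet it forward once per tuple being removed,
 * and ship the final value in the WAL record so REDO can hand it to
 * ResolveRecoveryConflictWithSnapshot().  The types and the "committed"
 * flag are simplified stand-ins for the real tuple header checks.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t Xid;
#define INVALID_XID 0

typedef struct RemovedTuple
{
    Xid         xmax;           /* deleter/updater of the tuple */
    bool        xmax_committed; /* did that XID commit? */
} RemovedTuple;

/* Wraparound-aware "a follows b" (permanent-XID special cases omitted) */
static bool
xid_follows(Xid a, Xid b)
{
    return (int32_t) (a - b) > 0;
}

/* Model of the per-tuple ratchet step */
static void
advance_horizon(const RemovedTuple *tup, Xid *horizon)
{
    if (tup->xmax_committed && xid_follows(tup->xmax, *horizon))
        *horizon = tup->xmax;
}

int
main(void)
{
    RemovedTuple removed[] = {
        {1204, true}, {1199, true}, {1210, false},  /* aborted: ignored */
    };
    Xid         horizon = INVALID_XID; /* "no conflict needed" to start */

    for (int i = 0; i < 3; i++)
        advance_horizon(&removed[i], &horizon);

    /* 1204: newest committed remover across the whole operation */
    printf("snapshotConflictHorizon = %u\n", (unsigned) horizon);
    return 0;
}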
@@ -7558,7 +7561,7 @@ TransactionId
 heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
 {
     /* Initial assumption is that earlier pruning took care of conflict */
-    TransactionId latestRemovedXid = InvalidTransactionId;
+    TransactionId snapshotConflictHorizon = InvalidTransactionId;
     BlockNumber blkno = InvalidBlockNumber;
     Buffer      buf = InvalidBuffer;
     Page        page = NULL;
@@ -7769,8 +7772,8 @@ heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
         }
 
         /*
-         * Maintain latestRemovedXid value for deletion operation as a whole
-         * by advancing current value using heap tuple headers.  This is
+         * Maintain snapshotConflictHorizon value for deletion operation as a
+         * whole by advancing current value using heap tuple headers.  This is
          * loosely based on the logic for pruning a HOT chain.
          */
         offnum = ItemPointerGetOffsetNumber(htid);
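/*
 * A minimal standalone sketch (not part of this patch) of the update-chain
 * walk heap_index_delete_tuples() performs here: starting from the
 * index-referenced item, follow the chain, checking that each member's
 * xmin matches the prior member's xmax, and ratchet the conflict horizon
 * as each header is visited.  The fixed-size "page" and the omission of
 * commit-status checks are illustrative simplifications.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t Xid;
#define INVALID_XID 0
#define NO_NEXT     (-1)

typedef struct TupleStub
{
    Xid         xmin;           /* inserter */
    Xid         xmax;           /* updater (0 if never updated) */
    int         next;           /* offset of newer chain member, or NO_NEXT */
} TupleStub;

static int
xid_follows(Xid a, Xid b)
{
    return (int32_t) (a - b) > 0;
}

int
main(void)
{
    /* A three-member update chain: 500 -> 510 -> 520 */
    TupleStub   page[] = {
        {500, 510, 1},
        {510, 520, 2},
        {520, INVALID_XID, NO_NEXT},    /* chain tip; not yet updated */
    };
    Xid         horizon = INVALID_XID;
    Xid         prior_xmax = INVALID_XID;
    int         off = 0;        /* offset taken from the index TID */

    while (off != NO_NEXT)
    {
        TupleStub  *tup = &page[off];

        /* Chain validity: xmin must match the prior member's xmax */
        if (prior_xmax != INVALID_XID && tup->xmin != prior_xmax)
            break;

        /* Ratchet step, as HeapTupleHeaderAdvanceConflictHorizon() does */
        if (tup->xmax != INVALID_XID && xid_follows(tup->xmax, horizon))
            horizon = tup->xmax;

        prior_xmax = tup->xmax;
        off = tup->next;
    }

    printf("snapshotConflictHorizon = %u\n", (unsigned) horizon);   /* 520 */
    return 0;
}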
@@ -7805,12 +7808,12 @@ heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
             * LP_DEAD item.  This is okay because the earlier pruning
             * operation that made the line pointer LP_DEAD in the first place
             * must have considered the original tuple header as part of
-            * generating its own latestRemovedXid value.
+            * generating its own snapshotConflictHorizon value.
             *
             * Relying on XLOG_HEAP2_PRUNE records like this is the same
             * strategy that index vacuuming uses in all cases.  Index VACUUM
-            * WAL records don't even have a latestRemovedXid field of their
-            * own for this reason.
+            * WAL records don't even have a snapshotConflictHorizon field of
+            * their own for this reason.
             */
            if (!ItemIdIsNormal(lp))
                break;
@@ -7824,7 +7827,8 @@ heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
                 !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
                 break;
 
-            HeapTupleHeaderAdvanceLatestRemovedXid(htup, &latestRemovedXid);
+            HeapTupleHeaderAdvanceConflictHorizon(htup,
+                                                  &snapshotConflictHorizon);
 
             /*
              * If the tuple is not HOT-updated, then we are at the end of this
@@ -7856,7 +7860,7 @@ heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
     Assert(finalndeltids > 0 || delstate->bottomup);
     delstate->ndeltids = finalndeltids;
 
-    return latestRemovedXid;
+    return snapshotConflictHorizon;
 }
 
 /*
@@ -8232,6 +8236,9 @@ bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
  * corresponding visibility map block.  Both should have already been modified
  * and dirtied.
  *
+ * snapshotConflictHorizon comes from the largest xmin on the page being
+ * marked all-visible.  REDO routine uses it to generate recovery conflicts.
+ *
  * If checksums or wal_log_hints are enabled, we may also generate a full-page
  * image of heap_buffer.  Otherwise, we optimize away the FPI (by specifying
  * REGBUF_NO_IMAGE for the heap buffer), in which case the caller should *not*
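/*
 * A minimal standalone sketch (not part of this patch) of where the
 * xl_heap_visible conflict horizon comes from: the newest xmin among the
 * tuples on the page being marked all-visible.  On the primary this
 * corresponds to the visibility cutoff established while checking that
 * every tuple on the page is visible to all; the array below stands in
 * for the page's tuples.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t Xid;
#define INVALID_XID 0

static int
xid_follows(Xid a, Xid b)
{
    return (int32_t) (a - b) > 0;
}

int
main(void)
{
    /* xmin of each (all-visible) tuple on the page */
    Xid         xmins[] = {940, 951, 947, 950};
    Xid         cutoff = INVALID_XID;

    for (int i = 0; i < 4; i++)
        if (xid_follows(xmins[i], cutoff))
            cutoff = xmins[i];

    /* 951 would be carried in xlrec.snapshotConflictHorizon */
    printf("snapshotConflictHorizon = %u\n", (unsigned) cutoff);
    return 0;
}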
@@ -8239,7 +8246,7 @@ bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
  */
 XLogRecPtr
 log_heap_visible(RelFileLocator rlocator, Buffer heap_buffer, Buffer vm_buffer,
-                 TransactionId cutoff_xid, uint8 vmflags)
+                 TransactionId snapshotConflictHorizon, uint8 vmflags)
 {
     xl_heap_visible xlrec;
     XLogRecPtr  recptr;
@@ -8248,7 +8255,7 @@ log_heap_visible(RelFileLocator rlocator, Buffer heap_buffer, Buffer vm_buffer,
     Assert(BufferIsValid(heap_buffer));
     Assert(BufferIsValid(vm_buffer));
 
-    xlrec.cutoff_xid = cutoff_xid;
+    xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
     xlrec.flags = vmflags;
     XLogBeginInsert();
     XLogRegisterData((char *) &xlrec, SizeOfHeapVisible);
@@ -8683,7 +8690,8 @@ heap_xlog_prune(XLogReaderState *record)
      * no queries running for which the removed tuples are still visible.
      */
     if (InHotStandby)
-        ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rlocator);
+        ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon,
+                                            rlocator);
 
     /*
      * If we have a full-page image, restore it (using a cleanup lock) and
@@ -8851,7 +8859,8 @@ heap_xlog_visible(XLogReaderState *record)
      * rather than killing the transaction outright.
      */
     if (InHotStandby)
-        ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, rlocator);
+        ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon,
+                                            rlocator);
 
     /*
      * Read the heap page, if it still exists.  If the heap file has dropped or
@@ -8939,7 +8948,7 @@ heap_xlog_visible(XLogReaderState *record)
         visibilitymap_pin(reln, blkno, &vmbuffer);
 
         visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
-                          xlrec->cutoff_xid, xlrec->flags);
+                          xlrec->snapshotConflictHorizon, xlrec->flags);
 
         ReleaseBuffer(vmbuffer);
         FreeFakeRelcacheEntry(reln);
@@ -9105,7 +9114,8 @@ heap_xlog_freeze_page(XLogReaderState *record)
         RelFileLocator rlocator;
 
         XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
-        ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rlocator);
+        ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon,
+                                            rlocator);
     }
 
     if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
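/*
 * A minimal standalone sketch (not part of this patch) of the standby-side
 * test the redo routines above delegate to
 * ResolveRecoveryConflictWithSnapshot(): a backend whose snapshot xmin is
 * <= the WAL record's horizon might still need the removed tuples and is a
 * conflict candidate, while an invalid horizon means the record cannot
 * conflict with anyone.  Backend bookkeeping and cancellation are omitted;
 * the xmin array is illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t Xid;
#define INVALID_XID 0

static bool
xid_precedes_or_equals(Xid a, Xid b)
{
    return (int32_t) (a - b) <= 0;
}

/* Report which backends would be asked to resolve the conflict */
static void
resolve_conflict_with_snapshot(Xid horizon, const Xid *backend_xmins, int n)
{
    if (horizon == INVALID_XID)
        return;                 /* nothing removed was ever visible */

    for (int i = 0; i < n; i++)
    {
        if (backend_xmins[i] != INVALID_XID &&
            xid_precedes_or_equals(backend_xmins[i], horizon))
            printf("backend %d conflicts (xmin %u <= horizon %u)\n",
                   i, (unsigned) backend_xmins[i], (unsigned) horizon);
    }
}

int
main(void)
{
    Xid         backend_xmins[] = {990, 1005, INVALID_XID};

    /* Horizon as it might arrive in an xl_heap_prune/visible/freeze record */
    resolve_conflict_with_snapshot(1000, backend_xmins, 3);
    return 0;
}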