@@ -41,6 +41,7 @@
 #include "access/multixact.h"
 #include "access/parallel.h"
 #include "access/relscan.h"
+#include "access/subtrans.h"
 #include "access/sysattr.h"
 #include "access/tableam.h"
 #include "access/transam.h"
@@ -446,8 +447,8 @@ heapgetpage(TableScanDesc sscan, BlockNumber page)
 		else
 			valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);

-		CheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
-										&loctup, buffer, snapshot);
+		HeapCheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
+											&loctup, buffer, snapshot);

 		if (valid)
 			scan->rs_vistuples[ntup++] = lineoff;
@@ -668,9 +669,9 @@ heapgettup(HeapScanDesc scan,
 												 snapshot,
 												 scan->rs_cbuf);

-			CheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
-											tuple, scan->rs_cbuf,
-											snapshot);
+			HeapCheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
+												tuple, scan->rs_cbuf,
+												snapshot);

 			if (valid && key != NULL)
 				HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
@@ -1477,9 +1478,10 @@ heap_fetch(Relation relation,
 	valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);

 	if (valid)
-		PredicateLockTuple(relation, tuple, snapshot);
+		PredicateLockTID(relation, &(tuple->t_self), snapshot,
+						 HeapTupleHeaderGetXmin(tuple->t_data));

-	CheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
+	HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);

 	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
@@ -1610,13 +1612,14 @@ heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
 		{
 			/* If it's visible per the snapshot, we must return it */
 			valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
-			CheckForSerializableConflictOut(valid, relation, heapTuple,
-											buffer, snapshot);
+			HeapCheckForSerializableConflictOut(valid, relation, heapTuple,
+												buffer, snapshot);
 			if (valid)
 			{
 				ItemPointerSetOffsetNumber(tid, offnum);
-				PredicateLockTuple(relation, heapTuple, snapshot);
+				PredicateLockTID(relation, &heapTuple->t_self, snapshot,
+								 HeapTupleHeaderGetXmin(heapTuple->t_data));
 				if (all_dead)
 					*all_dead = false;
 				return true;
@@ -1750,7 +1753,7 @@ heap_get_latest_tid(TableScanDesc sscan,
 		 * candidate.
 		 */
 		valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
-		CheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
+		HeapCheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);

 		if (valid)
 			*tid = ctid;
@ -1905,7 +1908,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
* lock " gaps " as index page locks do . So we don ' t need to specify a
* buffer when making the call , which makes for a faster check .
*/
CheckForSerializableConflictIn ( relation , NULL , InvalidBuff er ) ;
CheckForSerializableConflictIn ( relation , NULL , InvalidBlockNumb er ) ;
/* NO EREPORT(ERROR) from here till changes are logged */
START_CRIT_SECTION ( ) ;
@@ -2159,7 +2162,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
 	 * lock "gaps" as index page locks do.  So we don't need to specify a
 	 * buffer when making the call, which makes for a faster check.
 	 */
-	CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
+	CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);

 	ndone = 0;
 	while (ndone < ntuples)
@@ -2350,7 +2353,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
 	 * lock "gaps" as index page locks do.  So we don't need to specify a
 	 * buffer when making the call.
 	 */
-	CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
+	CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);

 	/*
 	 * If tuples are cachable, mark them for invalidation from the caches in
@@ -2664,7 +2667,7 @@ l1:
 	 * being visible to the scan (i.e., an exclusive buffer content lock is
 	 * continuously held from this point until the tuple delete is visible).
 	 */
-	CheckForSerializableConflictIn(relation, &tp, buffer);
+	CheckForSerializableConflictIn(relation, tid, BufferGetBlockNumber(buffer));

 	/* replace cid with a combo cid if necessary */
 	HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
@@ -3580,7 +3583,7 @@ l2:
 	 * will include checking the relation level, there is no benefit to a
 	 * separate check for the new tuple.
 	 */
-	CheckForSerializableConflictIn(relation, &oldtup, buffer);
+	CheckForSerializableConflictIn(relation, otid, BufferGetBlockNumber(buffer));

 	/*
 	 * At this point newbuf and buffer are both pinned and locked, and newbuf
@@ -9043,3 +9046,93 @@ heap_mask(char *pagedata, BlockNumber blkno)
 		}
 	}
 }
+
+/*
+ * HeapCheckForSerializableConflictOut
+ *		We are reading a tuple which has been modified.  If it is visible to
+ *		us but has been deleted, that indicates a rw-conflict out.  If it's
+ *		not visible and was created by a concurrent (overlapping)
+ *		serializable transaction, that is also a rw-conflict out.
+ *
+ * We will determine the top level xid of the writing transaction with which
+ * we may be in conflict, and check for overlap with our own transaction.
+ * If the transactions overlap (i.e., they cannot see each other's writes),
+ * then we have a conflict out.
+ *
+ * This function should be called just about anywhere in heapam.c where a
+ * tuple has been read.  The caller must hold at least a shared lock on the
+ * buffer, because this function might set hint bits on the tuple.  There is
+ * currently no known reason to call this function from an index AM.
+ */
+void
+HeapCheckForSerializableConflictOut(bool visible, Relation relation,
+									HeapTuple tuple, Buffer buffer,
+									Snapshot snapshot)
+{
+	TransactionId xid;
+	HTSV_Result htsvResult;
+
+	if (!CheckForSerializableConflictOutNeeded(relation, snapshot))
+		return;
+
+	/*
+	 * Check to see whether the tuple has been written to by a concurrent
+	 * transaction, either to create it not visible to us, or to delete it
+	 * while it is visible to us.  The "visible" bool indicates whether the
+	 * tuple is visible to us, while HeapTupleSatisfiesVacuum checks what
+	 * else is going on with it.
+	 */
+	htsvResult = HeapTupleSatisfiesVacuum(tuple, TransactionXmin, buffer);
+	switch (htsvResult)
+	{
+		case HEAPTUPLE_LIVE:
+			if (visible)
+				return;
+			xid = HeapTupleHeaderGetXmin(tuple->t_data);
+			break;
+		case HEAPTUPLE_RECENTLY_DEAD:
+			if (!visible)
+				return;
+			xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
+			break;
+		case HEAPTUPLE_DELETE_IN_PROGRESS:
+			xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
+			break;
+		case HEAPTUPLE_INSERT_IN_PROGRESS:
+			xid = HeapTupleHeaderGetXmin(tuple->t_data);
+			break;
+		case HEAPTUPLE_DEAD:
+			return;
+		default:
+
+			/*
+			 * The only way to get to this default clause is if a new value
+			 * is added to the enum type without adding it to this switch
+			 * statement.  That's a bug, so elog.
+			 */
+			elog(ERROR, "unrecognized return value from HeapTupleSatisfiesVacuum: %u", htsvResult);
+
+			/*
+			 * In spite of having all enum values covered and calling elog on
+			 * this default, some compilers think this is a code path which
+			 * allows xid to be used below without initialization.  Silence
+			 * that warning.
+			 */
+			xid = InvalidTransactionId;
+	}
+
+	Assert(TransactionIdIsValid(xid));
+	Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
+
+	/*
+	 * Find top level xid.  Bail out if xid is too early to be a conflict, or
+	 * if it's our own xid.
+	 */
+	if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
+		return;
+	xid = SubTransGetTopmostTransaction(xid);
+	if (TransactionIdPrecedes(xid, TransactionXmin))
+		return;
+
+	CheckForSerializableConflictOut(relation, xid, snapshot);
+}
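
For readers following the API change, here is a minimal sketch of how a heap-reading code path is expected to adopt the new calls: take a TID-based predicate lock on visible tuples, then run the heap-specific conflict-out check whether or not the tuple was visible. It is only an illustration, not part of the patch; fetch_and_lock_visible() and its locals are hypothetical, while HeapTupleSatisfiesVisibility, PredicateLockTID and HeapCheckForSerializableConflictOut are used with the argument shapes that appear in the hunks above.

/*
 * Hypothetical caller (not in this patch), sketched to show the calling
 * convention: readers predicate-lock by TID and then run the heap-specific
 * rw-conflict-out check; the caller must hold at least a share lock on
 * "buffer" because hint bits may be set on the tuple.
 */
static bool
fetch_and_lock_visible(Relation relation, Buffer buffer, HeapTuple tuple,
					   Snapshot snapshot)
{
	bool		valid;

	valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);

	if (valid)
		PredicateLockTID(relation, &tuple->t_self, snapshot,
						 HeapTupleHeaderGetXmin(tuple->t_data));

	/* Detect rw-conflicts out against the tuple's writer, visible or not. */
	HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer,
										snapshot);

	return valid;
}

Writers follow the same pattern in reverse: as the hunks in heap_delete and heap_update show, conflicts-in are now reported by TID and block number, e.g. CheckForSerializableConflictIn(relation, tid, BufferGetBlockNumber(buffer)), or with InvalidBlockNumber when no specific page is involved.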