@@ -2121,6 +2121,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
int ndone ;
PGAlignedBlock scratch ;
Page page ;
Buffer vmbuffer = InvalidBuffer ;
bool needwal ;
Size saveFreeSpace ;
bool need_tuple_data = RelationIsLogicallyLogged ( relation ) ;
@@ -2175,8 +2176,9 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
while ( ndone < ntuples )
{
Buffer buffer ;
Buffer vmbuffer = InvalidBuffer ;
bool starting_with_empty_page ;
bool all_visible_cleared = false ;
bool all_frozen_set = false ;
int nthispage ;
CHECK_FOR_INTERRUPTS ( ) ;
@@ -2184,12 +2186,20 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
/*
* Find buffer where at least the next tuple will fit . If the page is
* all - visible , this will also pin the requisite visibility map page .
*
* Also pin visibility map page if COPY FREEZE inserts tuples into an
* empty page . See all_frozen_set below .
*/
buffer = RelationGetBufferForTuple ( relation , heaptuples [ ndone ] - > t_len ,
InvalidBuffer , options , bistate ,
& vmbuffer , NULL ) ;
page = BufferGetPage ( buffer ) ;
starting_with_empty_page = PageGetMaxOffsetNumber ( page ) = = 0 ;
if ( starting_with_empty_page & & ( options & HEAP_INSERT_FROZEN ) )
all_frozen_set = true ;
/* NO EREPORT(ERROR) from here till changes are logged */
START_CRIT_SECTION ( ) ;
@@ -2223,7 +2233,14 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
log_heap_new_cid ( relation , heaptup ) ;
}
if ( PageIsAllVisible ( page ) )
/*
* If the page is all visible , need to clear that , unless we ' re only
* going to add further frozen rows to it .
*
* If we ' re only adding already frozen rows to a previously empty
* page , mark it as all - visible .
*/
if ( PageIsAllVisible ( page ) & & ! ( options & HEAP_INSERT_FROZEN ) )
{
all_visible_cleared = true ;
PageClearAllVisible ( page ) ;
@@ -2231,6 +2248,8 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
BufferGetBlockNumber ( buffer ) ,
vmbuffer , VISIBILITYMAP_VALID_BITS ) ;
}
else if ( all_frozen_set )
PageSetAllVisible ( page ) ;
/*
* XXX Should we set PageSetPrunable on this page ? See heap_insert ( )
@@ -2254,8 +2273,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
* If the page was previously empty , we can reinit the page
* instead of restoring the whole thing .
*/
init = ( ItemPointerGetOffsetNumber ( & ( heaptuples [ ndone ] - > t_self ) ) = = FirstOffsetNumber & &
PageGetMaxOffsetNumber ( page ) = = FirstOffsetNumber + nthispage - 1 ) ;
init = starting_with_empty_page ;
/* allocate xl_heap_multi_insert struct from the scratch area */
xlrec = ( xl_heap_multi_insert * ) scratchptr ;
@@ -2273,7 +2291,15 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
/* the rest of the scratch space is used for tuple data */
tupledata = scratchptr ;
xlrec - > flags = all_visible_cleared ? XLH_INSERT_ALL_VISIBLE_CLEARED : 0 ;
/* check that the mutually exclusive flags are not both set */
Assert ( ! ( all_visible_cleared & & all_frozen_set ) ) ;
xlrec - > flags = 0 ;
if ( all_visible_cleared )
xlrec - > flags = XLH_INSERT_ALL_VISIBLE_CLEARED ;
if ( all_frozen_set )
xlrec - > flags = XLH_INSERT_ALL_FROZEN_SET ;
xlrec - > ntuples = nthispage ;
/*
@@ -2347,13 +2373,40 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
END_CRIT_SECTION ( ) ;
UnlockReleaseBuffer ( buffer ) ;
if ( vmbuffer ! = InvalidBuffer )
ReleaseBuffer ( vmbuffer ) ;
/*
* If we ' ve frozen everything on the page , update the visibilitymap .
* We ' re already holding pin on the vmbuffer .
*/
if ( all_frozen_set )
{
Assert ( PageIsAllVisible ( page ) ) ;
Assert ( visibilitymap_pin_ok ( BufferGetBlockNumber ( buffer ) , vmbuffer ) ) ;
/*
* It ' s fine to use InvalidTransactionId here - this is only used
* when HEAP_INSERT_FROZEN is specified , which intentionally
* violates visibility rules .
*/
visibilitymap_set ( relation , BufferGetBlockNumber ( buffer ) , buffer ,
InvalidXLogRecPtr , vmbuffer ,
InvalidTransactionId ,
VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN ) ;
}
UnlockReleaseBuffer ( buffer ) ;
ndone + = nthispage ;
/*
* NB : Only release vmbuffer after inserting all tuples - it ' s fairly
* likely that we ' ll insert into subsequent heap pages that are likely
* to use the same vm page .
*/
}
/* We're done with inserting all tuples, so release the last vmbuffer. */
if ( vmbuffer ! = InvalidBuffer )
ReleaseBuffer ( vmbuffer ) ;
/*
* We ' re done with the actual inserts . Check for conflicts again , to
* ensure that all rw - conflicts in to these inserts are detected . Without
@@ -8725,6 +8778,10 @@ heap_xlog_insert(XLogReaderState *record)
if ( xlrec - > flags & XLH_INSERT_ALL_VISIBLE_CLEARED )
PageClearAllVisible ( page ) ;
/* XLH_INSERT_ALL_FROZEN_SET implies that all tuples are visible */
if ( xlrec - > flags & XLH_INSERT_ALL_FROZEN_SET )
PageSetAllVisible ( page ) ;
MarkBufferDirty ( buffer ) ;
}
if ( BufferIsValid ( buffer ) )
@@ -8775,6 +8832,10 @@ heap_xlog_multi_insert(XLogReaderState *record)
XLogRecGetBlockTag ( record , 0 , & rnode , NULL , & blkno ) ;
/* check that the mutually exclusive flags are not both set */
Assert ( ! ( ( xlrec - > flags & XLH_INSERT_ALL_VISIBLE_CLEARED ) & &
( xlrec - > flags & XLH_INSERT_ALL_FROZEN_SET ) ) ) ;
/*
* The visibility map may need to be fixed even if the heap page is
* already up - to - date .