Split heap_page_prune_and_freeze() into helpers

Refactor the setup and planning phases of pruning and freezing into
helpers. This streamlines heap_page_prune_and_freeze() and makes it
clearer when the examination of tuples ends and page modifications begin.

No code change beyond what was required to extract the code into helper
functions.

Author: Melanie Plageman <melanieplageman@gmail.com>
Reviewed-by: Andres Freund <andres@anarazel.de>
Reviewed-by: Chao Li <li.evan.chao@gmail.com>
Discussion: https://postgr.es/m/mhf4vkmh3j57zx7vuxp4jagtdzwhu3573pgfpmnjwqa6i6yj5y%40sy4ymcdtdklo
commit e135e04457
parent 9446f918ac

 src/backend/access/heap/pruneheap.c | 559
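
For orientation, here is a condensed sketch of how heap_page_prune_and_freeze()
looks after this refactoring, pieced together from the new code in the diff
below. Declarations and the later decision/execution steps are abbreviated, so
treat it as an outline rather than the exact source:

    void
    heap_page_prune_and_freeze(PruneFreezeParams *params,
                               PruneFreezeResult *presult,
                               OffsetNumber *off_loc,
                               TransactionId *new_relfrozen_xid,
                               MultiXactId *new_relmin_mxid)
    {
        PruneState  prstate;

        /* Step 1: copy parameters and initialize all working state. */
        prune_freeze_setup(params,
                           new_relfrozen_xid ? *new_relfrozen_xid : InvalidTransactionId,
                           new_relmin_mxid ? *new_relmin_mxid : InvalidMultiXactId,
                           presult, &prstate);

        /*
         * Step 2: examine every line pointer and tuple and queue up the
         * state changes to apply; the page itself is not modified yet.
         */
        prune_freeze_plan(RelationGetRelid(params->relation),
                          params->buffer, &prstate, off_loc);

        /*
         * Step 3 (unchanged by this commit, abbreviated here): decide whether
         * to prune and/or freeze, e.g. via heap_page_will_freeze(), and
         * execute the queued changes in a critical section.
         */
    }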

@@ -157,6 +157,14 @@ typedef struct
 } PruneState;
 
 /* Local functions */
+static void prune_freeze_setup(PruneFreezeParams *params,
+                               TransactionId new_relfrozen_xid,
+                               MultiXactId new_relmin_mxid,
+                               const PruneFreezeResult *presult,
+                               PruneState *prstate);
+static void prune_freeze_plan(Oid reloid, Buffer buffer,
+                              PruneState *prstate,
+                              OffsetNumber *off_loc);
 static HTSV_Result heap_prune_satisfies_vacuum(PruneState *prstate,
                                                HeapTuple tup,
                                                Buffer buffer);
@@ -312,203 +320,25 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
 }
 
 /*
- * Decide whether to proceed with freezing according to the freeze plans
- * prepared for the given heap buffer. If freezing is chosen, this function
- * performs several pre-freeze checks.
- *
- * The values of do_prune, do_hint_prune, and did_tuple_hint_fpi must be
- * determined before calling this function.
- *
- * prstate is both an input and output parameter.
- *
- * Returns true if we should apply the freeze plans and freeze tuples on the
- * page, and false otherwise.
- */
-static bool
-heap_page_will_freeze(Relation relation, Buffer buffer,
-                      bool did_tuple_hint_fpi,
-                      bool do_prune,
-                      bool do_hint_prune,
-                      PruneState *prstate)
-{
-    bool        do_freeze = false;
-
-    /*
-     * If the caller specified we should not attempt to freeze any tuples,
-     * validate that everything is in the right state and return.
-     */
-    if (!prstate->attempt_freeze)
-    {
-        Assert(!prstate->all_frozen && prstate->nfrozen == 0);
-        Assert(prstate->lpdead_items == 0 || !prstate->all_visible);
-        return false;
-    }
-
-    if (prstate->pagefrz.freeze_required)
-    {
-        /*
-         * heap_prepare_freeze_tuple indicated that at least one XID/MXID from
-         * before FreezeLimit/MultiXactCutoff is present. Must freeze to
-         * advance relfrozenxid/relminmxid.
-         */
-        do_freeze = true;
-    }
-    else
-    {
-        /*
-         * Opportunistically freeze the page if we are generating an FPI
-         * anyway and if doing so means that we can set the page all-frozen
-         * afterwards (might not happen until VACUUM's final heap pass).
-         *
-         * XXX: Previously, we knew if pruning emitted an FPI by checking
-         * pgWalUsage.wal_fpi before and after pruning. Once the freeze and
-         * prune records were combined, this heuristic couldn't be used
-         * anymore. The opportunistic freeze heuristic must be improved;
-         * however, for now, try to approximate the old logic.
-         */
-        if (prstate->all_frozen && prstate->nfrozen > 0)
-        {
-            Assert(prstate->all_visible);
-
-            /*
-             * Freezing would make the page all-frozen. Have already emitted
-             * an FPI or will do so anyway?
-             */
-            if (RelationNeedsWAL(relation))
-            {
-                if (did_tuple_hint_fpi)
-                    do_freeze = true;
-                else if (do_prune)
-                {
-                    if (XLogCheckBufferNeedsBackup(buffer))
-                        do_freeze = true;
-                }
-                else if (do_hint_prune)
-                {
-                    if (XLogHintBitIsNeeded() && XLogCheckBufferNeedsBackup(buffer))
-                        do_freeze = true;
-                }
-            }
-        }
-    }
-
-    if (do_freeze)
-    {
-        /*
-         * Validate the tuples we will be freezing before entering the
-         * critical section.
-         */
-        heap_pre_freeze_checks(buffer, prstate->frozen, prstate->nfrozen);
-
-        /*
-         * Calculate what the snapshot conflict horizon should be for a record
-         * freezing tuples. We can use the visibility_cutoff_xid as our cutoff
-         * for conflicts when the whole page is eligible to become all-frozen
-         * in the VM once we're done with it. Otherwise, we generate a
-         * conservative cutoff by stepping back from OldestXmin.
-         */
-        if (prstate->all_frozen)
-            prstate->frz_conflict_horizon = prstate->visibility_cutoff_xid;
-        else
-        {
-            /* Avoids false conflicts when hot_standby_feedback in use */
-            prstate->frz_conflict_horizon = prstate->cutoffs->OldestXmin;
-            TransactionIdRetreat(prstate->frz_conflict_horizon);
-        }
-    }
-    else if (prstate->nfrozen > 0)
-    {
-        /*
-         * The page contained some tuples that were not already frozen, and we
-         * chose not to freeze them now. The page won't be all-frozen then.
-         */
-        Assert(!prstate->pagefrz.freeze_required);
-        prstate->all_frozen = false;
-        prstate->nfrozen = 0;   /* avoid miscounts in instrumentation */
-    }
-    else
-    {
-        /*
-         * We have no freeze plans to execute. The page might already be
-         * all-frozen (perhaps only following pruning), though. Such pages
-         * can be marked all-frozen in the VM by our caller, even though none
-         * of its tuples were newly frozen here.
-         */
-    }
-
-    return do_freeze;
-}
-
-/*
- * Prune and repair fragmentation and potentially freeze tuples on the
- * specified page.
- *
- * Caller must have pin and buffer cleanup lock on the page. Note that we
- * don't update the FSM information for page on caller's behalf. Caller might
- * also need to account for a reduction in the length of the line pointer
- * array following array truncation by us.
- *
- * params contains the input parameters used to control freezing and pruning
- * behavior. See the definition of PruneFreezeParams for more on what each
- * parameter does.
- *
- * If the HEAP_PAGE_PRUNE_FREEZE option is set in params, we will freeze
- * tuples if it's required in order to advance relfrozenxid / relminmxid, or
- * if it's considered advantageous for overall system performance to do so
- * now. The 'params.cutoffs', 'presult', 'new_relfrozen_xid' and
- * 'new_relmin_mxid' arguments are required when freezing. When
- * HEAP_PAGE_PRUNE_FREEZE option is passed, we also set presult->all_visible
- * and presult->all_frozen after determining whether or not to
- * opportunistically freeze, to indicate if the VM bits can be set. They are
- * always set to false when the HEAP_PAGE_PRUNE_FREEZE option is not passed,
- * because at the moment only callers that also freeze need that information.
- *
- * presult contains output parameters needed by callers, such as the number of
- * tuples removed and the offsets of dead items on the page after pruning.
- * heap_page_prune_and_freeze() is responsible for initializing it. Required
- * by all callers.
- *
- * off_loc is the offset location required by the caller to use in error
- * callback.
- *
- * new_relfrozen_xid and new_relmin_mxid must provided by the caller if the
- * HEAP_PAGE_PRUNE_FREEZE option is set in params. On entry, they contain the
- * oldest XID and multi-XID seen on the relation so far. They will be updated
- * with oldest values present on the page after pruning. After processing the
- * whole relation, VACUUM can use these values as the new
- * relfrozenxid/relminmxid for the relation.
+ * Helper for heap_page_prune_and_freeze() to initialize the PruneState using
+ * the provided parameters.
  */
-void
-heap_page_prune_and_freeze(PruneFreezeParams *params,
-                           PruneFreezeResult *presult,
-                           OffsetNumber *off_loc,
-                           TransactionId *new_relfrozen_xid,
-                           MultiXactId *new_relmin_mxid)
+static void
+prune_freeze_setup(PruneFreezeParams *params,
+                   TransactionId new_relfrozen_xid,
+                   MultiXactId new_relmin_mxid,
+                   const PruneFreezeResult *presult,
+                   PruneState *prstate)
 {
-    Buffer      buffer = params->buffer;
-    Page        page = BufferGetPage(buffer);
-    BlockNumber blockno = BufferGetBlockNumber(buffer);
-    OffsetNumber offnum,
-                maxoff;
-    PruneState  prstate;
-    HeapTupleData tup;
-    bool        do_freeze;
-    bool        do_prune;
-    bool        do_hint_prune;
-    bool        did_tuple_hint_fpi;
-    int64       fpi_before = pgWalUsage.wal_fpi;
-
     /* Copy parameters to prstate */
-    prstate.vistest = params->vistest;
-    prstate.mark_unused_now =
+    prstate->vistest = params->vistest;
+    prstate->mark_unused_now =
         (params->options & HEAP_PAGE_PRUNE_MARK_UNUSED_NOW) != 0;
 
     /* cutoffs must be provided if we will attempt freezing */
     Assert(!(params->options & HEAP_PAGE_PRUNE_FREEZE) || params->cutoffs);
-    prstate.attempt_freeze = (params->options & HEAP_PAGE_PRUNE_FREEZE) != 0;
-    prstate.cutoffs = params->cutoffs;
+    prstate->attempt_freeze = (params->options & HEAP_PAGE_PRUNE_FREEZE) != 0;
+    prstate->cutoffs = params->cutoffs;
 
     /*
      * Our strategy is to scan the page and make lists of items to change,
@@ -521,41 +351,42 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
      * prunable, we will save the lowest relevant XID in new_prune_xid. Also
      * initialize the rest of our working state.
      */
-    prstate.new_prune_xid = InvalidTransactionId;
-    prstate.latest_xid_removed = InvalidTransactionId;
-    prstate.nredirected = prstate.ndead = prstate.nunused = prstate.nfrozen = 0;
-    prstate.nroot_items = 0;
-    prstate.nheaponly_items = 0;
+    prstate->new_prune_xid = InvalidTransactionId;
+    prstate->latest_xid_removed = InvalidTransactionId;
+    prstate->nredirected = prstate->ndead = prstate->nunused = 0;
+    prstate->nfrozen = 0;
+    prstate->nroot_items = 0;
+    prstate->nheaponly_items = 0;
 
     /* initialize page freezing working state */
-    prstate.pagefrz.freeze_required = false;
+    prstate->pagefrz.freeze_required = false;
 
-    if (prstate.attempt_freeze)
+    if (prstate->attempt_freeze)
     {
-        Assert(new_relfrozen_xid && new_relmin_mxid);
-        prstate.pagefrz.FreezePageRelfrozenXid = *new_relfrozen_xid;
-        prstate.pagefrz.NoFreezePageRelfrozenXid = *new_relfrozen_xid;
-        prstate.pagefrz.FreezePageRelminMxid = *new_relmin_mxid;
-        prstate.pagefrz.NoFreezePageRelminMxid = *new_relmin_mxid;
+        prstate->pagefrz.FreezePageRelfrozenXid = new_relfrozen_xid;
+        prstate->pagefrz.NoFreezePageRelfrozenXid = new_relfrozen_xid;
+        prstate->pagefrz.FreezePageRelminMxid = new_relmin_mxid;
+        prstate->pagefrz.NoFreezePageRelminMxid = new_relmin_mxid;
     }
     else
     {
-        Assert(new_relfrozen_xid == NULL && new_relmin_mxid == NULL);
-        prstate.pagefrz.FreezePageRelminMxid = InvalidMultiXactId;
-        prstate.pagefrz.NoFreezePageRelminMxid = InvalidMultiXactId;
-        prstate.pagefrz.FreezePageRelfrozenXid = InvalidTransactionId;
-        prstate.pagefrz.NoFreezePageRelfrozenXid = InvalidTransactionId;
+        Assert(new_relfrozen_xid == InvalidTransactionId &&
+               new_relmin_mxid == InvalidMultiXactId);
+        prstate->pagefrz.FreezePageRelminMxid = InvalidMultiXactId;
+        prstate->pagefrz.NoFreezePageRelminMxid = InvalidMultiXactId;
+        prstate->pagefrz.FreezePageRelfrozenXid = InvalidTransactionId;
+        prstate->pagefrz.NoFreezePageRelfrozenXid = InvalidTransactionId;
     }
 
-    prstate.ndeleted = 0;
-    prstate.live_tuples = 0;
-    prstate.recently_dead_tuples = 0;
-    prstate.hastup = false;
-    prstate.lpdead_items = 0;
-    prstate.deadoffsets = presult->deadoffsets;
-    prstate.frz_conflict_horizon = InvalidTransactionId;
+    prstate->ndeleted = 0;
+    prstate->live_tuples = 0;
+    prstate->recently_dead_tuples = 0;
+    prstate->hastup = false;
+    prstate->lpdead_items = 0;
+    prstate->deadoffsets = (OffsetNumber *) presult->deadoffsets;
+    prstate->frz_conflict_horizon = InvalidTransactionId;
 
     /*
-     * Caller may update the VM after we're done. We can keep track of
+     * Vacuum may update the VM after we're done. We can keep track of
      * whether the page will be all-visible and all-frozen after pruning and
      * freezing to help the caller to do that.
      *
@@ -578,10 +409,10 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
      * all_frozen before we return them to the caller, so that the caller
      * doesn't set the VM bits incorrectly.
      */
-    if (prstate.attempt_freeze)
+    if (prstate->attempt_freeze)
     {
-        prstate.all_visible = true;
-        prstate.all_frozen = true;
+        prstate->all_visible = true;
+        prstate->all_frozen = true;
     }
     else
     {
@@ -589,8 +420,8 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
          * Initializing to false allows skipping the work to update them in
          * heap_prune_record_unchanged_lp_normal().
          */
-        prstate.all_visible = false;
-        prstate.all_frozen = false;
+        prstate->all_visible = false;
+        prstate->all_frozen = false;
     }
 
     /*
@@ -601,10 +432,29 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
      * running transaction on the standby does not see tuples on the page as
      * all-visible, so the conflict horizon remains InvalidTransactionId.
      */
-    prstate.visibility_cutoff_xid = InvalidTransactionId;
+    prstate->visibility_cutoff_xid = InvalidTransactionId;
+}
 
-    maxoff = PageGetMaxOffsetNumber(page);
-    tup.t_tableOid = RelationGetRelid(params->relation);
+/*
+ * Helper for heap_page_prune_and_freeze(). Iterates over every tuple on the
+ * page, examines its visibility information, and determines the appropriate
+ * action for each tuple. All tuples are processed and classified during this
+ * phase, but no modifications are made to the page until the later execution
+ * stage.
+ *
+ * *off_loc is used for error callback and cleared before returning.
+ */
+static void
+prune_freeze_plan(Oid reloid, Buffer buffer, PruneState *prstate,
+                  OffsetNumber *off_loc)
+{
+    Page        page = BufferGetPage(buffer);
+    BlockNumber blockno = BufferGetBlockNumber(buffer);
+    OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
+    OffsetNumber offnum;
+    HeapTupleData tup;
+
+    tup.t_tableOid = reloid;
 
     /*
      * Determine HTSV for all tuples, and queue them up for processing as HOT
@@ -639,13 +489,13 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
          */
        *off_loc = offnum;
 
-        prstate.processed[offnum] = false;
-        prstate.htsv[offnum] = -1;
+        prstate->processed[offnum] = false;
+        prstate->htsv[offnum] = -1;
 
         /* Nothing to do if slot doesn't contain a tuple */
         if (!ItemIdIsUsed(itemid))
         {
-            heap_prune_record_unchanged_lp_unused(page, &prstate, offnum);
+            heap_prune_record_unchanged_lp_unused(page, prstate, offnum);
             continue;
         }
@@ -655,17 +505,17 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
              * If the caller set mark_unused_now true, we can set dead line
              * pointers LP_UNUSED now.
              */
-            if (unlikely(prstate.mark_unused_now))
-                heap_prune_record_unused(&prstate, offnum, false);
+            if (unlikely(prstate->mark_unused_now))
+                heap_prune_record_unused(prstate, offnum, false);
             else
-                heap_prune_record_unchanged_lp_dead(page, &prstate, offnum);
+                heap_prune_record_unchanged_lp_dead(page, prstate, offnum);
             continue;
         }
 
         if (ItemIdIsRedirected(itemid))
         {
             /* This is the start of a HOT chain */
-            prstate.root_items[prstate.nroot_items++] = offnum;
+            prstate->root_items[prstate->nroot_items++] = offnum;
             continue;
         }
@@ -679,21 +529,15 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
         tup.t_len = ItemIdGetLength(itemid);
         ItemPointerSet(&tup.t_self, blockno, offnum);
 
-        prstate.htsv[offnum] = heap_prune_satisfies_vacuum(&prstate, &tup,
-                                                           buffer);
+        prstate->htsv[offnum] = heap_prune_satisfies_vacuum(prstate, &tup,
+                                                            buffer);
 
         if (!HeapTupleHeaderIsHeapOnly(htup))
-            prstate.root_items[prstate.nroot_items++] = offnum;
+            prstate->root_items[prstate->nroot_items++] = offnum;
         else
-            prstate.heaponly_items[prstate.nheaponly_items++] = offnum;
+            prstate->heaponly_items[prstate->nheaponly_items++] = offnum;
     }
 
-    /*
-     * If checksums are enabled, heap_prune_satisfies_vacuum() may have caused
-     * an FPI to be emitted.
-     */
-    did_tuple_hint_fpi = fpi_before != pgWalUsage.wal_fpi;
-
     /*
      * Process HOT chains.
      *
@@ -705,30 +549,30 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
      * the page instead of using the root_items array, also did it in
      * ascending offset number order.)
      */
-    for (int i = prstate.nroot_items - 1; i >= 0; i--)
+    for (int i = prstate->nroot_items - 1; i >= 0; i--)
     {
-        offnum = prstate.root_items[i];
+        offnum = prstate->root_items[i];
 
         /* Ignore items already processed as part of an earlier chain */
-        if (prstate.processed[offnum])
+        if (prstate->processed[offnum])
             continue;
 
         /* see preceding loop */
         *off_loc = offnum;
 
         /* Process this item or chain of items */
-        heap_prune_chain(page, blockno, maxoff, offnum, &prstate);
+        heap_prune_chain(page, blockno, maxoff, offnum, prstate);
     }
 
     /*
      * Process any heap-only tuples that were not already processed as part of
      * a HOT chain.
      */
-    for (int i = prstate.nheaponly_items - 1; i >= 0; i--)
+    for (int i = prstate->nheaponly_items - 1; i >= 0; i--)
     {
-        offnum = prstate.heaponly_items[i];
+        offnum = prstate->heaponly_items[i];
 
-        if (prstate.processed[offnum])
+        if (prstate->processed[offnum])
             continue;
 
         /* see preceding loop */
@@ -747,7 +591,7 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
          * return true for an XMIN_INVALID tuple, so this code will work even
          * when there were sequential updates within the aborted transaction.)
          */
-        if (prstate.htsv[offnum] == HEAPTUPLE_DEAD)
+        if (prstate->htsv[offnum] == HEAPTUPLE_DEAD)
         {
             ItemId      itemid = PageGetItemId(page, offnum);
             HeapTupleHeader htup = (HeapTupleHeader) PageGetItem(page, itemid);
@@ -755,8 +599,8 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
             if (likely(!HeapTupleHeaderIsHotUpdated(htup)))
             {
                 HeapTupleHeaderAdvanceConflictHorizon(htup,
-                                                      &prstate.latest_xid_removed);
-                heap_prune_record_unused(&prstate, offnum, true);
+                                                      &prstate->latest_xid_removed);
+                heap_prune_record_unused(prstate, offnum, true);
             }
             else
             {
@@ -773,7 +617,7 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
             }
         }
         else
-            heap_prune_record_unchanged_lp_normal(page, &prstate, offnum);
+            heap_prune_record_unchanged_lp_normal(page, prstate, offnum);
     }
 
     /* We should now have processed every tuple exactly once */
@@ -784,12 +628,223 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
     {
         *off_loc = offnum;
-        Assert(prstate.processed[offnum]);
+        Assert(prstate->processed[offnum]);
     }
 #endif
 
     /* Clear the offset information once we have processed the given page. */
     *off_loc = InvalidOffsetNumber;
+}
+
+/*
+ * Decide whether to proceed with freezing according to the freeze plans
+ * prepared for the given heap buffer. If freezing is chosen, this function
+ * performs several pre-freeze checks.
+ *
+ * The values of do_prune, do_hint_prune, and did_tuple_hint_fpi must be
+ * determined before calling this function.
+ *
+ * prstate is both an input and output parameter.
+ *
+ * Returns true if we should apply the freeze plans and freeze tuples on the
+ * page, and false otherwise.
+ */
+static bool
+heap_page_will_freeze(Relation relation, Buffer buffer,
+                      bool did_tuple_hint_fpi,
+                      bool do_prune,
+                      bool do_hint_prune,
+                      PruneState *prstate)
+{
+    bool        do_freeze = false;
+
+    /*
+     * If the caller specified we should not attempt to freeze any tuples,
+     * validate that everything is in the right state and return.
+     */
+    if (!prstate->attempt_freeze)
+    {
+        Assert(!prstate->all_frozen && prstate->nfrozen == 0);
+        Assert(prstate->lpdead_items == 0 || !prstate->all_visible);
+        return false;
+    }
+
+    if (prstate->pagefrz.freeze_required)
+    {
+        /*
+         * heap_prepare_freeze_tuple indicated that at least one XID/MXID from
+         * before FreezeLimit/MultiXactCutoff is present. Must freeze to
+         * advance relfrozenxid/relminmxid.
+         */
+        do_freeze = true;
+    }
+    else
+    {
+        /*
+         * Opportunistically freeze the page if we are generating an FPI
+         * anyway and if doing so means that we can set the page all-frozen
+         * afterwards (might not happen until VACUUM's final heap pass).
+         *
+         * XXX: Previously, we knew if pruning emitted an FPI by checking
+         * pgWalUsage.wal_fpi before and after pruning. Once the freeze and
+         * prune records were combined, this heuristic couldn't be used
+         * anymore. The opportunistic freeze heuristic must be improved;
+         * however, for now, try to approximate the old logic.
+         */
+        if (prstate->all_frozen && prstate->nfrozen > 0)
+        {
+            Assert(prstate->all_visible);
+
+            /*
+             * Freezing would make the page all-frozen. Have already emitted
+             * an FPI or will do so anyway?
+             */
+            if (RelationNeedsWAL(relation))
+            {
+                if (did_tuple_hint_fpi)
+                    do_freeze = true;
+                else if (do_prune)
+                {
+                    if (XLogCheckBufferNeedsBackup(buffer))
+                        do_freeze = true;
+                }
+                else if (do_hint_prune)
+                {
+                    if (XLogHintBitIsNeeded() && XLogCheckBufferNeedsBackup(buffer))
+                        do_freeze = true;
+                }
+            }
+        }
+    }
+
+    if (do_freeze)
+    {
+        /*
+         * Validate the tuples we will be freezing before entering the
+         * critical section.
+         */
+        heap_pre_freeze_checks(buffer, prstate->frozen, prstate->nfrozen);
+
+        /*
+         * Calculate what the snapshot conflict horizon should be for a record
+         * freezing tuples. We can use the visibility_cutoff_xid as our cutoff
+         * for conflicts when the whole page is eligible to become all-frozen
+         * in the VM once we're done with it. Otherwise, we generate a
+         * conservative cutoff by stepping back from OldestXmin.
+         */
+        if (prstate->all_frozen)
+            prstate->frz_conflict_horizon = prstate->visibility_cutoff_xid;
+        else
+        {
+            /* Avoids false conflicts when hot_standby_feedback in use */
+            prstate->frz_conflict_horizon = prstate->cutoffs->OldestXmin;
+            TransactionIdRetreat(prstate->frz_conflict_horizon);
+        }
+    }
+    else if (prstate->nfrozen > 0)
+    {
+        /*
+         * The page contained some tuples that were not already frozen, and we
+         * chose not to freeze them now. The page won't be all-frozen then.
+         */
+        Assert(!prstate->pagefrz.freeze_required);
+        prstate->all_frozen = false;
+        prstate->nfrozen = 0;   /* avoid miscounts in instrumentation */
+    }
+    else
+    {
+        /*
+         * We have no freeze plans to execute. The page might already be
+         * all-frozen (perhaps only following pruning), though. Such pages
+         * can be marked all-frozen in the VM by our caller, even though none
+         * of its tuples were newly frozen here.
+         */
+    }
+
+    return do_freeze;
+}
+
+/*
+ * Prune and repair fragmentation and potentially freeze tuples on the
+ * specified page.
+ *
+ * Caller must have pin and buffer cleanup lock on the page. Note that we
+ * don't update the FSM information for page on caller's behalf. Caller might
+ * also need to account for a reduction in the length of the line pointer
+ * array following array truncation by us.
+ *
+ * params contains the input parameters used to control freezing and pruning
+ * behavior. See the definition of PruneFreezeParams for more on what each
+ * parameter does.
+ *
+ * If the HEAP_PAGE_PRUNE_FREEZE option is set in params, we will freeze
+ * tuples if it's required in order to advance relfrozenxid / relminmxid, or
+ * if it's considered advantageous for overall system performance to do so
+ * now. The 'params.cutoffs', 'presult', 'new_relfrozen_xid' and
+ * 'new_relmin_mxid' arguments are required when freezing. When
+ * HEAP_PAGE_PRUNE_FREEZE option is passed, we also set presult->all_visible
+ * and presult->all_frozen after determining whether or not to
+ * opportunistically freeze, to indicate if the VM bits can be set. They are
+ * always set to false when the HEAP_PAGE_PRUNE_FREEZE option is not passed,
+ * because at the moment only callers that also freeze need that information.
+ *
+ * presult contains output parameters needed by callers, such as the number of
+ * tuples removed and the offsets of dead items on the page after pruning.
+ * heap_page_prune_and_freeze() is responsible for initializing it. Required
+ * by all callers.
+ *
+ * off_loc is the offset location required by the caller to use in error
+ * callback.
+ *
+ * new_relfrozen_xid and new_relmin_mxid must be provided by the caller if the
+ * HEAP_PAGE_PRUNE_FREEZE option is set in params. On entry, they contain the
+ * oldest XID and multi-XID seen on the relation so far. They will be updated
+ * with the oldest values present on the page after pruning. After processing
+ * the whole relation, VACUUM can use these values as the new
+ * relfrozenxid/relminmxid for the relation.
+ */
+void
+heap_page_prune_and_freeze(PruneFreezeParams *params,
+                           PruneFreezeResult *presult,
+                           OffsetNumber *off_loc,
+                           TransactionId *new_relfrozen_xid,
+                           MultiXactId *new_relmin_mxid)
+{
+    Buffer      buffer = params->buffer;
+    Page        page = BufferGetPage(buffer);
+    PruneState  prstate;
+    bool        do_freeze;
+    bool        do_prune;
+    bool        do_hint_prune;
+    bool        did_tuple_hint_fpi;
+    int64       fpi_before = pgWalUsage.wal_fpi;
+
+    /* Initialize prstate */
+    prune_freeze_setup(params,
+                       new_relfrozen_xid ?
+                       *new_relfrozen_xid : InvalidTransactionId,
+                       new_relmin_mxid ?
+                       *new_relmin_mxid : InvalidMultiXactId,
+                       presult,
+                       &prstate);
+
+    /*
+     * Examine all line pointers and tuple visibility information to determine
+     * which line pointers should change state and which tuples may be frozen.
+     * Prepare queue of state changes to later be executed in a critical
+     * section.
+     */
+    prune_freeze_plan(RelationGetRelid(params->relation),
+                      buffer, &prstate, off_loc);
+
+    /*
+     * If checksums are enabled, calling heap_prune_satisfies_vacuum() while
+     * checking tuple visibility information in prune_freeze_plan() may have
+     * caused an FPI to be emitted.
+     */
+    did_tuple_hint_fpi = fpi_before != pgWalUsage.wal_fpi;
+
     do_prune = prstate.nredirected > 0 ||
         prstate.ndead > 0 ||

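The comment on heap_page_will_freeze() above notes that do_prune, do_hint_prune,
and did_tuple_hint_fpi must be determined before it is called, and the diff is
cut off just after the do_prune computation begins. Purely as a hedged
illustration of that contract (a sketch based on the signatures and variables
shown above, not the missing remainder of the truncated hunk), the caller's
decision step would look roughly like this:

    /* Sketch only: the rest of the hunk was not loaded above. */
    do_prune = prstate.nredirected > 0 ||
        prstate.ndead > 0 ||
        prstate.nunused > 0;

    /* ... compute do_hint_prune from the planning results ... */

    do_freeze = heap_page_will_freeze(params->relation, buffer,
                                      did_tuple_hint_fpi,
                                      do_prune, do_hint_prune,
                                      &prstate);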