Code cleanup for heap_freeze_tuple.

It used to be the case that lazy vacuum could call this function with only
a shared lock on the buffer, but neither lazy vacuum nor any other
code path does that any more.  Simplify the code accordingly and clean
up some related, obsolete comments.
Robert Haas 14 years ago
parent e8476f46fc
commit 7386089d23
  src/backend/access/heap/heapam.c      | 49
  src/backend/access/heap/rewriteheap.c |  7
  src/backend/commands/vacuumlazy.c     |  3
  src/include/access/heapam.h           |  3
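
For orientation, the practical effect of the patch is on the calling convention: a caller must already hold an exclusive content lock on the buffer, and heap_freeze_tuple() itself never trades a shared lock for an exclusive one any more. The sketch below is not code from the tree; freeze_one_tuple() is a hypothetical helper, and the WAL logging of the freeze that lazy vacuum still performs is omitted. It only illustrates the post-patch two-argument signature.

    #include "postgres.h"

    #include "access/heapam.h"
    #include "storage/bufmgr.h"
    #include "storage/bufpage.h"

    /*
     * Hypothetical illustration only: freeze one tuple on a page we have
     * pinned.  Under the post-patch contract the caller takes the exclusive
     * buffer lock up front; heap_freeze_tuple() no longer upgrades a shared
     * lock itself.  (WAL logging of the freeze is omitted here.)
     */
    static void
    freeze_one_tuple(Buffer buf, OffsetNumber offnum, TransactionId cutoff_xid)
    {
        Page        page;
        ItemId      lp;
        HeapTupleHeader tuple;

        LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);    /* exclusive, not shared */

        page = BufferGetPage(buf);
        lp = PageGetItemId(page, offnum);
        tuple = (HeapTupleHeader) PageGetItem(page, lp);

        /* New two-argument form: no Buffer parameter, no lock upgrading */
        if (heap_freeze_tuple(tuple, cutoff_xid))
            MarkBufferDirty(buf);

        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    }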

--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -3947,10 +3947,8 @@ heap_inplace_update(Relation relation, HeapTuple tuple)
  * because this function is applied during WAL recovery, when we don't have
  * access to any such state, and can't depend on the hint bits to be set.)
  *
- * In lazy VACUUM, we call this while initially holding only a shared lock
- * on the tuple's buffer.  If any change is needed, we trade that in for an
- * exclusive lock before making the change.  Caller should pass the buffer ID
- * if shared lock is held, InvalidBuffer if exclusive lock is already held.
+ * If the tuple is in a shared buffer, caller must hold an exclusive lock on
+ * that buffer.
  *
  * Note: it might seem we could make the changes without exclusive lock, since
  * TransactionId read/write is assumed atomic anyway.  However there is a race
@@ -3962,8 +3960,7 @@ heap_inplace_update(Relation relation, HeapTuple tuple)
  * infomask bits.
  */
 bool
-heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
-                  Buffer buf)
+heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid)
 {
     bool        changed = false;
     TransactionId xid;
@@ -3972,13 +3969,6 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
     if (TransactionIdIsNormal(xid) &&
         TransactionIdPrecedes(xid, cutoff_xid))
     {
-        if (buf != InvalidBuffer)
-        {
-            /* trade in share lock for exclusive lock */
-            LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-            LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
-            buf = InvalidBuffer;
-        }
         HeapTupleHeaderSetXmin(tuple, FrozenTransactionId);
 
         /*
@@ -3990,28 +3980,12 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
         changed = true;
     }
 
-    /*
-     * When we release shared lock, it's possible for someone else to change
-     * xmax before we get the lock back, so repeat the check after acquiring
-     * exclusive lock.  (We don't need this pushup for xmin, because only
-     * VACUUM could be interested in changing an existing tuple's xmin, and
-     * there's only one VACUUM allowed on a table at a time.)
-     */
-recheck_xmax:
     if (!(tuple->t_infomask & HEAP_XMAX_IS_MULTI))
     {
         xid = HeapTupleHeaderGetXmax(tuple);
         if (TransactionIdIsNormal(xid) &&
             TransactionIdPrecedes(xid, cutoff_xid))
         {
-            if (buf != InvalidBuffer)
-            {
-                /* trade in share lock for exclusive lock */
-                LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-                LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
-                buf = InvalidBuffer;
-                goto recheck_xmax;      /* see comment above */
-            }
             HeapTupleHeaderSetXmax(tuple, InvalidTransactionId);
 
             /*
@@ -4046,30 +4020,15 @@ recheck_xmax:
     }
 
     /*
-     * Although xvac per se could only be set by old-style VACUUM FULL, it
-     * shares physical storage space with cmax, and so could be wiped out by
-     * someone setting xmax.  Hence recheck after changing lock, same as for
-     * xmax itself.
-     *
      * Old-style VACUUM FULL is gone, but we have to keep this code as long as
      * we support having MOVED_OFF/MOVED_IN tuples in the database.
      */
-recheck_xvac:
     if (tuple->t_infomask & HEAP_MOVED)
     {
         xid = HeapTupleHeaderGetXvac(tuple);
         if (TransactionIdIsNormal(xid) &&
             TransactionIdPrecedes(xid, cutoff_xid))
         {
-            if (buf != InvalidBuffer)
-            {
-                /* trade in share lock for exclusive lock */
-                LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-                LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
-                buf = InvalidBuffer;
-                goto recheck_xvac;      /* see comment above */
-            }
-
             /*
              * If a MOVED_OFF tuple is not dead, the xvac transaction must
              * have failed; whereas a non-dead MOVED_IN tuple must mean the
@@ -4711,7 +4670,7 @@ heap_xlog_freeze(XLogRecPtr lsn, XLogRecord *record)
         ItemId      lp = PageGetItemId(page, *offsets);
         HeapTupleHeader tuple = (HeapTupleHeader) PageGetItem(page, lp);
 
-        (void) heap_freeze_tuple(tuple, cutoff_xid, InvalidBuffer);
+        (void) heap_freeze_tuple(tuple, cutoff_xid);
         offsets++;
     }
 }

--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -335,13 +335,8 @@ rewrite_heap_tuple(RewriteState state,
     /*
      * While we have our hands on the tuple, we may as well freeze any
      * very-old xmin or xmax, so that future VACUUM effort can be saved.
-     *
-     * Note we abuse heap_freeze_tuple() a bit here, since it's expecting to
-     * be given a pointer to a tuple in a disk buffer.  It happens though that
-     * we can get the right things to happen by passing InvalidBuffer for the
-     * buffer.
      */
-    heap_freeze_tuple(new_tuple->t_data, state->rs_freeze_xid, InvalidBuffer);
+    heap_freeze_tuple(new_tuple->t_data, state->rs_freeze_xid);
 
     /*
      * Invalid ctid means that ctid should point to the tuple itself. We'll

--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -784,8 +784,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
                  * Each non-removable tuple must be checked to see if it needs
                  * freezing.  Note we already have exclusive buffer lock.
                  */
-                if (heap_freeze_tuple(tuple.t_data, FreezeLimit,
-                                      InvalidBuffer))
+                if (heap_freeze_tuple(tuple.t_data, FreezeLimit))
                     frozen[nfrozen++] = offnum;
             }
         }                       /* scan along page */

--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -111,8 +111,7 @@ extern HTSU_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
             TransactionId *update_xmax, CommandId cid,
             LockTupleMode mode, bool nowait);
 extern void heap_inplace_update(Relation relation, HeapTuple tuple);
-extern bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
-              Buffer buf);
+extern bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid);
 extern bool heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
                     Buffer buf);
