|
|
@ -2645,13 +2645,16 @@ l2: |
|
|
|
* visible while we were busy locking the buffer, or during some subsequent |
|
|
|
* visible while we were busy locking the buffer, or during some subsequent |
|
|
|
* window during which we had it unlocked, we'll have to unlock and |
|
|
|
* window during which we had it unlocked, we'll have to unlock and |
|
|
|
* re-lock, to avoid holding the buffer lock across an I/O. That's a bit |
|
|
|
* re-lock, to avoid holding the buffer lock across an I/O. That's a bit |
|
|
|
* unfortunate, but hopefully shouldn't happen often. |
|
|
|
* unfortunate, especially since we'll now have to recheck whether the |
|
|
|
|
|
|
|
* tuple has been locked or updated under us, but hopefully it won't |
|
|
|
|
|
|
|
* happen very often. |
|
|
|
*/ |
|
|
|
*/ |
|
|
|
if (vmbuffer == InvalidBuffer && PageIsAllVisible(page)) |
|
|
|
if (vmbuffer == InvalidBuffer && PageIsAllVisible(page)) |
|
|
|
{ |
|
|
|
{ |
|
|
|
LockBuffer(buffer, BUFFER_LOCK_UNLOCK); |
|
|
|
LockBuffer(buffer, BUFFER_LOCK_UNLOCK); |
|
|
|
visibilitymap_pin(relation, block, &vmbuffer); |
|
|
|
visibilitymap_pin(relation, block, &vmbuffer); |
|
|
|
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); |
|
|
|
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); |
|
|
|
|
|
|
|
goto l2; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
/*
|
|
|
|