@@ -1220,16 +1220,13 @@ begin:;
*/
if (StartPos / XLOG_BLCKSZ != EndPos / XLOG_BLCKSZ)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
/* advance global request to include new block(s) */
if (xlogctl->LogwrtRqst.Write < EndPos)
xlogctl->LogwrtRqst.Write = EndPos;
if (XLogCtl->LogwrtRqst.Write < EndPos)
XLogCtl->LogwrtRqst.Write = EndPos;
/* update local result copy while I have the chance */
LogwrtResult = xlogctl->LogwrtResult;
SpinLockRelease(&xlogctl->info_lck);
LogwrtResult = XLogCtl->LogwrtResult;
SpinLockRelease(&XLogCtl->info_lck);
}
/*
@@ -1324,7 +1321,7 @@ static void
ReserveXLogInsertLocation(int size, XLogRecPtr *StartPos, XLogRecPtr *EndPos,
XLogRecPtr *PrevPtr)
{
volatile XLogCtlInsert *Insert = &XLogCtl->Insert;
XLogCtlInsert *Insert = &XLogCtl->Insert;
uint64 startbytepos;
uint64 endbytepos;
uint64 prevbytepos;
@@ -1379,7 +1376,7 @@ ReserveXLogInsertLocation(int size, XLogRecPtr *StartPos, XLogRecPtr *EndPos,
static bool
ReserveXLogSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecPtr *PrevPtr)
{
volatile XLogCtlInsert *Insert = &XLogCtl->Insert;
XLogCtlInsert *Insert = &XLogCtl->Insert;
uint64 startbytepos;
uint64 endbytepos;
uint64 prevbytepos;
@@ -1697,7 +1694,7 @@ WaitXLogInsertionsToFinish(XLogRecPtr upto)
uint64 bytepos;
XLogRecPtr reservedUpto;
XLogRecPtr finishedUpto;
volatile XLogCtlInsert *Insert = &XLogCtl->Insert;
XLogCtlInsert *Insert = &XLogCtl->Insert;
int i;
if (MyProc == NULL)
@@ -2132,16 +2129,11 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
break;
/* Before waiting, get info_lck and update LogwrtResult */
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
if (xlogctl->LogwrtRqst.Write < OldPageRqstPtr)
xlogctl->LogwrtRqst.Write = OldPageRqstPtr;
LogwrtResult = xlogctl->LogwrtResult;
SpinLockRelease(&xlogctl->info_lck);
}
SpinLockAcquire(&XLogCtl->info_lck);
if (XLogCtl->LogwrtRqst.Write < OldPageRqstPtr)
XLogCtl->LogwrtRqst.Write = OldPageRqstPtr;
LogwrtResult = XLogCtl->LogwrtResult;
SpinLockRelease(&XLogCtl->info_lck);
/*
* Now that we have an up-to-date LogwrtResult value, see if we
@@ -2549,16 +2541,13 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
* code in a couple of places.
*/
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->LogwrtResult = LogwrtResult;
if (xlogctl->LogwrtRqst.Write < LogwrtResult.Write)
xlogctl->LogwrtRqst.Write = LogwrtResult.Write;
if (xlogctl->LogwrtRqst.Flush < LogwrtResult.Flush)
xlogctl->LogwrtRqst.Flush = LogwrtResult.Flush;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
XLogCtl->LogwrtResult = LogwrtResult;
if (XLogCtl->LogwrtRqst.Write < LogwrtResult.Write)
XLogCtl->LogwrtRqst.Write = LogwrtResult.Write;
if (XLogCtl->LogwrtRqst.Flush < LogwrtResult.Flush)
XLogCtl->LogwrtRqst.Flush = LogwrtResult.Flush;
SpinLockRelease(&XLogCtl->info_lck);
}
}
@@ -2573,15 +2562,12 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN)
XLogRecPtr WriteRqstPtr = asyncXactLSN;
bool sleeping;
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
LogwrtResult = xlogctl->LogwrtResult;
sleeping = xlogctl->WalWriterSleeping;
if (xlogctl->asyncXactLSN < asyncXactLSN)
xlogctl->asyncXactLSN = asyncXactLSN;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
LogwrtResult = XLogCtl->LogwrtResult;
sleeping = XLogCtl->WalWriterSleeping;
if (XLogCtl->asyncXactLSN < asyncXactLSN)
XLogCtl->asyncXactLSN = asyncXactLSN;
SpinLockRelease(&XLogCtl->info_lck);
/*
* If the WALWriter is sleeping, we should kick it to make it come out of
@@ -2614,12 +2600,9 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN)
void
XLogSetReplicationSlotMinimumLSN(XLogRecPtr lsn)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->replicationSlotMinLSN = lsn;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
XLogCtl->replicationSlotMinLSN = lsn;
SpinLockRelease(&XLogCtl->info_lck);
}
@@ -2630,13 +2613,11 @@ XLogSetReplicationSlotMinimumLSN(XLogRecPtr lsn)
static XLogRecPtr
XLogGetReplicationSlotMinimumLSN(void)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
XLogRecPtr retval;
SpinLockAcquire(&xlogctl->info_lck);
retval = xlogctl->replicationSlotMinLSN;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
retval = XLogCtl->replicationSlotMinLSN;
SpinLockRelease(&XLogCtl->info_lck);
return retval;
}
@@ -2672,8 +2653,6 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
updateMinRecoveryPoint = false;
else if (force || minRecoveryPoint < lsn)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
XLogRecPtr newMinRecoveryPoint;
TimeLineID newMinRecoveryPointTLI;
@@ -2690,10 +2669,10 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
* all. Instead, we just log a warning and continue with recovery.
* (See also the comments about corrupt LSNs in XLogFlush.)
*/
SpinLockAcquire(&xlogctl->info_lck);
newMinRecoveryPoint = xlogctl->replayEndRecPtr;
newMinRecoveryPointTLI = xlogctl->replayEndTLI;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
newMinRecoveryPoint = XLogCtl->replayEndRecPtr;
newMinRecoveryPointTLI = XLogCtl->replayEndTLI;
SpinLockRelease(&XLogCtl->info_lck);
if (!force && newMinRecoveryPoint < lsn)
elog(WARNING,
@@ -2777,16 +2756,14 @@ XLogFlush(XLogRecPtr record)
*/
for (;;)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
XLogRecPtr insertpos;
/* read LogwrtResult and update local state */
SpinLockAcquire(&xlogctl->info_lck);
if (WriteRqstPtr < xlogctl->LogwrtRqst.Write)
WriteRqstPtr = xlogctl->LogwrtRqst.Write;
LogwrtResult = xlogctl->LogwrtResult;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
if (WriteRqstPtr < XLogCtl->LogwrtRqst.Write)
WriteRqstPtr = XLogCtl->LogwrtRqst.Write;
LogwrtResult = XLogCtl->LogwrtResult;
SpinLockRelease(&XLogCtl->info_lck);
/* done already? */
if (record <= LogwrtResult.Flush)
@@ -2923,15 +2900,10 @@ XLogBackgroundFlush(void)
return false;
/* read LogwrtResult and update local state */
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
LogwrtResult = xlogctl->LogwrtResult;
WriteRqstPtr = xlogctl->LogwrtRqst.Write;
SpinLockRelease(&xlogctl->info_lck);
}
SpinLockAcquire(&XLogCtl->info_lck);
LogwrtResult = XLogCtl->LogwrtResult;
WriteRqstPtr = XLogCtl->LogwrtRqst.Write;
SpinLockRelease(&XLogCtl->info_lck);
/* back off to last completed page boundary */
WriteRqstPtr -= WriteRqstPtr % XLOG_BLCKSZ;
@@ -2939,12 +2911,9 @@ XLogBackgroundFlush(void)
/* if we have already flushed that far, consider async commit records */
if (WriteRqstPtr <= LogwrtResult.Flush)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
WriteRqstPtr = xlogctl->asyncXactLSN;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
WriteRqstPtr = XLogCtl->asyncXactLSN;
SpinLockRelease(&XLogCtl->info_lck);
flexible = false;	/* ensure it all gets written */
}
@@ -3055,14 +3024,9 @@ XLogNeedsFlush(XLogRecPtr record)
return false;
/* read LogwrtResult and update local state */
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
LogwrtResult = xlogctl->LogwrtResult;
SpinLockRelease(&xlogctl->info_lck);
}
SpinLockAcquire(&XLogCtl->info_lck);
LogwrtResult = XLogCtl->LogwrtResult;
SpinLockRelease(&XLogCtl->info_lck);
/* check again */
if (record <= LogwrtResult.Flush)
@@ -3684,13 +3648,11 @@ PreallocXlogFiles(XLogRecPtr endptr)
void
CheckXLogRemoved(XLogSegNo segno, TimeLineID tli)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
XLogSegNo lastRemovedSegNo;
SpinLockAcquire(&xlogctl->info_lck);
lastRemovedSegNo = xlogctl->lastRemovedSegNo;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
lastRemovedSegNo = XLogCtl->lastRemovedSegNo;
SpinLockRelease(&XLogCtl->info_lck);
if (segno <= lastRemovedSegNo)
{
@@ -3714,13 +3676,11 @@ CheckXLogRemoved(XLogSegNo segno, TimeLineID tli)
XLogSegNo
XLogGetLastRemovedSegno(void)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
XLogSegNo lastRemovedSegNo;
SpinLockAcquire(&xlogctl->info_lck);
lastRemovedSegNo = xlogctl->lastRemovedSegNo;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
lastRemovedSegNo = XLogCtl->lastRemovedSegNo;
SpinLockRelease(&XLogCtl->info_lck);
return lastRemovedSegNo;
}
@@ -3732,17 +3692,15 @@ XLogGetLastRemovedSegno(void)
static void
UpdateLastRemovedPtr(char *filename)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
uint32 tli;
XLogSegNo segno;
XLogFromFileName(filename, &tli, &segno);
SpinLockAcquire(&xlogctl->info_lck);
if (segno > xlogctl->lastRemovedSegNo)
xlogctl->lastRemovedSegNo = segno;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
if (segno > XLogCtl->lastRemovedSegNo)
XLogCtl->lastRemovedSegNo = segno;
SpinLockRelease(&XLogCtl->info_lck);
}
/*
@@ -4700,13 +4658,10 @@ GetFakeLSNForUnloggedRel(void)
{
XLogRecPtr nextUnloggedLSN;
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
/* increment the unloggedLSN counter, need SpinLock */
SpinLockAcquire(&xlogctl->ulsn_lck);
nextUnloggedLSN = xlogctl->unloggedLSN++;
SpinLockRelease(&xlogctl->ulsn_lck);
SpinLockAcquire(&XLogCtl->ulsn_lck);
nextUnloggedLSN = XLogCtl->unloggedLSN++;
SpinLockRelease(&XLogCtl->ulsn_lck);
return nextUnloggedLSN;
}
@@ -5738,13 +5693,11 @@ recoveryPausesHere(void)
bool
RecoveryIsPaused(void)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
bool recoveryPause;
SpinLockAcquire(&xlogctl->info_lck);
recoveryPause = xlogctl->recoveryPause;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
recoveryPause = XLogCtl->recoveryPause;
SpinLockRelease(&XLogCtl->info_lck);
return recoveryPause;
}
@@ -5752,12 +5705,9 @@ RecoveryIsPaused(void)
void
SetRecoveryPause(bool recoveryPause)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->recoveryPause = recoveryPause;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
XLogCtl->recoveryPause = recoveryPause;
SpinLockRelease(&XLogCtl->info_lck);
}
/*
@@ -5855,12 +5805,9 @@ recoveryApplyDelay(XLogRecord *record)
static void
SetLatestXTime(TimestampTz xtime)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->recoveryLastXTime = xtime;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
XLogCtl->recoveryLastXTime = xtime;
SpinLockRelease(&XLogCtl->info_lck);
}
/*
@@ -5869,13 +5816,11 @@ SetLatestXTime(TimestampTz xtime)
TimestampTz
GetLatestXTime(void)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
TimestampTz xtime;
SpinLockAcquire(&xlogctl->info_lck);
xtime = xlogctl->recoveryLastXTime;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
xtime = XLogCtl->recoveryLastXTime;
SpinLockRelease(&XLogCtl->info_lck);
return xtime;
}
@@ -5889,12 +5834,9 @@ GetLatestXTime(void)
static void
SetCurrentChunkStartTime(TimestampTz xtime)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->currentChunkStartTime = xtime;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
XLogCtl->currentChunkStartTime = xtime;
SpinLockRelease(&XLogCtl->info_lck);
}
/*
@@ -5904,13 +5846,11 @@ SetCurrentChunkStartTime(TimestampTz xtime)
TimestampTz
GetCurrentChunkReplayStartTime(void)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
TimestampTz xtime;
SpinLockAcquire(&xlogctl->info_lck);
xtime = xlogctl->currentChunkStartTime;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
xtime = XLogCtl->currentChunkStartTime;
SpinLockRelease(&XLogCtl->info_lck);
return xtime;
}
@@ -6434,9 +6374,6 @@ StartupXLOG(void)
{
int rmid;
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
/*
* Update pg_control to show that we are recovering and to show the
* selected checkpoint as the place we are starting from. We also mark
@@ -6623,18 +6560,18 @@ StartupXLOG(void)
* if we had just replayed the record before the REDO location (or the
* checkpoint record itself, if it's a shutdown checkpoint).
*/
SpinLockAcquire(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
if (checkPoint.redo < RecPtr)
xlogctl->replayEndRecPtr = checkPoint.redo;
XLogCtl->replayEndRecPtr = checkPoint.redo;
else
xlogctl->replayEndRecPtr = EndRecPtr;
xlogctl->replayEndTLI = ThisTimeLineID;
xlogctl->lastReplayedEndRecPtr = xlogctl->replayEndRecPtr;
xlogctl->lastReplayedTLI = xlogctl->replayEndTLI;
xlogctl->recoveryLastXTime = 0;
xlogctl->currentChunkStartTime = 0;
xlogctl->recoveryPause = false;
SpinLockRelease(&xlogctl->info_lck);
XLogCtl->replayEndRecPtr = EndRecPtr;
XLogCtl->replayEndTLI = ThisTimeLineID;
XLogCtl->lastReplayedEndRecPtr = XLogCtl->replayEndRecPtr;
XLogCtl->lastReplayedTLI = XLogCtl->replayEndTLI;
XLogCtl->recoveryLastXTime = 0;
XLogCtl->currentChunkStartTime = 0;
XLogCtl->recoveryPause = false;
SpinLockRelease(&XLogCtl->info_lck);
/* Also ensure XLogReceiptTime has a sane value */
XLogReceiptTime = GetCurrentTimestamp();
@@ -6733,7 +6670,7 @@ StartupXLOG(void)
* otherwise would is a minor issue, so it doesn't seem worth
* adding another spinlock cycle to prevent that.
*/
if (xlogctl->recoveryPause)
if (((volatile XLogCtlData *) XLogCtl)->recoveryPause)
recoveryPausesHere();
/*
@@ -6758,7 +6695,7 @@ StartupXLOG(void)
* here otherwise pausing during the delay-wait wouldn't
* work.
*/
if (xlogctl->recoveryPause)
if (((volatile XLogCtlData *) XLogCtl)->recoveryPause)
recoveryPausesHere();
}
@@ -6831,10 +6768,10 @@ StartupXLOG(void)
* Update shared replayEndRecPtr before replaying this record,
* so that XLogFlush will update minRecoveryPoint correctly.
*/
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->replayEndRecPtr = EndRecPtr;
xlogctl->replayEndTLI = ThisTimeLineID;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
XLogCtl->replayEndRecPtr = EndRecPtr;
XLogCtl->replayEndTLI = ThisTimeLineID;
SpinLockRelease(&XLogCtl->info_lck);
/*
* If we are attempting to enter Hot Standby mode, process
@@ -6854,10 +6791,10 @@ StartupXLOG(void)
* Update lastReplayedEndRecPtr after this record has been
* successfully replayed.
*/
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->lastReplayedEndRecPtr = EndRecPtr;
xlogctl->lastReplayedTLI = ThisTimeLineID;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
XLogCtl->lastReplayedEndRecPtr = EndRecPtr;
XLogCtl->lastReplayedTLI = ThisTimeLineID;
SpinLockRelease(&XLogCtl->info_lck);
/* Remember this record as the last-applied one */
LastRec = ReadRecPtr;
@@ -7267,14 +7204,9 @@ StartupXLOG(void)
* there are no race conditions concerning visibility of other recent
* updates to shared memory.)
*/
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->SharedRecoveryInProgress = false;
SpinLockRelease(&xlogctl->info_lck);
}
SpinLockAcquire(&XLogCtl->info_lck);
XLogCtl->SharedRecoveryInProgress = false;
SpinLockRelease(&XLogCtl->info_lck);
/*
* If there were cascading standby servers connected to us, nudge any wal
@@ -7377,12 +7309,9 @@ CheckRecoveryConsistency(void)
reachedConsistency &&
IsUnderPostmaster)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->SharedHotStandbyActive = true;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
XLogCtl->SharedHotStandbyActive = true;
SpinLockRelease(&XLogCtl->info_lck);
LocalHotStandbyActive = true;
@@ -7467,13 +7396,10 @@ HotStandbyActive(void)
return true;
else
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
/* spinlock is essential on machines with weak memory ordering! */
SpinLockAcquire(&xlogctl->info_lck);
LocalHotStandbyActive = xlogctl->SharedHotStandbyActive;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
LocalHotStandbyActive = XLogCtl->SharedHotStandbyActive;
SpinLockRelease(&XLogCtl->info_lck);
return LocalHotStandbyActive;
}
@@ -7688,8 +7614,6 @@ InitXLOGAccess(void)
XLogRecPtr
GetRedoRecPtr(void)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
XLogRecPtr ptr;
/*
@@ -7697,9 +7621,9 @@ GetRedoRecPtr(void)
* grabbed a WAL insertion lock to read the master copy, someone might
* update it just after we've released the lock.
*/
SpinLockAcquire(&xlogctl->info_lck);
ptr = xlogctl->RedoRecPtr;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
ptr = XLogCtl->RedoRecPtr;
SpinLockRelease(&XLogCtl->info_lck);
if (RedoRecPtr < ptr)
RedoRecPtr = ptr;
@@ -7718,13 +7642,11 @@ GetRedoRecPtr(void)
XLogRecPtr
GetInsertRecPtr(void)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
XLogRecPtr recptr;
SpinLockAcquire(&xlogctl->info_lck);
recptr = xlogctl->LogwrtRqst.Write;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
recptr = XLogCtl->LogwrtRqst.Write;
SpinLockRelease(&XLogCtl->info_lck);
return recptr;
}
@@ -7736,13 +7658,11 @@ GetInsertRecPtr(void)
XLogRecPtr
GetFlushRecPtr(void)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
XLogRecPtr recptr;
SpinLockAcquire(&xlogctl->info_lck);
recptr = xlogctl->LogwrtResult.Flush;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
recptr = XLogCtl->LogwrtResult.Flush;
SpinLockRelease(&XLogCtl->info_lck);
return recptr;
}
@@ -7779,15 +7699,10 @@ GetNextXidAndEpoch(TransactionId *xid, uint32 *epoch)
TransactionId nextXid;
/* Must read checkpoint info first, else have race condition */
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
ckptXidEpoch = xlogctl->ckptXidEpoch;
ckptXid = xlogctl->ckptXid;
SpinLockRelease(&xlogctl->info_lck);
}
SpinLockAcquire(&XLogCtl->info_lck);
ckptXidEpoch = XLogCtl->ckptXidEpoch;
ckptXid = XLogCtl->ckptXid;
SpinLockRelease(&XLogCtl->info_lck);
/* Now fetch current nextXid */
nextXid = ReadNewTransactionId();
@@ -7990,8 +7905,6 @@ LogCheckpointEnd(bool restartpoint)
void
CreateCheckPoint(int flags)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
bool shutdown;
CheckPoint checkPoint;
XLogRecPtr recptr;
@@ -8151,7 +8064,7 @@ CreateCheckPoint(int flags)
* XLogInserts that happen while we are dumping buffers must assume that
* their buffer changes are not included in the checkpoint.
*/
RedoRecPtr = xlogctl->Insert.RedoRecPtr = checkPoint.redo;
RedoRecPtr = XLogCtl->Insert.RedoRecPtr = checkPoint.redo;
/*
* Now we can release the WAL insertion locks, allowing other xacts to
@@ -8160,9 +8073,9 @@ CreateCheckPoint(int flags)
WALInsertLockRelease();
/* Update the info_lck-protected copy of RedoRecPtr as well */
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->RedoRecPtr = checkPoint.redo;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
XLogCtl->RedoRecPtr = checkPoint.redo;
SpinLockRelease(&XLogCtl->info_lck);
/*
* If enabled, log checkpoint start. We postpone this until now so as not
@@ -8334,15 +8247,10 @@ CreateCheckPoint(int flags)
LWLockRelease(ControlFileLock);
/* Update shared-memory copy of checkpoint XID/epoch */
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->ckptXidEpoch = checkPoint.nextXidEpoch;
xlogctl->ckptXid = checkPoint.nextXid;
SpinLockRelease(&xlogctl->info_lck);
}
SpinLockAcquire(&XLogCtl->info_lck);
XLogCtl->ckptXidEpoch = checkPoint.nextXidEpoch;
XLogCtl->ckptXid = checkPoint.nextXid;
SpinLockRelease(&XLogCtl->info_lck);
/*
* We are now done with critical updates; no need for system panic if we
@@ -8497,9 +8405,6 @@ CheckPointGuts(XLogRecPtr checkPointRedo, int flags)
static void
RecoveryRestartPoint(const CheckPoint *checkPoint)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
/*
* Also refrain from creating a restartpoint if we have seen any
* references to non-existent pages. Restarting recovery from the
@@ -8521,10 +8426,10 @@ RecoveryRestartPoint(const CheckPoint *checkPoint)
* Copy the checkpoint record to shared memory, so that checkpointer can
* work out the next time it wants to perform a restartpoint.
*/
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->lastCheckPointRecPtr = ReadRecPtr;
xlogctl->lastCheckPoint = *checkPoint;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
XLogCtl->lastCheckPointRecPtr = ReadRecPtr;
XLogCtl->lastCheckPoint = *checkPoint;
SpinLockRelease(&XLogCtl->info_lck);
}
/*
@@ -8546,9 +8451,6 @@ CreateRestartPoint(int flags)
XLogSegNo _logSegNo;
TimestampTz xtime;
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
/*
* Acquire CheckpointLock to ensure only one restartpoint or checkpoint
* happens at a time.
*/
LWLockAcquire(CheckpointLock, LW_EXCLUSIVE);
/* Get a local copy of the last safe checkpoint record. */
SpinLockAcquire(&xlogctl->info_lck);
lastCheckPointRecPtr = xlogctl->lastCheckPointRecPtr;
lastCheckPoint = xlogctl->lastCheckPoint;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
lastCheckPointRecPtr = XLogCtl->lastCheckPointRecPtr;
lastCheckPoint = XLogCtl->lastCheckPoint;
SpinLockRelease(&XLogCtl->info_lck);
/*
* Check that we're still in recovery mode. It's ok if we exit recovery
@@ -8618,13 +8520,13 @@ CreateRestartPoint(int flags)
* happening.
*/
WALInsertLockAcquireExclusive();
xlogctl->Insert.RedoRecPtr = lastCheckPoint.redo;
XLogCtl->Insert.RedoRecPtr = lastCheckPoint.redo;
WALInsertLockRelease();
/* Also update the info_lck-protected copy */
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->RedoRecPtr = lastCheckPoint.redo;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
XLogCtl->RedoRecPtr = lastCheckPoint.redo;
SpinLockRelease(&XLogCtl->info_lck);
/*
* Prepare to accumulate statistics.
@@ -9384,15 +9286,10 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
ControlFile->checkPointCopy.nextXid = checkPoint.nextXid;
/* Update shared-memory copy of checkpoint XID/epoch */
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->ckptXidEpoch = checkPoint.nextXidEpoch;
xlogctl->ckptXid = checkPoint.nextXid;
SpinLockRelease(&xlogctl->info_lck);
}
SpinLockAcquire(&XLogCtl->info_lck);
XLogCtl->ckptXidEpoch = checkPoint.nextXidEpoch;
XLogCtl->ckptXid = checkPoint.nextXid;
SpinLockRelease(&XLogCtl->info_lck);
/*
* We should've already switched to the new TLI before replaying this
@@ -9436,15 +9333,10 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
ControlFile->checkPointCopy.nextXid = checkPoint.nextXid;
/* Update shared-memory copy of checkpoint XID/epoch */
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->ckptXidEpoch = checkPoint.nextXidEpoch;
xlogctl->ckptXid = checkPoint.nextXid;
SpinLockRelease(&xlogctl->info_lck);
}
SpinLockAcquire(&XLogCtl->info_lck);
XLogCtl->ckptXidEpoch = checkPoint.nextXidEpoch;
XLogCtl->ckptXid = checkPoint.nextXid;
SpinLockRelease(&XLogCtl->info_lck);
/* TLI should not change in an on-line checkpoint */
if (checkPoint.ThisTimeLineID != ThisTimeLineID)
@@ -9581,8 +9473,6 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
}
else if (info == XLOG_FPW_CHANGE)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
bool fpw;
memcpy(&fpw, XLogRecGetData(record), sizeof(bool));
@@ -9594,10 +9484,10 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
*/
if (!fpw)
{
SpinLockAcquire(&xlogctl->info_lck);
if (xlogctl->lastFpwDisableRecPtr < ReadRecPtr)
xlogctl->lastFpwDisableRecPtr = ReadRecPtr;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
if (XLogCtl->lastFpwDisableRecPtr < ReadRecPtr)
XLogCtl->lastFpwDisableRecPtr = ReadRecPtr;
SpinLockRelease(&XLogCtl->info_lck);
}
/* Keep track of full_page_writes */
@@ -9972,8 +9862,6 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
if (backup_started_in_recovery)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
XLogRecPtr recptr;
/*
@@ -9981,9 +9869,9 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
* (i.e., since last restartpoint used as backup starting
* checkpoint) contain full-page writes.
*/
SpinLockAcquire(&xlogctl->info_lck);
recptr = xlogctl->lastFpwDisableRecPtr;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
recptr = XLogCtl->lastFpwDisableRecPtr;
SpinLockRelease(&XLogCtl->info_lck);
if (!checkpointfpw || startpoint <= recptr)
ereport(ERROR,
@@ -10326,17 +10214,15 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
*/
if (backup_started_in_recovery)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
XLogRecPtr recptr;
/*
* Check to see if all WAL replayed during online backup contain
* full-page writes.
*/
SpinLockAcquire(&xlogctl->info_lck);
recptr = xlogctl->lastFpwDisableRecPtr;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
recptr = XLogCtl->lastFpwDisableRecPtr;
SpinLockRelease(&XLogCtl->info_lck);
if (startpoint <= recptr)
ereport(ERROR,
@@ -10523,15 +10409,13 @@ do_pg_abort_backup(void)
XLogRecPtr
GetXLogReplayRecPtr(TimeLineID *replayTLI)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
XLogRecPtr recptr;
TimeLineID tli;
SpinLockAcquire(&xlogctl->info_lck);
recptr = xlogctl->lastReplayedEndRecPtr;
tli = xlogctl->lastReplayedTLI;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
recptr = XLogCtl->lastReplayedEndRecPtr;
tli = XLogCtl->lastReplayedTLI;
SpinLockRelease(&XLogCtl->info_lck);
if (replayTLI)
*replayTLI = tli;
@@ -10544,7 +10428,7 @@ GetXLogReplayRecPtr(TimeLineID *replayTLI)
XLogRecPtr
GetXLogInsertRecPtr(void)
{
volatile XLogCtlInsert *Insert = &XLogCtl->Insert;
XLogCtlInsert *Insert = &XLogCtl->Insert;
uint64 current_bytepos;
SpinLockAcquire(&Insert->insertpos_lck);
@@ -10560,14 +10444,9 @@ GetXLogInsertRecPtr(void)
XLogRecPtr
GetXLogWriteRecPtr(void)
{
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
LogwrtResult = xlogctl->LogwrtResult;
SpinLockRelease(&xlogctl->info_lck);
}
SpinLockAcquire(&XLogCtl->info_lck);
LogwrtResult = XLogCtl->LogwrtResult;
SpinLockRelease(&XLogCtl->info_lck);
return LogwrtResult.Write;
}
@@ -11393,10 +11272,7 @@ WakeupRecovery(void)
void
SetWalWriterSleeping(bool sleeping)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->WalWriterSleeping = sleeping;
SpinLockRelease(&xlogctl->info_lck);
SpinLockAcquire(&XLogCtl->info_lck);
XLogCtl->WalWriterSleeping = sleeping;
SpinLockRelease(&XLogCtl->info_lck);
}
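
For reference, a minimal standalone sketch (not part of the patch; the DemoSharedData struct and demo_advance_write function below are hypothetical) of the access pattern the new code converges on: SpinLockAcquire()/SpinLockRelease() are taken to act as compiler barriers, so the shared struct can be dereferenced directly inside the locked section instead of through a volatile-qualified local alias.

#include "postgres.h"

#include "access/xlogdefs.h"
#include "storage/spin.h"

/* Hypothetical shared-memory struct, in the style of XLogCtlData. */
typedef struct DemoSharedData
{
	slock_t		info_lck;		/* protects the two fields below */
	XLogRecPtr	writeRqst;		/* highest write request seen so far */
	XLogRecPtr	writeResult;	/* how far WAL has actually been written */
} DemoSharedData;

static DemoSharedData *DemoShared;	/* set up elsewhere, points into shmem */

/*
 * Bump the shared write request and read back the current result.
 * No volatile-qualified alias of DemoShared is needed here: the spinlock
 * acquire/release calls are assumed to act as compiler barriers, so the
 * loads and stores cannot be rearranged out of the locked section.
 */
static XLogRecPtr
demo_advance_write(XLogRecPtr newRqst)
{
	XLogRecPtr	result;

	SpinLockAcquire(&DemoShared->info_lck);
	if (DemoShared->writeRqst < newRqst)
		DemoShared->writeRqst = newRqst;
	result = DemoShared->writeResult;
	SpinLockRelease(&DemoShared->info_lck);

	return result;
}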