@@ -107,7 +107,7 @@
  * frontend during startup.)  The above design guarantees that notifies from
  * other backends will never be missed by ignoring self-notifies.
  *
- * The amount of shared memory used for notify management (NUM_ASYNC_BUFFERS)
+ * The amount of shared memory used for notify management (NUM_NOTIFY_BUFFERS)
  * can be varied without affecting anything but performance.  The maximum
  * amount of notification data that can be queued at one time is determined
  * by slru.c's wraparound limit; see QUEUE_MAX_PAGE below.
@@ -225,7 +225,7 @@ typedef struct QueuePosition
  *
  * Resist the temptation to make this really large.  While that would save
  * work in some places, it would add cost in others.  In particular, this
- * should likely be less than NUM_ASYNC_BUFFERS, to ensure that backends
+ * should likely be less than NUM_NOTIFY_BUFFERS, to ensure that backends
  * catch up before the pages they'll need to read fall out of SLRU cache.
  */
 #define QUEUE_CLEANUP_DELAY 4
@@ -244,7 +244,7 @@ typedef struct QueueBackendStatus
 /*
  * Shared memory state for LISTEN/NOTIFY (excluding its SLRU stuff)
  *
- * The AsyncQueueControl structure is protected by the AsyncQueueLock.
+ * The AsyncQueueControl structure is protected by the NotifyQueueLock.
  *
  * When holding the lock in SHARED mode, backends may only inspect their own
  * entries as well as the head and tail pointers.  Consequently we can allow a
@@ -254,9 +254,9 @@ typedef struct QueueBackendStatus
  * When holding the lock in EXCLUSIVE mode, backends can inspect the entries
  * of other backends and also change the head and tail pointers.
  *
- * AsyncCtlLock is used as the control lock for the pg_notify SLRU buffers.
+ * NotifySLRULock is used as the control lock for the pg_notify SLRU buffers.
  * In order to avoid deadlocks, whenever we need both locks, we always first
- * get AsyncQueueLock and then AsyncCtlLock.
+ * get NotifyQueueLock and then NotifySLRULock.
  *
  * Each backend uses the backend[] array entry with index equal to its
  * BackendId (which can range from 1 to MaxBackends).  We rely on this to make
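
[Note, not part of the patch: the ordering rule above is the one the renamed
locks still follow in asyncQueueAddEntries() and asyncQueueAdvanceTail()
further down.  A minimal sketch of that discipline with the post-patch names,
assuming ordinary backend context (LWLockAcquire/LWLockRelease from
storage/lwlock.h); illustrative only:

	LWLockAcquire(NotifyQueueLock, LW_EXCLUSIVE);	/* queue lock always first */
	LWLockAcquire(NotifySLRULock, LW_EXCLUSIVE);	/* SLRU control lock second */

	/* ... inspect or update queue pointers and SLRU pages ... */

	LWLockRelease(NotifySLRULock);	/* released in the opposite order */
	LWLockRelease(NotifyQueueLock);
]
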
@@ -292,9 +292,9 @@ static AsyncQueueControl *asyncQueueControl;
 /*
  * The SLRU buffer area through which we access the notification queue
  */
-static SlruCtlData AsyncCtlData;
+static SlruCtlData NotifyCtlData;
 
-#define AsyncCtl					(&AsyncCtlData)
+#define NotifyCtl					(&NotifyCtlData)
 #define QUEUE_PAGESIZE				BLCKSZ
 #define QUEUE_FULL_WARN_INTERVAL	5000	/* warn at most once every 5s */
 
@@ -506,7 +506,7 @@ AsyncShmemSize(void)
 	size = mul_size(MaxBackends + 1, sizeof(QueueBackendStatus));
 	size = add_size(size, offsetof(AsyncQueueControl, backend));
 
-	size = add_size(size, SimpleLruShmemSize(NUM_ASYNC_BUFFERS, 0));
+	size = add_size(size, SimpleLruShmemSize(NUM_NOTIFY_BUFFERS, 0));
 
 	return size;
 }
@@ -552,18 +552,18 @@ AsyncShmemInit(void)
 	/*
 	 * Set up SLRU management of the pg_notify data.
 	 */
-	AsyncCtl->PagePrecedes = asyncQueuePagePrecedes;
-	SimpleLruInit(AsyncCtl, "async", NUM_ASYNC_BUFFERS, 0,
-				  AsyncCtlLock, "pg_notify", LWTRANCHE_ASYNC_BUFFERS);
+	NotifyCtl->PagePrecedes = asyncQueuePagePrecedes;
+	SimpleLruInit(NotifyCtl, "Notify", NUM_NOTIFY_BUFFERS, 0,
+				  NotifySLRULock, "pg_notify", LWTRANCHE_NOTIFY_BUFFER);
 	/* Override default assumption that writes should be fsync'd */
-	AsyncCtl->do_fsync = false;
+	NotifyCtl->do_fsync = false;
 
 	if (!found)
 	{
 		/*
 		 * During start or reboot, clean out the pg_notify directory.
 		 */
-		(void) SlruScanDirectory(AsyncCtl, SlruScanDirCbDeleteAll, NULL);
+		(void) SlruScanDirectory(NotifyCtl, SlruScanDirCbDeleteAll, NULL);
 	}
 }
 
@@ -918,7 +918,7 @@ PreCommit_Notify(void)
 		 * Make sure that we have an XID assigned to the current transaction.
 		 * GetCurrentTransactionId is cheap if we already have an XID, but not
 		 * so cheap if we don't, and we'd prefer not to do that work while
-		 * holding AsyncQueueLock.
+		 * holding NotifyQueueLock.
 		 */
 		(void) GetCurrentTransactionId();
 
@@ -949,7 +949,7 @@ PreCommit_Notify(void)
 		{
 			/*
 			 * Add the pending notifications to the queue.  We acquire and
-			 * release AsyncQueueLock once per page, which might be overkill
+			 * release NotifyQueueLock once per page, which might be overkill
 			 * but it does allow readers to get in while we're doing this.
 			 *
 			 * A full queue is very uncommon and should really not happen,
@@ -959,14 +959,14 @@ PreCommit_Notify(void)
 			 * transaction, but we have not yet committed to clog, so at this
 			 * point in time we can still roll the transaction back.
 			 */
-			LWLockAcquire(AsyncQueueLock, LW_EXCLUSIVE);
+			LWLockAcquire(NotifyQueueLock, LW_EXCLUSIVE);
 			asyncQueueFillWarning();
 			if (asyncQueueIsFull())
 				ereport(ERROR,
 						(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
 						 errmsg("too many notifications in the NOTIFY queue")));
 			nextNotify = asyncQueueAddEntries(nextNotify);
-			LWLockRelease(AsyncQueueLock);
+			LWLockRelease(NotifyQueueLock);
 		}
 	}
 }
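
[Note, not part of the patch: "once per page" refers to the loop enclosing
these lines, which is context just outside the hunk.  asyncQueueAddEntries()
writes at most one SLRU page per call and returns the first still-unwritten
notification, so the lock is dropped between pages and readers can get in.
A condensed paraphrase of the surrounding code, not verbatim:

	while (nextNotify != NULL)
	{
		LWLockAcquire(NotifyQueueLock, LW_EXCLUSIVE);	/* once per page */
		asyncQueueFillWarning();
		if (asyncQueueIsFull())
			ereport(ERROR,
					(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
					 errmsg("too many notifications in the NOTIFY queue")));
		nextNotify = asyncQueueAddEntries(nextNotify);	/* fills at most one page */
		LWLockRelease(NotifyQueueLock);
	}
]
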
@@ -1075,7 +1075,7 @@ Exec_ListenPreCommit(void)
 	 * We need exclusive lock here so we can look at other backends' entries
 	 * and manipulate the list links.
 	 */
-	LWLockAcquire(AsyncQueueLock, LW_EXCLUSIVE);
+	LWLockAcquire(NotifyQueueLock, LW_EXCLUSIVE);
 	head = QUEUE_HEAD;
 	max = QUEUE_TAIL;
 	prevListener = InvalidBackendId;
@@ -1101,7 +1101,7 @@ Exec_ListenPreCommit(void)
 		QUEUE_NEXT_LISTENER(MyBackendId) = QUEUE_FIRST_LISTENER;
 		QUEUE_FIRST_LISTENER = MyBackendId;
 	}
-	LWLockRelease(AsyncQueueLock);
+	LWLockRelease(NotifyQueueLock);
 
 	/* Now we are listed in the global array, so remember we're listening */
 	amRegisteredListener = true;
@@ -1308,7 +1308,7 @@ asyncQueueUnregister(void)
 	/*
 	 * Need exclusive lock here to manipulate list links.
 	 */
-	LWLockAcquire(AsyncQueueLock, LW_EXCLUSIVE);
+	LWLockAcquire(NotifyQueueLock, LW_EXCLUSIVE);
 	/* Mark our entry as invalid */
 	QUEUE_BACKEND_PID(MyBackendId) = InvalidPid;
 	QUEUE_BACKEND_DBOID(MyBackendId) = InvalidOid;
@@ -1327,7 +1327,7 @@ asyncQueueUnregister(void)
 		}
 	}
 	QUEUE_NEXT_LISTENER(MyBackendId) = InvalidBackendId;
-	LWLockRelease(AsyncQueueLock);
+	LWLockRelease(NotifyQueueLock);
 
 	/* mark ourselves as no longer listed in the global array */
 	amRegisteredListener = false;
@@ -1336,7 +1336,7 @@ asyncQueueUnregister(void)
 /*
  * Test whether there is room to insert more notification messages.
  *
- * Caller must hold at least shared AsyncQueueLock.
+ * Caller must hold at least shared NotifyQueueLock.
  */
 static bool
 asyncQueueIsFull(void)
@@ -1437,8 +1437,8 @@ asyncQueueNotificationToEntry(Notification *n, AsyncQueueEntry *qe)
  * notification to write and return the first still-unwritten cell back.
  * Eventually we will return NULL indicating all is done.
  *
- * We are holding AsyncQueueLock already from the caller and grab AsyncCtlLock
- * locally in this function.
+ * We are holding NotifyQueueLock already from the caller and grab
+ * NotifySLRULock locally in this function.
  */
 static ListCell *
 asyncQueueAddEntries(ListCell *nextNotify)
@@ -1449,8 +1449,8 @@ asyncQueueAddEntries(ListCell *nextNotify)
 	int			offset;
 	int			slotno;
 
-	/* We hold both AsyncQueueLock and AsyncCtlLock during this operation */
-	LWLockAcquire(AsyncCtlLock, LW_EXCLUSIVE);
+	/* We hold both NotifyQueueLock and NotifySLRULock during this operation */
+	LWLockAcquire(NotifySLRULock, LW_EXCLUSIVE);
 
 	/*
 	 * We work with a local copy of QUEUE_HEAD, which we write back to shared
@@ -1475,13 +1475,13 @@ asyncQueueAddEntries(ListCell *nextNotify)
 	 */
 	pageno = QUEUE_POS_PAGE(queue_head);
 	if (QUEUE_POS_IS_ZERO(queue_head))
-		slotno = SimpleLruZeroPage(AsyncCtl, pageno);
+		slotno = SimpleLruZeroPage(NotifyCtl, pageno);
 	else
-		slotno = SimpleLruReadPage(AsyncCtl, pageno, true,
+		slotno = SimpleLruReadPage(NotifyCtl, pageno, true,
 								   InvalidTransactionId);
 
 	/* Note we mark the page dirty before writing in it */
-	AsyncCtl->shared->page_dirty[slotno] = true;
+	NotifyCtl->shared->page_dirty[slotno] = true;
 
 	while (nextNotify != NULL)
 	{
@@ -1512,7 +1512,7 @@ asyncQueueAddEntries(ListCell *nextNotify)
 		}
 
 		/* Now copy qe into the shared buffer page */
-		memcpy(AsyncCtl->shared->page_buffer[slotno] + offset,
+		memcpy(NotifyCtl->shared->page_buffer[slotno] + offset,
 			   &qe,
 			   qe.length);
 
@@ -1527,7 +1527,7 @@ asyncQueueAddEntries(ListCell *nextNotify)
 			 * asyncQueueIsFull() ensured that there is room to create this
 			 * page without overrunning the queue.
 			 */
-			slotno = SimpleLruZeroPage(AsyncCtl, QUEUE_POS_PAGE(queue_head));
+			slotno = SimpleLruZeroPage(NotifyCtl, QUEUE_POS_PAGE(queue_head));
 
 			/*
 			 * If the new page address is a multiple of QUEUE_CLEANUP_DELAY,
@@ -1545,7 +1545,7 @@ asyncQueueAddEntries(ListCell *nextNotify)
 	/* Success, so update the global QUEUE_HEAD */
 	QUEUE_HEAD = queue_head;
 
-	LWLockRelease(AsyncCtlLock);
+	LWLockRelease(NotifySLRULock);
 
 	return nextNotify;
 }
@@ -1562,9 +1562,9 @@ pg_notification_queue_usage(PG_FUNCTION_ARGS)
 	/* Advance the queue tail so we don't report a too-large result */
 	asyncQueueAdvanceTail();
 
-	LWLockAcquire(AsyncQueueLock, LW_SHARED);
+	LWLockAcquire(NotifyQueueLock, LW_SHARED);
 	usage = asyncQueueUsage();
-	LWLockRelease(AsyncQueueLock);
+	LWLockRelease(NotifyQueueLock);
 
 	PG_RETURN_FLOAT8(usage);
 }
@@ -1572,7 +1572,7 @@ pg_notification_queue_usage(PG_FUNCTION_ARGS)
 /*
  * Return the fraction of the queue that is currently occupied.
  *
- * The caller must hold AsyncQueueLock in (at least) shared mode.
+ * The caller must hold NotifyQueueLock in (at least) shared mode.
 */
 static double
 asyncQueueUsage(void)
@@ -1601,7 +1601,7 @@ asyncQueueUsage(void)
  * This is unlikely given the size of the queue, but possible.
  * The warnings show up at most once every QUEUE_FULL_WARN_INTERVAL.
  *
- * Caller must hold exclusive AsyncQueueLock.
+ * Caller must hold exclusive NotifyQueueLock.
  */
 static void
 asyncQueueFillWarning(void)
@@ -1665,7 +1665,7 @@ SignalBackends(void)
 
 	/*
 	 * Identify backends that we need to signal.  We don't want to send
-	 * signals while holding the AsyncQueueLock, so this loop just builds a
+	 * signals while holding the NotifyQueueLock, so this loop just builds a
 	 * list of target PIDs.
 	 *
 	 * XXX in principle these pallocs could fail, which would be bad.  Maybe
@@ -1676,7 +1676,7 @@ SignalBackends(void)
 	ids = (BackendId *) palloc(MaxBackends * sizeof(BackendId));
 	count = 0;
 
-	LWLockAcquire(AsyncQueueLock, LW_EXCLUSIVE);
+	LWLockAcquire(NotifyQueueLock, LW_EXCLUSIVE);
 	for (BackendId i = QUEUE_FIRST_LISTENER; i > 0; i = QUEUE_NEXT_LISTENER(i))
 	{
 		int32		pid = QUEUE_BACKEND_PID(i);
@@ -1710,7 +1710,7 @@ SignalBackends(void)
 		ids[count] = i;
 		count++;
 	}
-	LWLockRelease(AsyncQueueLock);
+	LWLockRelease(NotifyQueueLock);
 
 	/* Now send signals */
 	for (int i = 0; i < count; i++)
@@ -1720,7 +1720,7 @@ SignalBackends(void)
 		/*
 		 * Note: assuming things aren't broken, a signal failure here could
 		 * only occur if the target backend exited since we released
-		 * AsyncQueueLock; which is unlikely but certainly possible.  So we
+		 * NotifyQueueLock; which is unlikely but certainly possible.  So we
 		 * just log a low-level debug message if it happens.
 		 */
 		if (SendProcSignal(pid, PROCSIG_NOTIFY_INTERRUPT, ids[i]) < 0)
@@ -1930,12 +1930,12 @@ asyncQueueReadAllNotifications(void)
 	} page_buffer;
 
 	/* Fetch current state */
-	LWLockAcquire(AsyncQueueLock, LW_SHARED);
+	LWLockAcquire(NotifyQueueLock, LW_SHARED);
 	/* Assert checks that we have a valid state entry */
 	Assert(MyProcPid == QUEUE_BACKEND_PID(MyBackendId));
 	pos = oldpos = QUEUE_BACKEND_POS(MyBackendId);
 	head = QUEUE_HEAD;
-	LWLockRelease(AsyncQueueLock);
+	LWLockRelease(NotifyQueueLock);
 
 	if (QUEUE_POS_EQUAL(pos, head))
 	{
@@ -1990,7 +1990,7 @@ asyncQueueReadAllNotifications(void)
 	 * that happens it is critical that we not try to send the same message
 	 * over and over again.  Therefore, we place a PG_TRY block here that will
 	 * forcibly advance our queue position before we lose control to an error.
-	 * (We could alternatively retake AsyncQueueLock and move the position
+	 * (We could alternatively retake NotifyQueueLock and move the position
 	 * before handling each individual message, but that seems like too much
 	 * lock traffic.)
 	 */
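
[Note, not part of the patch: the PG_TRY protection described above takes
roughly the following shape in this function; the PG_FINALLY branch itself is
shown in a later hunk.  Condensed and illustrative only:

	PG_TRY();
	{
		/* read queue pages and send notifications; this may elog(ERROR) */
	}
	PG_FINALLY();
	{
		/* runs on success and on error: record how far we actually got */
		LWLockAcquire(NotifyQueueLock, LW_SHARED);
		QUEUE_BACKEND_POS(MyBackendId) = pos;
		LWLockRelease(NotifyQueueLock);
	}
	PG_END_TRY();
]
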
@@ -2007,11 +2007,11 @@ asyncQueueReadAllNotifications(void)
 
 			/*
 			 * We copy the data from SLRU into a local buffer, so as to avoid
-			 * holding the AsyncCtlLock while we are examining the entries and
-			 * possibly transmitting them to our frontend.  Copy only the part
-			 * of the page we will actually inspect.
+			 * holding the NotifySLRULock while we are examining the entries
+			 * and possibly transmitting them to our frontend.  Copy only the
+			 * part of the page we will actually inspect.
 			 */
-			slotno = SimpleLruReadPage_ReadOnly(AsyncCtl, curpage,
+			slotno = SimpleLruReadPage_ReadOnly(NotifyCtl, curpage,
 												InvalidTransactionId);
 			if (curpage == QUEUE_POS_PAGE(head))
 			{
@@ -2026,10 +2026,10 @@ asyncQueueReadAllNotifications(void)
 				copysize = QUEUE_PAGESIZE - curoffset;
 			}
 			memcpy(page_buffer.buf + curoffset,
-				   AsyncCtl->shared->page_buffer[slotno] + curoffset,
+				   NotifyCtl->shared->page_buffer[slotno] + curoffset,
 				   copysize);
 			/* Release lock that we got from SimpleLruReadPage_ReadOnly() */
-			LWLockRelease(AsyncCtlLock);
+			LWLockRelease(NotifySLRULock);
 
 			/*
 			 * Process messages up to the stop position, end of page, or an
@@ -2040,7 +2040,7 @@ asyncQueueReadAllNotifications(void)
 			 * But if it has, we will receive (or have already received and
 			 * queued) another signal and come here again.
 			 *
-			 * We are not holding AsyncQueueLock here!  The queue can only
+			 * We are not holding NotifyQueueLock here!  The queue can only
 			 * extend beyond the head pointer (see above) and we leave our
 			 * backend's pointer where it is so nobody will truncate or
 			 * rewrite pages under us.  Especially we don't want to hold a lock
@@ -2054,9 +2054,9 @@ asyncQueueReadAllNotifications(void)
 	PG_FINALLY();
 	{
 		/* Update shared state */
-		LWLockAcquire(AsyncQueueLock, LW_SHARED);
+		LWLockAcquire(NotifyQueueLock, LW_SHARED);
 		QUEUE_BACKEND_POS(MyBackendId) = pos;
-		LWLockRelease(AsyncQueueLock);
+		LWLockRelease(NotifyQueueLock);
 	}
 	PG_END_TRY();
 
@@ -2070,7 +2070,7 @@ asyncQueueReadAllNotifications(void)
  *
  * The current page must have been fetched into page_buffer from shared
  * memory.  (We could access the page right in shared memory, but that
- * would imply holding the AsyncCtlLock throughout this routine.)
+ * would imply holding the NotifySLRULock throughout this routine.)
  *
  * We stop if we reach the "stop" position, or reach a notification from an
  * uncommitted transaction, or reach the end of the page.
@@ -2177,7 +2177,7 @@ asyncQueueAdvanceTail(void)
 	int			newtailpage;
 	int			boundary;
 
-	LWLockAcquire(AsyncQueueLock, LW_EXCLUSIVE);
+	LWLockAcquire(NotifyQueueLock, LW_EXCLUSIVE);
 	min = QUEUE_HEAD;
 	for (BackendId i = QUEUE_FIRST_LISTENER; i > 0; i = QUEUE_NEXT_LISTENER(i))
 	{
@@ -2186,7 +2186,7 @@ asyncQueueAdvanceTail(void)
 	}
 	oldtailpage = QUEUE_POS_PAGE(QUEUE_TAIL);
 	QUEUE_TAIL = min;
-	LWLockRelease(AsyncQueueLock);
+	LWLockRelease(NotifyQueueLock);
 
 	/*
 	 * We can truncate something if the global tail advanced across an SLRU
@@ -2200,10 +2200,10 @@ asyncQueueAdvanceTail(void)
 	if (asyncQueuePagePrecedes(oldtailpage, boundary))
 	{
 		/*
-		 * SimpleLruTruncate() will ask for AsyncCtlLock but will also release
-		 * the lock again.
+		 * SimpleLruTruncate() will ask for NotifySLRULock but will also
+		 * release the lock again.
 		 */
-		SimpleLruTruncate(AsyncCtl, newtailpage);
+		SimpleLruTruncate(NotifyCtl, newtailpage);
 	}
 }