static int	ckpt_buforder_comparator(const void *pa, const void *pb);
static int	ts_ckpt_progress_comparator(Datum a, Datum b, void *arg);
/*
 * Implementation of PrefetchBuffer() for shared buffers.
 */
PrefetchBufferResult
PrefetchSharedBuffer(SMgrRelation smgr_reln,
					 ForkNumber forkNum,
					 BlockNumber blockNum)
{
	PrefetchBufferResult result = {InvalidBuffer, false};
	BufferTag	tag;			/* identity of requested block */
	uint32		hash;			/* hash value for tag */
	LWLock	   *partition_lock; /* buffer partition lock for it */
	int			existing_id;

	Assert(BlockNumberIsValid(blockNum));

	/* create a tag so we can look the buffer up in the mapping table */
	INIT_BUFFERTAG(tag, smgr_reln->smgr_rnode.node,
				   forkNum, blockNum);

	/* determine its hash code and partition lock ID */
	hash = BufTableHashCode(&tag);
	partition_lock = BufMappingPartitionLock(hash);

	/* see if the block is in the buffer pool already */
	LWLockAcquire(partition_lock, LW_SHARED);
	existing_id = BufTableLookup(&tag, hash);
	LWLockRelease(partition_lock);

	if (existing_id >= 0)
	{
		/*
		 * Report the buffer it was in at that time.  The caller may be able
		 * to avoid a buffer table lookup, but it's not pinned and it must be
		 * rechecked!
		 */
		result.recent_buffer = existing_id + 1;
	}
	else
	{
		/* Not in buffers: initiate prefetch if this build supports it. */
#ifdef USE_PREFETCH
		/*
		 * Try to initiate an asynchronous read.  This returns false in
		 * recovery if the relation file doesn't exist.
		 */
		if (smgrprefetch(smgr_reln, forkNum, blockNum))
			result.initiated_io = true;
#endif							/* USE_PREFETCH */
	}

	/*
	 * If the block *is* in buffers, we do nothing.  This is not really ideal:
	 * the block might be just about to be evicted, which would be stupid
	 * since we know we are going to need it soon.  But the only easy answer
	 * is to bump the usage_count, which does not seem like a great solution:
	 * when the caller does ultimately touch the block, usage_count would get
	 * bumped again, resulting in too much favoritism for blocks that are
	 * involved in a prefetch sequence.  A real fix would involve some
	 * additional per-buffer state, and it's not clear that there's enough of
	 * a problem to justify that.
	 */
	return result;
}
/*
* PrefetchBuffer - - initiate asynchronous read of a block of a relation
*
* This is named by analogy to ReadBuffer but doesn ' t actually allocate a
* buffer . Instead it tries to ensure that a future ReadBuffer for the given
* block will not be delayed by the I / O . Prefetching is optional .
* No - op if prefetching isn ' t compiled in .
*
* There are three possible outcomes :
*
* 1. If the block is already cached , the result includes a valid buffer that
* could be used by the caller to avoid the need for a later buffer lookup , but
* it ' s not pinned , so the caller must recheck it .
*
* 2. If the kernel has been asked to initiate I / O , the initated_io member is
* true . Currently there is no way to know if the data was already cached by
* the kernel and therefore didn ' t really initiate I / O , and no way to know when
* the I / O completes other than using synchronous ReadBuffer ( ) .
*
* 3. Otherwise , the buffer wasn ' t already cached by PostgreSQL , and either
* USE_PREFETCH is not defined ( this build doesn ' t support prefetching due to
* lack of a kernel facility ) , or the underlying relation file wasn ' t found and
* we are in recovery . ( If the relation file wasn ' t found and we are not in
* recovery , an error is raised ) .
*/
void
PrefetchBufferResult
PrefetchBuffer ( Relation reln , ForkNumber forkNum , BlockNumber blockNum )
{
# ifdef USE_PREFETCH
Assert ( RelationIsValid ( reln ) ) ;
Assert ( BlockNumberIsValid ( blockNum ) ) ;
@ -507,45 +588,13 @@ PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
errmsg ( " cannot access temporary tables of other sessions " ) ) ) ;
/* pass it off to localbuf.c */
LocalPrefetch Buffer( reln - > rd_smgr , forkNum , blockNum ) ;
return PrefetchLocal Buffer( reln - > rd_smgr , forkNum , blockNum ) ;
}
else
{
BufferTag newTag ; /* identity of requested block */
uint32 newHash ; /* hash value for newTag */
LWLock * newPartitionLock ; /* buffer partition lock for it */
int buf_id ;
/* create a tag so we can lookup the buffer */
INIT_BUFFERTAG ( newTag , reln - > rd_smgr - > smgr_rnode . node ,
forkNum , blockNum ) ;
/* determine its hash code and partition lock ID */
newHash = BufTableHashCode ( & newTag ) ;
newPartitionLock = BufMappingPartitionLock ( newHash ) ;
/* see if the block is in the buffer pool already */
LWLockAcquire ( newPartitionLock , LW_SHARED ) ;
buf_id = BufTableLookup ( & newTag , newHash ) ;
LWLockRelease ( newPartitionLock ) ;
/* If not in buffers, initiate prefetch */
if ( buf_id < 0 )
smgrprefetch ( reln - > rd_smgr , forkNum , blockNum ) ;
/*
* If the block * is * in buffers , we do nothing . This is not really
* ideal : the block might be just about to be evicted , which would be
* stupid since we know we are going to need it soon . But the only
* easy answer is to bump the usage_count , which does not seem like a
* great solution : when the caller does ultimately touch the block ,
* usage_count would get bumped again , resulting in too much
* favoritism for blocks that are involved in a prefetch sequence . A
* real fix would involve some additional per - buffer state , and it ' s
* not clear that there ' s enough of a problem to justify that .
*/
/* pass it to the shared buffer version */
return PrefetchSharedBuffer ( reln - > rd_smgr , forkNum , blockNum ) ;
}
# endif /* USE_PREFETCH */
}