/*-------------------------------------------------------------------------
 *
 * bufpage.h
 *	  Standard POSTGRES buffer page definitions.
 *
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/storage/bufpage.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef BUFPAGE_H
#define BUFPAGE_H

#include "access/xlogdefs.h"
#include "storage/block.h"
#include "storage/item.h"
#include "storage/off.h"

/*
 * A postgres disk page is an abstraction layered on top of a postgres
 * disk block (which is simply a unit of i/o, see block.h).
 *
 * specifically, while a disk block can be unformatted, a postgres
 * disk page is always a slotted page of the form:
 *
 * +----------------+---------------------------------+
 * | PageHeaderData | linp1 linp2 linp3 ...           |
 * +-----------+----+---------------------------------+
 * | ... linpN |                                      |
 * +-----------+--------------------------------------+
 * |           ^ pd_lower                             |
 * |                                                  |
 * |             v pd_upper                           |
 * +-------------+------------------------------------+
 * |             | tupleN ...                         |
 * +-------------+------------------+-----------------+
 * |       ... tuple3 tuple2 tuple1 | "special space" |
 * +--------------------------------+-----------------+
 *                                  ^ pd_special
 *
 * a page is full when nothing can be added between pd_lower and
 * pd_upper.
 *
 * all blocks written out by an access method must be disk pages.
 *
 * EXCEPTIONS:
 *
 * obviously, a page is not formatted before it is initialized by
 * a call to PageInit.
 *
 * NOTES:
 *
 * linp1..N form an ItemId (line pointer) array.  ItemPointers point
 * to a physical block number and a logical offset (line pointer
 * number) within that block/page.  Note that OffsetNumbers
 * conventionally start at 1, not 0.
 *
 * tuple1..N are added "backwards" on the page.  Since an ItemPointer
 * offset is used to access an ItemId entry rather than an actual
 * byte-offset position, tuples can be physically shuffled on a page
 * whenever the need arises.  This indirection also keeps crash recovery
 * relatively simple, because the low-level details of page space
 * management can be controlled by standard buffer page code during
 * logging, and during recovery.
 *
 * AM-generic per-page information is kept in PageHeaderData.
 *
 * AM-specific per-page data (if any) is kept in the area marked "special
 * space"; each AM has an "opaque" structure defined somewhere that is
 * stored as the page trailer.  an access method should always
 * initialize its pages with PageInit and then set its own opaque
 * fields.
 */

typedef Pointer Page;


/*
 * location (byte offset) within a page.
 *
 * note that this is actually limited to 2^15 because we have limited
 * ItemIdData.lp_off and ItemIdData.lp_len to 15 bits (see itemid.h).
 */
typedef uint16 LocationIndex;


/*
 * For historical reasons, the 64-bit LSN value is stored as two 32-bit
 * values.
 */
typedef struct
{
	uint32		xlogid;			/* high bits */
	uint32		xrecoff;		/* low bits */
} PageXLogRecPtr;

#define PageXLogRecPtrGet(val) \
	((uint64) (val).xlogid << 32 | (val).xrecoff)
#define PageXLogRecPtrSet(ptr, lsn) \
	((ptr).xlogid = (uint32) ((lsn) >> 32), (ptr).xrecoff = (uint32) (lsn))
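
/*
 * Illustrative sketch only (a hypothetical helper, not part of the real
 * API): packing a 64-bit XLogRecPtr into the two on-page halves with
 * PageXLogRecPtrSet and reading it back with PageXLogRecPtrGet is lossless,
 * since the high and low 32 bits are stored unmodified.
 */
static inline XLogRecPtr
PageXLogRecPtrExampleRoundTrip(XLogRecPtr lsn)
{
	PageXLogRecPtr stored;

	PageXLogRecPtrSet(stored, lsn);

	/* the two 32-bit halves reassemble into the original 64-bit value */
	return (XLogRecPtr) PageXLogRecPtrGet(stored);
}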

/*
 * disk page organization
 *
 * space management information generic to any page
 *
 *		pd_lsn		- identifies xlog record for last change to this page.
 *		pd_checksum - page checksum, if set.
 *		pd_flags	- flag bits.
 *		pd_lower	- offset to start of free space.
 *		pd_upper	- offset to end of free space.
 *		pd_special	- offset to start of special space.
 *		pd_pagesize_version - size in bytes and page layout version number.
 *		pd_prune_xid - oldest XID among potentially prunable tuples on page.
 *
 * The LSN is used by the buffer manager to enforce the basic rule of WAL:
 * "thou shalt write xlog before data".  A dirty buffer cannot be dumped
 * to disk until xlog has been flushed at least as far as the page's LSN.
 *
 * pd_checksum stores the page checksum, if it has been set for this page;
 * zero is a valid value for a checksum.  If a checksum is not in use then
 * we leave the field unset.  This will typically mean the field is zero
 * though non-zero values may also be present if databases have been
 * pg_upgraded from releases prior to 9.3, when the same byte offset was
 * used to store the current timelineid when the page was last updated.
 * Note that there is no indication on a page as to whether the checksum
 * is valid or not, a deliberate design choice which avoids the problem
 * of relying on the page contents to decide whether to verify it.  Hence
 * there are no flag bits relating to checksums.
 *
 * pd_prune_xid is a hint field that helps determine whether pruning will be
 * useful.  It is currently unused in index pages.
 *
 * The page version number and page size are packed together into a single
 * uint16 field.  This is for historical reasons: before PostgreSQL 7.3,
 * there was no concept of a page version number, and doing it this way
 * lets us pretend that pre-7.3 databases have page version number zero.
 * We constrain page sizes to be multiples of 256, leaving the low eight
 * bits available for a version number.
 *
 * Minimum possible page size is perhaps 64B to fit page header, opaque space
 * and a minimal tuple; of course, in reality you want it much bigger, so
 * the constraint on pagesize mod 256 is not an important restriction.
 * On the high end, we can only support pages up to 32KB because lp_off/lp_len
 * are 15 bits.
 */

typedef struct PageHeaderData
{
	/* XXX LSN is member of *any* block, not only page-organized ones */
	PageXLogRecPtr pd_lsn;		/* LSN: next byte after last byte of xlog
								 * record for last change to this page */
	uint16		pd_checksum;	/* checksum */
	uint16		pd_flags;		/* flag bits, see below */
	LocationIndex pd_lower;		/* offset to start of free space */
	LocationIndex pd_upper;		/* offset to end of free space */
	LocationIndex pd_special;	/* offset to start of special space */
	uint16		pd_pagesize_version;
	TransactionId pd_prune_xid; /* oldest prunable XID, or zero if none */
	ItemIdData	pd_linp[FLEXIBLE_ARRAY_MEMBER]; /* line pointer array */
} PageHeaderData;

typedef PageHeaderData *PageHeader;
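
/*
 * Illustrative sketch only (a hypothetical helper, not an official part of
 * this API): on an initialized page, the raw hole between the line pointer
 * array and the stored tuples is simply pd_upper - pd_lower.  The real
 * PageGetFreeSpace() declared at the bottom of this file additionally
 * reserves room for one new line pointer, so treat this purely as a
 * demonstration of how the header fields relate to each other.
 */
static inline Size
PageExampleRawFreeSpace(Page page)
{
	PageHeader	p = (PageHeader) page;

	/* an uninitialized or corrupt page reports no free space */
	if (p->pd_upper <= p->pd_lower)
		return 0;
	return (Size) (p->pd_upper - p->pd_lower);
}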

/*
 * pd_flags contains the following flag bits.  Undefined bits are initialized
 * to zero and may be used in the future.
 *
 * PD_HAS_FREE_LINES is set if there are any LP_UNUSED line pointers before
 * pd_lower.  This should be considered a hint rather than the truth, since
 * changes to it are not WAL-logged.
 *
 * PD_PAGE_FULL is set if an UPDATE doesn't find enough free space in the
 * page for its new tuple version; this suggests that a prune is needed.
 * Again, this is just a hint.
 */
#define PD_HAS_FREE_LINES	0x0001	/* are there any unused line pointers? */
#define PD_PAGE_FULL		0x0002	/* not enough free space for new tuple? */
#define PD_ALL_VISIBLE		0x0004	/* all tuples on page are visible to
									 * everyone */

#define PD_VALID_FLAG_BITS	0x0007	/* OR of all valid pd_flags bits */

/*
 * Page layout version number 0 is for pre-7.3 Postgres releases.
 * Releases 7.3 and 7.4 use 1, denoting a new HeapTupleHeader layout.
 * Release 8.0 uses 2; it changed the HeapTupleHeader layout again.
 * Release 8.1 uses 3; it redefined HeapTupleHeader infomask bits.
 * Release 8.3 uses 4; it changed the HeapTupleHeader layout again, and
 *		added the pd_flags field (by stealing some bits from pd_tli),
 *		as well as adding the pd_prune_xid field (which enlarges the header).
 *
 * As of Release 9.3, the checksum version must also be considered when
 * handling pages.
 */
#define PG_PAGE_LAYOUT_VERSION		4
#define PG_DATA_CHECKSUM_VERSION	1


/* ----------------------------------------------------------------
 *						page support macros
 * ----------------------------------------------------------------
 */

/*
 * PageIsValid
 *		True iff page is valid.
 */
#define PageIsValid(page) PointerIsValid(page)

/*
 * line pointer(s) do not count as part of header
 */
#define SizeOfPageHeaderData (offsetof(PageHeaderData, pd_linp))

/*
 * PageIsEmpty
 *		returns true iff no itemid has been allocated on the page
 */
#define PageIsEmpty(page) \
	(((PageHeader) (page))->pd_lower <= SizeOfPageHeaderData)

/*
 * PageIsNew
 *		returns true iff page has not been initialized (by PageInit)
 */
#define PageIsNew(page) (((PageHeader) (page))->pd_upper == 0)

/*
 * PageGetItemId
 *		Returns an item identifier of a page.
 */
#define PageGetItemId(page, offsetNumber) \
	((ItemId) (&((PageHeader) (page))->pd_linp[(offsetNumber) - 1]))

/*
 * PageGetContents
 *		To be used in cases where the page does not contain line pointers.
 *
 * Note: prior to 8.3 this was not guaranteed to yield a MAXALIGN'd result.
 * Now it is.  Beware of old code that might think the offset to the contents
 * is just SizeOfPageHeaderData rather than MAXALIGN(SizeOfPageHeaderData).
 */
#define PageGetContents(page) \
	((char *) (page) + MAXALIGN(SizeOfPageHeaderData))

/* ----------------
 *		macros to access page size info
 * ----------------
 */

/*
 * PageSizeIsValid
 *		True iff the page size is valid.
 */
#define PageSizeIsValid(pageSize) ((pageSize) == BLCKSZ)

/*
 * PageGetPageSize
 *		Returns the page size of a page.
 *
 * this can only be called on a formatted page (unlike
 * BufferGetPageSize, which can be called on an unformatted page).
 * however, it can be called on a page that is not stored in a buffer.
 */
#define PageGetPageSize(page) \
	((Size) (((PageHeader) (page))->pd_pagesize_version & (uint16) 0xFF00))

/*
 * PageGetPageLayoutVersion
 *		Returns the page layout version of a page.
 */
#define PageGetPageLayoutVersion(page) \
	(((PageHeader) (page))->pd_pagesize_version & 0x00FF)

/*
 * PageSetPageSizeAndVersion
 *		Sets the page size and page layout version number of a page.
 *
 * We could support setting these two values separately, but there's
 * no real need for it at the moment.
 */
#define PageSetPageSizeAndVersion(page, size, version) \
( \
	AssertMacro(((size) & 0xFF00) == (size)), \
	AssertMacro(((version) & 0x00FF) == (version)), \
	((PageHeader) (page))->pd_pagesize_version = (size) | (version) \
)
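
/*
 * Worked example (assuming the default BLCKSZ of 8192 and layout version 4,
 * i.e. PG_PAGE_LAYOUT_VERSION above): PageSetPageSizeAndVersion(page, 8192, 4)
 * stores 0x2000 | 0x0004 = 0x2004 in pd_pagesize_version.  PageGetPageSize()
 * then masks with 0xFF00 and recovers 0x2000 = 8192, while
 * PageGetPageLayoutVersion() masks with 0x00FF and recovers 4.  This packing
 * only works because page sizes are constrained to multiples of 256.
 */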

/* ----------------
 *		page special data macros
 * ----------------
 */
/*
 * PageGetSpecialSize
 *		Returns size of special space on a page.
 */
#define PageGetSpecialSize(page) \
	((uint16) (PageGetPageSize(page) - ((PageHeader) (page))->pd_special))

/*
 * Using assertions, validate that the page special pointer is OK.
 *
 * This is intended to catch use of the pointer before page initialization.
 * It is implemented as a function due to the limitations of the MSVC
 * compiler, which choked on doing all these tests within another macro.  We
 * return true so that AssertMacro() can be used while still getting the
 * specifics from the macro failure within this function.
 */
static inline bool
PageValidateSpecialPointer(Page page)
{
	Assert(PageIsValid(page));
	Assert(((PageHeader) (page))->pd_special <= BLCKSZ);
	Assert(((PageHeader) (page))->pd_special >= SizeOfPageHeaderData);

	return true;
}

/*
 * PageGetSpecialPointer
 *		Returns pointer to special space on a page.
 */
#define PageGetSpecialPointer(page) \
( \
	AssertMacro(PageValidateSpecialPointer(page)), \
	(char *) ((char *) (page) + ((PageHeader) (page))->pd_special) \
)
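
/*
 * Usage sketch (illustrative only; DemoOpaqueData is a made-up structure,
 * not one belonging to any real access method): an AM typically declares a
 * trailer struct, passes its size to PageInit as the specialSize argument,
 * and later recovers a pointer to it through PageGetSpecialPointer:
 *
 *		typedef struct DemoOpaqueData
 *		{
 *			BlockNumber next_blkno;
 *			uint16		flags;
 *		} DemoOpaqueData;
 *
 *		PageInit(page, BLCKSZ, sizeof(DemoOpaqueData));
 *		opaque = (DemoOpaqueData *) PageGetSpecialPointer(page);
 */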

/*
 * PageGetItem
 *		Retrieves an item on the given page.
 *
 * Note:
 *		This does not change the status of any of the resources passed.
 *		The semantics may change in the future.
 */
#define PageGetItem(page, itemId) \
( \
	AssertMacro(PageIsValid(page)), \
	AssertMacro(ItemIdHasStorage(itemId)), \
	(Item) (((char *) (page)) + ItemIdGetOffset(itemId)) \
)

/*
 * PageGetMaxOffsetNumber
 *		Returns the maximum offset number used by the given page.
 *		Since offset numbers are 1-based, this is also the number
 *		of items on the page.
 *
 * NOTE: if the page is not initialized (pd_lower == 0), we must
 * return zero to ensure sane behavior.  Accept double evaluation
 * of the argument so that we can ensure this.
 */
#define PageGetMaxOffsetNumber(page) \
	(((PageHeader) (page))->pd_lower <= SizeOfPageHeaderData ? 0 : \
	 ((((PageHeader) (page))->pd_lower - SizeOfPageHeaderData) \
	  / sizeof(ItemIdData)))
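
/*
 * Usage sketch (illustrative only): the conventional way to visit every
 * item on an initialized page (with the appropriate buffer lock already
 * held) combines the macros above; process_item() is a placeholder for
 * whatever the caller wants to do with each tuple:
 *
 *		OffsetNumber offnum;
 *		OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
 *
 *		for (offnum = FirstOffsetNumber; offnum <= maxoff;
 *			 offnum = OffsetNumberNext(offnum))
 *		{
 *			ItemId		itemid = PageGetItemId(page, offnum);
 *
 *			if (!ItemIdIsNormal(itemid))
 *				continue;
 *			process_item(PageGetItem(page, itemid));
 *		}
 */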

/*
 * Additional macros for access to page headers. (Beware multiple evaluation
 * of the arguments!)
 */
#define PageGetLSN(page) \
	PageXLogRecPtrGet(((PageHeader) (page))->pd_lsn)
#define PageSetLSN(page, lsn) \
	PageXLogRecPtrSet(((PageHeader) (page))->pd_lsn, lsn)

#define PageHasFreeLinePointers(page) \
	(((PageHeader) (page))->pd_flags & PD_HAS_FREE_LINES)
#define PageSetHasFreeLinePointers(page) \
	(((PageHeader) (page))->pd_flags |= PD_HAS_FREE_LINES)
#define PageClearHasFreeLinePointers(page) \
	(((PageHeader) (page))->pd_flags &= ~PD_HAS_FREE_LINES)

#define PageIsFull(page) \
	(((PageHeader) (page))->pd_flags & PD_PAGE_FULL)
#define PageSetFull(page) \
	(((PageHeader) (page))->pd_flags |= PD_PAGE_FULL)
#define PageClearFull(page) \
	(((PageHeader) (page))->pd_flags &= ~PD_PAGE_FULL)

#define PageIsAllVisible(page) \
	(((PageHeader) (page))->pd_flags & PD_ALL_VISIBLE)
#define PageSetAllVisible(page) \
	(((PageHeader) (page))->pd_flags |= PD_ALL_VISIBLE)
#define PageClearAllVisible(page) \
	(((PageHeader) (page))->pd_flags &= ~PD_ALL_VISIBLE)

#define PageIsPrunable(page, oldestxmin) \
( \
	AssertMacro(TransactionIdIsNormal(oldestxmin)), \
	TransactionIdIsValid(((PageHeader) (page))->pd_prune_xid) && \
	TransactionIdPrecedes(((PageHeader) (page))->pd_prune_xid, oldestxmin) \
)
#define PageSetPrunable(page, xid) \
do { \
	Assert(TransactionIdIsNormal(xid)); \
	if (!TransactionIdIsValid(((PageHeader) (page))->pd_prune_xid) || \
		TransactionIdPrecedes(xid, ((PageHeader) (page))->pd_prune_xid)) \
		((PageHeader) (page))->pd_prune_xid = (xid); \
} while (0)
#define PageClearPrunable(page) \
	(((PageHeader) (page))->pd_prune_xid = InvalidTransactionId)


/* ----------------------------------------------------------------
 *		extern declarations
 * ----------------------------------------------------------------
 */
#define PAI_OVERWRITE			(1 << 0)
#define PAI_IS_HEAP				(1 << 1)

#define PageAddItem(page, item, size, offsetNumber, overwrite, is_heap) \
	PageAddItemExtended(page, item, size, offsetNumber, \
						((overwrite) ? PAI_OVERWRITE : 0) | \
						((is_heap) ? PAI_IS_HEAP : 0))
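
/*
 * Usage sketch (illustrative only; buf, tuple and tuplen are placeholders
 * supplied by the caller): a heap-style page is first laid out with
 * PageInit(), declared below, and items are then appended with
 * PageAddItem().  Passing InvalidOffsetNumber lets the page code pick the
 * line pointer to use, and InvalidOffsetNumber is returned on failure:
 *
 *		PageInit(page, BufferGetPageSize(buf), 0);
 *		offnum = PageAddItem(page, (Item) tuple, tuplen,
 *							 InvalidOffsetNumber, false, true);
 *		if (offnum == InvalidOffsetNumber)
 *			elog(ERROR, "failed to add tuple to page");
 */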

extern void PageInit(Page page, Size pageSize, Size specialSize);
extern bool PageIsVerified(Page page, BlockNumber blkno);
extern OffsetNumber PageAddItemExtended(Page page, Item item, Size size,
										OffsetNumber offsetNumber, int flags);
extern Page PageGetTempPage(Page page);
extern Page PageGetTempPageCopy(Page page);
extern Page PageGetTempPageCopySpecial(Page page);
extern void PageRestoreTempPage(Page tempPage, Page oldPage);
extern void PageRepairFragmentation(Page page);
extern Size PageGetFreeSpace(Page page);
extern Size PageGetFreeSpaceForMultipleTuples(Page page, int ntups);
extern Size PageGetExactFreeSpace(Page page);
extern Size PageGetHeapFreeSpace(Page page);
extern void PageIndexTupleDelete(Page page, OffsetNumber offset);
extern void PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems);
extern void PageIndexTupleDeleteNoCompact(Page page, OffsetNumber offset);
extern bool PageIndexTupleOverwrite(Page page, OffsetNumber offnum,
									Item newtup, Size newsize);
extern char *PageSetChecksumCopy(Page page, BlockNumber blkno);
extern void PageSetChecksumInplace(Page page, BlockNumber blkno);
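
/*
 * Usage sketch (illustrative only; bufToWrite and blocknum are placeholders):
 * checksums are computed only when a page is about to be written out, since
 * the in-memory copy keeps changing.  A writer therefore does something
 * along these lines just before handing the page to the storage layer:
 *
 *		bufToWrite = PageSetChecksumCopy(page, blocknum);
 *		... write bufToWrite to disk ...
 *
 * PageSetChecksumInplace() is the variant used when the caller owns a
 * private copy of the page and may overwrite its checksum field directly.
 */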
#endif /* BUFPAGE_H */