mirror of https://github.com/postgres/postgres
59 Commits (d66a922f9247ceaa6caed9b51ce686f12e280091)

**3668c1d506** Detect version mismatch in brin_page_items (1 year ago)
Commit …

**9b3c3c0fc2** Fix contrib/pageinspect's test for sequences. (1 year ago)
I managed to break this test in two different ways in commit …

**6ea7f04b73** Reintroduce support for sequences in pgstattuple and pageinspect. (1 year ago)
Commit …

**47b47a5617** Remove incidental md5() function use from test (1 year ago)
To allow test to pass in OpenSSL FIPS mode, similar to …

**0bd4b0689b** Stabilize test of BRIN parallel create (2 years ago)
As explained in …

**a89cd7bfcd** Revert "Stabilize test of BRIN parallel create" (2 years ago)
This reverts commit …

**4d916dd876** Stabilize test of BRIN parallel create (2 years ago)
The test for parallel create of BRIN indexes added by commit …

**8225c2fd40** Add regression test for BRIN parallel builds (2 years ago)
Adds a regression test for parallel CREATE INDEX for BRIN indexes, to improve coverage for BRIN code, particularly code to allow parallel index builds introduced by …

**8af2565248** Introduce a new smgr bulk loading facility. (2 years ago)
The new facility makes it easier to optimize bulk loading, as the logic for buffering, WAL-logging, and syncing the relation only needs to be implemented once. It's also less error-prone: We have had a number of bugs in how a relation is fsync'd - or not - at the end of a bulk loading operation. By centralizing that logic to one place, we only need to write it correctly once. The new facility is faster for small relations: Instead of calling smgrimmedsync(), we register the fsync to happen at next checkpoint, which avoids the fsync latency. That can make a big difference if you are e.g. restoring a schema-only dump with lots of relations. It is also slightly more efficient with large relations, as the WAL logging is performed multiple pages at a time. That avoids some WAL header overhead. The sorted GiST index build did that already; this moves the buffering to the new facility. The changes to the pageinspect GiST test need an explanation: Before this patch, the sorted GiST index build set the LSN on every page to the special GistBuildLSN value, not the LSN of the WAL record, even though they were WAL-logged. There was no particular need for it, it just happened naturally when we wrote out the pages before WAL-logging them. Now we WAL-log the pages first, like in B-tree build, so the pages are stamped with the record's real LSN. When the build is not WAL-logged, we still use GistBuildLSN. To make the test output predictable, use an unlogged index.
Reviewed-by: Andres Freund
Discussion: https://www.postgresql.org/message-id/30e8f366-58b3-b239-c521-422122dd5150%40iki.fi

**2084701364** pageinspect: Fix failure with hash_bitmap_info() for partitioned indexes (2 years ago)
This function reads directly a page from a relation, relying on index_open() to open the index to read from. Unfortunately, this would crash when using partitioned indexes, as these can be opened with index_open() but they have no physical pages. Alexander has fixed the module, while I have written the test.
Author: Alexander Lakhin, Michael Paquier
Discussion: https://postgr.es/m/18246-f4d9ff7cb3af77e6@postgresql.org
Backpatch-through: 12
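
As a usage note for the function named above, a minimal sketch follows; the index name is hypothetical and the argument list (index, overflow block number) is quoted from memory of the pageinspect documentation, so treat it as an assumption.

```sql
-- Look up the bitmap bit tracking overflow block 5 of a (non-partitioned)
-- hash index; after this fix, partitioned indexes raise an error instead
-- of crashing the backend.
SELECT * FROM hash_bitmap_info('test_hash_idx', 5);
```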

**dae761a87e** Add empty BRIN ranges during CREATE INDEX (2 years ago)
When building BRIN indexes, the brinbuildCallback only advances to the next page range when seeing a tuple that doesn't belong to the current one. This means that the index may end up missing ranges at the end of the table, if those pages do not contain any indexable tuples. We tend not to have completely empty pages at the end of a relation, but this also applies to partial indexes, where the tuples may simply not match the index predicate. This results in inefficient scans using the affected BRIN index - without the summaries, the page ranges have to be read and processed, which consumes I/O and possibly also CPU time. The existing code already added empty ranges for earlier parts of the table; this commit makes sure we add them for the ranges at the end of the table too. Patch by Matthias van de Meent, with review/improvements by me.
Author: Matthias van de Meent
Reviewed-by: Tomas Vondra
Discussion: https://postgr.es/m/CAEze2WiMsPZg%3DxkvSF_jt4%3D69k6K7gz5B8V2wY3gCGZ%2B1BzCbQ%40mail.gmail.com

**e7bff46e50** pageinspect: Fix gist_page_items() with included columns (3 years ago)
Non-leaf pages of GiST indexes contain key attributes, leaf pages contain both key and non-key attributes, and gist_page_items() ignored the handling of non-key attributes. This caused a few problems when using gist_page_items() on a GiST index with INCLUDE:
- On a non-leaf page, the function would crash.
- On a leaf page, the function would work, but miss to display all the values for included attributes.
This commit fixes gist_page_items() to handle such cases in a more appropriate way, and now displays the values of key and non-key attributes for each item separately in a style consistent with what ruleutils.c would generate for the attribute list, depending on the page type dealt with. In a way similar to how a record is displayed, values would be double-quoted for key or non-key attributes if required. ruleutils.c did not provide a routine able to control if non-key attributes should be displayed, so an extended() routine for index definitions is added to work around the leaf and non-leaf page differences. While on it, this commit fixes a third problem related to the amount of data reported for key attributes. The code originally relied on BuildIndexValueDescription() (used for error reports on constraints) that would not print all the data stored in the index but the index opclass's input type, so this limited the amount of information available. This switch makes gist_page_items() much cheaper as there is no need to run ACL checks for each item printed, which is not an issue anyway as superuser rights are required to execute the functions of pageinspect. Opclasses whose data cannot be displayed can rely on gist_page_items_bytea(). The documentation of this function was slightly incorrect for the output results generated on HEAD and v15, so adjust it on these branches.
Author: Alexander Lakhin, Michael Paquier
Discussion: https://postgr.es/m/17884-cb8c326522977acb@postgresql.org
Backpatch-through: 14

**428c0cae92** Show empty BRIN ranges in brin_page_items (3 years ago)
Commit …

**be753639d3** pageinspect: Fix crash with gist_page_items() (3 years ago)
Attempting to use this function with a raw page not coming from a GiST index would cause a crash, as it was missing the same sanity checks as gist_page_items_bytea(). This slightly refactors the code so that all the basic validation checks for GiST pages are done in a single routine, in the same fashion as the pageinspect functions for hash and BRIN. This fixes an issue similar to …

**1fd3dd2048** Add bt_multi_page_stats() function to contrib/pageinspect. (3 years ago)
This is like the existing bt_page_stats() function, but it can report on a range of pages rather than just one at a time. I don't have a huge amount of faith in the portability of the new test cases, but they do pass in a 32-bit FreeBSD VM here. Further adjustment may be needed depending on buildfarm results. Hamid Akhtar, reviewed by Naeem Akhter, Bertrand Drouvot, Bharath Rupireddy, and myself
Discussion: https://postgr.es/m/CANugjht-=oGMRmNJKMqnBC69y7vr+wHDmm0ZK6-1pJsxoBKBbA@mail.gmail.com
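
A minimal sketch of the new function, assuming a hypothetical btree index named test_idx; the argument order (relation, starting block, number of blocks) follows the pageinspect documentation and should be double-checked against the installed version.

```sql
-- Per-page statistics for blocks 1 through 3 of the index, in one call,
-- instead of three separate bt_page_stats() invocations.
SELECT blkno, type, live_items, dead_items, free_size, btpo_level
FROM bt_multi_page_stats('test_idx', 1, 3);
```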

**e2933a6e11** Prevent instability in contrib/pageinspect's regression test. (3 years ago)
pageinspect has occasionally failed on slow buildfarm members, with symptoms indicating that the expected effects of VACUUM FREEZE didn't happen. This is presumably because a background transaction such as auto-analyze was holding back global xmin. We can work around that by using a temp table in the test. Since commit …

**cd4868a570** pageinspect: Fix handling of all-zero pages (4 years ago)
Getting from get_raw_page() an all-zero page is considered as a valid case by the buffer manager and it can happen for example when finding a corrupted page with zero_damaged_pages enabled (using zero_damaged_pages to look at corrupted pages happens), or after a crash when a relation file is extended before any WAL for its new data is generated (before a vacuum or autovacuum job comes in to do some cleanup). However, all the functions of pageinspect, as of the index AMs (except hash that has its own idea of new pages), heap, the FSM or the page header have never worked with all-zero pages, causing various crashes when going through the page internals. This commit changes all the pageinspect functions to be compliant with all-zero pages, where the choice is made to return NULL or no rows for SRFs when finding a new page. get_raw_page() still works the same way, returning a batch of zeros in the bytea of the page retrieved. A hard error could be used but NULL, while more invasive, is useful when scanning relation files in full to get a batch of results for a single relation in one query. Tests are added for all the code paths impacted.
Reported-by: Daria Lepikhova
Author: Michael Paquier
Discussion: https://postgr.es/m/561e187b-3549-c8d5-03f5-525c14e65bd0@postgrespro.ru
Backpatch-through: 10

**291e517a4d** pageinspect: Add more sanity checks to prevent out-of-bound reads (4 years ago)
A couple of code paths use the special area on the page passed by the function caller, expecting to find some data in it. However, feeding an incorrect page can lead to out-of-bound reads when trying to access the page special area (like a heap page that has no special area, leading PageGetSpecialPointer() to grab a pointer outside the allocated page). The functions used for hash and btree indexes have some protection already against that, while some other functions using a relation OID as argument would make sure that the access method involved is correct, but functions taking in input a raw page without knowing the relation the page is attached to would run into problems. This commit improves the set of checks used in the code paths of BRIN, btree (including one check if a leaf page is found with a non-zero level), GIN and GiST to verify that the page given in input has a special area size that fits with each access method, which is done through PageGetSpecialSize(), before calling PageGetSpecialPointer(). The scope of the checks done is limited to work with pages that one would pass after getting a block with get_raw_page(), as it is possible to craft byteas that could bypass existing code paths. Having too many checks would also impact the usability of pageinspect, as the existing code is very useful to look at the content details in a corrupted page, so the focus is really to avoid out-of-bound reads as this is never a good thing even with functions whose execution is limited to superusers. The safest approach could be to rework the functions so as these fetch a block using a relation OID and a block number, but there are also cases where using a raw page is useful. Tests are added to cover all the code paths that needed such checks, and an error message for hash indexes is reworded to fit better with what this commit adds.
Reported-By: Alexander Lakhin
Author: Julien Rouhaud, Michael Paquier
Discussion: https://postgr.es/m/16527-ef7606186f0610a1@postgresql.org
Discussion: https://postgr.es/m/561e187b-3549-c8d5-03f5-525c14e65bd0@postgrespro.ru
Backpatch-through: 10

**076f4d9539** pageinspect: Fix handling of page sizes and AM types (4 years ago)
This commit fixes a set of issues related to the use of the SQL functions in this module when the caller is able to pass down raw page data as input argument:
- The page size check was fuzzy in a couple of places, sometimes looking after only a sub-range, but what we are looking for is an exact match on BLCKSZ. After considering a few options here, I have settled down to do a generalization of get_page_from_raw(). Most of the SQL functions already used that, and this is not strictly required if not accessing an 8-byte-wide value from a raw page, but this feels safer in the long run for alignment-picky environments, particularly if a code path begins to access such values. This also reduces the number of strings that need to be translated.
- The BRIN function brin_page_items() uses a Relation but it did not check the access method of the opened index, potentially leading to crashes. All the other functions in need of a Relation already did that.
- Some code paths could fail on elog(), but we should use ereport() for failures that can be triggered by the user.
Tests are added to stress all the cases that are fixed as of this commit, with some junk raw pages (\set VERBOSITY ensures that this works across all page sizes) and unexpected index types when functions open relations.
Author: Michael Paquier, Justin Pryzby
Discussion: https://postgr.es/m/20220218030020.GA1137@telsasoft.com
Backpatch-through: 10

**6bdf1a1400** Fix collection of typos in the code and the documentation (4 years ago)
Some words were duplicated while other places were grammatically incorrect, including one variable name in the code.
Author: Otto Kekalainen, Justin Pryzby
Discussion: https://postgr.es/m/7DDBEFC5-09B6-4325-B942-B563D1A24BDC@amazon.com

**f1ea98a797** Reduce non-leaf keys overlap in GiST indexes produced by a sorted build (4 years ago)
The GiST sorted build currently chooses split points according to page space utilization only. That may lead to higher non-leaf keys overlap and, in turn, slower search query answers. This commit makes the sorted build use the opclass's picksplit method. Once four pages at the level are accumulated, the picksplit method is applied until each split partition fits the page. Some of our split algorithms could show significant performance degradation while processing 4-times more data at once. But those opclasses haven't received the sorted build support and shouldn't receive it before their split algorithms are improved.
Discussion: https://postgr.es/m/CAHqSB9jqtS94e9%3D0vxqQX5dxQA89N95UKyz-%3DA7Y%2B_YJt%2BVW5A%40mail.gmail.com
Author: Aliaksandr Kalenik, Sergei Shoulbakov, Andrey Borodin
Reviewed-by: Björn Harrtell, Darafei Praliaskouski, Andres Freund
Reviewed-by: Alexander Korotkov

**127404fbe2** pageinspect: Improve page_header() for pages of 32kB (5 years ago)
pd_upper, pd_lower, pd_special and the page size have been using smallint as return type, which could cause those fields to return negative values in certain cases for builds configured with a page size of 32kB. Bump pageinspect to 1.10. page_header() is able to handle the correct return type of those fields at runtime when using an older version of the extension, with some tests added to cover that.
Author: Quan Zongliang
Reviewed-by: Michael Paquier, Bharath Rupireddy
Discussion: https://postgr.es/m/8b8ec36e-61fe-14f9-005d-07bc85aa4eed@yeah.net

**2ed532ee8c** Improve error messages about mismatching relkind (5 years ago)
Most error messages about a relkind that was not supported or appropriate for the command were of the pattern "relation \"%s\" is not a table, foreign table, or materialized view". This style can become verbose and tedious to maintain. Moreover, it's not very helpful: If I'm trying to create a comment on a TOAST table, which is not supported, then the information that I could have created a comment on a materialized view is pointless. Instead, write the primary error message shorter and saying more directly that what was attempted is not possible. Then, in the detail message, explain that the operation is not supported for the relkind the object was. To simplify that, add a new function errdetail_relkind_not_supported() that does this. In passing, make use of RELKIND_HAS_STORAGE() where appropriate, instead of listing out the relkinds individually.
Reviewed-by: Michael Paquier <michael@paquier.xyz>
Reviewed-by: Alvaro Herrera <alvherre@alvh.no-ip.org>
Discussion: https://www.postgresql.org/message-id/flat/dc35a398-37d0-75ce-07ea-1dd71d98f8ec@2ndquadrant.com

**e5d8a99903** Use full 64-bit XIDs in deleted nbtree pages. (5 years ago)
Otherwise we risk "leaking" deleted pages by making them non-recyclable indefinitely. Commit …

**9e596b65f4** Add "LP_DEAD item?" column to GiST pageinspect functions (5 years ago)
This brings gist_page_items() and gist_page_items_bytea() in line with nbtree's bt_page_items() function. Minor follow-up to commit …

**c2dc1a7976** Disable vacuum page skipping in selected test cases. (5 years ago)
By default VACUUM will skip pages that it can't immediately get exclusive access to, which means that even activities as harmless and unpredictable as checkpoint buffer writes might prevent a page from being processed. Ordinarily this is no big deal, but we have a small number of test cases that examine the results of VACUUM's processing and therefore will fail if the page of interest is skipped. This seems to be the explanation for some rare buildfarm failures. To fix, add the DISABLE_PAGE_SKIPPING option to the VACUUM commands in tests where this could be an issue. In passing, remove a duplicated query in pageinspect/sql/page.sql. Back-patch as necessary (some of these cases are as old as v10).
Discussion: https://postgr.es/m/413923.1611006484@sss.pgh.pa.us
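
For reference, the VACUUM option this commit adds to the affected tests is used like this (table name hypothetical):

```sql
-- Force VACUUM to wait for and process every page rather than skipping
-- pages it cannot lock immediately, making FREEZE effects deterministic.
VACUUM (FREEZE, DISABLE_PAGE_SKIPPING) test_table;
```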

**f18aa1b203** pageinspect: Change block number arguments to bigint (5 years ago)
Block numbers are 32-bit unsigned integers. Therefore, the smallest SQL integer type that they can fit in is bigint. However, in the pageinspect module, most input and output parameters dealing with block numbers were declared as int. The behavior with block numbers larger than a signed 32-bit integer was therefore dubious. Change these arguments to type bigint and add some more explicit error checking on the block range. (Other contrib modules appear to do this correctly already.) Since we are changing argument types of existing functions, in order to not misbehave if the binary is updated before the extension is updated, we need to create new C symbols for the entry points, similar to how it's done in other extensions as well.
Reported-by: Ashutosh Bapat <ashutosh.bapat.oss@gmail.com>
Reviewed-by: Alvaro Herrera <alvherre@alvh.no-ip.org>
Reviewed-by: Michael Paquier <michael@paquier.xyz>
Discussion: https://www.postgresql.org/message-id/flat/d8f6bdd536df403b9b33816e9f7e0b9d@G08CNEXMBPEKD05.g08.fujitsu.local
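
A short illustration of the calling convention after this change, on a hypothetical table; get_raw_page() and related functions now take the block number as bigint and range-check it explicitly.

```sql
-- Inspect the header of block 0; block numbers up to 2^32-1 fit in the
-- bigint argument without any casting workarounds.
SELECT lsn, checksum, lower, upper, special, pagesize
FROM page_header(get_raw_page('test_table', 0));
```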

**5abca4b1cd** Fix test failure with wal_level=minimal. (5 years ago)
The newly-added gist pageinspect test prints the LSNs of GiST pages, expecting them all to be 1 (GistBuildLSN). But with wal_level=minimal, they got updated by the whole-relation WAL-logging at commit. Fix by wrapping the problematic tests in the same transaction with the CREATE INDEX. Per buildfarm failure on thorntail.
Discussion: https://www.postgresql.org/message-id/3B4F97E5-40FB-4142-8CAA-B301CDFBF982%40iki.fi

**6ecaaf810b** Fix portability issues in the new gist pageinspect test. (5 years ago)
1. The raw bytea representation of the point-type keys used in the test depends on endianness. Remove the raw key_data column from the test.
2. The items stored on the non-leftmost gist page depend on how many items fit on the other pages. This showed up as a failure on 32-bit i386 systems. To fix, only test the gist_page_items() function on the leftmost leaf page.
Per Andrey Borodin and the buildfarm.
Discussion: https://www.postgresql.org/message-id/9FCEC1DC-86FB-4A57-88EF-DD13663B36AF%40yandex-team.ru

**756ab29124** Add functions to 'pageinspect' to inspect GiST indexes. (5 years ago)
Author: Andrey Borodin and me
Discussion: https://www.postgresql.org/message-id/3E4F9093-A1B5-4DF8-A292-0B48692E3954%40yandex-team.ru
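
The GiST inspection functions added here are used roughly as below; the index name is hypothetical and the selected columns are an assumption based on the pageinspect documentation.

```sql
-- Page-level metadata (LSN, NSN, rightlink, flags) of the GiST root page.
SELECT * FROM gist_page_opaque_info(get_raw_page('test_gist_idx', 0));

-- Item-level contents of the same page, decoded through the index's opclasses.
SELECT itemoffset, ctid, itemlen, keys
FROM gist_page_items(get_raw_page('test_gist_idx', 0), 'test_gist_idx');
```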

**38ce06c37e** Add an explicit test to catch changes in checksumming calculations. (6 years ago)
Seems like a good idea in view of …

**93ee38eade** Teach pageinspect about nbtree deduplication. (6 years ago)
Add a new bt_metap() column to display the metapage's allequalimage field. Also add three new columns to contrib/pageinspect's bt_page_items() function:
* Add a boolean column ("dead") that displays the LP_DEAD bit value for each non-pivot tuple.
* Add a TID column ("htid") that displays a single heap TID value for each tuple. This is the TID that is returned by BTreeTupleGetHeapTID(), so comparable values are shown for pivot tuples, plain non-pivot tuples, and posting list tuples.
* Add a TID array column ("tids") that displays TIDs from each tuple's posting list, if any. This works just like the "tids" column from pageinspect's gin_leafpage_items() function.
No version bump for the pageinspect extension, since there hasn't been a stable Postgres release since the last version bump (the last bump was part of commit …)
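
A hedged sketch of querying the columns described above, on a hypothetical index:

```sql
-- dead = LP_DEAD bit, htid = single heap TID, tids = posting-list TIDs (if any).
SELECT itemoffset, ctid, dead, htid, tids
FROM bt_page_items('test_idx', 1);

-- The new metapage field added by the same commit.
SELECT allequalimage FROM bt_metap('test_idx');
```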

**58b4cb30a5** Redesign pageinspect function printing infomask bits (6 years ago)
After more discussion, the new function added by …

**ddbd5d8731** Add to pageinspect function to make t_infomask/t_infomask2 human-readable (6 years ago)
Flags of t_infomask and t_infomask2 for each tuple are already included in the information returned by heap_page_items as integers, and we lacked a way to make that information human-readable. Per discussion, the function includes an option which controls if combined flags should be decomposed or not. The default is false, to not decompose combined flags. The module is bumped to version 1.8.
Author: Craig Ringer, Sawada Masahiko
Reviewed-by: Peter Geoghegan, Robert Haas, Álvaro Herrera, Moon Insung, Amit Kapila, Michael Paquier, Tomas Vondra
Discussion: https://postgr.es/m/CAMsr+YEY7jeaXOb+oX+RhDyOFuTMdmHjGsBxL=igCm03J0go9Q@mail.gmail.com
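
In its current shape (after the follow-up redesign in 58b4cb30a5 above), the function is typically joined laterally with heap_page_items(); the table name below is hypothetical.

```sql
-- Decode the infomask bits of every tuple on block 0 into text flags.
SELECT t_ctid, raw_flags, combined_flags
FROM heap_page_items(get_raw_page('test_table', 0)),
     LATERAL heap_tuple_infomask_flags(t_infomask, t_infomask2)
WHERE t_infomask IS NOT NULL;
```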

**7db0cde6b5** Revert "Avoid the creation of the free space map for small heap relations". (7 years ago)
This feature was using a process local map to track the first few blocks in the relation. The map was reset each time we get the block with enough freespace. It was discussed that it would be better to track this map on a per-relation basis in relcache and then invalidate the same whenever vacuum frees up some space in the page or when FSM is created. The new design would be better both in terms of API design and performance. List of commits reverted, in reverse chronological order: …

**dd299df818** Make heap TID a tiebreaker nbtree index column. (7 years ago)
Make nbtree treat all index tuples as having a heap TID attribute. Index searches can distinguish duplicates by heap TID, since heap TID is always guaranteed to be unique. This general approach has numerous benefits for performance, and is prerequisite to teaching VACUUM to perform "retail index tuple deletion". Naively adding a new attribute to every pivot tuple has unacceptable overhead (it bloats internal pages), so suffix truncation of pivot tuples is added. This will usually truncate away the "extra" heap TID attribute from pivot tuples during a leaf page split, and may also truncate away additional user attributes. This can increase fan-out, especially in a multi-column index. Truncation can only occur at the attribute granularity, which isn't particularly effective, but works well enough for now. A future patch may add support for truncating "within" text attributes by generating truncated key values using new opclass infrastructure. Only new indexes (BTREE_VERSION 4 indexes) will have insertions that treat heap TID as a tiebreaker attribute, or will have pivot tuples undergo suffix truncation during a leaf page split (on-disk compatibility with versions 2 and 3 is preserved). Upgrades to version 4 cannot be performed on-the-fly, unlike upgrades from version 2 to version 3. contrib/amcheck continues to work with version 2 and 3 indexes, while also enforcing stricter invariants when verifying version 4 indexes. These stricter invariants are the same invariants described by "3.1.12 Sequencing" from the Lehman and Yao paper. A later patch will enhance the logic used by nbtree to pick a split point. This patch is likely to negatively impact performance without smarter choices around the precise point to split leaf pages at. Making these two mostly-distinct sets of enhancements into distinct commits seems like it might clarify their design, even though neither commit is particularly useful on its own. The maximum allowed size of new tuples is reduced by an amount equal to the space required to store an extra MAXALIGN()'d TID in a new high key during leaf page splits. The user-facing definition of the "1/3 of a page" restriction is already imprecise, and so does not need to be revised. However, there should be a compatibility note in the v12 release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com

**08ecdfe7e5** Make FSM test portable. (7 years ago)
In …

**b0eaa4c51b** Avoid creation of the free space map for small heap relations, take 2. (7 years ago)
Previously, all heaps had FSMs. For very small tables, this means that the FSM took up more space than the heap did. This is wasteful, so now we refrain from creating the FSM for heaps with 4 pages or fewer. If the last known target block has insufficient space, we still try to insert into some other page before giving up and extending the relation, since doing otherwise leads to table bloat. Testing showed that trying every page penalized performance slightly, so we compromise and try every other page. This way, we visit at most two pages. Any pages with wasted free space become visible at next relation extension, so we still control table bloat. As a bonus, directly attempting one or two pages can even be faster than consulting the FSM would have been. Once the FSM is created for a heap we don't remove it even if somebody deletes all the rows from the corresponding relation. We don't think it is a useful optimization as it is quite likely that relation will again grow to the same size.
Author: John Naylor, Amit Kapila
Reviewed-by: Amit Kapila
Tested-by: Mithun C Y
Discussion: https://www.postgresql.org/message-id/CAJVSVGWvB13PzpbLEecFuGFc5V2fsO736BsdTakPiPAcdMM5tQ@mail.gmail.com

**a23676503b** Revert "Avoid creation of the free space map for small heap relations." (7 years ago)
This reverts commit …

**ac88d2962a** Avoid creation of the free space map for small heap relations. (7 years ago)
Previously, all heaps had FSMs. For very small tables, this means that the FSM took up more space than the heap did. This is wasteful, so now we refrain from creating the FSM for heaps with 4 pages or fewer. If the last known target block has insufficient space, we still try to insert into some other page before giving up and extending the relation, since doing otherwise leads to table bloat. Testing showed that trying every page penalized performance slightly, so we compromise and try every other page. This way, we visit at most two pages. Any pages with wasted free space become visible at next relation extension, so we still control table bloat. As a bonus, directly attempting one or two pages can even be faster than consulting the FSM would have been. Once the FSM is created for a heap we don't remove it even if somebody deletes all the rows from the corresponding relation. We don't think it is a useful optimization as it is quite likely that relation will again grow to the same size.
Author: John Naylor with design inputs and some code contribution by Amit Kapila
Reviewed-by: Amit Kapila
Tested-by: Mithun C Y
Discussion: https://www.postgresql.org/message-id/CAJVSVGWvB13PzpbLEecFuGFc5V2fsO736BsdTakPiPAcdMM5tQ@mail.gmail.com

**bef5fcc36b** pgstatindex, pageinspect: handle partitioned indexes (8 years ago)
Commit …

**857f9c36cd** Skip full index scan during cleanup of B-tree indexes when possible (8 years ago)
Vacuum of an index consists of two stages: multiple (zero or more) ambulkdelete calls and one amvacuumcleanup call. When workload on a particular table is append-only, then autovacuum isn't intended to touch this table. However, a user may run vacuum manually in order to fill the visibility map and get the benefits of index-only scans. Then ambulkdelete wouldn't be called for indexes of such a table (because no heap tuples were deleted), only amvacuumcleanup would be called. In this case, amvacuumcleanup would perform a full index scan for two objectives: put recyclable pages into the free space map and update index statistics. This patch allows btvacuumcleanup to skip the full index scan when two conditions are satisfied: no pages are going to be put into the free space map and index statistics isn't stalled. In order to check the first condition, we store the oldest btpo_xact in the meta-page. When it precedes RecentGlobalXmin, then there are some recyclable pages. In order to check the second condition we store the number of heap tuples observed during the previous full index scan by cleanup. If the fraction of newly inserted tuples is less than vacuum_cleanup_index_scale_factor, then statistics isn't considered to be stalled. vacuum_cleanup_index_scale_factor can be defined as both a reloption and a GUC (default). This patch bumps the B-tree meta-page version. Upgrade of the meta-page is performed "on the fly": during VACUUM the meta-page is rewritten with the new version. No special handling in pg_upgrade is required.
Author: Masahiko Sawada, Alexander Korotkov
Review by: Peter Geoghegan, Kyotaro Horiguchi, Alexander Korotkov, Yura Sokolov
Discussion: https://www.postgresql.org/message-id/flat/CAD21AoAX+d2oD_nrd9O2YkpzHaFr=uQeGr9s1rKC3O4ENc568g@mail.gmail.com

**18869e202b** Fix new test case to not be endian-dependent. (8 years ago)
Per buildfarm.
Discussion: https://postgr.es/m/ec295792-a69f-350f-6287-25a20e8f31d5@gmail.com

**39cfe86195** Fix incorrect computations of length of null bitmap in pageinspect. (8 years ago)
Instead of using our standard macro for this calculation, this code did it itself ... and got it wrong, leading to incorrect display of the null bitmap in some cases. Noted and fixed by Maksim Milyutin. In passing, remove a uselessly duplicative error check. Errors were introduced in commit d6061f83a; back-patch to 9.6 where that came in. Maksim Milyutin, reviewed by Andrey Borodin
Discussion: https://postgr.es/m/ec295792-a69f-350f-6287-25a20e8f31d5@gmail.com

**620b49a16d** hash: Increase the number of possible overflow bitmaps by 8x. (9 years ago)
Per a report from AP, it's not that hard to exhaust the supply of bitmap pages if you create a table with a hash index and then insert a few billion rows - and then you start getting errors when you try to insert additional rows. In the particular case reported by AP, there's another fix that we can make to improve recycling of overflow pages, which is another way to avoid the error, but there may be other cases where this problem happens and that fix won't help. So let's buy ourselves as much headroom as we can without rearchitecting anything. The comments claim that the old limit was 64GB, but it was really only 32GB, because we didn't use all the bits in the page for bitmap bits - only the largest power of 2 that could fit after deducting space for the page header and so forth. Thus, we have 4kB per page for bitmap bits, not 8kB. The new limit is thus actually 8 times the old *real* limit but only 4 times the old *purported* limit. Since this breaks on-disk compatibility, bump HASH_VERSION. We've already done this earlier in this release cycle, so this doesn't cause any incremental inconvenience for people using pg_upgrade from releases prior to v10. However, users who use pg_upgrade to reach 10beta3 or later from 10beta2 or earlier will need to REINDEX any hash indexes again. Amit Kapila and Robert Haas
Discussion: http://postgr.es/m/20170704105728.mwb72jebfmok2nm2@zip.com.au

**193f5f9e91** pageinspect: Add bt_page_items function with bytea argument (9 years ago)
Author: Tomas Vondra <tomas.vondra@2ndquadrant.com>
Reviewed-by: Ashutosh Sharma <ashu.coek88@gmail.com>
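
The bytea variant accepts a raw page image rather than opening the relation itself; a sketch on a hypothetical index:

```sql
-- Equivalent to bt_page_items('test_idx', 1), but driven by a page image,
-- which also works on a copy of the page saved outside the server.
SELECT itemoffset, ctid, itemlen, data
FROM bt_page_items(get_raw_page('test_idx', 1));
```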

**ea69a0dead** Expand hash indexes more gradually. (9 years ago)
Since hash indexes typically have very few overflow pages, adding a new splitpoint essentially doubles the on-disk size of the index, which can lead to large and abrupt increases in disk usage (and perhaps long delays on occasion). To mitigate this problem to some degree, divide larger splitpoints into four equal phases. This means that, for example, instead of growing from 4GB to 8GB all at once, a hash index will now grow from 4GB to 5GB to 6GB to 7GB to 8GB, which is perhaps still not as smooth as we'd like but certainly an improvement. This changes the on-disk format of the metapage, so bump HASH_VERSION from 2 to 3. This will force a REINDEX of all existing hash indexes, but that's probably a good idea anyway. First, hash indexes from pre-10 versions of PostgreSQL could easily be corrupted, and we don't want to confuse corruption carried over from an older release with any corruption caused despite the new write-ahead logging in v10. Second, it will let us remove some backward-compatibility code added by commit …

**fef2bcdcba** pageinspect: Add page_checksum function (9 years ago)
Author: Tomas Vondra <tomas.vondra@2ndquadrant.com>
Reviewed-by: Ashutosh Sharma <ashu.coek88@gmail.com>
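
A hedged example of the new function, comparing the checksum stored in a page header with a freshly computed one; the table name is hypothetical and the comparison is only meaningful on clusters initialized with data checksums.

```sql
-- The second argument is the block number the page is supposed to occupy,
-- since the checksum calculation mixes in the block number.
SELECT ph.checksum AS stored,
       page_checksum(get_raw_page('test_table', 0), 0) AS computed
FROM page_header(get_raw_page('test_table', 0)) AS ph;
```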

**a02731cb10** pageinspect: Add test for page_header function (9 years ago)

**c11453ce0a** hash: Add write-ahead logging support. (9 years ago)
The warning about hash indexes not being write-ahead logged and their use being discouraged has been removed. "snapshot too old" is now supported for tables with hash indexes. Most importantly, barring bugs, hash indexes will now be crash-safe and usable on standbys. This commit doesn't yet add WAL consistency checking for hash indexes, as we now have for other index types; a separate patch has been submitted to cure that lack. Amit Kapila, reviewed and slightly modified by me. The larger patch series of which this is a part has been reviewed and tested by Álvaro Herrera, Ashutosh Sharma, Mark Kirkwood, Jeff Janes, and Jesper Pedersen.
Discussion: http://postgr.es/m/CAA4eK1JOBX=YU33631Qh-XivYXtPSALh514+jR8XeD7v+K3r_Q@mail.gmail.com