mirror of https://github.com/postgres/postgres
Tree: 0c67dbcc4e

Branches:
REL2_0B
REL6_4
REL6_5_PATCHES
REL7_0_PATCHES
REL7_1_STABLE
REL7_2_STABLE
REL7_3_STABLE
REL7_4_STABLE
REL8_0_STABLE
REL8_1_STABLE
REL8_2_STABLE
REL8_3_STABLE
REL8_4_STABLE
REL8_5_ALPHA1_BRANCH
REL8_5_ALPHA2_BRANCH
REL8_5_ALPHA3_BRANCH
REL9_0_ALPHA4_BRANCH
REL9_0_ALPHA5_BRANCH
REL9_0_STABLE
REL9_1_STABLE
REL9_2_STABLE
REL9_3_STABLE
REL9_4_STABLE
REL9_5_STABLE
REL9_6_STABLE
REL_10_STABLE
REL_11_STABLE
REL_12_STABLE
REL_13_STABLE
REL_14_STABLE
REL_15_STABLE
REL_16_STABLE
REL_17_STABLE
REL_18_STABLE
Release_1_0_3
WIN32_DEV
ecpg_big_bison
master

Tags:

PG95-1_01
PG95-1_08
PG95-1_09
REL2_0
REL6_1
REL6_1_1
REL6_2
REL6_2_1
REL6_3
REL6_3_2
REL6_4_2
REL6_5
REL6_5_1
REL6_5_2
REL6_5_3
REL7_0
REL7_0_2
REL7_0_3
REL7_1
REL7_1_1
REL7_1_2
REL7_1_3
REL7_1_BETA
REL7_1_BETA2
REL7_1_BETA3
REL7_2
REL7_2_1
REL7_2_2
REL7_2_3
REL7_2_4
REL7_2_5
REL7_2_6
REL7_2_7
REL7_2_8
REL7_2_BETA1
REL7_2_BETA2
REL7_2_BETA3
REL7_2_BETA4
REL7_2_BETA5
REL7_2_RC1
REL7_2_RC2
REL7_3
REL7_3_1
REL7_3_10
REL7_3_11
REL7_3_12
REL7_3_13
REL7_3_14
REL7_3_15
REL7_3_16
REL7_3_17
REL7_3_18
REL7_3_19
REL7_3_2
REL7_3_20
REL7_3_21
REL7_3_3
REL7_3_4
REL7_3_5
REL7_3_6
REL7_3_7
REL7_3_8
REL7_3_9
REL7_4
REL7_4_1
REL7_4_10
REL7_4_11
REL7_4_12
REL7_4_13
REL7_4_14
REL7_4_15
REL7_4_16
REL7_4_17
REL7_4_18
REL7_4_19
REL7_4_2
REL7_4_20
REL7_4_21
REL7_4_22
REL7_4_23
REL7_4_24
REL7_4_25
REL7_4_26
REL7_4_27
REL7_4_28
REL7_4_29
REL7_4_3
REL7_4_30
REL7_4_4
REL7_4_5
REL7_4_6
REL7_4_7
REL7_4_8
REL7_4_9
REL7_4_BETA1
REL7_4_BETA2
REL7_4_BETA3
REL7_4_BETA4
REL7_4_BETA5
REL7_4_RC1
REL7_4_RC2
REL8_0_0
REL8_0_0BETA1
REL8_0_0BETA2
REL8_0_0BETA3
REL8_0_0BETA4
REL8_0_0BETA5
REL8_0_0RC1
REL8_0_0RC2
REL8_0_0RC3
REL8_0_0RC4
REL8_0_0RC5
REL8_0_1
REL8_0_10
REL8_0_11
REL8_0_12
REL8_0_13
REL8_0_14
REL8_0_15
REL8_0_16
REL8_0_17
REL8_0_18
REL8_0_19
REL8_0_2
REL8_0_20
REL8_0_21
REL8_0_22
REL8_0_23
REL8_0_24
REL8_0_25
REL8_0_26
REL8_0_3
REL8_0_4
REL8_0_5
REL8_0_6
REL8_0_7
REL8_0_8
REL8_0_9
REL8_1_0
REL8_1_0BETA1
REL8_1_0BETA2
REL8_1_0BETA3
REL8_1_0BETA4
REL8_1_0RC1
REL8_1_1
REL8_1_10
REL8_1_11
REL8_1_12
REL8_1_13
REL8_1_14
REL8_1_15
REL8_1_16
REL8_1_17
REL8_1_18
REL8_1_19
REL8_1_2
REL8_1_20
REL8_1_21
REL8_1_22
REL8_1_23
REL8_1_3
REL8_1_4
REL8_1_5
REL8_1_6
REL8_1_7
REL8_1_8
REL8_1_9
REL8_2_0
REL8_2_1
REL8_2_10
REL8_2_11
REL8_2_12
REL8_2_13
REL8_2_14
REL8_2_15
REL8_2_16
REL8_2_17
REL8_2_18
REL8_2_19
REL8_2_2
REL8_2_20
REL8_2_21
REL8_2_22
REL8_2_23
REL8_2_3
REL8_2_4
REL8_2_5
REL8_2_6
REL8_2_7
REL8_2_8
REL8_2_9
REL8_2_BETA1
REL8_2_BETA2
REL8_2_BETA3
REL8_2_RC1
REL8_3_0
REL8_3_1
REL8_3_10
REL8_3_11
REL8_3_12
REL8_3_13
REL8_3_14
REL8_3_15
REL8_3_16
REL8_3_17
REL8_3_18
REL8_3_19
REL8_3_2
REL8_3_20
REL8_3_21
REL8_3_22
REL8_3_23
REL8_3_3
REL8_3_4
REL8_3_5
REL8_3_6
REL8_3_7
REL8_3_8
REL8_3_9
REL8_3_BETA1
REL8_3_BETA2
REL8_3_BETA3
REL8_3_BETA4
REL8_3_RC1
REL8_3_RC2
REL8_4_0
REL8_4_1
REL8_4_10
REL8_4_11
REL8_4_12
REL8_4_13
REL8_4_14
REL8_4_15
REL8_4_16
REL8_4_17
REL8_4_18
REL8_4_19
REL8_4_2
REL8_4_20
REL8_4_21
REL8_4_22
REL8_4_3
REL8_4_4
REL8_4_5
REL8_4_6
REL8_4_7
REL8_4_8
REL8_4_9
REL8_4_BETA1
REL8_4_BETA2
REL8_4_RC1
REL8_4_RC2
REL8_5_ALPHA1
REL8_5_ALPHA2
REL8_5_ALPHA3
REL9_0_0
REL9_0_1
REL9_0_10
REL9_0_11
REL9_0_12
REL9_0_13
REL9_0_14
REL9_0_15
REL9_0_16
REL9_0_17
REL9_0_18
REL9_0_19
REL9_0_2
REL9_0_20
REL9_0_21
REL9_0_22
REL9_0_23
REL9_0_3
REL9_0_4
REL9_0_5
REL9_0_6
REL9_0_7
REL9_0_8
REL9_0_9
REL9_0_ALPHA4
REL9_0_ALPHA5
REL9_0_BETA1
REL9_0_BETA2
REL9_0_BETA3
REL9_0_BETA4
REL9_0_RC1
REL9_1_0
REL9_1_1
REL9_1_10
REL9_1_11
REL9_1_12
REL9_1_13
REL9_1_14
REL9_1_15
REL9_1_16
REL9_1_17
REL9_1_18
REL9_1_19
REL9_1_2
REL9_1_20
REL9_1_21
REL9_1_22
REL9_1_23
REL9_1_24
REL9_1_3
REL9_1_4
REL9_1_5
REL9_1_6
REL9_1_7
REL9_1_8
REL9_1_9
REL9_1_ALPHA1
REL9_1_ALPHA2
REL9_1_ALPHA3
REL9_1_ALPHA4
REL9_1_ALPHA5
REL9_1_BETA1
REL9_1_BETA2
REL9_1_BETA3
REL9_1_RC1
REL9_2_0
REL9_2_1
REL9_2_10
REL9_2_11
REL9_2_12
REL9_2_13
REL9_2_14
REL9_2_15
REL9_2_16
REL9_2_17
REL9_2_18
REL9_2_19
REL9_2_2
REL9_2_20
REL9_2_21
REL9_2_22
REL9_2_23
REL9_2_24
REL9_2_3
REL9_2_4
REL9_2_5
REL9_2_6
REL9_2_7
REL9_2_8
REL9_2_9
REL9_2_BETA1
REL9_2_BETA2
REL9_2_BETA3
REL9_2_BETA4
REL9_2_RC1
REL9_3_0
REL9_3_1
REL9_3_10
REL9_3_11
REL9_3_12
REL9_3_13
REL9_3_14
REL9_3_15
REL9_3_16
REL9_3_17
REL9_3_18
REL9_3_19
REL9_3_2
REL9_3_20
REL9_3_21
REL9_3_22
REL9_3_23
REL9_3_24
REL9_3_25
REL9_3_3
REL9_3_4
REL9_3_5
REL9_3_6
REL9_3_7
REL9_3_8
REL9_3_9
REL9_3_BETA1
REL9_3_BETA2
REL9_3_RC1
REL9_4_0
REL9_4_1
REL9_4_10
REL9_4_11
REL9_4_12
REL9_4_13
REL9_4_14
REL9_4_15
REL9_4_16
REL9_4_17
REL9_4_18
REL9_4_19
REL9_4_2
REL9_4_20
REL9_4_21
REL9_4_22
REL9_4_23
REL9_4_24
REL9_4_25
REL9_4_26
REL9_4_3
REL9_4_4
REL9_4_5
REL9_4_6
REL9_4_7
REL9_4_8
REL9_4_9
REL9_4_BETA1
REL9_4_BETA2
REL9_4_BETA3
REL9_4_RC1
REL9_5_0
REL9_5_1
REL9_5_10
REL9_5_11
REL9_5_12
REL9_5_13
REL9_5_14
REL9_5_15
REL9_5_16
REL9_5_17
REL9_5_18
REL9_5_19
REL9_5_2
REL9_5_20
REL9_5_21
REL9_5_22
REL9_5_23
REL9_5_24
REL9_5_25
REL9_5_3
REL9_5_4
REL9_5_5
REL9_5_6
REL9_5_7
REL9_5_8
REL9_5_9
REL9_5_ALPHA1
REL9_5_ALPHA2
REL9_5_BETA1
REL9_5_BETA2
REL9_5_RC1
REL9_6_0
REL9_6_1
REL9_6_10
REL9_6_11
REL9_6_12
REL9_6_13
REL9_6_14
REL9_6_15
REL9_6_16
REL9_6_17
REL9_6_18
REL9_6_19
REL9_6_2
REL9_6_20
REL9_6_21
REL9_6_22
REL9_6_23
REL9_6_24
REL9_6_3
REL9_6_4
REL9_6_5
REL9_6_6
REL9_6_7
REL9_6_8
REL9_6_9
REL9_6_BETA1
REL9_6_BETA2
REL9_6_BETA3
REL9_6_BETA4
REL9_6_RC1
REL_10_0
REL_10_1
REL_10_10
REL_10_11
REL_10_12
REL_10_13
REL_10_14
REL_10_15
REL_10_16
REL_10_17
REL_10_18
REL_10_19
REL_10_2
REL_10_20
REL_10_21
REL_10_22
REL_10_23
REL_10_3
REL_10_4
REL_10_5
REL_10_6
REL_10_7
REL_10_8
REL_10_9
REL_10_BETA1
REL_10_BETA2
REL_10_BETA3
REL_10_BETA4
REL_10_RC1
REL_11_0
REL_11_1
REL_11_10
REL_11_11
REL_11_12
REL_11_13
REL_11_14
REL_11_15
REL_11_16
REL_11_17
REL_11_18
REL_11_19
REL_11_2
REL_11_20
REL_11_21
REL_11_22
REL_11_3
REL_11_4
REL_11_5
REL_11_6
REL_11_7
REL_11_8
REL_11_9
REL_11_BETA1
REL_11_BETA2
REL_11_BETA3
REL_11_BETA4
REL_11_RC1
REL_12_0
REL_12_1
REL_12_10
REL_12_11
REL_12_12
REL_12_13
REL_12_14
REL_12_15
REL_12_16
REL_12_17
REL_12_18
REL_12_19
REL_12_2
REL_12_20
REL_12_21
REL_12_22
REL_12_3
REL_12_4
REL_12_5
REL_12_6
REL_12_7
REL_12_8
REL_12_9
REL_12_BETA1
REL_12_BETA2
REL_12_BETA3
REL_12_BETA4
REL_12_RC1
REL_13_0
REL_13_1
REL_13_10
REL_13_11
REL_13_12
REL_13_13
REL_13_14
REL_13_15
REL_13_16
REL_13_17
REL_13_18
REL_13_19
REL_13_2
REL_13_20
REL_13_21
REL_13_22
REL_13_23
REL_13_3
REL_13_4
REL_13_5
REL_13_6
REL_13_7
REL_13_8
REL_13_9
REL_13_BETA1
REL_13_BETA2
REL_13_BETA3
REL_13_RC1
REL_14_0
REL_14_1
REL_14_10
REL_14_11
REL_14_12
REL_14_13
REL_14_14
REL_14_15
REL_14_16
REL_14_17
REL_14_18
REL_14_19
REL_14_2
REL_14_20
REL_14_3
REL_14_4
REL_14_5
REL_14_6
REL_14_7
REL_14_8
REL_14_9
REL_14_BETA1
REL_14_BETA2
REL_14_BETA3
REL_14_RC1
REL_15_0
REL_15_1
REL_15_10
REL_15_11
REL_15_12
REL_15_13
REL_15_14
REL_15_15
REL_15_2
REL_15_3
REL_15_4
REL_15_5
REL_15_6
REL_15_7
REL_15_8
REL_15_9
REL_15_BETA1
REL_15_BETA2
REL_15_BETA3
REL_15_BETA4
REL_15_RC1
REL_15_RC2
REL_16_0
REL_16_1
REL_16_10
REL_16_11
REL_16_2
REL_16_3
REL_16_4
REL_16_5
REL_16_6
REL_16_7
REL_16_8
REL_16_9
REL_16_BETA1
REL_16_BETA2
REL_16_BETA3
REL_16_RC1
REL_17_0
REL_17_1
REL_17_2
REL_17_3
REL_17_4
REL_17_5
REL_17_6
REL_17_7
REL_17_BETA1
REL_17_BETA2
REL_17_BETA3
REL_17_RC1
REL_18_0
REL_18_1
REL_18_BETA1
REL_18_BETA2
REL_18_BETA3
REL_18_RC1
Release_1_0_2
Release_2_0
Release_2_0_0
release-6-3
53803 Commits (0c67dbcc4e39e160bc5c9d7db3b3f011292d5a85)

7fb8c92404
Fix resource leaks in PL/Python error reporting, redux.
Commit
2 months ago

10799d025f
Add comments explaining overflow entries in the replication lag tracker.
Commit
2 months ago

c4a6444a65
Add copyright notice to vacuum_horizon_floor.pl test.
Fix oversight in commit
2 months ago

e4316ec4dd
Fix incorrect zero extension of Datum in JIT tuple deform code
When JIT deformed tuples (controlled via the jit_tuple_deforming GUC), types narrower than sizeof(Datum) would be zero-extended up to Datum width. This wasn't the same as what fetch_att() does in the standard tuple deforming code. Logically the values are the same when fetching via the DatumGet*() marcos, but negative numbers are not the same in binary form. In the report, the problem was manifesting itself with: ERROR: could not find memoization table entry in a query which had a "Cache Mode: binary" Memoize node. However, it's currently unclear what else is affected. Anything that uses datum_image_eq() or datum_image_hash() on a Datum from a tuple deformed by JIT could be affected, but it may not be limited to that. The fix for this is simple: use signed extension instead of zero extension. Many thanks to Emmanuel Touzery for reporting this issue and providing steps and backup which allowed the problem to easily be recreated. Reported-by: Emmanuel Touzery <emmanuel.touzery@plandela.si> Author: David Rowley <dgrowleyml@gmail.com> Discussion: https://postgr.es/m/DB8P194MB08532256D5BAF894F241C06393F3A@DB8P194MB0853.EURP194.PROD.OUTLOOK.COM Backpatch-through: 13 |
2 months ago
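The extension issue described above is easy to reproduce outside the server. A minimal standalone C sketch (illustrative only; `Datum` is stood in by a 64-bit integer, and this is not the fetch_att()/JIT code itself):

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t Datum;			/* stand-in for PostgreSQL's pointer-sized Datum */

int
main(void)
{
	int16_t		attval = -1;	/* a narrow, signed attribute value */

	/* What the JIT-deformed path effectively did: zero extension. */
	Datum		zero_ext = (Datum) (uint16_t) attval;	/* 0x000000000000FFFF */

	/* What fetch_att()-style deforming does: sign extension. */
	Datum		sign_ext = (Datum) (int64_t) attval;	/* 0xFFFFFFFFFFFFFFFF */

	/* Fetching through a DatumGetInt16()-style truncation hides the difference... */
	printf("as int16: %d vs %d\n", (int16_t) zero_ext, (int16_t) sign_ext);

	/* ...but a binary-image comparison of the full Datum, as done by
	 * datum_image_eq()/datum_image_hash(), does not match. */
	printf("binary equal: %s\n", zero_ext == sign_ext ? "yes" : "no");
	return 0;
}
```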

f106f53df6
Make invalid primary_slot_name follow standard GUC error reporting.
Previously, if primary_slot_name was set to an invalid slot name and the configuration file was reloaded, both the postmaster and all other backend processes reported a WARNING. With many processes running, this could produce a flood of duplicate messages. The problem was that the GUC check hook for primary_slot_name reported errors at WARNING level via ereport(). This commit changes the check hook to use GUC_check_errdetail() and GUC_check_errhint() for error reporting. As with other GUC parameters, this causes non-postmaster processes to log the message at DEBUG3, so by default, only the postmaster's message appears in the log file. Backpatch to all supported versions. Author: Fujii Masao <masao.fujii@gmail.com> Reviewed-by: Chao Li <lic@highgo.com> Reviewed-by: Amit Kapila <amit.kapila16@gmail.com> Reviewed-by: Álvaro Herrera <alvherre@kurilemu.de> Reviewed-by: Hayato Kuroda <kuroda.hayato@fujitsu.com> Discussion: https://postgr.es/m/CAHGQGwFud-cvthCTfusBfKHBS6Jj6kdAPTdLWKvP2qjUX6L_wA@mail.gmail.com Backpatch-through: 13 |
2 months ago
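For context, GUC check hooks report problems through GUC_check_errdetail()/GUC_check_errhint() rather than calling ereport() themselves; the GUC machinery then chooses the log level per process. A hedged sketch with a hypothetical hook and validation rule (not the actual primary_slot_name code):

```c
#include "postgres.h"

#include "utils/guc.h"

/* Hypothetical check hook: reject over-long slot names. */
static bool
check_my_slot_name(char **newval, void **extra, GucSource source)
{
	if (*newval == NULL || **newval == '\0')
		return true;			/* empty means "no slot"; always acceptable */

	if (strlen(*newval) >= NAMEDATALEN)
	{
		/* Recorded for the GUC machinery to report; no ereport(WARNING) here. */
		GUC_check_errdetail("Replication slot name \"%s\" is too long.", *newval);
		GUC_check_errhint("Slot names must be shorter than %d characters.",
						  NAMEDATALEN);
		return false;
	}
	return true;
}
```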

a7ab6ce6a0
Fix stalled lag columns in pg_stat_replication when replay LSN stops advancing.
Previously, when the replay LSN reported in feedback messages from a standby stopped advancing, for example, due to a recovery conflict, the write_lag and flush_lag columns in pg_stat_replication would initially update but then stop progressing. This prevented users from correctly monitoring replication lag. The problem occurred because when any LSN stopped updating, the lag tracker's cyclic buffer became full (the write head reached the slowest read head). In that state, the lag tracker could no longer compute round-trip lag values correctly. This commit fixes the issue by handling the slowest read entry (the one causing the buffer to fill) as a separate overflow entry and freeing space so the write and other read heads can continue advancing in the buffer. As a result, write_lag and flush_lag now continue updating even if the reported replay LSN remains stalled. Backpatch to all supported versions. Author: Fujii Masao <masao.fujii@gmail.com> Reviewed-by: Chao Li <lic@highgo.com> Reviewed-by: Shinya Kato <shinya11.kato@gmail.com> Reviewed-by: Xuneng Zhou <xunengzhou@gmail.com> Discussion: https://postgr.es/m/CAHGQGwGdGQ=1-X-71Caee-LREBUXSzyohkoQJd4yZZCMt24C0g@mail.gmail.com Backpatch-through: 13 |
2 months ago

58ba7e5ebf
Add .abi-compliance-history to back-branches.
This file was previously added to v18 by commits
2 months ago

0a358eb45e
Add previous commit to .git-blame-ignore-revs.
Backpatch-through: 13
2 months ago

e95010a9c0
Re-pgindent brin.c.
Backpatch-through: 13
2 months ago

eea24eb0ac
Fix BRIN 32-bit counter wrap issue with huge tables
A BlockNumber (32-bit) might not be large enough to add bo_pagesPerRange to when the table contains close to 2^32 pages. At worst, this could result in a cancellable infinite loop during the BRIN index scan with power-of-2 pagesPerRange, and slow (inefficient) BRIN index scans and scanning of unneeded heap blocks for non power-of-2 pagesPerRange. Backpatch to all supported versions. Author: sunil s <sunilfeb26@gmail.com> Reviewed-by: David Rowley <dgrowleyml@gmail.com> Reviewed-by: Michael Paquier <michael@paquier.xyz> Discussion: https://postgr.es/m/CAOG6S4-tGksTQhVzJM19NzLYAHusXsK2HmADPZzGQcfZABsvpA@mail.gmail.com Backpatch-through: 13 |
2 months ago
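The wraparound hazard is ordinary 32-bit unsigned overflow, and one common guard is to do the addition in 64-bit arithmetic. A standalone sketch with illustrative numbers (not the actual brin.c code):

```c
#include <stdint.h>
#include <stdio.h>

typedef uint32_t BlockNumber;	/* PostgreSQL block numbers are 32-bit */

int
main(void)
{
	BlockNumber nblocks = 4294967280u;	/* a table close to 2^32 pages */
	BlockNumber pagesPerRange = 128;	/* power-of-2 pages per BRIN range */
	BlockNumber blkno = 4294967168u;	/* start of the last range (2^32 - 128) */

	/* 32-bit addition wraps silently: the "next" block becomes 0, so a naive
	 * loop starts over from the beginning of the table instead of stopping. */
	printf("wrapped next block: %u\n", blkno + pagesPerRange);

	/* Widening to 64 bits keeps the true value, so the end condition works. */
	uint64_t	next = (uint64_t) blkno + pagesPerRange;

	if (next >= nblocks)
		printf("scan finished\n");
	return 0;
}
```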

5274f11c17
Fix POSIX compliance in pgwin32_unsetenv() for "name" argument
pgwin32_unsetenv() (compatibility routine of unsetenv() on Windows)
lacks the input validation that its sibling pgwin32_setenv() has.
Without these checks, calling unsetenv() with incorrect names crashes on
WIN32. However, invalid names should be handled, failing on EINVAL.
This commit adds the same checks as setenv() to fail with EINVAL for a
"name" set to NULL, an empty string, or if '=' is included in the value,
per POSIX requirements.
Like
2 months ago
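Per POSIX, unsetenv() must fail with EINVAL when the name is null, empty, or contains '='. A minimal sketch of that validation as a plain C helper (the function name is made up; the real fix lives in PostgreSQL's Windows compatibility layer):

```c
#include <errno.h>
#include <string.h>

/* Illustrative only: validate "name" before trying to remove it. */
static int
unsetenv_name_ok(const char *name)
{
	if (name == NULL || name[0] == '\0' || strchr(name, '=') != NULL)
	{
		errno = EINVAL;
		return 0;				/* caller fails with -1 and EINVAL set */
	}
	return 1;					/* caller proceeds to remove the variable */
}
```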

40dc710a61
Don't rely on zlib's gzgetc() macro.
It emerges that zlib's configuration logic is not robust enough
to guarantee that the macro will have the same ideas about struct
field layout as the library itself does, leading to corruption of
zlib's state struct followed by unintelligible failure messages.
This hazard has existed for a long time, but we'd not noticed
for several reasons:
(1) We only use gzgetc() when trying to read a manually-compressed
TOC file within a directory-format dump, which is a rarely-used
scenario that we weren't even testing before
2 months ago
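One generic C technique for avoiding a function-like macro such as zlib's gzgetc() is to parenthesize the name, which suppresses macro expansion and calls the exported function instead, so the caller never touches zlib's internal struct layout. A hedged illustration (not necessarily how the pg_dump fix is written):

```c
#include <zlib.h>

/* Read one byte from a gzipped file without going through the gzgetc()
 * macro: the name in (gzgetc) is not immediately followed by '(', so the
 * function-like macro does not expand and the out-of-line library
 * function is called instead. */
static int
read_one_byte(gzFile gz)
{
	return (gzgetc)(gz);
}
```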

6062c3db30
Fix pg_dump sorting of foreign key constraints
Apparently, commit
2 months ago

f146eb45cb
Fix privilege checks for pg_prewarm() on indexes.
pg_prewarm() currently checks for SELECT privileges on the target relation. However, indexes do not have access rights of their own, so a role may be denied permission to prewarm an index despite having the SELECT privilege on its parent table. This commit fixes this by locking the parent table before the index (to avoid deadlocks) and checking for SELECT on the parent table. Note that the code is largely borrowed from amcheck_lock_relation_and_check(). An obvious downside of this change is the extra AccessShareLock on the parent table during prewarming, but that isn't expected to cause too much trouble in practice. Author: Ayush Vatsa <ayushvatsa1810@gmail.com> Co-authored-by: Nathan Bossart <nathandbossart@gmail.com> Reviewed-by: Tom Lane <tgl@sss.pgh.pa.us> Reviewed-by: Jeff Davis <pgsql@j-davis.com> Discussion: https://postgr.es/m/CACX%2BKaMz2ZoOojh0nQ6QNBYx8Ak1Dkoko%3DD4FSb80BYW%2Bo8CHQ%40mail.gmail.com Backpatch-through: 13 |
2 months ago

56cd332f80
Fix update-po for the PGXS case
The original formulation failed to take into account the fact that for the PGXS case, the source dir is not $(top_srcdir), so it ended up not doing anything. Handle it explicitly. Author: Ryo Matsumura <matsumura.ryo@fujitsu.com> Reviewed-by: Bryan Green <dbryan.green@gmail.com> Backpatch-through: 13 Discussion: https://postgr.es/m/TYCPR01MB113164770FB0B0BE6ED21E68EE8DCA@TYCPR01MB11316.jpnprd01.prod.outlook.com |
2 months ago

24e74b9621
Fix EvalPlanQual handling of foreign/custom joins in ExecScanFetch.
If inside an EPQ recheck, ExecScanFetch would run the recheck method
function for foreign/custom joins even if they aren't descendant nodes
in the EPQ recheck plan tree, which is problematic at least in the
foreign-join case, because such a foreign join isn't guaranteed to have
an alternative local-join plan required for running the recheck method
function; in the postgres_fdw case this could lead to a segmentation
fault or an assert failure in an assert-enabled build when running the
recheck method function.
Even if inside an EPQ recheck, any scan nodes that aren't descendant
ones in the EPQ recheck plan tree should be normally processed by using
the access method function; fix by modifying ExecScanFetch so that if
inside an EPQ recheck, it runs the recheck method function for
foreign/custom joins that are descendant nodes in the EPQ recheck plan
tree as before and runs the access method function for foreign/custom
joins that aren't.
This fix also adds to postgres_fdw an isolation test for an EPQ recheck
that caused issues stated above.
Oversight in commit
2 months ago

e91eb4ad66
Fix incorrect message-printing in win32security.c.
log_error() would probably fail completely if used, and would certainly print garbage for anything that needed to be interpolated into the message, because it was failing to use the correct printing subroutine for a va_list argument. This bug likely went undetected because the error cases this code is used for are rarely exercised - they only occur when Windows security API calls fail catastrophically (out of memory, security subsystem corruption, etc). The FRONTEND variant can be fixed just by calling vfprintf() instead of fprintf(). However, there was no va_list variant of write_stderr(), so create one by refactoring that function. Following the usual naming convention for such things, call it vwrite_stderr(). Author: Bryan Green <dbryan.green@gmail.com> Reviewed-by: Tom Lane <tgl@sss.pgh.pa.us> Discussion: https://postgr.es/m/CAF+pBj8goe4fRmZ0V3Cs6eyWzYLvK+HvFLYEYWG=TzaM+tWPnw@mail.gmail.com Backpatch-through: 13 |
2 months ago
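The refactoring pattern mentioned, a v-variant taking a va_list wrapped by the variadic function, is a standard C idiom; a self-contained sketch with illustrative names:

```c
#include <stdarg.h>
#include <stdio.h>

/* The workhorse accepts an already-started va_list... */
static void
vwrite_stderr_demo(const char *fmt, va_list args)
{
	vfprintf(stderr, fmt, args);	/* vfprintf(), not fprintf(), for a va_list */
}

/* ...and the variadic wrapper keeps the familiar printf-style signature. */
static void
write_stderr_demo(const char *fmt, ...)
{
	va_list		args;

	va_start(args, fmt);
	vwrite_stderr_demo(fmt, args);
	va_end(args);
}

int
main(void)
{
	write_stderr_demo("error %d: %s\n", 5, "access denied");
	return 0;
}
```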

715eb62e30
Doc: clarify n_distinct_inherited setting
There was some confusion around how to adjust the n_distinct estimates for partitioned tables. Here we try and clarify that n_distinct_inherited needs to be adjusted rather than n_distinct. Also fix some slightly misleading text which was talking about table size rather than table rows, fix a grammatical error, and adjust some text which indicated that ANALYZE was performing calculations based on the n_distinct settings. Really it's the query planner that does this and ANALYZE only stores the overridden n_distinct estimate value in pg_statistic. Author: David Rowley <dgrowleyml@gmail.com> Reviewed-by: David G. Johnston <david.g.johnston@gmail.com> Reviewed-by: Chao Li <li.evan.chao@gmail.com> Backpatch-through: 13 Discussion: https://postgr.es/m/CAApHDvrL7a-ZytM1SP8Uk9nEw9bR2CPzVb+uP+bcNj=_q-ZmVw@mail.gmail.com |
2 months ago

b753be38a4
Stop creating constraints during DETACH CONCURRENTLY
Commit
2 months ago

314004021e
Remove overzealous _bt_killitems assertion.
An assertion in _bt_killitems expected the scan's currPos state to contain a valid LSN, saved from when currPos's page was initially read. The assertion failed to account for the fact that even logged relations can have leaf pages with an invalid LSN when built with wal_level set to "minimal". Remove the faulty assertion. Oversight in commit |
2 months ago

f57f2760e6
Remove state.tmp when failing to save a replication slot
An error happening while a slot data is saved on disk in SaveSlotToPath() could cause a state.tmp file (temporary file holding the slot state data, renamed to its permanent name at the end of the function) to remain around after it has been created. This temporary file is created with O_EXCL, meaning that if an existing state.tmp is found, its creation would fail. This would prevent the slot data to be saved, requiring a manual intervention to remove state.tmp before being able to save again a slot. Possible scenarios where this temporary file could remain on disk is for example a ENOSPC case (no disk space) while writing, syncing or renaming it. The bug reports point to a write failure as the principal cause of the problems. Using O_TRUNC has been argued back in 2019 as a potential solution to discard any temporary file that could exist. This solution was rejected as O_EXCL can also act as a safety measure when saving the slot state, crash recovery offering cleanup guarantees post-crash. This commit uses the alternative approach that has been suggested by Andres Freund back in 2019. When the temporary state file cannot be written, synced, closed or renamed (note: not when created!), an unlink() is used to remove the temporary state file while holding the in-progress I/O LWLock, so as any follow-up attempts to save a slot's data would not choke on an existing file that remained around because of a previous failure. This problem has been reported a few times across the years, going back to 2019, but for some reason I have never come back to do something about it and it has been forgotten. A recent report has reminded me that this was still a problem. Reported-by: Kevin K Biju <kevinkbiju@gmail.com> Reported-by: Sergei Kornilov <sk@zsrv.org> Reported-by: Grigory Smolkin <g.smolkin@postgrespro.ru> Discussion: https://postgr.es/m/CAM45KeHa32soKL_G8Vk38CWvTBeOOXcsxAPAs7Jt7yPRf2mbVA@mail.gmail.com Discussion: https://postgr.es/m/3559061693910326@qy4q4a6esb2lebnz.sas.yp-c.yandex.net Discussion: https://postgr.es/m/08bbfab1-a61d-3750-fc18-4ab2c1aa7f09@postgrespro.ru Backpatch-through: 13 |
2 months ago
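A minimal POSIX sketch of the cleanup idea (illustrative names and error handling, not the actual SaveSlotToPath() code): create the temporary file with O_EXCL, and if any later step fails, unlink it so the next save attempt can create it again.

```c
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Write "data" to "path" atomically via "tmppath". */
static int
save_state(const char *tmppath, const char *path, const void *data, size_t len)
{
	int			fd = open(tmppath, O_CREAT | O_EXCL | O_WRONLY, 0600);
	bool		ok;

	if (fd < 0)
		return -1;				/* creation failed: nothing to clean up */

	ok = (write(fd, data, len) == (ssize_t) len) && (fsync(fd) == 0);
	ok = (close(fd) == 0) && ok;	/* always close, even after a failed write */
	ok = ok && (rename(tmppath, path) == 0);

	if (!ok)
		unlink(tmppath);		/* leave no stale temporary file behind */

	return ok ? 0 : -1;
}
```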

f46d77377b
Use SOCK_ERRNO[_SET] in fe-secure-gssapi.c.
On Windows, this code did not handle error conditions correctly at all, since it looked at "errno" which is not used for socket-related errors on that platform. This resulted, for example, in failure to connect to a PostgreSQL server with GSSAPI enabled. We have a convention for dealing with this within libpq, which is to use SOCK_ERRNO and SOCK_ERRNO_SET rather than touching errno directly; but the GSSAPI code is a relative latecomer and did not get that memo. (The equivalent backend code continues to use errno, because the backend does this differently. Maybe libpq's approach should be rethought someday.) Apparently nobody tries to build libpq with GSSAPI support on Windows, or we'd have heard about this before, because it's been broken all along. Back-patch to all supported branches. Author: Ning Wu <ning94803@gmail.com> Co-authored-by: Tom Lane <tgl@sss.pgh.pa.us> Discussion: https://postgr.es/m/CAFGqpvg-pRw=cdsUpKYfwY6D3d-m9tw8WMcAEE7HHWfm-oYWvw@mail.gmail.com Backpatch-through: 13 |
2 months ago
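For reference, the convention amounts to a pair of macros that map to WSAGetLastError()/WSASetLastError() on Windows and to plain errno elsewhere; a hedged sketch (spellings follow the commit text, details illustrative):

```c
#include <errno.h>

#ifdef WIN32
#include <winsock2.h>
#define SOCK_ERRNO			(WSAGetLastError())
#define SOCK_ERRNO_SET(e)	WSASetLastError(e)
#else
#define SOCK_ERRNO			errno
#define SOCK_ERRNO_SET(e)	(errno = (e))
#endif

/* Example: decide whether a failed socket recv() should simply be retried. */
static int
recv_would_block(void)
{
#ifdef WIN32
	return SOCK_ERRNO == WSAEWOULDBLOCK;
#else
	return SOCK_ERRNO == EWOULDBLOCK || SOCK_ERRNO == EAGAIN;
#endif
}
```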

9b3beb7c6f
pgbench: Fail cleanly when finding a COPY result state
Currently, pgbench aborts when a COPY response is received in readCommandResponse(). However, as PQgetResult() returns an empty result when there is no asynchronous result, through getCopyResult(), the logic done at the end of readCommandResponse() for the error path leads to an infinite loop. This commit forcefully exits the COPY state with PQendcopy() before moving to the error handler when fiding a COPY state, avoiding the infinite loop. The COPY protocol is not supported by pgbench anyway, as an error is assumed in this case, so giving up is better than having the tool be stuck forever. pgbench was interruptible in this state. A TAP test is added to check that an error happens if trying to use COPY. Author: Anthonin Bonnefoy <anthonin.bonnefoy@datadoghq.com> Discussion: https://postgr.es/m/CAO6_XqpHyF2m73ifV5a=5jhXxH2chk=XrgefY+eWWPe2Eft3=A@mail.gmail.com Backpatch-through: 13 |
3 months ago

ee2dbcad02
pgstattuple: Improve reports generated for indexes (hash, gist, btree)
pgstattuple checks the state of the pages retrieved for gist and hash using some check functions from each index AM, respectively gistcheckpage() and _hash_checkpage(). When these are called, they would fail when bumping on data that is found as incorrect (like opaque area size not matching, or empty pages), contrary to btree that simply discards these cases and continues to aggregate data. Zero pages can happen after a crash, with these AMs being able to do an internal cleanup when these are seen. Also, sporadic failures are annoying when doing for example a large-scale diagnostic query based on pgstattuple with a join of pg_class, as it forces one to use tricks like quals to discard hash or gist indexes, or use a PL wrapper able to catch errors. This commit changes the reports generated for btree, gist and hash to be more user-friendly; - When seeing an empty page, report it as free space. This new rule applies to gist and hash, and already applied to btree. - For btree, a check based on the size of BTPageOpaqueData is added. - For gist indexes, gistcheckpage() is not called anymore, replaced by a check based on the size of GISTPageOpaqueData. - For hash indexes, instead of _hash_getbuf_with_strategy(), use a direct call to ReadBufferExtended(), coupled with a check based on HashPageOpaqueData. The opaque area size check was already used. - Pages that do not match these criterias are discarded from the stats reports generated. There have been a couple of bug reports over the years that complained about the current behavior for hash and gist, as being not that useful, with nothing being done about it. Hence this change is backpatched down to v13. Reported-by: Noah Misch <noah@leadboat.com> Author: Nitin Motiani <nitinmotiani@google.com> Reviewed-by: Dilip Kumar <dilipbalaut@gmail.com> Discussion: https://postgr.es/m/CAH5HC95gT1J3dRYK4qEnaywG8RqjbwDdt04wuj8p39R=HukayA@mail.gmail.com Backpatch-through: 13 |
3 months ago

cb21e1f92f
pgbench: Fix error reporting in readCommandResponse().
pgbench uses readCommandResponse() to process server responses. When readCommandResponse() encounters an error during a call to PQgetResult() to fetch the current result, it attempts to report it with an additional error message from PQerrorMessage(). However, previously, this extra error message could be lost or become incorrect. The cause was that after fetching the current result (and detecting an error), readCommandResponse() called PQgetResult() again to peek at the next result. This second call could overwrite the libpq connection's error message before the original error was reported, causing the error message retrieved from PQerrorMessage() to be lost or overwritten. This commit fixes the issue by updating readCommandResponse() to use PQresultErrorMessage() instead of PQerrorMessage() to retrieve the error message generated when the PQgetResult() for the current result causes an error, ensuring the correct message is reported. Backpatch to all supported versions. Author: Yugo Nagata <nagata@sraoss.co.jp> Reviewed-by: Chao Li <lic@highgo.com> Reviewed-by: Fujii Masao <masao.fujii@gmail.com> Discussion: https://postgr.es/m/20250925110940.ebacc31725758ec47d5432c6@sraoss.co.jp Backpatch-through: 13 |
3 months ago
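A hedged libpq client sketch of the distinction the fix relies on: PQerrorMessage() reflects the connection's most recent operation and can be overwritten by a later PQgetResult() call, while PQresultErrorMessage() stays attached to the specific PGresult:

```c
#include <stdio.h>

#include <libpq-fe.h>

static void
drain_results(PGconn *conn)
{
	PGresult   *res;

	while ((res = PQgetResult(conn)) != NULL)
	{
		if (PQresultStatus(res) == PGRES_FATAL_ERROR)
		{
			/* Tied to this result; unaffected by later PQgetResult() calls. */
			fprintf(stderr, "query failed: %s", PQresultErrorMessage(res));
		}
		/* Had we saved PQerrorMessage(conn) instead and printed it after
		 * peeking at the next result, that text could already be gone. */
		PQclear(res);
	}
}
```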

5816baa9bc
Fix StatisticsObjIsVisibleExt() for pg_temp.
Neighbor get_statistics_object_oid() ignores objects in pg_temp, as has
been the standard for non-relation, non-type namespace searches since
CVE-2007-2138. Hence, most operations that name a statistics object
correctly decline to map an unqualified name to a statistics object in
pg_temp. StatisticsObjIsVisibleExt() did not. Consequently,
pg_statistics_obj_is_visible() wrongly returned true for such objects,
psql \dX wrongly listed them, and getObjectDescription()-based ereport()
and pg_describe_object() wrongly omitted namespace qualification. Any
malfunction beyond that would depend on how a human or application acts
on those wrong indications. Commit
3 months ago

0be39b4b1a
Fix missed copying of groupDistinct in transformPLAssignStmt.
Because we failed to do this, DISTINCT in GROUP BY DISTINCT would be ignored in PL/pgSQL assignment statements. It's not surprising that no one noticed, since such statements will throw an error if the query produces more than one row. That eliminates most scenarios where advanced forms of GROUP BY could be useful, and indeed makes it hard even to find a simple test case. Nonetheless it's wrong. This is directly the fault of |
3 months ago

bc476f8b8b
pg_restore: Fix security label handling with --no-publications/subscriptions.
Previously, pg_restore did not skip security labels on publications or subscriptions even when --no-publications or --no-subscriptions was specified. As a result, it could issue SECURITY LABEL commands for objects that were never created, causing those commands to fail. This commit fixes the issue by ensuring that security labels on publications and subscriptions are also skipped when the corresponding options are used. Backpatch to all supported versions. Author: Jian He <jian.universality@gmail.com> Reviewed-by: Fujii Masao <masao.fujii@gmail.com> Discussion: https://postgr.es/m/CACJufxHCt00pR9h51AVu6+yPD5J7JQn=7dQXxqacj0XyDhc-fA@mail.gmail.com Backpatch-through: 13 |
3 months ago

31bf096320
Calculate agglevelsup correctly when Aggref contains a CTE.
If an aggregate function call contains a sub-select that has an RTE referencing a CTE outside the aggregate, we must treat that reference like a Var referencing the CTE's query level for purposes of determining the aggregate's level. Otherwise we might reach the nonsensical conclusion that the aggregate should be evaluated at some query level higher than the CTE, ending in a planner error or a broken plan tree that causes executor failures. Bug: #19055 Reported-by: BugForge <dllggyx@outlook.com> Author: Tom Lane <tgl@sss.pgh.pa.us> Discussion: https://postgr.es/m/19055-6970cfa8556a394d@postgresql.org Backpatch-through: 13 |
3 months ago

f78a690347
Add missing EPQ recheck for TID Range Scan
The EvalPlanQual recheck for TID Range Scan wasn't rechecking the TID qual still passed after following update chains. This could result in tuples being updated or deleted by plans using TID Range Scans where the ctid of the new (updated) tuple no longer matches the clause of the scan. This isn't desired behavior, and isn't consistent with what would happen if the chosen plan had used an Index or Seq Scan, and that could lead to hard to predict behavior for scans that contain TID quals and other quals as the planner has freedom to choose TID Range or some other non-TID scan method for such queries, and the chosen plan could change at any moment. Here we fix this by properly implementing the recheck function for TID Range Scans. Backpatch to 14, where TID Range Scans were added Reported-by: Sophie Alpert <pg@sophiebits.com> Author: Sophie Alpert <pg@sophiebits.com> Author: David Rowley <dgrowleyml@gmail.com> Reviewed-by: David Rowley <dgrowleyml@gmail.com> Reviewed-by: Chao Li <li.evan.chao@gmail.com> Discussion: https://postgr.es/m/4a6268ff-3340-453a-9bf5-c98d51a6f729@app.fastmail.com Backpatch-through: 14 |
3 months ago

2eb7ea97d8
Add missing EPQ recheck for TID Scan
The EvalPlanQual recheck for TID Scan wasn't rechecking the TID qual still passed after following update chains. This could result in tuples being updated or deleted by plans using TID Scans where the ctid of the new (updated) tuple no longer matches the clause of the scan. This isn't desired behavior, and isn't consistent with what would happen if the chosen plan had used an Index or Seq Scan, and that could lead to hard to predict behavior for scans that contain TID quals and other quals as the planner has freedom to choose TID or some other scan method for such queries, and the chosen plan could change at any moment. Here we fix this by properly implementing the recheck function for TID Scans. Backpatch to 13, oldest supported version Reported-by: Sophie Alpert <pg@sophiebits.com> Author: Sophie Alpert <pg@sophiebits.com> Author: David Rowley <dgrowleyml@gmail.com> Reviewed-by: David Rowley <dgrowleyml@gmail.com> Reviewed-by: Chao Li <li.evan.chao@gmail.com> Discussion: https://postgr.es/m/4a6268ff-3340-453a-9bf5-c98d51a6f729@app.fastmail.com Backpatch-through: 13 |
3 months ago

85d6ed31fc
Fix pg_dump COMMENT dependency for separate domain constraints.
The COMMENT should depend on the separately-dumped constraint, not the
domain. Sufficient restore parallelism might fail the COMMENT command
by issuing it before the constraint exists. Back-patch to v13, like
commit
3 months ago

295c0a644a
pg_dump: Fix dumping of security labels on subscriptions and event triggers.
Previously, pg_dump incorrectly queried pg_seclabel to retrieve security labels for subscriptions, which are stored in pg_shseclabel as they are global objects. This could result in security labels for subscriptions not being dumped. This commit fixes the issue by updating pg_dump to query the pg_seclabels view, which aggregates entries from both pg_seclabel and pg_shseclabel. While querying pg_shseclabel directly for subscriptions was an alternative, using pg_seclabels is simpler and sufficient. In addition, pg_dump is updated to dump security labels on event triggers, which were previously omitted. Backpatch to all supported versions. Author: Jian He <jian.universality@gmail.com> Co-authored-by: Fujii Masao <masao.fujii@gmail.com> Discussion: https://postgr.es/m/CACJufxHCt00pR9h51AVu6+yPD5J7JQn=7dQXxqacj0XyDhc-fA@mail.gmail.com Backpatch-through: 13 |
3 months ago

db900ec358
pg_restore: Fix comment handling with --no-publications / --no-subscriptions.
Previously, pg_restore did not skip comments on publications or subscriptions even when --no-publications or --no-subscriptions was specified. As a result, it could issue COMMENT commands for objects that were never created, causing those commands to fail. This commit fixes the issue by ensuring that comments on publications and subscriptions are also skipped when the corresponding options are used. Backpatch to all supported versions. Author: Jian He <jian.universality@gmail.com> Co-authored-by: Fujii Masao <masao.fujii@gmail.com> Discussion: https://postgr.es/m/CACJufxHCt00pR9h51AVu6+yPD5J7JQn=7dQXxqacj0XyDhc-fA@mail.gmail.com Backpatch-through: 13 |
3 months ago

cbd3732cbb
CREATE STATISTICS: improve misleading error message
The previous change (commit
3 months ago

fbfc36e947
jit: fix build with LLVM-21
LLVM-21 renamed llvm::GlobalValue::getGUID() to getGUIDAssumingExternalLinkage(), so add a version guard. Author: Holger Hoffstätte <holger@applied-asynchrony.com> Discussion: https://www.postgresql.org/message-id/flat/d25e6e4a-d1b4-84d3-2f8a-6c45b975f53d%40applied-asynchrony.com |
3 months ago

f75ff1b141
Amend recent fix for SIMILAR TO regex conversion.
Commit
3 months ago

5e131489fe
Fix description of WAL record blocks in hash_xlog.h
hash_xlog.h included descriptions for the blocks used in WAL records
that were not completely consistent with how the records are
generated, with one block missing for SQUEEZE_PAGE, and inconsistent
descriptions used for block 0 in VACUUM_ONE_PAGE and MOVE_PAGE_CONTENTS.
This information was incorrect since
3 months ago

5eab9b0a47
Fix memory leakage in nodeSubplan.c.
If the hash functions used for hashing tuples leaked any memory,
we failed to clean that up, resulting in query-lifespan memory
leakage in queries using hashed subplans. One way that could
happen is if the values being hashed require de-toasting, since
most of our hash functions don't trouble to clean up de-toasted
inputs.
Prior to commit
3 months ago

ea65c88238
Fix compiler error introduced by 5386bfb9c1.
Per buildfarm member wrasse, void function cannot return a value. This only affects v13-v17, where an ABI-compatible wrapper function was added. Backpatch-through: 13-17 |
3 months ago

5fd569ef2d
Fix replica identity check for INSERT ON CONFLICT DO UPDATE.
If an INSERT has an ON CONFLICT DO UPDATE clause, the executor must check that the target relation supports UPDATE as well as INSERT. In particular, it must check that the target relation has a REPLICA IDENTITY if it publishes updates. Formerly, it was not doing this check, which could lead to silently breaking replication. Fix by adding such a check to CheckValidResultRel(), which requires adding a new onConflictAction argument. In back-branches, preserve ABI compatibility by introducing a wrapper function with the original signature. Author: Zhijie Hou <houzj.fnst@fujitsu.com> Reviewed-by: Ashutosh Bapat <ashutosh.bapat.oss@gmail.com> Reviewed-by: Dean Rasheed <dean.a.rasheed@gmail.com> Tested-by: Chao Li <li.evan.chao@gmail.com> Discussion: https://postgr.es/m/OS3PR01MB57180C87E43A679A730482DF94B62@OS3PR01MB5718.jpnprd01.prod.outlook.com Backpatch-through: 13 |
3 months ago

160ef51c8d
Fix planner error when estimating SubPlan cost
SubPlan nodes are typically built very early, before any RelOptInfos have been constructed for the parent query level. As a result, the simple_rel_array in the parent root has not yet been initialized. Currently, during cost estimation of a SubPlan's testexpr, we may call examine_variable() to look up statistical data about the expressions. This can lead to "no relation entry for relid" errors. To fix, pass root as NULL to cost_qual_eval() in cost_subplan(), since the root does not yet contain enough information to safely consult statistics. One exception is SubPlan nodes built for the initplans of MIN/MAX aggregates from indexes. In this case, having a NULL root is safe because testexpr will be NULL. Additionally, an initplan will by definition not consult anything from the parent plan. Backpatch to all supported branches. Although the reported call path that triggers this error is not reachable prior to v17, there's no guarantee that other code paths -- especially in extensions -- could not encounter the same issue when cost_qual_eval() is called with a root that lacks a valid simple_rel_array. The test case is not included in pre-v17 branches though. Bug: #19037 Reported-by: Alexander Lakhin <exclusion@gmail.com> Diagnosed-by: Tom Lane <tgl@sss.pgh.pa.us> Author: Richard Guo <guofenglinux@gmail.com> Reviewed-by: Tom Lane <tgl@sss.pgh.pa.us> Discussion: https://postgr.es/m/19037-3d1c7bb553c7ce84@postgresql.org Backpatch-through: 13 |
4 months ago

0cc540f0d0
libpq: Fix PQtrace() format for non-printable characters
PQtrace() was generating its output for non-printable characters without
casting the characters printed with unsigned char, leading to some extra
"\xffffff" generated in the output due to the fact that char may be
signed.
Oversights introduced by commit
4 months ago
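The underlying pitfall is plain C: when char is signed, a byte such as 0xAB passed to a %02x conversion is sign-extended during integer promotion. A small standalone sketch (not the PQtrace() code itself):

```c
#include <stdio.h>

int
main(void)
{
	char		c = (char) 0xAB;	/* a non-printable byte off the wire */

	/* Buggy form: a signed char promotes to the int -85, printed as ffffffab. */
	printf("without cast: \\x%02x\n", c);

	/* Correct form: cast through unsigned char first, printed as ab. */
	printf("with cast:    \\x%02x\n", (unsigned char) c);
	return 0;
}
```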

b830f9d674
CREATE STATISTICS: improve misleading error message
I think the error message for a different condition was inadvertently
copied.
This problem seems to have been introduced by commit
4 months ago

4a593043ee
Put "excludeOnly" GIN scan keys at the end of the scankey array.
Commit
4 months ago

a7da746c11
Do CHECK_FOR_INTERRUPTS inside, not before, scanGetItem.
The CHECK_FOR_INTERRUPTS call in gingetbitmap turns out to be inadequate to prevent a long uninterruptible loop, because we now know a case where looping occurs within scanGetItem. While the next patch will fix the bug that caused that, it seems foolish to assume that no similar patterns are possible. Let's do the CFI within scanGetItem's retry loop, instead. This demonstrably allows canceling out of the loop exhibited in bug #19031. Bug: #19031 Reported-by: Tim Wood <washwithcare@gmail.com> Author: Tom Lane <tgl@sss.pgh.pa.us> Discussion: https://postgr.es/m/19031-0638148643d25548@postgresql.org Backpatch-through: 13 |
4 months ago

22c6a44f01
Rewrite previous commit's test for TestUpgradeXversion compatibility.
v17 introduced the MAINTAIN ON TABLES privilege. That changed the applicable "baseacls" reaching buildACLCommands(). That yielded spurious TestUpgradeXversion diffs. Change to use a TYPES privilege. Types have the same one privilege in all supported versions, so they avoid the problem. Per buildfarm. Back-patch to v13, like that commit. Discussion: https://postgr.es/m/20250823144505.88.nmisch@google.com Backpatch-through: 13 |
4 months ago

4948bb9df4
Sort DO_DEFAULT_ACL dump objects independent of OIDs.
Commit
4 months ago

222130edd8
Ignore temporary relations in RelidByRelfilenumber()
Temporary relations may share the same RelFileNumber with a permanent
relation, or other temporary relations associated with other sessions.
Being able to uniquely identify a temporary relation would require
RelidByRelfilenumber() to know about the proc number of the temporary
relation it wants to identify, something it is not designed for since
its introduction in
4 months ago

24145bbb9e
doc: Improve description of wal_compression
The description of this GUC provides a list of the situations where full-page writes are generated. However, it is not completely exact, mentioning only the cases where full_page_writes=on or base backups. It is possible to generate full-page writes in more situations than these two, making the description confusing as it implies that no other cases exist. The description is slightly reworded to take into account that other cases are possible, without mentioning them directly to minimize the maintenance burden should FPWs be generated in more contexts in the future. Author: Jingtang Zhang <mrdrivingduck@gmail.com> Reviewed-by: Andrey Borodin <x4mmm@yandex-team.ru> Reviewed-by: Xuneng Zhou <xunengzhou@gmail.com> Discussion: https://postgr.es/m/CAPsk3_CtAYa_fy4p6=x7qtoutrdKvg1kGk46D5fsE=sMt2546g@mail.gmail.com Backpatch-through: 13 |
4 months ago