mirror of https://github.com/postgres/postgres
Branch: REL_13_STABLE

Branches:
REL2_0B
REL6_4
REL6_5_PATCHES
REL7_0_PATCHES
REL7_1_STABLE
REL7_2_STABLE
REL7_3_STABLE
REL7_4_STABLE
REL8_0_STABLE
REL8_1_STABLE
REL8_2_STABLE
REL8_3_STABLE
REL8_4_STABLE
REL8_5_ALPHA1_BRANCH
REL8_5_ALPHA2_BRANCH
REL8_5_ALPHA3_BRANCH
REL9_0_ALPHA4_BRANCH
REL9_0_ALPHA5_BRANCH
REL9_0_STABLE
REL9_1_STABLE
REL9_2_STABLE
REL9_3_STABLE
REL9_4_STABLE
REL9_5_STABLE
REL9_6_STABLE
REL_10_STABLE
REL_11_STABLE
REL_12_STABLE
REL_13_STABLE
REL_14_STABLE
REL_15_STABLE
REL_16_STABLE
REL_17_STABLE
REL_18_STABLE
Release_1_0_3
WIN32_DEV
ecpg_big_bison
master

Tags:
PG95-1_01
PG95-1_08
PG95-1_09
REL2_0
REL6_1
REL6_1_1
REL6_2
REL6_2_1
REL6_3
REL6_3_2
REL6_4_2
REL6_5
REL6_5_1
REL6_5_2
REL6_5_3
REL7_0
REL7_0_2
REL7_0_3
REL7_1
REL7_1_1
REL7_1_2
REL7_1_3
REL7_1_BETA
REL7_1_BETA2
REL7_1_BETA3
REL7_2
REL7_2_1
REL7_2_2
REL7_2_3
REL7_2_4
REL7_2_5
REL7_2_6
REL7_2_7
REL7_2_8
REL7_2_BETA1
REL7_2_BETA2
REL7_2_BETA3
REL7_2_BETA4
REL7_2_BETA5
REL7_2_RC1
REL7_2_RC2
REL7_3
REL7_3_1
REL7_3_10
REL7_3_11
REL7_3_12
REL7_3_13
REL7_3_14
REL7_3_15
REL7_3_16
REL7_3_17
REL7_3_18
REL7_3_19
REL7_3_2
REL7_3_20
REL7_3_21
REL7_3_3
REL7_3_4
REL7_3_5
REL7_3_6
REL7_3_7
REL7_3_8
REL7_3_9
REL7_4
REL7_4_1
REL7_4_10
REL7_4_11
REL7_4_12
REL7_4_13
REL7_4_14
REL7_4_15
REL7_4_16
REL7_4_17
REL7_4_18
REL7_4_19
REL7_4_2
REL7_4_20
REL7_4_21
REL7_4_22
REL7_4_23
REL7_4_24
REL7_4_25
REL7_4_26
REL7_4_27
REL7_4_28
REL7_4_29
REL7_4_3
REL7_4_30
REL7_4_4
REL7_4_5
REL7_4_6
REL7_4_7
REL7_4_8
REL7_4_9
REL7_4_BETA1
REL7_4_BETA2
REL7_4_BETA3
REL7_4_BETA4
REL7_4_BETA5
REL7_4_RC1
REL7_4_RC2
REL8_0_0
REL8_0_0BETA1
REL8_0_0BETA2
REL8_0_0BETA3
REL8_0_0BETA4
REL8_0_0BETA5
REL8_0_0RC1
REL8_0_0RC2
REL8_0_0RC3
REL8_0_0RC4
REL8_0_0RC5
REL8_0_1
REL8_0_10
REL8_0_11
REL8_0_12
REL8_0_13
REL8_0_14
REL8_0_15
REL8_0_16
REL8_0_17
REL8_0_18
REL8_0_19
REL8_0_2
REL8_0_20
REL8_0_21
REL8_0_22
REL8_0_23
REL8_0_24
REL8_0_25
REL8_0_26
REL8_0_3
REL8_0_4
REL8_0_5
REL8_0_6
REL8_0_7
REL8_0_8
REL8_0_9
REL8_1_0
REL8_1_0BETA1
REL8_1_0BETA2
REL8_1_0BETA3
REL8_1_0BETA4
REL8_1_0RC1
REL8_1_1
REL8_1_10
REL8_1_11
REL8_1_12
REL8_1_13
REL8_1_14
REL8_1_15
REL8_1_16
REL8_1_17
REL8_1_18
REL8_1_19
REL8_1_2
REL8_1_20
REL8_1_21
REL8_1_22
REL8_1_23
REL8_1_3
REL8_1_4
REL8_1_5
REL8_1_6
REL8_1_7
REL8_1_8
REL8_1_9
REL8_2_0
REL8_2_1
REL8_2_10
REL8_2_11
REL8_2_12
REL8_2_13
REL8_2_14
REL8_2_15
REL8_2_16
REL8_2_17
REL8_2_18
REL8_2_19
REL8_2_2
REL8_2_20
REL8_2_21
REL8_2_22
REL8_2_23
REL8_2_3
REL8_2_4
REL8_2_5
REL8_2_6
REL8_2_7
REL8_2_8
REL8_2_9
REL8_2_BETA1
REL8_2_BETA2
REL8_2_BETA3
REL8_2_RC1
REL8_3_0
REL8_3_1
REL8_3_10
REL8_3_11
REL8_3_12
REL8_3_13
REL8_3_14
REL8_3_15
REL8_3_16
REL8_3_17
REL8_3_18
REL8_3_19
REL8_3_2
REL8_3_20
REL8_3_21
REL8_3_22
REL8_3_23
REL8_3_3
REL8_3_4
REL8_3_5
REL8_3_6
REL8_3_7
REL8_3_8
REL8_3_9
REL8_3_BETA1
REL8_3_BETA2
REL8_3_BETA3
REL8_3_BETA4
REL8_3_RC1
REL8_3_RC2
REL8_4_0
REL8_4_1
REL8_4_10
REL8_4_11
REL8_4_12
REL8_4_13
REL8_4_14
REL8_4_15
REL8_4_16
REL8_4_17
REL8_4_18
REL8_4_19
REL8_4_2
REL8_4_20
REL8_4_21
REL8_4_22
REL8_4_3
REL8_4_4
REL8_4_5
REL8_4_6
REL8_4_7
REL8_4_8
REL8_4_9
REL8_4_BETA1
REL8_4_BETA2
REL8_4_RC1
REL8_4_RC2
REL8_5_ALPHA1
REL8_5_ALPHA2
REL8_5_ALPHA3
REL9_0_0
REL9_0_1
REL9_0_10
REL9_0_11
REL9_0_12
REL9_0_13
REL9_0_14
REL9_0_15
REL9_0_16
REL9_0_17
REL9_0_18
REL9_0_19
REL9_0_2
REL9_0_20
REL9_0_21
REL9_0_22
REL9_0_23
REL9_0_3
REL9_0_4
REL9_0_5
REL9_0_6
REL9_0_7
REL9_0_8
REL9_0_9
REL9_0_ALPHA4
REL9_0_ALPHA5
REL9_0_BETA1
REL9_0_BETA2
REL9_0_BETA3
REL9_0_BETA4
REL9_0_RC1
REL9_1_0
REL9_1_1
REL9_1_10
REL9_1_11
REL9_1_12
REL9_1_13
REL9_1_14
REL9_1_15
REL9_1_16
REL9_1_17
REL9_1_18
REL9_1_19
REL9_1_2
REL9_1_20
REL9_1_21
REL9_1_22
REL9_1_23
REL9_1_24
REL9_1_3
REL9_1_4
REL9_1_5
REL9_1_6
REL9_1_7
REL9_1_8
REL9_1_9
REL9_1_ALPHA1
REL9_1_ALPHA2
REL9_1_ALPHA3
REL9_1_ALPHA4
REL9_1_ALPHA5
REL9_1_BETA1
REL9_1_BETA2
REL9_1_BETA3
REL9_1_RC1
REL9_2_0
REL9_2_1
REL9_2_10
REL9_2_11
REL9_2_12
REL9_2_13
REL9_2_14
REL9_2_15
REL9_2_16
REL9_2_17
REL9_2_18
REL9_2_19
REL9_2_2
REL9_2_20
REL9_2_21
REL9_2_22
REL9_2_23
REL9_2_24
REL9_2_3
REL9_2_4
REL9_2_5
REL9_2_6
REL9_2_7
REL9_2_8
REL9_2_9
REL9_2_BETA1
REL9_2_BETA2
REL9_2_BETA3
REL9_2_BETA4
REL9_2_RC1
REL9_3_0
REL9_3_1
REL9_3_10
REL9_3_11
REL9_3_12
REL9_3_13
REL9_3_14
REL9_3_15
REL9_3_16
REL9_3_17
REL9_3_18
REL9_3_19
REL9_3_2
REL9_3_20
REL9_3_21
REL9_3_22
REL9_3_23
REL9_3_24
REL9_3_25
REL9_3_3
REL9_3_4
REL9_3_5
REL9_3_6
REL9_3_7
REL9_3_8
REL9_3_9
REL9_3_BETA1
REL9_3_BETA2
REL9_3_RC1
REL9_4_0
REL9_4_1
REL9_4_10
REL9_4_11
REL9_4_12
REL9_4_13
REL9_4_14
REL9_4_15
REL9_4_16
REL9_4_17
REL9_4_18
REL9_4_19
REL9_4_2
REL9_4_20
REL9_4_21
REL9_4_22
REL9_4_23
REL9_4_24
REL9_4_25
REL9_4_26
REL9_4_3
REL9_4_4
REL9_4_5
REL9_4_6
REL9_4_7
REL9_4_8
REL9_4_9
REL9_4_BETA1
REL9_4_BETA2
REL9_4_BETA3
REL9_4_RC1
REL9_5_0
REL9_5_1
REL9_5_10
REL9_5_11
REL9_5_12
REL9_5_13
REL9_5_14
REL9_5_15
REL9_5_16
REL9_5_17
REL9_5_18
REL9_5_19
REL9_5_2
REL9_5_20
REL9_5_21
REL9_5_22
REL9_5_23
REL9_5_24
REL9_5_25
REL9_5_3
REL9_5_4
REL9_5_5
REL9_5_6
REL9_5_7
REL9_5_8
REL9_5_9
REL9_5_ALPHA1
REL9_5_ALPHA2
REL9_5_BETA1
REL9_5_BETA2
REL9_5_RC1
REL9_6_0
REL9_6_1
REL9_6_10
REL9_6_11
REL9_6_12
REL9_6_13
REL9_6_14
REL9_6_15
REL9_6_16
REL9_6_17
REL9_6_18
REL9_6_19
REL9_6_2
REL9_6_20
REL9_6_21
REL9_6_22
REL9_6_23
REL9_6_24
REL9_6_3
REL9_6_4
REL9_6_5
REL9_6_6
REL9_6_7
REL9_6_8
REL9_6_9
REL9_6_BETA1
REL9_6_BETA2
REL9_6_BETA3
REL9_6_BETA4
REL9_6_RC1
REL_10_0
REL_10_1
REL_10_10
REL_10_11
REL_10_12
REL_10_13
REL_10_14
REL_10_15
REL_10_16
REL_10_17
REL_10_18
REL_10_19
REL_10_2
REL_10_20
REL_10_21
REL_10_22
REL_10_23
REL_10_3
REL_10_4
REL_10_5
REL_10_6
REL_10_7
REL_10_8
REL_10_9
REL_10_BETA1
REL_10_BETA2
REL_10_BETA3
REL_10_BETA4
REL_10_RC1
REL_11_0
REL_11_1
REL_11_10
REL_11_11
REL_11_12
REL_11_13
REL_11_14
REL_11_15
REL_11_16
REL_11_17
REL_11_18
REL_11_19
REL_11_2
REL_11_20
REL_11_21
REL_11_22
REL_11_3
REL_11_4
REL_11_5
REL_11_6
REL_11_7
REL_11_8
REL_11_9
REL_11_BETA1
REL_11_BETA2
REL_11_BETA3
REL_11_BETA4
REL_11_RC1
REL_12_0
REL_12_1
REL_12_10
REL_12_11
REL_12_12
REL_12_13
REL_12_14
REL_12_15
REL_12_16
REL_12_17
REL_12_18
REL_12_19
REL_12_2
REL_12_20
REL_12_21
REL_12_22
REL_12_3
REL_12_4
REL_12_5
REL_12_6
REL_12_7
REL_12_8
REL_12_9
REL_12_BETA1
REL_12_BETA2
REL_12_BETA3
REL_12_BETA4
REL_12_RC1
REL_13_0
REL_13_1
REL_13_10
REL_13_11
REL_13_12
REL_13_13
REL_13_14
REL_13_15
REL_13_16
REL_13_17
REL_13_18
REL_13_19
REL_13_2
REL_13_20
REL_13_21
REL_13_22
REL_13_3
REL_13_4
REL_13_5
REL_13_6
REL_13_7
REL_13_8
REL_13_9
REL_13_BETA1
REL_13_BETA2
REL_13_BETA3
REL_13_RC1
REL_14_0
REL_14_1
REL_14_10
REL_14_11
REL_14_12
REL_14_13
REL_14_14
REL_14_15
REL_14_16
REL_14_17
REL_14_18
REL_14_19
REL_14_2
REL_14_3
REL_14_4
REL_14_5
REL_14_6
REL_14_7
REL_14_8
REL_14_9
REL_14_BETA1
REL_14_BETA2
REL_14_BETA3
REL_14_RC1
REL_15_0
REL_15_1
REL_15_10
REL_15_11
REL_15_12
REL_15_13
REL_15_14
REL_15_2
REL_15_3
REL_15_4
REL_15_5
REL_15_6
REL_15_7
REL_15_8
REL_15_9
REL_15_BETA1
REL_15_BETA2
REL_15_BETA3
REL_15_BETA4
REL_15_RC1
REL_15_RC2
REL_16_0
REL_16_1
REL_16_10
REL_16_2
REL_16_3
REL_16_4
REL_16_5
REL_16_6
REL_16_7
REL_16_8
REL_16_9
REL_16_BETA1
REL_16_BETA2
REL_16_BETA3
REL_16_RC1
REL_17_0
REL_17_1
REL_17_2
REL_17_3
REL_17_4
REL_17_5
REL_17_6
REL_17_BETA1
REL_17_BETA2
REL_17_BETA3
REL_17_RC1
REL_18_BETA1
REL_18_BETA2
REL_18_BETA3
REL_18_RC1
Release_1_0_2
Release_2_0
Release_2_0_0
release-6-3
3958 Commits (REL_13_STABLE)
SHA1 | Message | Date |
7fffcc2ee9 |
Remove unnecessary pfree() in g_intbig_compress().
GiST compress functions (like all GiST opclass functions) are supposed to be called in short-lived memory contexts, so that minor memory leaks in them are not of concern, and indeed explicit pfree's are likely slightly counterproductive. But this one in g_intbig_compress() is more than slightly counterproductive, because it's guarded by "if (in != DatumGetArrayTypeP(entry->key))" which means that if this test succeeds, we've detoasted the datum twice. (And to add insult to injury, the extra detoast result is leaked.) Let's just drop the whole stanza, relying on the GiST temporary context mechanism to clean up in good time. The analogous bit in g_int_compress() is if (r != (ArrayType *) DatumGetPointer(entry->key)) pfree(r); which doesn't have the gratuitous-detoast problem so I left it alone. Perhaps there is a case for removing unnecessary pfree's more widely, but I'm not sure if it's worth the code churn. The potential extra decompress seems expensive enough to justify calling this a (minor) performance bug and back-patching. Konstantin Knizhnik, Matthias van de Meent, Tom Lane Discussion: https://postgr.es/m/CAEze2Wi86=DxErfvf+SCB2UKmU2amKOF60BKuJOX=w-RojRn0A@mail.gmail.com |
2 years ago |
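For illustration, a minimal C sketch of the anti-pattern this commit removes (the function name is made up; GISTENTRY, ArrayType and DatumGetArrayTypeP() are PostgreSQL's): the guard itself re-detoasts the key, and the second copy is leaked.

```c
#include "postgres.h"
#include "access/gist.h"
#include "utils/array.h"

static void
cleanup_after_compress_sketch(GISTENTRY *entry, ArrayType *in)
{
	/*
	 * "in" was obtained earlier with DatumGetArrayTypeP(entry->key).  The
	 * guard below calls DatumGetArrayTypeP() again just to decide whether
	 * "in" is a detoasted copy, so a toasted key gets detoasted twice and
	 * the second copy is leaked.  Dropping the whole stanza and letting the
	 * short-lived GiST memory context clean up is simpler and cheaper.
	 */
	if (in != DatumGetArrayTypeP(entry->key))
		pfree(in);
}
```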
ae9aac64a3 |
intarray: Prevent out-of-bound memory reads with gist__int_ops
As gist__int_ops stands in intarray, it is possible to store GiST entries for leaf pages that can cause corruptions when decompressed. Leaf nodes are stored as decompressed all the time by the compression method, and the decompression method should map with that, retrieving the contents of the page without doing any decompression. However, the code authorized the insertion of leaf page data with a higher number of array items than what can be supported, generating a NOTICE message to inform about this matter (199 for a 8k page, for reference). When calling the decompression method, a decompression would be attempted on this leaf node item but the contents should be retrieved as they are. The NOTICE message generated when dealing with the compression of a leaf page and too many elements in the input array for gist__int_ops has been introduced by |
2 years ago |
78bf0a256d |
hstore: Tighten key/value parsing check for whitespaces
isspace() can be locale-sensitive depending on the platform, causing
hstore to treat as whitespace characters that it should not.
For example, U+0105, being decoded as 0xC4 0x85 in UTF-8, would be
discarded from the input given.
This problem is similar to
|
2 years ago |
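A locale-independent separator test of the kind this fix calls for could look like the sketch below (hypothetical helper, not the hstore parser itself): only the ASCII whitespace bytes are treated as separators, so multi-byte sequences such as U+0105 pass through untouched.

```c
#include <stdbool.h>

/* Treat only ASCII whitespace as a key/value separator, regardless of the
 * locale the server happens to run under. */
static bool
is_ascii_space(unsigned char c)
{
	return c == ' ' || c == '\t' || c == '\n' || c == '\r' ||
		   c == '\v' || c == '\f';
}
```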
0409c7fc74 |
Ensure Soundex difference() function handles empty input sanely.
fuzzystrmatch's difference() function assumes that _soundex() always initializes its output buffer fully. This was not so for the case of a string containing no alphabetic characters, resulting in unstable output and Valgrind complaints. Fix by using memset() to fill the whole buffer in the early-exit case. Also make some cosmetic improvements (I didn't care for the random switches between "instr[0]" and "*instr" notation). Report and diagnosis by Alexander Lakhin (bug #17935). Back-patch to all supported branches. Discussion: https://postgr.es/m/17935-b99316aa79c18513@postgresql.org |
2 years ago |
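The shape of the fix, as a hedged standalone sketch (hypothetical function and buffer names, not the fuzzystrmatch code verbatim):

```c
#include <ctype.h>
#include <string.h>

#define SOUNDEX_LEN 4

/* Initialize the whole output code up front so callers such as difference()
 * never read uninitialized bytes, then bail out early if the input contains
 * no alphabetic characters at all. */
static void
soundex_sketch(const char *instr, char outstr[SOUNDEX_LEN + 1])
{
	memset(outstr, '0', SOUNDEX_LEN);
	outstr[SOUNDEX_LEN] = '\0';

	while (*instr && !isalpha((unsigned char) *instr))
		instr++;
	if (*instr == '\0')
		return;					/* nothing to encode; result stays "0000" */

	/* ... normal Soundex encoding would continue here ... */
	outstr[0] = (char) toupper((unsigned char) *instr);
}
```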
feb9e7fbbc |
Adjust sepgsql expected output for 681d9e462 et al.
Security: CVE-2023-2454 |
2 years ago |
2212f7db80 |
Replace last PushOverrideSearchPath() call with set_config_option().
The two methods don't cooperate, so set_config_option("search_path", ...) has been ineffective under non-empty overrideStack. This defect enabled an attacker having database-level CREATE privilege to execute arbitrary code as the bootstrap superuser. While that particular attack requires v13+ for the trusted extension attribute, other attacks are feasible in all supported versions. Standardize on the combination of NewGUCNestLevel() and set_config_option("search_path", ...). It is newer than PushOverrideSearchPath(), more-prevalent, and has no known disadvantages. The "override" mechanism remains for now, for compatibility with out-of-tree code. Users should update such code, which likely suffers from the same sort of vulnerability closed here. Back-patch to v11 (all supported versions). Alexander Lakhin. Reported by Alexander Lakhin. Security: CVE-2023-2454 |
2 years ago |
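The recommended pattern reads roughly like the sketch below; the exact set_config_option() argument list is an assumption to check against guc.h on this branch.

```c
#include "postgres.h"
#include "utils/guc.h"

static void
run_with_safe_search_path_sketch(void)
{
	int			save_nestlevel = NewGUCNestLevel();

	/* Equivalent of SET LOCAL search_path, scoped to this nest level. */
	(void) set_config_option("search_path", "pg_catalog, pg_temp",
							 PGC_USERSET, PGC_S_SESSION,
							 GUC_ACTION_SAVE, true, 0, false);

	/* ... do the work that must not trust the caller's search_path ... */

	/* Roll back the GUC change made above. */
	AtEOXact_GUC(false, save_nestlevel);
}
```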
de2dfa0538 |
In hstore_plpython, avoid crashing when return value isn't a mapping.
Python 3 changed the behavior of PyMapping_Check(), breaking the test in plpython_to_hstore() that verifies whether a function result to be transformed is acceptable. A backwards-compatible fix is to first verify that the object doesn't pass PySequence_Check(). Perhaps accidentally, our other uses of PyMapping_Check() already follow uses of PySequence_Check(), so that no other bugs were created by this change. Per bug #17908 from Alexander Lakhin. Back-patch to all supported branches. Dmitry Dolgov and Tom Lane Discussion: https://postgr.es/m/17908-3f19a125d56a11d6@postgresql.org |
2 years ago |
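A sketch of the Python-3-compatible acceptance test using the CPython C API calls named above (hypothetical wrapper function):

```c
#include <Python.h>
#include <stdbool.h>

/* Under Python 3, lists and other sequences can pass PyMapping_Check(), so
 * rule sequences out first before treating the object as a mapping. */
static bool
is_acceptable_mapping(PyObject *obj)
{
	return !PySequence_Check(obj) && PyMapping_Check(obj);
}
```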
48c6825d0e |
Validate ltree siglen GiST option to be int-aligned
Unaligned siglen could lead to an unaligned access to subsequent key fields. Backpatch to 13, where opclass options were introduced. Reported-by: Alexander Lakhin Bug: 17847 Discussion: https://postgr.es/m/17847-171232970bea406b%40postgresql.org Reviewed-by: Tom Lane, Pavel Borisov, Alexander Lakhin Backpatch-through: 13 |
2 years ago |
bc436e4a91 |
Fix misbehavior in contrib/pg_trgm with an unsatisfiable regex.
If the regex compiler can see that a regex is unsatisfiable (for example, '$foo') then it may emit an NFA having no arcs. pg_trgm's packGraph function did the wrong thing in this case; it would access off the end of a work array, and with bad luck could produce a corrupted output data structure causing more problems later. This could end with wrong answers or crashes in queries using a pg_trgm GIN or GiST index with such a regex. Fix by not trying to de-duplicate if there aren't at least 2 arcs. Per bug #17830 from Alexander Lakhin. Back-patch to all supported branches. Discussion: https://postgr.es/m/17830-57ff5f89bdb02b09@postgresql.org |
3 years ago |
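The guard amounts to skipping de-duplication unless there are at least two arcs; a generic C sketch, not the pg_trgm code itself:

```c
#include <stdlib.h>

static int
cmp_int(const void *a, const void *b)
{
	int			x = *(const int *) a;
	int			y = *(const int *) b;

	return (x > y) - (x < y);
}

/* Sort and de-duplicate, but only when there is actually something to
 * de-duplicate; with 0 or 1 elements the unguarded code walks off the array. */
static int
dedup_arcs_sketch(int *arcs, int narcs)
{
	int			i, n;

	if (narcs < 2)
		return narcs;

	qsort(arcs, narcs, sizeof(int), cmp_int);
	n = 1;
	for (i = 1; i < narcs; i++)
		if (arcs[i] != arcs[n - 1])
			arcs[n++] = arcs[i];
	return n;
}
```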
ad38e2f891 |
Fix calculation of which GENERATED columns need to be updated.
We were identifying the updatable generated columns of inheritance
children by transposing the calculation made for their parent.
However, there's nothing that says a traditional-inheritance child
can't have generated columns that aren't there in its parent, or that
have different dependencies than are in the parent's expression.
(At present it seems that we don't enforce that for partitioning
either, which is likely wrong to some degree or other; but the case
clearly needs to be handled with traditional inheritance.)
Hence, drop the very-klugy-anyway "extraUpdatedCols" RTE field
in favor of identifying which generated columns depend on updated
columns during executor startup. In HEAD we can remove
extraUpdatedCols altogether; in back branches, it's still there but
always empty. Another difference between the HEAD and back-branch
versions of this patch is that in HEAD we can add the new bitmap field
to ResultRelInfo, but that would cause an ABI break in back branches.
Like
|
3 years ago |
d35f1d485c |
Fix contrib/seg to be more wary of long input numbers.
seg stores the number of significant digits in an input number in a "char" field. If char is signed, and the input is more than 127 digits long, the count can read out as negative causing seg_out() to print garbage (or, if you're really unlucky, even crash). To fix, clamp the digit count to be not more than FLT_DIG. (In theory this loses some information about what the original input was, but it doesn't seem like useful information; it would not survive dump/restore in any case.) Also, in case there are stored values of the seg type containing bad data, add a clamp in seg_out's restore() subroutine. Per bug #17725 from Robins Tharakan. It's been like this forever, so back-patch to all supported branches. Discussion: https://postgr.es/m/17725-0a09313b67fbe86e@postgresql.org |
3 years ago |
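The clamp described above, as a standalone sketch with hypothetical names:

```c
#include <float.h>

/* The significant-digit count is stored in a "char" field; clamp it so a
 * 127+-digit input cannot wrap around to a negative value. */
static char
clamp_sig_digits(int significant_digits)
{
	if (significant_digits > FLT_DIG)
		significant_digits = FLT_DIG;
	if (significant_digits < 0)		/* defend against already-bad stored data */
		significant_digits = 0;
	return (char) significant_digits;
}
```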
9a299cf7c2 |
Replace RelationOpenSmgr() with RelationGetSmgr().
This is a back-patch of the v15-era commit |
3 years ago |
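A sketch of the accessor pattern behind RelationGetSmgr(), assuming the relcache and smgr field names used on this branch; the authoritative definition lives in src/include/utils/rel.h.

```c
#include "postgres.h"
#include "storage/smgr.h"
#include "utils/rel.h"

/*
 * Open the SMgrRelation on first use and let the relcache own the pointer,
 * instead of callers doing an explicit RelationOpenSmgr() and then
 * dereferencing rel->rd_smgr directly.
 */
static inline SMgrRelation
relation_get_smgr_sketch(Relation rel)
{
	if (rel->rd_smgr == NULL)
		smgrsetowner(&(rel->rd_smgr), smgropen(rel->rd_node, rel->rd_backend));
	return rel->rd_smgr;
}
```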
c304c069d1 |
Fix compilation warnings with libselinux 3.1 in contrib/sepgsql/
Upstream SELinux has recently marked security_context_t as officially
deprecated, causing warnings with -Wdeprecated-declarations. This has
been considered legacy code by upstream for some time now, as
security_context_t got removed from most of the code tree during the
development of 2.3 back in 2014.
This removes all the references to security_context_t in sepgsql/ to be
consistent with SELinux, fixing the warnings. Note that this does not
impact the minimum version of libselinux supported.
This has been applied first as
|
3 years ago |
a9fdb48b73 |
pg_stat_statements: fetch stmt location/length before it disappears.
When executing a utility statement, we must fetch everything we need out of the PlannedStmt data structure before calling standard_ProcessUtility. In certain cases (possibly only ROLLBACK in extended query protocol), that data structure will get freed during command execution. The situation is probably often harmless in production builds, but in debug builds we intentionally overwrite the freed memory with garbage, leading to picking up garbage values of statement location and length, typically causing an assertion failure later in pg_stat_statements. In non-debug builds, if something did go wrong it would likely lead to storing garbage for the query string. Report and fix by zhaoqigui (with cosmetic adjustments by me). It's an old problem, so back-patch to all supported versions. Discussion: https://postgr.es/m/17663-a344fd0675f92128@postgresql.org Discussion: https://postgr.es/m/1667307420050.56657@hundsun.com |
3 years ago |
25f7be1ca2 |
Fix assertion failures while processing NEW_CID record in logical decoding.
When the logical decoding restarts from NEW_CID, since there is no association between the top transaction and its subtransaction, both are created as top transactions and have the same LSN. This caused the assertion failure in AssertTXNLsnOrder(). This patch skips the assertion check until we reach the LSN at which we start decoding the contents of the transaction, specifically start_decoding_at LSN in SnapBuild. This is okay because we don't guarantee to make the association between top transaction and subtransaction until we try to decode the actual contents of transaction. The ordering of the records prior to the start_decoding_at LSN should have been checked before the restart. The other assertion failure is due to the reason that we forgot to track that we have considered top-level transaction id in the list of catalog changing transactions that were committed when one of its subtransactions is marked as containing catalog change. Reported-by: Tomas Vondra, Osumi Takamichi Author: Masahiko Sawada, Kuroda Hayato Reviewed-by: Amit Kapila, Dilip Kumar, Kuroda Hayato, Kyotaro Horiguchi, Masahiko Sawada Backpatch-through: 10 Discussion: https://postgr.es/m/a89b46b6-0239-2fd5-71a9-b19b1f7a7145%40enterprisedb.com Discussion: https://postgr.es/m/TYCPR01MB83733C6CEAE47D0280814D5AED7A9%40TYCPR01MB8373.jpnprd01.prod.outlook.com |
3 years ago |
6749d4e8c7 |
postgres_fdw: Avoid 'variable not found in subplan target list' error.
The tlist of the EvalPlanQual outer plan for a ForeignScan node is adjusted to produce a tuple whose descriptor matches the scan tuple slot for the ForeignScan node. But in the case where the outer plan contains an extra Sort node, if the new tlist contained columns required only for evaluating PlaceHolderVars or columns required only for evaluating local conditions, this would cause setrefs.c to fail with the error. The cause of this is that when creating the outer plan by injecting the Sort node into an alternative local join plan that could emit such extra columns as well, we fail to arrange for the outer plan to propagate them up through the Sort node, causing setrefs.c to fail to match up them in the new tlist to what is available from the outer plan. Repair. Per report from Alexander Pyhalov. Richard Guo and Etsuro Fujita, reviewed by Alexander Pyhalov and Tom Lane. Backpatch to all supported versions. Discussion: http://postgr.es/m/cfb17bf6dfdf876467bd5ef533852d18%40postgrespro.ru |
3 years ago |
a61095aa79 |
Reject bogus output from uuid_create(3).
When using the BSD UUID functions, contrib/uuid-ossp expects uuid_create() to produce a version-1 UUID. FreeBSD still does so, but in recent NetBSD releases that function produces a version-4 (random) UUID instead. That's not acceptable for our purposes: if the user wanted v4 she would have asked for v4, not v1. Hence, check the version digit and complain if it's not '1'. Also drop the documentation's claim that the NetBSD implementation is usable. It might be, depending on which OS version you're using, but we're not going to get into that kind of detail. (Maybe someday we should ditch all these external libraries and just write our own UUID code, but today is not that day.) Nazir Bilal Yavuz, with cosmetic adjustments and docs by me. Backpatch to all supported versions. Discussion: https://postgr.es/m/3848059.1661038772@sss.pgh.pa.us Discussion: https://postgr.es/m/17358-89806e7420797025@postgresql.org |
3 years ago |
547b963683 |
Fix catalog lookup with the wrong snapshot during logical decoding.
Previously, we relied on HEAP2_NEW_CID records and XACT_INVALIDATION records to know if the transaction has modified the catalog, and that information is not serialized to snapshot. Therefore, after the restart, if the logical decoding decodes only the commit record of the transaction that has actually modified a catalog, we will miss adding its XID to the snapshot. Thus, we will end up looking at catalogs with the wrong snapshot. To fix this problem, this changes the snapshot builder so that it remembers the last-running-xacts list of the decoded RUNNING_XACTS record after restoring the previously serialized snapshot. Then, we mark the transaction as containing catalog changes if it's in the list of initial running transactions and its commit record has XACT_XINFO_HAS_INVALS. To avoid ABI breakage, we store the array of the initial running transactions in the static variables InitialRunningXacts and NInitialRunningXacts, instead of storing those in SnapBuild or ReorderBuffer. This approach has a false positive; we could end up adding the transaction that didn't change catalog to the snapshot since we cannot distinguish whether the transaction has catalog changes only by checking the COMMIT record. It doesn't have the information on which (sub) transaction has catalog changes, and XACT_XINFO_HAS_INVALS doesn't necessarily indicate that the transaction has catalog change. But that won't be a problem since we use snapshot built during decoding only to read system catalogs. On the master branch, we took a more future-proof approach by writing catalog modifying transactions to the serialized snapshot which avoids the above false positive. But we cannot backpatch it because of a change in the SnapBuild. Reported-by: Mike Oh Author: Masahiko Sawada Reviewed-by: Amit Kapila, Shi yu, Takamichi Osumi, Kyotaro Horiguchi, Bertrand Drouvot, Ahsan Hadi Backpatch-through: 10 Discussion: https://postgr.es/m/81D0D8B0-E7C4-4999-B616-1E5004DBDCD2%40amazon.com |
3 years ago |
6b67db10c3 |
Be more wary about 32-bit integer overflow in pg_stat_statements.
We've heard a couple of reports of people having trouble with
multi-gigabyte-sized query-texts files. It occurred to me that on
32-bit platforms, there could be an issue with integer overflow
of calculations associated with the total query text size.
Address that with several changes:
1. Limit pg_stat_statements.max to INT_MAX / 2 not INT_MAX.
The hashtable code will bound it to that anyway unless "long"
is 64 bits. We still need overflow guards on its use, but
this helps.
2. Add a check to prevent extending the query-texts file to
more than MaxAllocHugeSize. If it got that big, qtext_load_file
would certainly fail, so there's not much point in allowing it.
Without this, we'd need to consider whether extent, query_offset,
and related variables shouldn't be off_t not size_t.
3. Adjust the comparisons in need_gc_qtexts() to be done in 64-bit
arithmetic on all platforms. It appears possible that under duress
those multiplications could overflow 32 bits, yielding a false
conclusion that we need to garbage-collect the texts file, which
could lead to repeatedly garbage-collecting after every hash table
insertion.
Per report from Bruno da Silva. I'm not convinced that these
issues fully explain his problem; there may be some other bug that's
contributing to the query-texts file becoming so large in the first
place. But it did get that big, so #2 is a reasonable defense,
and #3 could explain the reported performance difficulties.
(See also commit
|
3 years ago |
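Point 3 boils down to widening before multiplying; a generic sketch with hypothetical variable names:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Decide whether the query-texts file needs garbage collection.  Doing the
 * arithmetic in 64 bits keeps "mean size * count * 2" from wrapping around
 * on 32-bit platforms and forcing a GC after every insertion. */
static bool
need_gc_sketch(size_t extent, double mean_query_len, int64_t entry_count)
{
	uint64_t	budget = (uint64_t) (mean_query_len * entry_count) * 2;

	return (uint64_t) extent > budget;
}
```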
6230bd7df4 |
postgres_fdw: set search_path to 'pg_catalog' while deparsing constants.
The motivation for this is to ensure successful transmission of the values of constants of regconfig and other reg* types. The remote will be reading them with search_path = 'pg_catalog', so schema qualification is necessary when referencing objects in other schemas. Per bug #17483 from Emmanuel Quincerot. Back-patch to all supported versions. (There's some other stuff to do here, but it's less back-patchable.) Discussion: https://postgr.es/m/1423433.1652722406@sss.pgh.pa.us |
3 years ago |
8782ce49e4 |
CREATE INDEX: use the original userid for more ACL checks.
Commit
|
3 years ago |
60ca2e8418 |
Silence compiler warnings from some older compilers.
Since |
3 years ago |
35edcc0cee |
Make relation-enumerating operations be security-restricted operations.
When a feature enumerates relations and runs functions associated with all found relations, the feature's user shall not need to trust every user having permission to create objects. BRIN-specific functionality in autovacuum neglected to account for this, as did pg_amcheck and CLUSTER. An attacker having permission to create non-temp objects in at least one schema could execute arbitrary SQL functions under the identity of the bootstrap superuser. CREATE INDEX (not a relation-enumerating operation) and REINDEX protected themselves too late. This change extends to the non-enumerating amcheck interface. Back-patch to v10 (all supported versions). Sergey Shinderuk, reviewed (in earlier versions) by Alexander Lakhin. Reported by Alexander Lakhin. Security: CVE-2022-1552 |
3 years ago |
4caa85e4f3 |
Fix back-patch of "Under has_wal_read_bug, skip .../001_wal.pl."
Per buildfarm members tadarida, snapper, and kittiwake. Back-patch to v10 (all supported versions). |
3 years ago |
23213f53ba |
Under has_wal_read_bug, skip contrib/bloom/t/001_wal.pl.
Per buildfarm members snapper and kittiwake. Back-patch to v10 (all supported versions). Discussion: https://postgr.es/m/20220116210241.GC756210@rfd.leadboat.com |
3 years ago |
2275d044d0 |
pageinspect: Fix handling of all-zero pages
Getting from get_raw_page() an all-zero page is considered as a valid case by the buffer manager and it can happen for example when finding a corrupted page with zero_damaged_pages enabled (using zero_damaged_pages to look at corrupted pages happens), or after a crash when a relation file is extended before any WAL for its new data is generated (before a vacuum or autovacuum job comes in to do some cleanup). However, all the functions of pageinspect, as of the index AMs (except hash that has its own idea of new pages), heap, the FSM or the page header have never worked with all-zero pages, causing various crashes when going through the page internals. This commit changes all the pageinspect functions to be compliant with all-zero pages, where the choice is made to return NULL or no rows for SRFs when finding a new page. get_raw_page() still works the same way, returning a batch of zeros in the bytea of the page retrieved. A hard error could be used but NULL, while more invasive, is useful when scanning relation files in full to get a batch of results for a single relation in one query. Tests are added for all the code paths impacted. Reported-by: Daria Lepikhova Author: Michael Paquier Discussion: https://postgr.es/m/561e187b-3549-c8d5-03f5-525c14e65bd0@postgrespro.ru Backpatch-through: 10 |
3 years ago |
79df1d20c5 |
Fix postgres_fdw to check shippability of sort clauses properly.
postgres_fdw would push ORDER BY clauses to the remote side without
verifying that the sort operator is safe to ship. Moreover, it failed
to print a suitable USING clause if the sort operator isn't default
for the sort expression's type. The net result of this is that the
remote sort might not have anywhere near the semantics we expect,
which'd be disastrous for locally-performed merge joins in particular.
We addressed similar issues in the context of ORDER BY within an
aggregate function call in commit
|
4 years ago |
3d4d6dee07 |
pageinspect: Add more sanity checks to prevent out-of-bound reads
A couple of code paths use the special area on the page passed by the function caller, expecting to find some data in it. However, feeding an incorrect page can lead to out-of-bound reads when trying to access the page special area (like a heap page that has no special area, leading PageGetSpecialPointer() to grab a pointer outside the allocated page). The functions used for hash and btree indexes have some protection already against that, while some other functions using a relation OID as argument would make sure that the access method involved is correct, but functions taking in input a raw page without knowing the relation the page is attached to would run into problems. This commit improves the set of checks used in the code paths of BRIN, btree (including one check if a leaf page is found with a non-zero level), GIN and GiST to verify that the page given in input has a special area size that fits with each access method, which is done though PageGetSpecialSize(), becore calling PageGetSpecialPointer(). The scope of the checks done is limited to work with pages that one would pass after getting a block with get_raw_page(), as it is possible to craft byteas that could bypass existing code paths. Having too many checks would also impact the usability of pageinspect, as the existing code is very useful to look at the content details in a corrupted page, so the focus is really to avoid out-of-bound reads as this is never a good thing even with functions whose execution is limited to superusers. The safest approach could be to rework the functions so as these fetch a block using a relation OID and a block number, but there are also cases where using a raw page is useful. Tests are added to cover all the code paths that needed such checks, and an error message for hash indexes is reworded to fit better with what this commit adds. Reported-By: Alexander Lakhin Author: Julien Rouhaud, Michael Paquier Discussion: https://postgr.es/m/16527-ef7606186f0610a1@postgresql.org Discussion: https://postgr.es/m/561e187b-3549-c8d5-03f5-525c14e65bd0@postgrespro.ru Backpatch-through: 10 |
4 years ago |
bad202c615 |
Fix default signature length for gist_ltree_ops
|
4 years ago |
028a3c6b1c |
pageinspect: Fix memory context allocation of page in brin_revmap_data()
This caused the function to fail, as the aligned copy of the raw page
given by the function caller was not saved in the correct memory
context, which needs to be multi_call_memory_ctx in this case.
Issue introduced by
|
4 years ago |
d3a9b83c30 |
pageinspect: Fix handling of page sizes and AM types
This commit fixes a set of issues related to the use of the SQL functions in this module when the caller is able to pass down raw page data as input argument: - The page size check was fuzzy in a couple of places, sometimes looking after only a sub-range, but what we are looking for is an exact match on BLCKSZ. After considering a few options here, I have settled down to do a generalization of get_page_from_raw(). Most of the SQL functions already used that, and this is not strictly required if not accessing an 8-byte-wide value from a raw page, but this feels safer in the long run for alignment-picky environment, particularly if a code path begins to access such values. This also reduces the number of strings that need to be translated. - The BRIN function brin_page_items() uses a Relation but it did not check the access method of the opened index, potentially leading to crashes. All the other functions in need of a Relation already did that. - Some code paths could fail on elog(), but we should to use ereport() for failures that can be triggered by the user. Tests are added to stress all the cases that are fixed as of this commit, with some junk raw pages (\set VERBOSITY ensures that this works across all page sizes) and unexpected index types when functions open relations. Author: Michael Paquier, Justin Prysby Discussion: https://postgr.es/m/20220218030020.GA1137@telsasoft.com Backpatch-through: 10 |
4 years ago |
29ec94efd0 |
Introduce PG_TEST_TIMEOUT_DEFAULT for TAP suite non-elapsing timeouts.
Slow hosts may avoid load-induced, spurious failures by setting environment variable PG_TEST_TIMEOUT_DEFAULT to some number of seconds greater than 180. Developers may see faster failures by setting that environment variable to some lesser number of seconds. In tests, write $PostgreSQL::Test::Utils::timeout_default wherever the convention has been to write 180. This change raises the default for some briefer timeouts. Back-patch to v10 (all supported versions). Discussion: https://postgr.es/m/20220218052842.GA3627003@rfd.leadboat.com |
4 years ago |
1a027e6b7b |
Clean up assorted failures under clang's -fsanitize=undefined checks.
Most of these are cases where we could call memcpy() or other libc
functions with a NULL pointer and a zero count, which is forbidden
by POSIX even though every production version of libc allows it.
We've fixed such things before in a piecemeal way, but apparently
never made an effort to try to get them all. I don't claim that
this patch does so either, but it gets every failure I observe in
check-world, using clang 12.0.1 on current RHEL8.
numeric.c has a different issue that the sanitizer doesn't like:
"ln(-1.0)" will compute log10(0) and then try to assign the
resulting -Inf to an integer variable. We don't actually use the
result in such a case, so there's no live bug.
Back-patch to all supported branches, with the idea that we might
start running a buildfarm member that tests this case. This includes
back-patching
|
4 years ago |
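The typical shape of these fixes is a guard in front of the libc call; a minimal sketch:

```c
#include <stddef.h>
#include <string.h>

/* memcpy() with a NULL pointer and zero count is undefined behavior per the
 * C standard and POSIX, even though every production libc tolerates it; skip
 * the call entirely for zero-length copies so -fsanitize=undefined stays quiet. */
static void
safe_copy(void *dst, const void *src, size_t len)
{
	if (len > 0)
		memcpy(dst, src, len);
}
```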
caa231be97 |
WAL log unchanged toasted replica identity key attributes.
Currently, during UPDATE, the unchanged replica identity key attributes are not logged separately because they are getting logged as part of the new tuple. But if they are stored externally then the untoasted values are not getting logged as part of the new tuple and logical replication won't be able to replicate such UPDATEs. So we need to log such attributes as part of the old_key_tuple during UPDATE. Reported-by: Haiying Tang Author: Dilip Kumar and Amit Kapila Reviewed-by: Alvaro Herrera, Haiying Tang, Andres Freund Backpatch-through: 10 Discussion: https://postgr.es/m/OS0PR01MB611342D0A92D4F4BF26C0F47FB229@OS0PR01MB6113.jpnprd01.prod.outlook.com |
4 years ago |
b3558cc96c |
Use Test::Builder::todo_start(), replacing $::TODO.
Some pre-2017 Test::More versions need perfect $Test::Builder::Level maintenance to find the variable. Buildfarm member snapper reported an overall failure that the file intended to hide via the TODO construct. That trouble was reachable in v11 and v10. For later branches, this serves as defense in depth. Back-patch to v10 (all supported versions). Discussion: https://postgr.es/m/20220202055556.GB2745933@rfd.leadboat.com |
4 years ago |
785a28e422 |
On sparc64+ext4, suppress test failures from known WAL read failure.
Buildfarm members kittiwake, tadarida and snapper began to fail frequently when commits |
4 years ago |
823d4c7e25 |
Fix results of index-only scans on btree_gist char(N) indexes.
If contrib/btree_gist is used to make a GIST index on a char(N) (bpchar) column, and that column is retrieved via an index-only scan, what came out had all trailing spaces removed. Since that doesn't happen in any other kind of table scan, this is clearly a bug. The cause is that gbt_bpchar_compress() strips trailing spaces (using rtrim1) before a new index entry is made. That was probably a good idea when this code was first written, but since we invented index-only scans, it's not so good. One answer could be to mark this opclass as incapable of index-only scans. But to do so, we'd need an extension module version bump, followed by manual action by DBAs to install the updated version of btree_gist. And it's not really a desirable place to end up, anyway. Instead, let's fix the code by removing the unwanted space-stripping action and adjusting the opclass's comparison logic to ignore trailing spaces as bpchar normally does. This will not hinder cases that work today, since index searches with this logic will act the same whether trailing spaces are stored or not. It will not by itself fix the problem of getting space-stripped results from index-only scans, of course. Users who care about that can REINDEX affected indexes after installing this update, to immediately replace all improperly-truncated index entries. Otherwise, it can be expected that the index's behavior will change incrementally as old entries are replaced by new ones. Per report from Alexander Lakhin. Back-patch to all supported branches. Discussion: https://postgr.es/m/696c995b-b37f-5526-f45d-04abe713179f@gmail.com |
4 years ago |
3f710fc2b4 |
Remove assertion for replication origins in PREPARE TRANSACTION
When using replication origins, pg_replication_origin_xact_setup() is an
optional choice to be able to set a LSN and a timestamp to mark the
origin, which would be additionally added to WAL for transaction commits
or aborts (including 2PC transactions). An assertion in the code path
of PREPARE TRANSACTION assumed that this data should always be set, so
it would trigger when using replication origins without setting up an
origin LSN. Some tests are added to cover this kind of scenario more thoroughly.
Oversight in commit
|
4 years ago |
f89015b63f |
postgres_fdw: Fix unexpected reporting of empty message.
pgfdw_report_error() in postgres_fdw gets a message from PGresult or PGconn to report an error received from a remote server. Previously if it could get a message from neither of them, it reported empty message unexpectedly. The cause of this issue was that pgfdw_report_error() didn't handle properly the case where no message could be obtained and its local variable message_primary was set to '\0'. This commit improves pgfdw_report_error() so that it reports the message "could not obtain ..." when it gets no message and message_primary is set to '\0'. This is the same behavior as when message_primary is NULL. dblink_res_error() in dblink has the same issue, so this commit also improves it in the same way. Back-patch to all supported branches. Author: Fujii Masao Reviewed-by: Bharath Rupireddy Discussion: https://postgr.es/m/477c16c8-7ea4-20fc-38d5-ed3a77ed616c@oss.nttdata.com |
4 years ago |
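The gist of the fix is treating an empty primary message the same as a missing one; a sketch with a hypothetical helper, not the postgres_fdw code verbatim:

```c
#include <stddef.h>

/* Fall back to a generic message when libpq gives us nothing usable, whether
 * that is a NULL pointer or an empty string. */
static const char *
error_message_or_fallback(const char *message_primary)
{
	if (message_primary == NULL || message_primary[0] == '\0')
		return "could not obtain message string for remote error";
	return message_primary;
}
```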
e477642a1b |
Avoid some other O(N^2) hazards in list manipulation.
In the same spirit as
|
4 years ago |
3a5b313ce7 |
Don't try to read a multi-GB pg_stat_statements file in one call.
Windows fails on a request to read() more than INT_MAX bytes, and perhaps other platforms could have similar issues. Let's adjust this code to read at most 1GB per call. (One would not have thought the file could get that big, but now we have a field report of trouble, so it can. We likely ought to add some mechanism to limit the size of the query-texts file separately from the size of the hash table. That is not this patch, though.) Per bug #17254 from Yusuke Egashira. It's been like this for awhile, so back-patch to all supported branches. Discussion: https://postgr.es/m/17254-a926c89dc03375c2@postgresql.org |
4 years ago |
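Reading in bounded chunks rather than one giant read() looks roughly like this sketch (plain POSIX I/O, not the pg_stat_statements code itself):

```c
#include <stdbool.h>
#include <stddef.h>
#include <unistd.h>

#define MAX_READ_CHUNK (1024 * 1024 * 1024)		/* at most 1GB per read() */

/* Read exactly "size" bytes, never asking the kernel for more than 1GB at a
 * time, since Windows (and possibly other platforms) fails read() requests
 * larger than INT_MAX bytes. */
static bool
read_all(int fd, char *buf, size_t size)
{
	size_t		done = 0;

	while (done < size)
	{
		size_t		want = size - done;
		ssize_t		got;

		if (want > MAX_READ_CHUNK)
			want = MAX_READ_CHUNK;
		got = read(fd, buf + done, want);
		if (got <= 0)
			return false;		/* error or unexpected EOF */
		done += (size_t) got;
	}
	return true;
}
```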
5a4b8a8a72 |
Improve contrib/amcheck's tests for CREATE INDEX CONCURRENTLY.
Commits |
4 years ago |
a9d0a54094 |
Fix CREATE INDEX CONCURRENTLY for the newest prepared transactions.
The purpose of commit
|
4 years ago |
2e33b43599 |
Avoid race in RelationBuildDesc() affecting CREATE INDEX CONCURRENTLY.
CIC and REINDEX CONCURRENTLY assume backends see their catalog changes no later than each backend's next transaction start. That failed to hold when a backend absorbed a relevant invalidation in the middle of running RelationBuildDesc() on the CIC index. Queries that use the resulting index can silently fail to find rows. Fix this for future index builds by making RelationBuildDesc() loop until it finishes without accepting a relevant invalidation. It may be necessary to reindex to recover from past occurrences; REINDEX CONCURRENTLY suffices. Back-patch to 9.6 (all supported versions). Noah Misch and Andrey Borodin, reviewed (in earlier versions) by Andres Freund. Discussion: https://postgr.es/m/20210730022548.GA1940096@gust.leadboat.com |
4 years ago |
cdab0a80d2 |
postgres_fdw: Move comments about elog level in (sub)abort cleanup.
The comments were misplaced when adding postgres_fdw. Fix that by moving the comments to more appropriate functions. Author: Etsuro Fujita Backpatch-through: 9.6 Discussion: https://postgr.es/m/CAPmGK164sAXQtC46mDFyu6d-T25Mzvh5qaRNkit06VMmecYnOA%40mail.gmail.com |
4 years ago |
aee83f39a8 |
Fix null-pointer crash in postgres_fdw's conversion_error_callback.
Commit
|
4 years ago |
cf26a8d8a7 |
Fix instability in contrib/bloom TAP tests.
It turns out that the instability complained of in commit |
4 years ago |
8e7199453b |
Add alternative output for OpenSSL 3 without legacy loaded
OpenSSL 3 introduced the concept of providers to support modularization, and moved the outdated ciphers to the new legacy provider. In case it's not loaded in the users openssl.cnf file there will be a lot of regress test failures, so add alternative outputs covering those. Also document the need to load the legacy provider in order to use older ciphers with OpenSSL-enabled pgcrypto. This will be backpatched to all supported version once there is sufficient testing in the buildfarm of OpenSSL 3. Reviewed-by: Michael Paquier Discussion: https://postgr.es/m/FEF81714-D479-4512-839B-C769D2605F8A@yesql.se Backpatch-through: 9.6 |
4 years ago |
135d8687ad |
Disable OpenSSL EVP digest padding in pgcrypto
The PX layer in pgcrypto is handling digest padding on its own uniformly for all backend implementations. Starting with OpenSSL 3.0.0, DecryptUpdate doesn't flush the last block in case padding is enabled so explicitly disable it as we don't use it. This will be backpatched to all supported version once there is sufficient testing in the buildfarm of OpenSSL 3. Reviewed-by: Peter Eisentraut, Michael Paquier Discussion: https://postgr.es/m/FEF81714-D479-4512-839B-C769D2605F8A@yesql.se Backpatch-through: 9.6 |
4 years ago |
a69e1506f6 |
pgcrypto: Check for error return of px_cipher_decrypt()
This has previously not been a problem (that anyone ever reported), but in future OpenSSL versions (3.0.0), where legacy ciphers are/can be disabled, this is the place where this is reported. So we need to catch the error here, otherwise the higher-level functions would return garbage. The nearby encryption code already handled errors similarly. Author: Peter Eisentraut <peter@eisentraut.org> Reviewed-by: Daniel Gustafsson <daniel@yesql.se> Discussion: https://www.postgresql.org/message-id/9e9c431c-0adc-7a6d-9b1a-915de1ba3fe7@enterprisedb.com Backpatch-through: 9.6 |
4 years ago |