/*-----------------------------------------------------------------------
 *
 * PostgreSQL locale utilities
 *
 * Portions Copyright (c) 2002-2025, PostgreSQL Global Development Group
 *
 * src/backend/utils/adt/pg_locale.c
 *
 *-----------------------------------------------------------------------
 */

/*----------
 * Here is how the locale stuff is handled: LC_COLLATE and LC_CTYPE
 * are fixed at CREATE DATABASE time, stored in pg_database, and cannot
 * be changed.  Thus, the effects of strcoll(), strxfrm(), isupper(),
 * toupper(), etc. are always in the same fixed locale.
 *
 * LC_MESSAGES is settable at run time and will take effect
 * immediately.
 *
 * The other categories, LC_MONETARY, LC_NUMERIC, and LC_TIME are
 * permanently set to "C", and then we use temporary locale_t
 * objects when we need to look up locale data based on the GUCs
 * of the same name.  Information is cached when the GUCs change.
 * The cached information is only used by the formatting functions
 * (to_char, etc.) and the money type.  For the user, this should all be
 * transparent.
 *----------
*/
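
/*
 * Illustrative sketch, not part of the original sources: the "temporary
 * locale_t object" pattern described above, assuming a POSIX platform with
 * newlocale()/freelocale().  The real implementations live in the functions
 * below and in pg_locale_libc.c.
 *
 *		locale_t	loc;
 *
 *		loc = newlocale(LC_MONETARY_MASK, locale_monetary, (locale_t) 0);
 *		if (loc != (locale_t) 0)
 *		{
 *			... look up locale data under 'loc' (e.g. via localeconv_l(),
 *			... where available), cache what is needed, then discard it:
 *			freelocale(loc);
 *		}
 */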

#include "postgres.h"

#include <time.h>

#include "access/htup_details.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_database.h"
#include "common/hashfn.h"
#include "common/string.h"
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "utils/builtins.h"
#include "utils/guc_hooks.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_locale.h"
#include "utils/relcache.h"
#include "utils/syscache.h"

#ifdef WIN32
#include <shlwapi.h>
#endif

/* Error triggered for locale-sensitive subroutines */
#define PGLOCALE_SUPPORT_ERROR(provider) \
	elog(ERROR, "unsupported collprovider for %s: %c", __func__, provider)

/*
 * This should be large enough that most strings will fit, but small enough
 * that we feel comfortable putting it on the stack
 */
#define TEXTBUFLEN 1024

#define MAX_L10N_DATA 80

/* pg_locale_builtin.c */
extern pg_locale_t create_pg_locale_builtin(Oid collid, MemoryContext context);
extern char *get_collation_actual_version_builtin(const char *collcollate);

/* pg_locale_icu.c */
#ifdef USE_ICU
extern UCollator *pg_ucol_open(const char *loc_str);
extern char *get_collation_actual_version_icu(const char *collcollate);
#endif
extern pg_locale_t create_pg_locale_icu(Oid collid, MemoryContext context);

/* pg_locale_libc.c */
extern pg_locale_t create_pg_locale_libc(Oid collid, MemoryContext context);
extern char *get_collation_actual_version_libc(const char *collcollate);

/* GUC settings */
char *locale_messages;
char *locale_monetary;
char *locale_numeric;
char *locale_time;

int icu_validation_level = WARNING;

/*
 * lc_time localization cache.
 *
 * We use only the first 7 or 12 entries of these arrays.  The last array
 * element is left as NULL for the convenience of outside code that wants
 * to sequentially scan these arrays.
 */
char *localized_abbrev_days[7 + 1];
char *localized_full_days[7 + 1];
char *localized_abbrev_months[12 + 1];
char *localized_full_months[12 + 1];
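
/*
 * Illustrative sketch, not part of the original sources: because of the
 * trailing NULL entry, outside code can walk these arrays without knowing
 * whether they hold 7 or 12 items, e.g. (consume_name() is a hypothetical
 * stand-in for the caller's per-entry work):
 *
 *		for (int i = 0; localized_full_months[i] != NULL; i++)
 *			consume_name(localized_full_months[i]);
 */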

/* is the database's LC_CTYPE the C locale? */
bool database_ctype_is_c = false;

static pg_locale_t default_locale = NULL;

/* indicates whether locale information cache is valid */
static bool CurrentLocaleConvValid = false;
static bool CurrentLCTimeValid = false;

/* Cache for collation-related knowledge */

typedef struct
{
	Oid			collid;			/* hash key: pg_collation OID */
	pg_locale_t locale;			/* locale_t struct, or 0 if not valid */

	/* needed for simplehash */
	uint32		hash;
	char		status;
} collation_cache_entry;

#define SH_PREFIX collation_cache
#define SH_ELEMENT_TYPE collation_cache_entry
#define SH_KEY_TYPE Oid
#define SH_KEY collid
#define SH_HASH_KEY(tb, key) murmurhash32((uint32) key)
#define SH_EQUAL(tb, a, b) (a == b)
#define SH_GET_HASH(tb, a) a->hash
#define SH_SCOPE static inline
#define SH_STORE_HASH
#define SH_DECLARE
#define SH_DEFINE
#include "lib/simplehash.h"

static MemoryContext CollationCacheContext = NULL;
static collation_cache_hash *CollationCache = NULL;

/*
 * The collation cache is often accessed repeatedly for the same collation, so
 * remember the last one used.
 */
static Oid last_collation_cache_oid = InvalidOid;
static pg_locale_t last_collation_cache_locale = NULL;

#if defined(WIN32) && defined(LC_MESSAGES)
static char *IsoLocaleName(const char *);
#endif


/*
 * pg_perm_setlocale
 *
 * This wraps the libc function setlocale(), with two additions.  First, when
 * changing LC_CTYPE, update gettext's encoding for the current message
 * domain.  GNU gettext automatically tracks LC_CTYPE on most platforms, but
 * not on Windows.  Second, if the operation is successful, the corresponding
 * LC_XXX environment variable is set to match.  By setting the environment
 * variable, we ensure that any subsequent use of setlocale(..., "") will
 * preserve the settings made through this routine.  Of course, LC_ALL must
 * also be unset to fully ensure that, but that has to be done elsewhere after
 * all the individual LC_XXX variables have been set correctly.  (Thank you
 * Perl for making this kluge necessary.)
 */
char *
pg_perm_setlocale(int category, const char *locale)
{
	char	   *result;
	const char *envvar;

#ifndef WIN32
	result = setlocale(category, locale);
#else

	/*
	 * On Windows, setlocale(LC_MESSAGES) does not work, so just assume that
	 * the given value is good and set it in the environment variables.  We
	 * must ignore attempts to set to "", which means "keep using the old
	 * environment value".
	 */
#ifdef LC_MESSAGES
	if (category == LC_MESSAGES)
	{
		result = (char *) locale;
		if (locale == NULL || locale[0] == '\0')
			return result;
	}
	else
#endif
		result = setlocale(category, locale);
#endif							/* WIN32 */

	if (result == NULL)
		return result;			/* fall out immediately on failure */

	/*
	 * Use the right encoding in translated messages.  Under ENABLE_NLS, let
	 * pg_bind_textdomain_codeset() figure it out.  Under !ENABLE_NLS, message
	 * format strings are ASCII, but database-encoding strings may enter the
	 * message via %s.  This makes the overall message encoding equal to the
	 * database encoding.
	 */
	if (category == LC_CTYPE)
	{
		static char save_lc_ctype[LOCALE_NAME_BUFLEN];

		/* copy setlocale() return value before callee invokes it again */
		strlcpy(save_lc_ctype, result, sizeof(save_lc_ctype));
		result = save_lc_ctype;

#ifdef ENABLE_NLS
		SetMessageEncoding(pg_bind_textdomain_codeset(textdomain(NULL)));
#else
		SetMessageEncoding(GetDatabaseEncoding());
#endif
	}

	switch (category)
	{
		case LC_COLLATE:
			envvar = "LC_COLLATE";
			break;
		case LC_CTYPE:
			envvar = "LC_CTYPE";
			break;
#ifdef LC_MESSAGES
		case LC_MESSAGES:
			envvar = "LC_MESSAGES";
#ifdef WIN32
			result = IsoLocaleName(locale);
			if (result == NULL)
				result = (char *) locale;
			elog(DEBUG3, "IsoLocaleName() executed; locale: \"%s\"", result);
#endif							/* WIN32 */
			break;
#endif							/* LC_MESSAGES */
		case LC_MONETARY:
			envvar = "LC_MONETARY";
			break;
		case LC_NUMERIC:
			envvar = "LC_NUMERIC";
			break;
		case LC_TIME:
			envvar = "LC_TIME";
			break;
		default:
			elog(FATAL, "unrecognized LC category: %d", category);
			return NULL;		/* keep compiler quiet */
	}

	if (setenv(envvar, result, 1) != 0)
		return NULL;

	return result;
}
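
/*
 * Illustrative sketch, not part of the original sources: a caller pins a
 * category and relies on the environment-variable side effect described
 * above, e.g. (hypothetical call, not copied from any real call site):
 *
 *		if (pg_perm_setlocale(LC_MONETARY, "C") == NULL)
 *			ereport(FATAL,
 *					(errmsg("could not set LC_MONETARY to \"%s\"", "C")));
 *
 * Afterwards getenv("LC_MONETARY") reports "C" as well, so a later
 * setlocale(..., "") cannot silently undo the setting (provided LC_ALL is
 * unset, as noted above).
 */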

/*
 * Is the locale name valid for the locale category?
 *
 * If successful, and canonname isn't NULL, a palloc'd copy of the locale's
 * canonical name is stored there.  This is especially useful for figuring out
 * what locale name "" means (ie, the server environment value).  (Actually,
 * it seems that on most implementations that's the only thing it's good for;
 * we could wish that setlocale gave back a canonically spelled version of
 * the locale name, but typically it doesn't.)
 */
bool
check_locale(int category, const char *locale, char **canonname)
{
	char	   *save;
	char	   *res;

	/* Don't let Windows' non-ASCII locale names in. */
	if (!pg_is_ascii(locale))
	{
		ereport(WARNING,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("locale name \"%s\" contains non-ASCII characters",
						locale)));
		return false;
	}

	if (canonname)
		*canonname = NULL;		/* in case of failure */

	save = setlocale(category, NULL);
	if (!save)
		return false;			/* won't happen, we hope */

	/* save may be pointing at a modifiable scratch variable, see above. */
	save = pstrdup(save);

	/* set the locale with setlocale, to see if it accepts it. */
	res = setlocale(category, locale);

	/* save canonical name if requested. */
	if (res && canonname)
		*canonname = pstrdup(res);

	/* restore old value. */
	if (!setlocale(category, save))
		elog(WARNING, "failed to restore old locale \"%s\"", save);
	pfree(save);

	/* Don't let Windows' non-ASCII locale names out. */
	if (canonname && *canonname && !pg_is_ascii(*canonname))
	{
		ereport(WARNING,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("locale name \"%s\" contains non-ASCII characters",
						*canonname)));
		pfree(*canonname);
		*canonname = NULL;
		return false;
	}

	return (res != NULL);
}
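
/*
 * Illustrative sketch, not part of the original sources: resolving what
 * locale name "" means, per the header comment above (canonname receives a
 * palloc'd copy):
 *
 *		char	   *canonname;
 *
 *		if (check_locale(LC_CTYPE, "", &canonname))
 *			elog(DEBUG1, "environment LC_CTYPE is \"%s\"", canonname);
 */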

/*
 * GUC check/assign hooks
 *
 * For most locale categories, the assign hook doesn't actually set the locale
 * permanently, just reset flags so that the next use will cache the
 * appropriate values.  (See explanation at the top of this file.)
 *
 * Note: we accept value = "" as selecting the postmaster's environment
 * value, whatever it was (so long as the environment setting is legal).
 * This will have been locked down by an earlier call to pg_perm_setlocale.
*/
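
/*
 * Illustrative sketch, not part of the original sources: these hook pairs
 * are wired up in guc_tables.c; a string GUC entry using them looks roughly
 * like the following (approximate shape only, not copied verbatim from
 * guc_tables.c):
 *
 *		{
 *			{"lc_monetary", PGC_USERSET, CLIENT_CONN_LOCALE,
 *				gettext_noop("Sets the locale for formatting monetary amounts."),
 *				NULL
 *			},
 *			&locale_monetary,
 *			"C",
 *			check_locale_monetary, assign_locale_monetary, NULL
 *		},
 */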

bool
check_locale_monetary(char **newval, void **extra, GucSource source)
{
	return check_locale(LC_MONETARY, *newval, NULL);
}

void
assign_locale_monetary(const char *newval, void *extra)
{
	CurrentLocaleConvValid = false;
}

bool
check_locale_numeric(char **newval, void **extra, GucSource source)
{
	return check_locale(LC_NUMERIC, *newval, NULL);
}

void
assign_locale_numeric(const char *newval, void *extra)
{
	CurrentLocaleConvValid = false;
}

bool
check_locale_time(char **newval, void **extra, GucSource source)
{
	return check_locale(LC_TIME, *newval, NULL);
}

void
assign_locale_time(const char *newval, void *extra)
{
	CurrentLCTimeValid = false;
}

/*
 * We allow LC_MESSAGES to actually be set globally.
 *
 * Note: we normally disallow value = "" because it wouldn't have consistent
 * semantics (it'd effectively just use the previous value).  However, this
 * is the value passed for PGC_S_DEFAULT, so don't complain in that case,
 * not even if the attempted setting fails due to invalid environment value.
 * The idea there is just to accept the environment setting *if possible*
 * during startup, until we can read the proper value from postgresql.conf.
 */
bool
check_locale_messages(char **newval, void **extra, GucSource source)
{
    if (**newval == '\0')
    {
        if (source == PGC_S_DEFAULT)
            return true;
        else
            return false;
    }

    /*
     * LC_MESSAGES category does not exist everywhere, but accept it anyway.
     *
     * On Windows, we can't even check the value, so accept it blindly.
     */
#if defined(LC_MESSAGES) && !defined(WIN32)
    return check_locale(LC_MESSAGES, *newval, NULL);
#else
    return true;
#endif
}

void
assign_locale_messages(const char *newval, void *extra)
{
    /*
     * LC_MESSAGES category does not exist everywhere, but accept it anyway.
     * We ignore failure, as per comment above.
     */
#ifdef LC_MESSAGES
    (void) pg_perm_setlocale(LC_MESSAGES, newval);
#endif
}


/*
 * Frees the malloced content of a struct lconv.  (But not the struct
 * itself.)  It's important that this not throw elog(ERROR).
 */
static void
free_struct_lconv(struct lconv *s)
{
    free(s->decimal_point);
    free(s->thousands_sep);
    free(s->grouping);
    free(s->int_curr_symbol);
    free(s->currency_symbol);
    free(s->mon_decimal_point);
    free(s->mon_thousands_sep);
    free(s->mon_grouping);
    free(s->positive_sign);
    free(s->negative_sign);
}

/*
 * Check that all fields of a struct lconv (or at least, the ones we care
 * about) are non-NULL.  The field list must match free_struct_lconv().
 */
static bool
struct_lconv_is_valid(struct lconv *s)
{
    if (s->decimal_point == NULL)
        return false;
    if (s->thousands_sep == NULL)
        return false;
    if (s->grouping == NULL)
        return false;
    if (s->int_curr_symbol == NULL)
        return false;
    if (s->currency_symbol == NULL)
        return false;
    if (s->mon_decimal_point == NULL)
        return false;
    if (s->mon_thousands_sep == NULL)
        return false;
    if (s->mon_grouping == NULL)
        return false;
    if (s->positive_sign == NULL)
        return false;
    if (s->negative_sign == NULL)
        return false;
    return true;
}


/*
 * Convert the strdup'd string at *str from the specified encoding to the
 * database encoding.
 */
static void
db_encoding_convert(int encoding, char **str)
{
    char       *pstr;
    char       *mstr;

    /* convert the string to the database encoding */
    pstr = pg_any_to_server(*str, strlen(*str), encoding);
    if (pstr == *str)
        return;                 /* no conversion happened */

    /* need it malloc'd not palloc'd */
    mstr = strdup(pstr);
    if (mstr == NULL)
        ereport(ERROR,
                (errcode(ERRCODE_OUT_OF_MEMORY),
                 errmsg("out of memory")));

    /* replace old string */
    free(*str);
    *str = mstr;

    pfree(pstr);
}
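
A hedged usage note: db_encoding_convert() expects *str to point at malloc'd
storage, because it frees the old string and installs a freshly strdup'd
replacement.  A minimal illustration of that calling convention (hypothetical
values, not part of this file); PGLC_localeconv() below applies the same
pattern to each text field of its working lconv:

    char       *sym = strdup("EUR");    /* must be malloc'd, not palloc'd */

    if (sym == NULL)
        elog(ERROR, "out of memory");
    db_encoding_convert(PG_LATIN1, &sym);   /* may replace sym with new malloc'd memory */
    free(sym);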

/*
 * Return the POSIX lconv struct (contains number/money formatting
 * information) with locale information for all categories.
 */
struct lconv *
PGLC_localeconv(void)
{
    static struct lconv CurrentLocaleConv;
    static bool CurrentLocaleConvAllocated = false;
    struct lconv *extlconv;
Provide thread-safe pg_localeconv_r().
This involves four different implementation strategies:
1. For Windows, we now require _configthreadlocale() to be available
and work (commit f1da075d9a0), and the documentation says that the
object returned by localeconv() is in thread-local memory.
2. For glibc, we translate to nl_langinfo_l() calls, because it
offers the same information that way as an extension, and that API is
thread-safe.
3. For macOS/*BSD, use localeconv_l(), which is thread-safe.
4. For everything else, use uselocale() to set the locale for the
thread, and use a big ugly lock to defend against the returned object
being concurrently clobbered. In practice this currently means only
Solaris.
The new call is used in pg_locale.c, replacing calls to setlocale() and
localeconv().
Author: Thomas Munro <thomas.munro@gmail.com>
Reviewed-by: Heikki Linnakangas <hlinnaka@iki.fi>
Reviewed-by: Peter Eisentraut <peter@eisentraut.org>
Discussion: https://postgr.es/m/CA%2BhUKGJqVe0%2BPv9dvC9dSums_PXxGo9SWcxYAMBguWJUGbWz-A%40mail.gmail.com
9 months ago
    struct lconv tmp;
    struct lconv worklconv = {0};

    /* Did we do it already? */
    if (CurrentLocaleConvValid)
        return &CurrentLocaleConv;

    /* Free any already-allocated storage */
    if (CurrentLocaleConvAllocated)
    {
        free_struct_lconv(&CurrentLocaleConv);
        CurrentLocaleConvAllocated = false;
    }

    /*
     * Use thread-safe method of obtaining a copy of lconv from the operating
     * system.
     */
    if (pg_localeconv_r(locale_monetary,
                        locale_numeric,
                        &tmp) != 0)
        elog(ERROR,
             "could not get lconv for LC_MONETARY = \"%s\", LC_NUMERIC = \"%s\": %m",
             locale_monetary, locale_numeric);

    /* Must copy data now so we can re-encode it. */
    extlconv = &tmp;
    worklconv.decimal_point = strdup(extlconv->decimal_point);
    worklconv.thousands_sep = strdup(extlconv->thousands_sep);
    worklconv.grouping = strdup(extlconv->grouping);
    worklconv.int_curr_symbol = strdup(extlconv->int_curr_symbol);
    worklconv.currency_symbol = strdup(extlconv->currency_symbol);
    worklconv.mon_decimal_point = strdup(extlconv->mon_decimal_point);
    worklconv.mon_thousands_sep = strdup(extlconv->mon_thousands_sep);
    worklconv.mon_grouping = strdup(extlconv->mon_grouping);
    worklconv.positive_sign = strdup(extlconv->positive_sign);
    worklconv.negative_sign = strdup(extlconv->negative_sign);
    /* Copy scalar fields as well */
    worklconv.int_frac_digits = extlconv->int_frac_digits;
    worklconv.frac_digits = extlconv->frac_digits;
    worklconv.p_cs_precedes = extlconv->p_cs_precedes;
    worklconv.p_sep_by_space = extlconv->p_sep_by_space;
    worklconv.n_cs_precedes = extlconv->n_cs_precedes;
    worklconv.n_sep_by_space = extlconv->n_sep_by_space;
    worklconv.p_sign_posn = extlconv->p_sign_posn;
    worklconv.n_sign_posn = extlconv->n_sign_posn;

    /* Free the contents of the object populated by pg_localeconv_r(). */
    pg_localeconv_free(&tmp);

    /* If any of the preceding strdup calls failed, complain now. */
    if (!struct_lconv_is_valid(&worklconv))
        ereport(ERROR,
                (errcode(ERRCODE_OUT_OF_MEMORY),
                 errmsg("out of memory")));

    PG_TRY();
    {
        int         encoding;

Repair assorted issues in locale data extraction.
cache_locale_time (extraction of LC_TIME-related info) had never been
taught the lessons we previously learned about extraction of info related
to LC_MONETARY and LC_NUMERIC. Specifically, commit 95a777c61 taught
PGLC_localeconv() that data coming out of localeconv() was in an encoding
determined by the relevant locale, but we didn't realize that there's a
similar issue with strftime(). And commit a4930e7ca hardened
PGLC_localeconv() against errors occurring partway through, but failed
to do likewise for cache_locale_time(). So, rearrange the latter
function to perform encoding conversion and not risk failure while
it's got the locales set to temporary values.
This time around I also changed PGLC_localeconv() to treat it as FATAL
if it can't restore the previous settings of the locale values. There
is no reason (except possibly OOM) for that to fail, and proceeding with
the wrong locale values seems like a seriously bad idea --- especially
on Windows where we have to also temporarily change LC_CTYPE. Also,
protect against the possibility that we can't identify the codeset
reported for LC_MONETARY or LC_NUMERIC; rather than just failing,
try to validate the data without conversion.
The user-visible symptom this fixes is that if LC_TIME is set to a locale
name that implies an encoding different from the database encoding,
non-ASCII localized day and month names would be retrieved in the wrong
encoding, leading to either unexpected encoding-conversion error reports
or wrong output from to_char(). The other possible failure modes are
unlikely enough that we've not seen reports of them, AFAIK.
The encoding conversion problems do not manifest on Windows, since
we'd already created special-case code to handle that issue there.
Per report from Juan José Santamaría Flecha. Back-patch to all
supported versions.
Juan José Santamaría Flecha and Tom Lane
Discussion: https://postgr.es/m/CAC+AXB22So5aZm2vZe+MChYXec7gWfr-n-SK-iO091R0P_1Tew@mail.gmail.com
7 years ago
        /*
         * Now we must perform encoding conversion from whatever's associated
         * with the locales into the database encoding.  If we can't identify
         * the encoding implied by LC_NUMERIC or LC_MONETARY (ie we get -1),
         * use PG_SQL_ASCII, which will result in just validating that the
         * strings are OK in the database encoding.
         */
        encoding = pg_get_encoding_from_locale(locale_numeric, true);
        if (encoding < 0)
            encoding = PG_SQL_ASCII;

        db_encoding_convert(encoding, &worklconv.decimal_point);
        db_encoding_convert(encoding, &worklconv.thousands_sep);
        /* grouping is not text and does not require conversion */

        encoding = pg_get_encoding_from_locale(locale_monetary, true);
        if (encoding < 0)
            encoding = PG_SQL_ASCII;

        db_encoding_convert(encoding, &worklconv.int_curr_symbol);
        db_encoding_convert(encoding, &worklconv.currency_symbol);
        db_encoding_convert(encoding, &worklconv.mon_decimal_point);
        db_encoding_convert(encoding, &worklconv.mon_thousands_sep);
        /* mon_grouping is not text and does not require conversion */
        db_encoding_convert(encoding, &worklconv.positive_sign);
        db_encoding_convert(encoding, &worklconv.negative_sign);
    }
    PG_CATCH();
    {
        free_struct_lconv(&worklconv);
        PG_RE_THROW();
    }
    PG_END_TRY();

    /*
     * Everything is good, so save the results.
     */
    CurrentLocaleConv = worklconv;
    CurrentLocaleConvAllocated = true;
    CurrentLocaleConvValid = true;
    return &CurrentLocaleConv;
}
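
A hedged caller sketch (hypothetical, not part of this file): the returned
pointer refers to static storage owned by this module, so callers read the
fields but must not free them.  For example, to pick up the lc_numeric decimal
separator for output formatting:

    struct lconv *lc = PGLC_localeconv();
    const char *decsep = (*lc->decimal_point != '\0') ? lc->decimal_point : ".";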

#ifdef WIN32

/*
 * On Windows, strftime() returns its output in encoding CP_ACP (the default
 * operating system codepage for the computer), which is likely different
 * from SERVER_ENCODING.  This is especially important in Japanese versions
 * of Windows which will use SJIS encoding, which we don't support as a
 * server encoding.
 *
 * So, instead of using strftime(), use wcsftime() to return the value in
 * wide characters (internally UTF16) and then convert to UTF8, which we
 * know how to handle directly.
 *
 * Note that this only affects the calls to strftime() in this file, which are
 * used to get the locale-aware strings.  Other parts of the backend use
 * pg_strftime(), which isn't locale-aware and does not need to be replaced.
 */
static size_t
strftime_l_win32(char *dst, size_t dstlen,
                 const char *format, const struct tm *tm, locale_t locale)
{
    size_t      len;
    wchar_t     wformat[8];     /* formats used below need 3 chars */
    wchar_t     wbuf[MAX_L10N_DATA];

    /*
     * Get a wchar_t version of the format string.  We only actually use
     * plain-ASCII formats in this file, so we can say that they're UTF8.
     */
    len = MultiByteToWideChar(CP_UTF8, 0, format, -1,
                              wformat, lengthof(wformat));
    if (len == 0)
        elog(ERROR, "could not convert format string from UTF-8: error code %lu",
             GetLastError());

    len = _wcsftime_l(wbuf, MAX_L10N_DATA, wformat, tm, locale);
    if (len == 0)
    {
        /*
         * wcsftime failed, possibly because the result would not fit in
         * MAX_L10N_DATA.  Return 0 with the contents of dst unspecified.
         */
        return 0;
    }

    len = WideCharToMultiByte(CP_UTF8, 0, wbuf, len, dst, dstlen - 1,
                              NULL, NULL);
    if (len == 0)
        elog(ERROR, "could not convert string to UTF-8: error code %lu",
             GetLastError());

    dst[len] = '\0';

    return len;
}

/* redefine strftime_l() */
#define strftime_l(a,b,c,d,e) strftime_l_win32(a,b,c,d,e)
Phase 2 of pgindent updates.
Change pg_bsd_indent to follow upstream rules for placement of comments
to the right of code, and remove pgindent hack that caused comments
following #endif to not obey the general rule.
Commit e3860ffa4dd0dad0dd9eea4be9cc1412373a8c89 wasn't actually using
the published version of pg_bsd_indent, but a hacked-up version that
tried to minimize the amount of movement of comments to the right of
code. The situation of interest is where such a comment has to be
moved to the right of its default placement at column 33 because there's
code there. BSD indent has always moved right in units of tab stops
in such cases --- but in the previous incarnation, indent was working
in 8-space tab stops, while now it knows we use 4-space tabs. So the
net result is that in about half the cases, such comments are placed
one tab stop left of before. This is better all around: it leaves
more room on the line for comment text, and it means that in such
cases the comment uniformly starts at the next 4-space tab stop after
the code, rather than sometimes one and sometimes two tabs after.
Also, ensure that comments following #endif are indented the same
as comments following other preprocessor commands such as #else.
That inconsistency turns out to have been self-inflicted damage
from a poorly-thought-through post-indent "fixup" in pgindent.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
9 years ago
#endif                          /* WIN32 */

/*
 * Subroutine for cache_locale_time().
 * Convert the given string from encoding "encoding" to the database
 * encoding, and store the result at *dst, replacing any previous value.
 */
static void
cache_single_string(char **dst, const char *src, int encoding)
{
    char       *ptr;
    char       *olddst;

    /* Convert the string to the database encoding, or validate it's OK */
    ptr = pg_any_to_server(src, strlen(src), encoding);

    /* Store the string in long-lived storage, replacing any previous value */
    olddst = *dst;
    *dst = MemoryContextStrdup(TopMemoryContext, ptr);
    if (olddst)
        pfree(olddst);

    /* Might as well clean up any palloc'd conversion result, too */
    if (ptr != src)
        pfree(ptr);
}
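
A hedged sketch of the intended call pattern (hypothetical names, not the real
cache arrays, which live elsewhere in this file): cache_locale_time() below
fills buf[] with strftime_l() output and then hands each MAX_L10N_DATA-sized
chunk to cache_single_string() together with the encoding implied by LC_TIME:

    static char *example_abbrev_days[7];    /* hypothetical cache */

    static void
    cache_example_days(char *results, int encoding)
    {
        for (int i = 0; i < 7; i++)
            cache_single_string(&example_abbrev_days[i],
                                results + i * MAX_L10N_DATA, encoding);
    }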

/*
 * Update the lc_time localization cache variables if needed.
 */
void
cache_locale_time(void)
{
    char        buf[(2 * 7 + 2 * 12) * MAX_L10N_DATA];
    char       *bufptr;
    time_t      timenow;
    struct tm  *timeinfo;
thread-safety: gmtime_r(), localtime_r()
Use gmtime_r() and localtime_r() instead of gmtime() and localtime(),
for thread-safety.
There are a few affected calls in libpq and ecpg's libpgtypes, which
are probably effectively bugs, because those libraries already claim
to be thread-safe.
There is one affected call in the backend. Most of the backend
otherwise uses the custom functions pg_gmtime() and pg_localtime(),
which are implemented differently.
While we're here, change the call in the backend to gmtime*() instead
of localtime*(), since for that use time zone behavior is irrelevant,
and this side-steps any questions about when time zones are
initialized by localtime_r() vs localtime().
Portability: gmtime_r() and localtime_r() are in POSIX but are not
available on Windows. Windows has functions gmtime_s() and
localtime_s() that can fulfill the same purpose, so we add some small
wrappers around them. (Note that these *_s() functions are also
different from the *_s() functions in the bounds-checking extension of
C11. We are not using those here.)
On MinGW, you can get the POSIX-style *_r() functions by defining
_POSIX_C_SOURCE appropriately before including <time.h>. This leads
to a conflict at least in plpython because apparently _POSIX_C_SOURCE
gets defined in some header there, and then our replacement
definitions conflict with the system definitions. To avoid that sort
of thing, we now always define _POSIX_C_SOURCE on MinGW and use the
POSIX-style functions here.
Reviewed-by: Stepan Neretin <sncfmgg@gmail.com>
Reviewed-by: Heikki Linnakangas <hlinnaka@iki.fi>
Reviewed-by: Thomas Munro <thomas.munro@gmail.com>
Discussion: https://www.postgresql.org/message-id/flat/eba1dc75-298e-4c46-8869-48ba8aad7d70@eisentraut.org
1 year ago
    struct tm   timeinfobuf;
    bool        strftimefail = false;
    int         encoding;
    int         i;
    locale_t    locale;

    /* did we do this already? */
    if (CurrentLCTimeValid)
        return;

    elog(DEBUG3, "cache_locale_time() executed; locale: \"%s\"", locale_time);

    errno = ENOENT;
#ifdef WIN32
    locale = _create_locale(LC_ALL, locale_time);
    if (locale == (locale_t) 0)
        _dosmaperr(GetLastError());
#else
    locale = newlocale(LC_ALL_MASK, locale_time, (locale_t) 0);
#endif
    if (!locale)
        report_newlocale_failure(locale_time);

    /* We use times close to current time as data for strftime(). */
    timenow = time(NULL);
thread-safety: gmtime_r(), localtime_r()
Use gmtime_r() and localtime_r() instead of gmtime() and localtime(),
for thread-safety.
There are a few affected calls in libpq and ecpg's libpgtypes, which
are probably effectively bugs, because those libraries already claim
to be thread-safe.
There is one affected call in the backend. Most of the backend
otherwise uses the custom functions pg_gmtime() and pg_localtime(),
which are implemented differently.
While we're here, change the call in the backend to gmtime*() instead
of localtime*(), since for that use time zone behavior is irrelevant,
and this side-steps any questions about when time zones are
initialized by localtime_r() vs localtime().
Portability: gmtime_r() and localtime_r() are in POSIX but are not
available on Windows. Windows has functions gmtime_s() and
localtime_s() that can fulfill the same purpose, so we add some small
wrappers around them. (Note that these *_s() functions are also
different from the *_s() functions in the bounds-checking extension of
C11. We are not using those here.)
On MinGW, you can get the POSIX-style *_r() functions by defining
_POSIX_C_SOURCE appropriately before including <time.h>. This leads
to a conflict at least in plpython because apparently _POSIX_C_SOURCE
gets defined in some header there, and then our replacement
definitions conflict with the system definitions. To avoid that sort
of thing, we now always define _POSIX_C_SOURCE on MinGW and use the
POSIX-style functions here.
Reviewed-by: Stepan Neretin <sncfmgg@gmail.com>
Reviewed-by: Heikki Linnakangas <hlinnaka@iki.fi>
Reviewed-by: Thomas Munro <thomas.munro@gmail.com>
Discussion: https://www.postgresql.org/message-id/flat/eba1dc75-298e-4c46-8869-48ba8aad7d70@eisentraut.org
1 year ago
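As a concrete illustration of the portability note above, here is a minimal standalone sketch (not part of pg_locale.c; the name pg_gmtime_r_sketch and the WIN32 guard are illustrative assumptions) of how a POSIX-style gmtime_r() call can be layered over Windows' gmtime_s(), whose argument order differs from the POSIX function:

#include <time.h>

static struct tm *
pg_gmtime_r_sketch(const time_t *timep, struct tm *result)
{
#ifdef WIN32
    /* Windows' gmtime_s() takes (dest, src) and returns 0 on success */
    return (gmtime_s(result, timep) == 0) ? result : NULL;
#else
    /* POSIX gmtime_r() returns the result pointer, or NULL on failure */
    return gmtime_r(timep, result);
#endif
}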
    timeinfo = gmtime_r(&timenow, &timeinfobuf);

    /* Store the strftime results in MAX_L10N_DATA-sized portions of buf[] */
    bufptr = buf;

    /*
     * MAX_L10N_DATA is sufficient buffer space for every known locale, and
     * POSIX defines no strftime() errors.  (Buffer space exhaustion is not an
     * error.)  An implementation might report errors (e.g. ENOMEM) by
     * returning 0 (or, less plausibly, a negative value) and setting errno.
     * Report errno just in case the implementation did that, but clear it in
     * advance of the calls so we don't emit a stale, unrelated errno.
     */
    errno = 0;

    /* localized days */
    for (i = 0; i < 7; i++)
    {
        timeinfo->tm_wday = i;
        if (strftime_l(bufptr, MAX_L10N_DATA, "%a", timeinfo, locale) <= 0)
            strftimefail = true;
        bufptr += MAX_L10N_DATA;
        if (strftime_l(bufptr, MAX_L10N_DATA, "%A", timeinfo, locale) <= 0)
            strftimefail = true;
        bufptr += MAX_L10N_DATA;
    }

    /* localized months */
    for (i = 0; i < 12; i++)
    {
        timeinfo->tm_mon = i;
        timeinfo->tm_mday = 1;  /* make sure we don't have invalid date */
        if (strftime_l(bufptr, MAX_L10N_DATA, "%b", timeinfo, locale) <= 0)
            strftimefail = true;
        bufptr += MAX_L10N_DATA;
        if (strftime_l(bufptr, MAX_L10N_DATA, "%B", timeinfo, locale) <= 0)
            strftimefail = true;
        bufptr += MAX_L10N_DATA;
    }

#ifdef WIN32
    _free_locale(locale);
#else
    freelocale(locale);
#endif

    /*
     * At this point we've done our best to clean up, and can throw errors, or
     * call functions that might throw errors, with a clean conscience.
     */
    if (strftimefail)
        elog(ERROR, "strftime_l() failed");

#ifndef WIN32

    /*
     * As in PGLC_localeconv(), we must convert strftime()'s output from the
     * encoding implied by LC_TIME to the database encoding.  If we can't
     * identify the LC_TIME encoding, just perform encoding validation.
     */
    encoding = pg_get_encoding_from_locale(locale_time, true);
    if (encoding < 0)
        encoding = PG_SQL_ASCII;

#else

    /*
     * On Windows, strftime_win32() always returns UTF8 data, so convert from
     * that if necessary.
     */
    encoding = PG_UTF8;

#endif                          /* WIN32 */

    bufptr = buf;

    /* localized days */
    for (i = 0; i < 7; i++)
    {
        cache_single_string(&localized_abbrev_days[i], bufptr, encoding);
        bufptr += MAX_L10N_DATA;
        cache_single_string(&localized_full_days[i], bufptr, encoding);
        bufptr += MAX_L10N_DATA;
    }
Allow to_date/to_timestamp to recognize non-English month/day names.
to_char() has long allowed the TM (translation mode) prefix to
specify output of translated month or day names; but that prefix
had no effect in input format strings. Now it does. to_date()
and to_timestamp() will now recognize the same month or day names
that to_char() would output for the same format code. Matching is
case-insensitive (per the active collation's notion of what that
means), just as it has always been for English month/day names
without the TM prefix.
(As per the discussion thread, there are lots of cases that this
feature will not handle, such as alternate day names. But being
able to accept what to_char() will output seems useful enough.)
In passing, fix some shaky English and violations of message
style guidelines in jsonpath errors for the .datetime() method,
which depends on this code.
Juan José Santamaría Flecha, reviewed and modified by me,
with other commentary from Alvaro Herrera, Tomas Vondra,
Arthur Zakirov, Peter Eisentraut, Mark Dilger.
Discussion: https://postgr.es/m/CAC+AXB3u1jTngJcoC1nAHBf=M3v-jrEfo86UFtCqCjzbWS9QhA@mail.gmail.com
6 years ago
    localized_abbrev_days[7] = NULL;
    localized_full_days[7] = NULL;

    /* localized months */
    for (i = 0; i < 12; i++)
    {
        cache_single_string(&localized_abbrev_months[i], bufptr, encoding);
        bufptr += MAX_L10N_DATA;
        cache_single_string(&localized_full_months[i], bufptr, encoding);
        bufptr += MAX_L10N_DATA;
    }
    localized_abbrev_months[12] = NULL;
    localized_full_months[12] = NULL;

    CurrentLCTimeValid = true;
}

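The function above fills buf[] by repeatedly pointing strftime_l() at successive MAX_L10N_DATA-sized slots while varying tm_wday and tm_mon. A minimal standalone sketch of that extraction pattern, including the errno convention described in the comment, assuming a POSIX system with newlocale()/strftime_l() and an installed de_DE.UTF-8 locale (both assumptions; this is not backend code):

#include <errno.h>
#include <locale.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

int
main(void)
{
    /* "de_DE.UTF-8" is just an example; any installed locale works */
    locale_t    loc = newlocale(LC_TIME_MASK, "de_DE.UTF-8", (locale_t) 0);
    struct tm   tm;
    time_t      now = time(NULL);
    char        dayname[80];
    int         i;

    if (loc == (locale_t) 0)
        return 1;               /* locale not installed */

    gmtime_r(&now, &tm);
    errno = 0;                  /* clear before the calls, as above */
    for (i = 0; i < 7; i++)
    {
        tm.tm_wday = i;         /* %A formats based on tm_wday */
        if (strftime_l(dayname, sizeof(dayname), "%A", &tm, loc) <= 0)
            printf("strftime_l failed: %s\n", strerror(errno));
        else
            printf("day %d: %s\n", i, dayname);
    }
    freelocale(loc);
    return 0;
}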
#if defined(WIN32) && defined(LC_MESSAGES)
/*
 * Convert a Windows setlocale() argument to a Unix-style one.
 *
 * Regardless of platform, we install message catalogs under a Unix-style
 * LL[_CC][.ENCODING][@VARIANT] naming convention.  Only LC_MESSAGES settings
 * following that style will elicit localized interface strings.
 *
 * Before Visual Studio 2012 (msvcr110.dll), Windows setlocale() accepted "C"
 * (but not "c") and strings of the form <Language>[_<Country>][.<CodePage>],
 * case-insensitive.  setlocale() returns the fully-qualified form; for
 * example, setlocale("thaI") returns "Thai_Thailand.874".  Internally,
 * setlocale() and _create_locale() select a "locale identifier"[1] and store
 * it in an undocumented _locale_t field.  From that LCID, we can retrieve the
 * ISO 639 language and the ISO 3166 country.  Character encoding does not
 * matter, because the server and client encodings govern that.
 *
 * Windows Vista introduced the "locale name" concept[2], closely following
 * RFC 4646.  Locale identifiers are now deprecated.  Starting with Visual
 * Studio 2012, setlocale() accepts locale names in addition to the strings it
 * accepted historically.  It does not standardize them; setlocale("Th-tH")
 * returns "Th-tH".  setlocale(category, "") still returns a traditional
 * string.  Furthermore, msvcr110.dll changed the undocumented _locale_t
 * content to carry locale names instead of locale identifiers.
 *
 * Visual Studio 2015 should still be able to do the same as Visual Studio
 * 2012, but the declaration of locale_name is missing from _locale_t, which
 * makes this code fail to compile; hence we instead fall back to enumerating
 * all system locales with EnumSystemLocalesEx() to find the required locale
 * name.  If the input argument is in Unix style, then we can get the ISO
 * locale name directly by using GetLocaleInfoEx() with LCType set to
 * LOCALE_SNAME.
 *
 * This function returns a pointer to a static buffer bearing the converted
 * name or NULL if conversion fails.
 *
 * [1] https://docs.microsoft.com/en-us/windows/win32/intl/locale-identifiers
 * [2] https://docs.microsoft.com/en-us/windows/win32/intl/locale-names
 */

/*
 * Callback function for EnumSystemLocalesEx() in get_iso_localename().
 *
 * This function enumerates all system locales, searching for one that matches
 * an input with the format: <Language>[_<Country>], e.g.
 * English[_United States]
 *
 * The input is a three wchar_t array as an LPARAM. The first element is the
 * locale_name we want to match, the second element is an allocated buffer
 * where the Unix-style locale is copied if a match is found, and the third
 * element is the search status, 1 if a match was found, 0 otherwise.
 */
static BOOL CALLBACK
search_locale_enum(LPWSTR pStr, DWORD dwFlags, LPARAM lparam)
{
    wchar_t     test_locale[LOCALE_NAME_MAX_LENGTH];
    wchar_t   **argv;

    (void) (dwFlags);

    argv = (wchar_t **) lparam;
    *argv[2] = (wchar_t) 0;

    memset(test_locale, 0, sizeof(test_locale));

    /* Get the name of the <Language> in English */
    if (GetLocaleInfoEx(pStr, LOCALE_SENGLISHLANGUAGENAME,
                        test_locale, LOCALE_NAME_MAX_LENGTH))
    {
        /*
         * If the enumerated locale does not have a hyphen ("en") OR the
         * locale_name input does not have an underscore ("English"), we only
         * need to compare the <Language> tags.
         */
        if (wcsrchr(pStr, '-') == NULL || wcsrchr(argv[0], '_') == NULL)
        {
            if (_wcsicmp(argv[0], test_locale) == 0)
            {
                wcscpy(argv[1], pStr);
                *argv[2] = (wchar_t) 1;
                return FALSE;
            }
        }

        /*
         * We have to compare a full <Language>_<Country> tag, so we append
         * the underscore and name of the country/region in English, e.g.
         * "English_United States".
         */
        else
        {
            size_t      len;

            wcscat(test_locale, L"_");
            len = wcslen(test_locale);
            if (GetLocaleInfoEx(pStr, LOCALE_SENGLISHCOUNTRYNAME,
                                test_locale + len,
                                LOCALE_NAME_MAX_LENGTH - len))
            {
                if (_wcsicmp(argv[0], test_locale) == 0)
                {
                    wcscpy(argv[1], pStr);
                    *argv[2] = (wchar_t) 1;
                    return FALSE;
                }
            }
        }
    }

    return TRUE;
}

/*
 * This function converts a Windows locale name to an ISO formatted version
 * for Visual Studio 2015 or greater.
 *
 * Returns NULL, if no valid conversion was found.
 */
static char *
get_iso_localename(const char *winlocname)
{
    wchar_t     wc_locale_name[LOCALE_NAME_MAX_LENGTH];
    wchar_t     buffer[LOCALE_NAME_MAX_LENGTH];
    static char iso_lc_messages[LOCALE_NAME_MAX_LENGTH];
    char       *period;
    int         len;
    int         ret_val;

    /*
     * Valid locales have the following syntax:
     * <Language>[_<Country>[.<CodePage>]]
     *
     * GetLocaleInfoEx can only take locale name without code-page and for the
     * purpose of this API the code-page doesn't matter.
     */
    period = strchr(winlocname, '.');
    if (period != NULL)
        len = period - winlocname;
    else
        len = pg_mbstrlen(winlocname);

    memset(wc_locale_name, 0, sizeof(wc_locale_name));
    memset(buffer, 0, sizeof(buffer));
    MultiByteToWideChar(CP_ACP, 0, winlocname, len, wc_locale_name,
                        LOCALE_NAME_MAX_LENGTH);

    /*
     * If the lc_messages is already a Unix-style string, we have a direct
     * match with LOCALE_SNAME, e.g. en-US, en_US.
     */
    ret_val = GetLocaleInfoEx(wc_locale_name, LOCALE_SNAME, (LPWSTR) &buffer,
                              LOCALE_NAME_MAX_LENGTH);
    if (!ret_val)
    {
        /*
         * Search for a locale in the system that matches language and country
         * name.
         */
        wchar_t    *argv[3];

        argv[0] = wc_locale_name;
        argv[1] = buffer;
        argv[2] = (wchar_t *) &ret_val;
        EnumSystemLocalesEx(search_locale_enum, LOCALE_WINDOWS, (LPARAM) argv,
                            NULL);
    }

    if (ret_val)
    {
        size_t      rc;
        char       *hyphen;

        /* Locale names use only ASCII, any conversion locale suffices. */
        rc = wchar2char(iso_lc_messages, buffer, sizeof(iso_lc_messages), NULL);
        if (rc == -1 || rc == sizeof(iso_lc_messages))
            return NULL;

        /*
         * Since the message catalogs sit on a case-insensitive filesystem, we
         * need not standardize letter case here.  So long as we do not ship
         * message catalogs for which it would matter, we also need not
         * translate the script/variant portion, e.g. uz-Cyrl-UZ to
         * uz_UZ@cyrillic.  Simply replace the hyphen with an underscore.
         */
        hyphen = strchr(iso_lc_messages, '-');
        if (hyphen)
            *hyphen = '_';
        return iso_lc_messages;
    }

    return NULL;
}

static char *
IsoLocaleName(const char *winlocname)
{
    static char iso_lc_messages[LOCALE_NAME_MAX_LENGTH];

    if (pg_strcasecmp("c", winlocname) == 0 ||
        pg_strcasecmp("posix", winlocname) == 0)
    {
        strcpy(iso_lc_messages, "C");
        return iso_lc_messages;
    }
    else
        return get_iso_localename(winlocname);
}

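To make the conversion concrete: per the logic above, a BCP 47 style input such as "de-DE" resolves directly through GetLocaleInfoEx() with LOCALE_SNAME, while a traditional name such as "English_United States.1252" goes through the EnumSystemLocalesEx() search and ends up as "en_US" after the hyphen is replaced. The following Windows-only sketch (hypothetical, not part of pg_locale.c) shows just the direct LOCALE_SNAME path:

#include <windows.h>
#include <stdio.h>

/* Hypothetical demo: look up the canonical locale name for a BCP 47 input.
 * Traditional names such as "English_United States" fail this lookup and
 * would have to go through the EnumSystemLocalesEx() search instead. */
static void
locale_sname_demo(void)
{
    wchar_t     buffer[LOCALE_NAME_MAX_LENGTH];

    if (GetLocaleInfoEx(L"de-DE", LOCALE_SNAME, buffer, LOCALE_NAME_MAX_LENGTH))
        wprintf(L"LOCALE_SNAME: %ls\n", buffer);    /* prints "de-DE" */
}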
Phase 2 of pgindent updates.
Change pg_bsd_indent to follow upstream rules for placement of comments
to the right of code, and remove pgindent hack that caused comments
following #endif to not obey the general rule.
Commit e3860ffa4dd0dad0dd9eea4be9cc1412373a8c89 wasn't actually using
the published version of pg_bsd_indent, but a hacked-up version that
tried to minimize the amount of movement of comments to the right of
code. The situation of interest is where such a comment has to be
moved to the right of its default placement at column 33 because there's
code there. BSD indent has always moved right in units of tab stops
in such cases --- but in the previous incarnation, indent was working
in 8-space tab stops, while now it knows we use 4-space tabs. So the
net result is that in about half the cases, such comments are placed
one tab stop left of before. This is better all around: it leaves
more room on the line for comment text, and it means that in such
cases the comment uniformly starts at the next 4-space tab stop after
the code, rather than sometimes one and sometimes two tabs after.
Also, ensure that comments following #endif are indented the same
as comments following other preprocessor commands such as #else.
That inconsistency turns out to have been self-inflicted damage
from a poorly-thought-through post-indent "fixup" in pgindent.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
9 years ago
#endif                          /* WIN32 && LC_MESSAGES */

/*
 * Create a new pg_locale_t struct for the given collation oid.
 */
static pg_locale_t
create_pg_locale(Oid collid, MemoryContext context)
{
    HeapTuple   tp;
    Form_pg_collation collform;
    pg_locale_t result;
    Datum       datum;
    bool        isnull;

    tp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid));
    if (!HeapTupleIsValid(tp))
        elog(ERROR, "cache lookup failed for collation %u", collid);
    collform = (Form_pg_collation) GETSTRUCT(tp);

    if (collform->collprovider == COLLPROVIDER_BUILTIN)
        result = create_pg_locale_builtin(collid, context);
    else if (collform->collprovider == COLLPROVIDER_ICU)
        result = create_pg_locale_icu(collid, context);
    else if (collform->collprovider == COLLPROVIDER_LIBC)
        result = create_pg_locale_libc(collid, context);
    else
        /* shouldn't happen */
        PGLOCALE_SUPPORT_ERROR(collform->collprovider);

    result->is_default = false;

    Assert((result->collate_is_c && result->collate == NULL) ||
           (!result->collate_is_c && result->collate != NULL));

    Assert((result->ctype_is_c && result->ctype == NULL) ||
           (!result->ctype_is_c && result->ctype != NULL));

    datum = SysCacheGetAttr(COLLOID, tp, Anum_pg_collation_collversion,
                            &isnull);
    if (!isnull)
    {
        char       *actual_versionstr;
        char       *collversionstr;

        collversionstr = TextDatumGetCString(datum);

        if (collform->collprovider == COLLPROVIDER_LIBC)
            datum = SysCacheGetAttrNotNull(COLLOID, tp, Anum_pg_collation_collcollate);
        else
            datum = SysCacheGetAttrNotNull(COLLOID, tp, Anum_pg_collation_colllocale);

        actual_versionstr = get_collation_actual_version(collform->collprovider,
                                                         TextDatumGetCString(datum));
        if (!actual_versionstr)
        {
            /*
             * This could happen when specifying a version in CREATE COLLATION
             * but the provider does not support versioning, or manually
             * creating a mess in the catalogs.
             */
            ereport(ERROR,
                    (errmsg("collation \"%s\" has no actual version, but a version was recorded",
                            NameStr(collform->collname))));
        }

        if (strcmp(actual_versionstr, collversionstr) != 0)
            ereport(WARNING,
                    (errmsg("collation \"%s\" has version mismatch",
                            NameStr(collform->collname)),
                     errdetail("The collation in the database was created using version %s, "
                               "but the operating system provides version %s.",
                               collversionstr, actual_versionstr),
                     errhint("Rebuild all objects affected by this collation and run "
                             "ALTER COLLATION %s REFRESH VERSION, "
                             "or build PostgreSQL with the right library version.",
                             quote_qualified_identifier(get_namespace_name(collform->collnamespace),
                                                        NameStr(collform->collname)))));
    }

    ReleaseSysCache(tp);

    return result;
}

/*
 * Initialize default_locale with database locale settings.
 */
void
init_database_collation(void)
{
    HeapTuple   tup;
    Form_pg_database dbform;
    pg_locale_t result;

    Assert(default_locale == NULL);

    /* Fetch our pg_database row normally, via syscache */
    tup = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(MyDatabaseId));
    if (!HeapTupleIsValid(tup))
        elog(ERROR, "cache lookup failed for database %u", MyDatabaseId);
    dbform = (Form_pg_database) GETSTRUCT(tup);

    if (dbform->datlocprovider == COLLPROVIDER_BUILTIN)
        result = create_pg_locale_builtin(DEFAULT_COLLATION_OID,
                                          TopMemoryContext);
    else if (dbform->datlocprovider == COLLPROVIDER_ICU)
        result = create_pg_locale_icu(DEFAULT_COLLATION_OID,
                                      TopMemoryContext);
    else if (dbform->datlocprovider == COLLPROVIDER_LIBC)
        result = create_pg_locale_libc(DEFAULT_COLLATION_OID,
                                       TopMemoryContext);
    else
        /* shouldn't happen */
        PGLOCALE_SUPPORT_ERROR(dbform->datlocprovider);

    result->is_default = true;
    ReleaseSysCache(tup);

    default_locale = result;
}

/*
 * Create a pg_locale_t from a collation OID.  Results are cached for the
 * lifetime of the backend.  Thus, do not free the result with freelocale().
 *
 * For simplicity, we always generate COLLATE + CTYPE even though we
 * might only need one of them.  Since this is called only once per session,
 * it shouldn't cost much.
 */
pg_locale_t
pg_newlocale_from_collation(Oid collid)
{
    collation_cache_entry *cache_entry;
    bool        found;

    if (collid == DEFAULT_COLLATION_OID)
        return default_locale;

    if (!OidIsValid(collid))
        elog(ERROR, "cache lookup failed for collation %u", collid);

    AssertCouldGetRelation();

    if (last_collation_cache_oid == collid)
        return last_collation_cache_locale;

    if (CollationCache == NULL)
    {
        CollationCacheContext = AllocSetContextCreate(TopMemoryContext,
                                                      "collation cache",
                                                      ALLOCSET_DEFAULT_SIZES);
        CollationCache = collation_cache_create(CollationCacheContext,
                                                16, NULL);
    }

    cache_entry = collation_cache_insert(CollationCache, collid, &found);
    if (!found)
    {
        /*
         * Make sure cache entry is marked invalid, in case we fail before
         * setting things.
         */
        cache_entry->locale = 0;
    }

    if (cache_entry->locale == 0)
    {
        cache_entry->locale = create_pg_locale(collid, CollationCacheContext);
    }

    last_collation_cache_oid = collid;
    last_collation_cache_locale = cache_entry->locale;

    return cache_entry->locale;
}

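pg_newlocale_from_collation() puts a one-entry fast path (last_collation_cache_oid / last_collation_cache_locale) in front of the hash table, so repeated lookups of the same collation skip the hash probe entirely. A minimal standalone sketch of that memoization pattern, with a hypothetical expensive_lookup() standing in for the hash and catalog work:

#include <stdio.h>

typedef unsigned int Oid;

static Oid      last_key = 0;       /* 0 means "no cached entry" */
static double   last_value;

/* Hypothetical stand-in for the hash probe / catalog lookup */
static double
expensive_lookup(Oid key)
{
    printf("slow path for key %u\n", key);
    return (double) key * 2.0;
}

static double
cached_lookup(Oid key)
{
    if (key != 0 && key == last_key)
        return last_value;          /* one-entry fast path */

    last_value = expensive_lookup(key);
    last_key = key;
    return last_value;
}

int
main(void)
{
    cached_lookup(100);             /* slow path */
    cached_lookup(100);             /* fast path, no message printed */
    cached_lookup(7);               /* slow path again */
    return 0;
}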
/*
 * Get provider-specific collation version string for the given collation from
 * the operating system/library.
 */
char *
get_collation_actual_version(char collprovider, const char *collcollate)
{
    char       *collversion = NULL;

Introduce "builtin" collation provider.
New provider for collations, like "libc" or "icu", but without any
external dependency.
Initially, the only locale supported by the builtin provider is "C",
which is identical to the libc provider's "C" locale. The libc
provider's "C" locale has always been treated as a special case that
uses an internal implementation, without using libc at all -- so the
new builtin provider uses the same implementation.
The builtin provider's locale is independent of the server environment
variables LC_COLLATE and LC_CTYPE. Using the builtin provider, the
database collation locale can be "C" while LC_COLLATE and LC_CTYPE are
set to "en_US", which is impossible with the libc provider.
By offering a new builtin provider, it clarifies that the semantics of
a collation using this provider will never depend on libc, and makes
it easier to document the behavior.
Discussion: https://postgr.es/m/ab925f69-5f9d-f85e-b87c-bd2a44798659@joeconway.com
Discussion: https://postgr.es/m/dd9261f4-7a98-4565-93ec-336c1c110d90@manitou-mail.org
Discussion: https://postgr.es/m/ff4c2f2f9c8fc7ca27c1c24ae37ecaeaeaff6b53.camel%40j-davis.com
Reviewed-by: Daniel Vérité, Peter Eisentraut, Jeremy Schneider
2 years ago
    if (collprovider == COLLPROVIDER_BUILTIN)
        collversion = get_collation_actual_version_builtin(collcollate);
#ifdef USE_ICU
    else if (collprovider == COLLPROVIDER_ICU)
        collversion = get_collation_actual_version_icu(collcollate);
#endif
    else if (collprovider == COLLPROVIDER_LIBC)
        collversion = get_collation_actual_version_libc(collcollate);

    return collversion;
}

size_t
pg_strlower(char *dst, size_t dstsize, const char *src, ssize_t srclen,
            pg_locale_t locale)
{
    return locale->ctype->strlower(dst, dstsize, src, srclen, locale);
}

size_t
pg_strtitle(char *dst, size_t dstsize, const char *src, ssize_t srclen,
            pg_locale_t locale)
{
    return locale->ctype->strtitle(dst, dstsize, src, srclen, locale);
}

size_t
pg_strupper(char *dst, size_t dstsize, const char *src, ssize_t srclen,
            pg_locale_t locale)
{
    return locale->ctype->strupper(dst, dstsize, src, srclen, locale);
}

size_t
pg_strfold(char *dst, size_t dstsize, const char *src, ssize_t srclen,
           pg_locale_t locale)
{
    if (locale->ctype->strfold)
        return locale->ctype->strfold(dst, dstsize, src, srclen, locale);
    else
        return locale->ctype->strlower(dst, dstsize, src, srclen, locale);
}

/*
 * pg_strcoll
 *
 * Like pg_strncoll for NUL-terminated input strings.
 */
int
pg_strcoll(const char *arg1, const char *arg2, pg_locale_t locale)
{
    return locale->collate->strncoll(arg1, -1, arg2, -1, locale);
}

/*
 * pg_strncoll
 *
 * Call ucol_strcollUTF8(), ucol_strcoll(), strcoll_l() or wcscoll_l() as
 * appropriate for the given locale, platform, and database encoding.  If the
 * locale is not specified, use the database collation.
 *
 * The input strings must be encoded in the database encoding.  If an input
 * string is NUL-terminated, its length may be specified as -1.
 *
 * The caller is responsible for breaking ties if the collation is
 * deterministic; this maintains consistency with pg_strnxfrm(), which cannot
 * easily account for deterministic collations.
 */
int
pg_strncoll(const char *arg1, ssize_t len1, const char *arg2, ssize_t len2,
            pg_locale_t locale)
{
    return locale->collate->strncoll(arg1, len1, arg2, len2, locale);
}

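The tie-breaking note above matters for deterministic collations: when the collation-aware comparison reports equality, the caller is expected to fall back to a byte-wise comparison so that only byte-identical strings compare equal. A minimal standalone sketch of that pattern using libc's strcoll()/strcmp() as an analogy (it does not use pg_locale_t):

#include <locale.h>
#include <stdio.h>
#include <string.h>

/* Collation-aware compare with a byte-wise tie-break, as a caller would do
 * for a deterministic collation. */
static int
coll_cmp_deterministic(const char *a, const char *b)
{
    int         cmp = strcoll(a, b);

    if (cmp == 0)
        cmp = strcmp(a, b);     /* break ties byte-wise */
    return cmp;
}

int
main(void)
{
    setlocale(LC_COLLATE, "");  /* use the environment's collation */
    printf("%d\n", coll_cmp_deterministic("apple", "Apple"));
    return 0;
}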
/*
 * Return true if the collation provider supports pg_strxfrm() and
 * pg_strnxfrm(); otherwise false.
 *
 * strxfrm() has been found to produce results inconsistent with strcoll()
 * under some libc implementations, which is why a provider may report false
 * here.  No similar problem is known for the ICU provider.
 */
bool
pg_strxfrm_enabled(pg_locale_t locale)
{
    /*
     * locale->collate->strnxfrm is still a required method, even if it may
     * have the wrong behavior, because the planner uses it for estimates in
     * some cases.
     */
    return locale->collate->strxfrm_is_safe;
}

/*
 * pg_strxfrm
 *
 * Like pg_strnxfrm for a NUL-terminated input string.
 */
size_t
pg_strxfrm(char *dest, const char *src, size_t destsize, pg_locale_t locale)
{
    return locale->collate->strnxfrm(dest, destsize, src, -1, locale);
}

/*
 * pg_strnxfrm
 *
 * Transforms 'src' to a nul-terminated string stored in 'dest' such that
 * ordinary strcmp() on transformed strings is equivalent to pg_strcoll() on
 * untransformed strings.
 *
 * The input string must be encoded in the database encoding.  If the input
 * string is NUL-terminated, its length may be specified as -1.  If 'destsize'
 * is zero, 'dest' may be NULL.
 *
 * Not all providers support pg_strnxfrm() safely.  The caller should check
 * pg_strxfrm_enabled() first, otherwise this function may return wrong
 * results or an error.
 *
 * Returns the number of bytes needed (or more) to store the transformed
 * string, excluding the terminating nul byte.  If the value returned is
 * 'destsize' or greater, the resulting contents of 'dest' are undefined.
 */
size_t
pg_strnxfrm(char *dest, size_t destsize, const char *src, ssize_t srclen,
            pg_locale_t locale)
{
    return locale->collate->strnxfrm(dest, destsize, src, srclen, locale);
}

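The contract above mirrors libc's own strxfrm()/strcoll() pair: comparing transformed keys with strcmp() must order the same way as comparing the originals with the collation-aware function, and the return value tells the caller how large a buffer is actually needed. A minimal standalone libc-level sketch of that sizing and comparison pattern (an analogy only; it does not call the pg_* wrappers):

#include <locale.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Transform a string with strxfrm(), sizing the buffer from the return
 * value, much as callers of pg_strnxfrm() are expected to do. */
static char *
xfrm_key(const char *s)
{
    size_t      needed = strxfrm(NULL, s, 0);   /* bytes needed, excl. NUL */
    char       *key = malloc(needed + 1);

    if (key != NULL)
        strxfrm(key, s, needed + 1);
    return key;
}

int
main(void)
{
    char       *k1;
    char       *k2;

    setlocale(LC_COLLATE, "");
    k1 = xfrm_key("apple");
    k2 = xfrm_key("Apple");
    if (k1 && k2)
        printf("strcmp on keys: %d, strcoll on originals: %d\n",
               strcmp(k1, k2), strcoll("apple", "Apple"));
    free(k1);
    free(k2);
    return 0;
}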
/*
 * Return true if the collation provider supports pg_strxfrm_prefix() and
 * pg_strnxfrm_prefix(); otherwise false.
 */
bool
pg_strxfrm_prefix_enabled(pg_locale_t locale)
{
    return (locale->collate->strnxfrm_prefix != NULL);
}

/*
 * pg_strxfrm_prefix
 *
 * Like pg_strnxfrm_prefix for a NUL-terminated input string.
 */
size_t
pg_strxfrm_prefix(char *dest, const char *src, size_t destsize,
                  pg_locale_t locale)
{
    return locale->collate->strnxfrm_prefix(dest, destsize, src, -1, locale);
}

/*
 * pg_strnxfrm_prefix
 *
 * Transforms 'src' to a byte sequence stored in 'dest' such that ordinary
 * memcmp() on the byte sequence is equivalent to pg_strncoll() on
 * untransformed strings.  The result is not nul-terminated.
 *
 * The input string must be encoded in the database encoding.  If the input
 * string is NUL-terminated, its length may be specified as -1.
 *
 * Not all providers support pg_strnxfrm_prefix() safely.  The caller should
 * check pg_strxfrm_prefix_enabled() first, otherwise this function may return
 * wrong results or an error.
 *
 * If destsize is not large enough to hold the resulting byte sequence, stores
 * only the first destsize bytes in 'dest'.  Returns the number of bytes
 * actually copied to 'dest'.
 */
size_t
pg_strnxfrm_prefix(char *dest, size_t destsize, const char *src,
                   ssize_t srclen, pg_locale_t locale)
{
    return locale->collate->strnxfrm_prefix(dest, destsize, src, srclen, locale);
}

/*
 * char_is_cased()
 *
 * Fuzzy test of whether the given char is case-varying or not.  The argument
 * is a single byte, so in a multibyte encoding, just assume any non-ASCII
 * char is case-varying.
 */
bool
char_is_cased(char ch, pg_locale_t locale)
{
    return locale->ctype->char_is_cased(ch, locale);
}

/*
 * char_tolower_enabled()
 *
 * Does the provider support char_tolower()?
 */
bool
char_tolower_enabled(pg_locale_t locale)
{
    return (locale->ctype->char_tolower != NULL);
}

/*
 * char_tolower()
 *
 * Convert char (single-byte encoding) to lowercase.
 */
char
char_tolower(unsigned char ch, pg_locale_t locale)
{
    return locale->ctype->char_tolower(ch, locale);
}

/*
 * Return required encoding ID for the given locale, or -1 if any encoding is
 * valid for the locale.
 */
int
builtin_locale_encoding(const char *locale)
{
Support C.UTF-8 locale in the new builtin collation provider.
The builtin C.UTF-8 locale has similar semantics to the libc locale of
the same name. That is, code point sort order (fast, memcmp-based)
combined with Unicode semantics for character operations such as
pattern matching, regular expressions, and
LOWER()/INITCAP()/UPPER(). The character semantics are based on
Unicode simple case mappings.
The builtin provider's C.UTF-8 offers several important advantages
over libc:
* faster sorting -- benefits from additional optimizations such as
abbreviated keys and varstrfastcmp_c
* faster case conversion, e.g. LOWER(), at least compared with some
libc implementations
* available on all platforms with identical semantics, and the
semantics are stable, testable, and documentable within a given
Postgres major version
Being based on memcmp, the builtin C.UTF-8 locale does not offer
natural language sort order. But it is an improvement for most use
cases that might otherwise use libc's "C.UTF-8" locale, as well as
many use cases that use libc's "C" locale.
Discussion: https://postgr.es/m/ff4c2f2f9c8fc7ca27c1c24ae37ecaeaeaff6b53.camel%40j-davis.com
Reviewed-by: Daniel Vérité, Peter Eisentraut, Jeremy Schneider
2 years ago
    if (strcmp(locale, "C") == 0)
        return -1;
Support PG_UNICODE_FAST locale in the builtin collation provider.
The PG_UNICODE_FAST locale uses code point sort order (fast,
memcmp-based) combined with Unicode character semantics. The character
semantics are based on Unicode full case mapping.
Full case mapping can map a single codepoint to multiple codepoints,
such as "ß" uppercasing to "SS". Additionally, it handles
context-sensitive mappings like the "final sigma", and it uses
titlecase mappings such as "Dž" when titlecasing (rather than plain
uppercase mappings).
Importantly, the uppercasing of "ß" as "SS" is specifically mentioned
by the SQL standard. In Postgres, UCS_BASIC uses plain ASCII semantics
for case mapping and pattern matching, so if we changed it to use the
PG_UNICODE_FAST locale, it would offer better compliance with the
standard. For now, though, do not change the behavior of UCS_BASIC.
Discussion: https://postgr.es/m/ddfd67928818f138f51635712529bc5e1d25e4e7.camel@j-davis.com
Discussion: https://postgr.es/m/27bb0e52-801d-4f73-a0a4-02cfdd4a9ada@eisentraut.org
Reviewed-by: Peter Eisentraut, Daniel Verite
11 months ago
|
|
|
else if (strcmp(locale, "C.UTF-8") == 0)
|
Support C.UTF-8 locale in the new builtin collation provider.
The builtin C.UTF-8 locale has similar semantics to the libc locale of
the same name. That is, code point sort order (fast, memcmp-based)
combined with Unicode semantics for character operations such as
pattern matching, regular expressions, and
LOWER()/INITCAP()/UPPER(). The character semantics are based on
Unicode simple case mappings.
The builtin provider's C.UTF-8 offers several important advantages
over libc:
* faster sorting -- benefits from additional optimizations such as
abbreviated keys and varstrfastcmp_c
* faster case conversion, e.g. LOWER(), at least compared with some
libc implementations
* available on all platforms with identical semantics, and the
semantics are stable, testable, and documentable within a given
Postgres major version
Being based on memcmp, the builtin C.UTF-8 locale does not offer
natural language sort order. But it is an improvement for most use
cases that might otherwise use libc's "C.UTF-8" locale, as well as
many use cases that use libc's "C" locale.
Discussion: https://postgr.es/m/ff4c2f2f9c8fc7ca27c1c24ae37ecaeaeaff6b53.camel%40j-davis.com
Reviewed-by: Daniel Vérité, Peter Eisentraut, Jeremy Schneider
2 years ago
|
|
|
return PG_UTF8;
|
Support PG_UNICODE_FAST locale in the builtin collation provider.
The PG_UNICODE_FAST locale uses code point sort order (fast,
memcmp-based) combined with Unicode character semantics. The character
semantics are based on Unicode full case mapping.
Full case mapping can map a single codepoint to multiple codepoints,
such as "ß" uppercasing to "SS". Additionally, it handles
context-sensitive mappings like the "final sigma", and it uses
titlecase mappings such as "Dž" when titlecasing (rather than plain
uppercase mappings).
Importantly, the uppercasing of "ß" as "SS" is specifically mentioned
by the SQL standard. In Postgres, UCS_BASIC uses plain ASCII semantics
for case mapping and pattern matching, so if we changed it to use the
PG_UNICODE_FAST locale, it would offer better compliance with the
standard. For now, though, do not change the behavior of UCS_BASIC.
Discussion: https://postgr.es/m/ddfd67928818f138f51635712529bc5e1d25e4e7.camel@j-davis.com
Discussion: https://postgr.es/m/27bb0e52-801d-4f73-a0a4-02cfdd4a9ada@eisentraut.org
Reviewed-by: Peter Eisentraut, Daniel Verite
11 months ago
|
|
|
else if (strcmp(locale, "PG_UNICODE_FAST") == 0)
|
|
|
|
|
return PG_UTF8;
|
|
|
|
|
|
Support C.UTF-8 locale in the new builtin collation provider.
The builtin C.UTF-8 locale has similar semantics to the libc locale of
the same name. That is, code point sort order (fast, memcmp-based)
combined with Unicode semantics for character operations such as
pattern matching, regular expressions, and
LOWER()/INITCAP()/UPPER(). The character semantics are based on
Unicode simple case mappings.
The builtin provider's C.UTF-8 offers several important advantages
over libc:
* faster sorting -- benefits from additional optimizations such as
abbreviated keys and varstrfastcmp_c
* faster case conversion, e.g. LOWER(), at least compared with some
libc implementations
* available on all platforms with identical semantics, and the
semantics are stable, testable, and documentable within a given
Postgres major version
Being based on memcmp, the builtin C.UTF-8 locale does not offer
natural language sort order. But it is an improvement for most use
cases that might otherwise use libc's "C.UTF-8" locale, as well as
many use cases that use libc's "C" locale.
Discussion: https://postgr.es/m/ff4c2f2f9c8fc7ca27c1c24ae37ecaeaeaff6b53.camel%40j-davis.com
Reviewed-by: Daniel Vérité, Peter Eisentraut, Jeremy Schneider
2 years ago
|
|
|
|
|
|
|
|
ereport(ERROR,
|
|
|
|
|
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
|
|
|
|
|
errmsg("invalid locale name \"%s\" for builtin provider",
|
|
|
|
|
locale)));
|
|
|
|
|
|
Support C.UTF-8 locale in the new builtin collation provider.
The builtin C.UTF-8 locale has similar semantics to the libc locale of
the same name. That is, code point sort order (fast, memcmp-based)
combined with Unicode semantics for character operations such as
pattern matching, regular expressions, and
LOWER()/INITCAP()/UPPER(). The character semantics are based on
Unicode simple case mappings.
The builtin provider's C.UTF-8 offers several important advantages
over libc:
* faster sorting -- benefits from additional optimizations such as
abbreviated keys and varstrfastcmp_c
* faster case conversion, e.g. LOWER(), at least compared with some
libc implementations
* available on all platforms with identical semantics, and the
semantics are stable, testable, and documentable within a given
Postgres major version
Being based on memcmp, the builtin C.UTF-8 locale does not offer
natural language sort order. But it is an improvement for most use
cases that might otherwise use libc's "C.UTF-8" locale, as well as
many use cases that use libc's "C" locale.
Discussion: https://postgr.es/m/ff4c2f2f9c8fc7ca27c1c24ae37ecaeaeaff6b53.camel%40j-davis.com
Reviewed-by: Daniel Vérité, Peter Eisentraut, Jeremy Schneider
2 years ago
|
|
|
return 0; /* keep compiler quiet */
|
|
|
|
|
}
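
/*
 * Illustrative sketch, not part of the original file: expected results of
 * builtin_locale_encoding() for the names recognized above.  The function
 * name and the PG_LOCALE_USAGE_EXAMPLES guard macro are invented for
 * illustration, so this block is never compiled.
 */
#ifdef PG_LOCALE_USAGE_EXAMPLES
static void
example_builtin_locale_encoding(void)
{
    Assert(builtin_locale_encoding("C") == -1); /* any encoding is valid */
    Assert(builtin_locale_encoding("C.UTF-8") == PG_UTF8);
    Assert(builtin_locale_encoding("PG_UNICODE_FAST") == PG_UTF8);
    /* any other name raises an error via ereport(ERROR) */
}
#endif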

/*
 * Validate the locale and encoding combination, and return the canonical form
 * of the locale name.
 */
const char *
builtin_validate_locale(int encoding, const char *locale)
{
    const char *canonical_name = NULL;
    int         required_encoding;

    if (strcmp(locale, "C") == 0)
        canonical_name = "C";
    else if (strcmp(locale, "C.UTF-8") == 0 || strcmp(locale, "C.UTF8") == 0)
        canonical_name = "C.UTF-8";
    else if (strcmp(locale, "PG_UNICODE_FAST") == 0)
        canonical_name = "PG_UNICODE_FAST";

    if (!canonical_name)
        ereport(ERROR,
                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                 errmsg("invalid locale name \"%s\" for builtin provider",
                        locale)));

    required_encoding = builtin_locale_encoding(canonical_name);
    if (required_encoding >= 0 && encoding != required_encoding)
        ereport(ERROR,
                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                 errmsg("encoding \"%s\" does not match locale \"%s\"",
                        pg_encoding_to_char(encoding), locale)));

    return canonical_name;
}
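
/*
 * Illustrative sketch, not part of the original file: a hypothetical DDL
 * path that accepted a builtin-provider locale might validate and
 * canonicalize it in one step.  "C.UTF8" is accepted as a spelling variant
 * and comes back as "C.UTF-8"; a mismatched encoding raises an error.  The
 * function name and the PG_LOCALE_USAGE_EXAMPLES guard macro are invented
 * for illustration, so this block is never compiled.
 */
#ifdef PG_LOCALE_USAGE_EXAMPLES
static const char *
example_accept_builtin_locale(int dbencoding, const char *locname)
{
    return builtin_validate_locale(dbencoding, locname);
}
#endif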

/*
 * Return the BCP47 language tag representation of the requested locale.
 *
 * This function should be called before passing the string to ucol_open(),
 * because conversion to a language tag also performs "level 2
 * canonicalization". In addition to producing a consistent format, level 2
 * canonicalization is able to more accurately interpret different input
 * locale string formats, such as POSIX and .NET IDs.
 */
char *
icu_language_tag(const char *loc_str, int elevel)
{
#ifdef USE_ICU
    UErrorCode  status;
    char       *langtag;
    size_t      buflen = 32;    /* arbitrary starting buffer size */
    const bool  strict = true;

    /*
     * A BCP47 language tag doesn't have a clearly-defined upper limit (cf.
     * RFC5646 section 4.4). Additionally, in older ICU versions,
     * uloc_toLanguageTag() doesn't always return the ultimate length on the
     * first call, necessitating a loop.
     */
    langtag = palloc(buflen);
    while (true)
    {
        status = U_ZERO_ERROR;
        uloc_toLanguageTag(loc_str, langtag, buflen, strict, &status);

        /* try again if the buffer is not large enough */
        if ((status == U_BUFFER_OVERFLOW_ERROR ||
             status == U_STRING_NOT_TERMINATED_WARNING) &&
            buflen < MaxAllocSize)
        {
            buflen = Min(buflen * 2, MaxAllocSize);
            langtag = repalloc(langtag, buflen);
            continue;
        }

        break;
    }

    if (U_FAILURE(status))
    {
        pfree(langtag);

        if (elevel > 0)
            ereport(elevel,
                    (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                     errmsg("could not convert locale name \"%s\" to language tag: %s",
                            loc_str, u_errorName(status))));
        return NULL;
    }

    return langtag;
#else                           /* not USE_ICU */
    ereport(ERROR,
            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
             errmsg("ICU is not supported in this build")));
    return NULL;                /* keep compiler quiet */
#endif                          /* not USE_ICU */
}
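
/*
 * Illustrative sketch, not part of the original file: per the comment above,
 * a hypothetical caller would canonicalize the locale string to a BCP47
 * language tag before opening a collator, falling back to the original
 * string if conversion fails at a non-error level.  The function name and
 * the PG_LOCALE_USAGE_EXAMPLES guard macro are invented for illustration, so
 * this block is never compiled.
 */
#ifdef PG_LOCALE_USAGE_EXAMPLES
static char *
example_canonicalize_icu_locale(const char *loc_str)
{
    char       *langtag = icu_language_tag(loc_str, WARNING);

    /* on failure a warning was emitted and NULL returned; keep the input */
    return langtag ? langtag : pstrdup(loc_str);
}
#endif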

/*
 * Perform best-effort check that the locale is a valid one.
 */
void
icu_validate_locale(const char *loc_str)
{
#ifdef USE_ICU
    UCollator  *collator;
    UErrorCode  status;
    char        lang[ULOC_LANG_CAPACITY];
    bool        found = false;
    int         elevel = icu_validation_level;

    /* no validation */
    if (elevel < 0)
        return;

    /* downgrade to WARNING during pg_upgrade */
    if (IsBinaryUpgrade && elevel > WARNING)
        elevel = WARNING;

    /* validate that we can extract the language */
    status = U_ZERO_ERROR;
    uloc_getLanguage(loc_str, lang, ULOC_LANG_CAPACITY, &status);
    if (U_FAILURE(status) || status == U_STRING_NOT_TERMINATED_WARNING)
    {
        ereport(elevel,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("could not get language from ICU locale \"%s\": %s",
                        loc_str, u_errorName(status)),
                 errhint("To disable ICU locale validation, set the parameter \"%s\" to \"%s\".",
                         "icu_validation_level", "disabled")));
        return;
    }

    /* check for special language name */
    if (strcmp(lang, "") == 0 ||
        strcmp(lang, "root") == 0 || strcmp(lang, "und") == 0)
        found = true;

    /* search for matching language within ICU */
    for (int32_t i = 0; !found && i < uloc_countAvailable(); i++)
    {
        const char *otherloc = uloc_getAvailable(i);
        char        otherlang[ULOC_LANG_CAPACITY];

        status = U_ZERO_ERROR;
        uloc_getLanguage(otherloc, otherlang, ULOC_LANG_CAPACITY, &status);
        if (U_FAILURE(status) || status == U_STRING_NOT_TERMINATED_WARNING)
            continue;

        if (strcmp(lang, otherlang) == 0)
            found = true;
    }

    if (!found)
        ereport(elevel,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("ICU locale \"%s\" has unknown language \"%s\"",
                        loc_str, lang),
                 errhint("To disable ICU locale validation, set the parameter \"%s\" to \"%s\".",
                         "icu_validation_level", "disabled")));

    /* check that it can be opened */
    collator = pg_ucol_open(loc_str);
    ucol_close(collator);
#else                           /* not USE_ICU */
    /* could get here if a collation was created by a build with ICU */
    ereport(ERROR,
            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
             errmsg("ICU is not supported in this build")));
#endif                          /* not USE_ICU */
}
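
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * accepting an ICU locale might run the best-effort validation above (whose
 * severity is controlled by icu_validation_level and downgraded during
 * pg_upgrade) and then canonicalize the name with icu_language_tag().  The
 * function name and the PG_LOCALE_USAGE_EXAMPLES guard macro are invented
 * for illustration, so this block is never compiled.
 */
#ifdef PG_LOCALE_USAGE_EXAMPLES
static char *
example_check_icu_locale(const char *loc_str)
{
    icu_validate_locale(loc_str);
    return icu_language_tag(loc_str, ERROR);
}
#endif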