|
|
|
|
/*-------------------------------------------------------------------------
 *
 * json.c
 *		JSON data type support.
 *
 * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/utils/adt/json.c
 *
 *-------------------------------------------------------------------------
 */
|
|
|
|
|
#include "postgres.h"

#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "common/hashfn.h"
#include "funcapi.h"
#include "libpq/pqformat.h"
#include "miscadmin.h"
#include "parser/parse_coerce.h"
#include "utils/array.h"
#include "utils/builtins.h"
#include "utils/date.h"
#include "utils/datetime.h"
#include "utils/json.h"
#include "utils/jsonfuncs.h"
#include "utils/lsyscache.h"
#include "utils/typcache.h"
|
|
|
|
|
/*
 * Type categories for datum_to_json: how a value of a given datatype is
 * rendered as JSON.  Determined by json_categorize_type().
 */
typedef enum					/* type categories for datum_to_json */
{
	JSONTYPE_NULL,				/* null, so we didn't bother to identify */
	JSONTYPE_BOOL,				/* boolean (built-in types only) */
	JSONTYPE_NUMERIC,			/* numeric (ditto) */
	JSONTYPE_DATE,				/* we use special formatting for datetimes */
	JSONTYPE_TIMESTAMP,			/* (ditto) */
	JSONTYPE_TIMESTAMPTZ,		/* (ditto) */
	JSONTYPE_JSON,				/* JSON itself (and JSONB) */
	JSONTYPE_ARRAY,				/* array */
	JSONTYPE_COMPOSITE,			/* composite */
	JSONTYPE_CAST,				/* something with an explicit cast to JSON */
	JSONTYPE_OTHER				/* all else */
} JsonTypeCategory;
|
SQL/JSON constructors
This patch introduces the SQL/JSON standard constructors for JSON:
JSON()
JSON_ARRAY()
JSON_ARRAYAGG()
JSON_OBJECT()
JSON_OBJECTAGG()
For the most part these functions provide facilities that mimic
existing json/jsonb functions. However, they also offer some useful
additional functionality. In addition to text input, the JSON() function
accepts bytea input, which it will decode and constuct a json value from.
The other functions provide useful options for handling duplicate keys
and null values.
This series of patches will be followed by a consolidated documentation
patch.
Nikita Glukhov
Reviewers have included (in no particular order) Andres Freund, Alexander
Korotkov, Pavel Stehule, Andrew Alsup, Erik Rijkers, Zihong Yu,
Himanshu Upadhyaya, Daniel Gustafsson, Justin Pryzby.
Discussion: https://postgr.es/m/cd0bb935-0158-78a7-08b5-904886deac4b@postgrespro.ru
4 years ago
|
|
|
|
SQL/JSON: add standard JSON constructor functions
This commit introduces the SQL/JSON standard-conforming constructors for
JSON types:
JSON_ARRAY()
JSON_ARRAYAGG()
JSON_OBJECT()
JSON_OBJECTAGG()
Most of the functionality was already present in PostgreSQL-specific
functions, but these include some new functionality such as the ability
to skip or include NULL values, and to allow duplicate keys or throw
error when they are found, as well as the standard specified syntax to
specify output type and format.
Author: Nikita Glukhov <n.gluhov@postgrespro.ru>
Author: Teodor Sigaev <teodor@sigaev.ru>
Author: Oleg Bartunov <obartunov@gmail.com>
Author: Alexander Korotkov <aekorotkov@gmail.com>
Author: Amit Langote <amitlangote09@gmail.com>
Reviewers have included (in no particular order) Andres Freund, Alexander
Korotkov, Pavel Stehule, Andrew Alsup, Erik Rijkers, Zihong Yu,
Himanshu Upadhyaya, Daniel Gustafsson, Justin Pryzby.
Discussion: https://postgr.es/m/CAF4Au4w2x-5LTnN_bxky-mq4=WOqsGsxSpENCzHRAzSnEd8+WQ@mail.gmail.com
Discussion: https://postgr.es/m/cd0bb935-0158-78a7-08b5-904886deac4b@postgrespro.ru
Discussion: https://postgr.es/m/20220616233130.rparivafipt6doj3@alap3.anarazel.de
Discussion: https://postgr.es/m/abd9b83b-aa66-f230-3d6d-734817f0995d%40postgresql.org
3 years ago
|
|
|
|
|
|
|
|
/*
 * Support for fast key uniqueness checking.
 *
 * We maintain a hash table of used keys in JSON objects for fast detection
 * of duplicates.
 */
/* Common context for key uniqueness check */
typedef struct HTAB *JsonUniqueCheckState;	/* hash table for key names */
|
|
|
|
|
|
|
|
|
|
/* Hash entry for JsonUniqueCheckState */
typedef struct JsonUniqueHashEntry
{
	const char *key;			/* key name (not necessarily null-terminated) */
	int			key_len;		/* length of key in bytes */
	int			object_id;		/* id of object the key belongs to, so one
								 * hash table can serve nested objects */
} JsonUniqueHashEntry;
|
|
|
|
|
|
|
|
|
|
/* Context struct for key uniqueness check during JSON building */
|
|
|
|
|
typedef struct JsonUniqueBuilderState
|
|
|
|
|
{
|
|
|
|
|
JsonUniqueCheckState check; /* unique check */
|
|
|
|
|
StringInfoData skipped_keys; /* skipped keys with NULL values */
|
|
|
|
|
MemoryContext mcxt; /* context for saving skipped keys */
|
|
|
|
|
} JsonUniqueBuilderState;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* State struct for JSON aggregation */
|
|
|
|
|
typedef struct JsonAggState
|
|
|
|
|
{
|
|
|
|
|
StringInfo str;
|
|
|
|
|
JsonTypeCategory key_category;
|
|
|
|
|
Oid key_output_func;
|
|
|
|
|
JsonTypeCategory val_category;
|
|
|
|
|
Oid val_output_func;
|
SQL/JSON: add standard JSON constructor functions
This commit introduces the SQL/JSON standard-conforming constructors for
JSON types:
JSON_ARRAY()
JSON_ARRAYAGG()
JSON_OBJECT()
JSON_OBJECTAGG()
Most of the functionality was already present in PostgreSQL-specific
functions, but these include some new functionality such as the ability
to skip or include NULL values, and to allow duplicate keys or throw
error when they are found, as well as the standard specified syntax to
specify output type and format.
Author: Nikita Glukhov <n.gluhov@postgrespro.ru>
Author: Teodor Sigaev <teodor@sigaev.ru>
Author: Oleg Bartunov <obartunov@gmail.com>
Author: Alexander Korotkov <aekorotkov@gmail.com>
Author: Amit Langote <amitlangote09@gmail.com>
Reviewers have included (in no particular order) Andres Freund, Alexander
Korotkov, Pavel Stehule, Andrew Alsup, Erik Rijkers, Zihong Yu,
Himanshu Upadhyaya, Daniel Gustafsson, Justin Pryzby.
Discussion: https://postgr.es/m/CAF4Au4w2x-5LTnN_bxky-mq4=WOqsGsxSpENCzHRAzSnEd8+WQ@mail.gmail.com
Discussion: https://postgr.es/m/cd0bb935-0158-78a7-08b5-904886deac4b@postgrespro.ru
Discussion: https://postgr.es/m/20220616233130.rparivafipt6doj3@alap3.anarazel.de
Discussion: https://postgr.es/m/abd9b83b-aa66-f230-3d6d-734817f0995d%40postgresql.org
3 years ago
|
|
|
JsonUniqueBuilderState unique_check;
|
|
|
|
|
} JsonAggState;
|
|
|
|
|
|
|
|
|
|
static void composite_to_json(Datum composite, StringInfo result,
|
|
|
|
|
bool use_line_feeds);
|
|
|
|
|
static void array_dim_to_json(StringInfo result, int dim, int ndims, int *dims,
|
|
|
|
|
Datum *vals, bool *nulls, int *valcount,
|
|
|
|
|
JsonTypeCategory tcategory, Oid outfuncoid,
|
|
|
|
|
bool use_line_feeds);
|
|
|
|
|
static void array_to_json_internal(Datum array, StringInfo result,
|
|
|
|
|
bool use_line_feeds);
|
|
|
|
|
static void json_categorize_type(Oid typoid,
|
|
|
|
|
JsonTypeCategory *tcategory,
|
|
|
|
|
Oid *outfuncoid);
|
|
|
|
|
static void datum_to_json(Datum val, bool is_null, StringInfo result,
|
|
|
|
|
JsonTypeCategory tcategory, Oid outfuncoid,
|
|
|
|
|
bool key_scalar);
|
|
|
|
|
static void add_json(Datum val, bool is_null, StringInfo result,
|
|
|
|
|
Oid val_type, bool key_scalar);
|
|
|
|
|
static text *catenate_stringinfo_string(StringInfo buffer, const char *addon);
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Input.
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
json_in(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
char *json = PG_GETARG_CSTRING(0);
|
|
|
|
|
text *result = cstring_to_text(json);
|
|
|
|
|
JsonLexContext *lex;
|
|
|
|
|
|
|
|
|
|
/* validate it */
|
|
|
|
|
lex = makeJsonLexContext(result, false);
|
|
|
|
|
if (!pg_parse_json_or_errsave(lex, &nullSemAction, fcinfo->context))
|
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
|
|
|
|
|
/* Internal representation is the same as text */
|
|
|
|
|
PG_RETURN_TEXT_P(result);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Output.
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
json_out(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
/* we needn't detoast because text_to_cstring will handle that */
|
|
|
|
|
Datum txt = PG_GETARG_DATUM(0);
|
|
|
|
|
|
|
|
|
|
PG_RETURN_CSTRING(TextDatumGetCString(txt));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Binary send.
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
json_send(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
text *t = PG_GETARG_TEXT_PP(0);
|
|
|
|
|
StringInfoData buf;
|
|
|
|
|
|
|
|
|
|
pq_begintypsend(&buf);
|
|
|
|
|
pq_sendtext(&buf, VARDATA_ANY(t), VARSIZE_ANY_EXHDR(t));
|
|
|
|
|
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Binary receive.
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
json_recv(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
|
|
|
|
|
char *str;
|
|
|
|
|
int nbytes;
|
|
|
|
|
JsonLexContext *lex;
|
|
|
|
|
|
|
|
|
|
str = pq_getmsgtext(buf, buf->len - buf->cursor, &nbytes);
|
|
|
|
|
|
|
|
|
|
/* Validate it. */
|
|
|
|
|
lex = makeJsonLexContextCstringLen(str, nbytes, GetDatabaseEncoding(), false);
|
|
|
|
|
pg_parse_json_or_ereport(lex, &nullSemAction);
|
|
|
|
|
|
Introduce jsonb, a structured format for storing json.
The new format accepts exactly the same data as the json type. However, it is
stored in a format that does not require reparsing the orgiginal text in order
to process it, making it much more suitable for indexing and other operations.
Insignificant whitespace is discarded, and the order of object keys is not
preserved. Neither are duplicate object keys kept - the later value for a given
key is the only one stored.
The new type has all the functions and operators that the json type has,
with the exception of the json generation functions (to_json, json_agg etc.)
and with identical semantics. In addition, there are operator classes for
hash and btree indexing, and two classes for GIN indexing, that have no
equivalent in the json type.
This feature grew out of previous work by Oleg Bartunov and Teodor Sigaev, which
was intended to provide similar facilities to a nested hstore type, but which
in the end proved to have some significant compatibility issues.
Authors: Oleg Bartunov, Teodor Sigaev, Peter Geoghegan and Andrew Dunstan.
Review: Andres Freund
12 years ago
|
|
|
PG_RETURN_TEXT_P(cstring_to_text_with_len(str, nbytes));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
* Determine how we want to print values of a given type in datum_to_json.
|
|
|
|
|
*
|
|
|
|
|
* Given the datatype OID, return its JsonTypeCategory, as well as the type's
|
|
|
|
|
* output function OID. If the returned category is JSONTYPE_CAST, we
|
|
|
|
|
* return the OID of the type->JSON cast function instead.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
json_categorize_type(Oid typoid,
|
|
|
|
|
JsonTypeCategory *tcategory,
|
|
|
|
|
Oid *outfuncoid)
|
|
|
|
|
{
|
|
|
|
|
bool typisvarlena;
|
|
|
|
|
|
|
|
|
|
/* Look through any domain */
|
|
|
|
|
typoid = getBaseType(typoid);
|
|
|
|
|
|
|
|
|
|
*outfuncoid = InvalidOid;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* We need to get the output function for everything except date and
|
Fix jsonb Unicode escape processing, and in consequence disallow \u0000.
We've been trying to support \u0000 in JSON values since commit
78ed8e03c67d7333, and have introduced increasingly worse hacks to try to
make it work, such as commit 0ad1a816320a2b53. However, it fundamentally
can't work in the way envisioned, because the stored representation looks
the same as for \\u0000 which is not the same thing at all. It's also
entirely bogus to output \u0000 when de-escaped output is called for.
The right way to do this would be to store an actual 0x00 byte, and then
throw error only if asked to produce de-escaped textual output. However,
getting to that point seems likely to take considerable work and may well
never be practical in the 9.4.x series.
To preserve our options for better behavior while getting rid of the nasty
side-effects of 0ad1a816320a2b53, revert that commit in toto and instead
throw error if \u0000 is used in a context where it needs to be de-escaped.
(These are the same contexts where non-ASCII Unicode escapes throw error
if the database encoding isn't UTF8, so this behavior is by no means
without precedent.)
In passing, make both the \u0000 case and the non-ASCII Unicode case report
ERRCODE_UNTRANSLATABLE_CHARACTER / "unsupported Unicode escape sequence"
rather than claiming there's something wrong with the input syntax.
Back-patch to 9.4, where we have to do something because 0ad1a816320a2b53
broke things for many cases having nothing to do with \u0000. 9.3 also has
bogus behavior, but only for that specific escape value, so given the lack
of field complaints it seems better to leave 9.3 alone.
11 years ago
|
|
|
* timestamp types, array and composite types, booleans, and non-builtin
|
|
|
|
|
* types where there's a cast to json.
|
|
|
|
|
*/
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
|
|
|
|
|
switch (typoid)
|
|
|
|
|
{
|
|
|
|
|
case BOOLOID:
|
|
|
|
|
*tcategory = JSONTYPE_BOOL;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case INT2OID:
|
|
|
|
|
case INT4OID:
|
|
|
|
|
case INT8OID:
|
|
|
|
|
case FLOAT4OID:
|
|
|
|
|
case FLOAT8OID:
|
|
|
|
|
case NUMERICOID:
|
|
|
|
|
getTypeOutputInfo(typoid, outfuncoid, &typisvarlena);
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
*tcategory = JSONTYPE_NUMERIC;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case DATEOID:
|
|
|
|
|
*tcategory = JSONTYPE_DATE;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case TIMESTAMPOID:
|
|
|
|
|
*tcategory = JSONTYPE_TIMESTAMP;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case TIMESTAMPTZOID:
|
|
|
|
|
*tcategory = JSONTYPE_TIMESTAMPTZ;
|
|
|
|
|
break;
|
|
|
|
|
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
case JSONOID:
|
|
|
|
|
case JSONBOID:
|
|
|
|
|
getTypeOutputInfo(typoid, outfuncoid, &typisvarlena);
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
*tcategory = JSONTYPE_JSON;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
|
/* Check for arrays and composites */
|
|
|
|
|
if (OidIsValid(get_element_type(typoid)) || typoid == ANYARRAYOID
|
Introduce "anycompatible" family of polymorphic types.
This patch adds the pseudo-types anycompatible, anycompatiblearray,
anycompatiblenonarray, and anycompatiblerange. They work much like
anyelement, anyarray, anynonarray, and anyrange respectively, except
that the actual input values need not match precisely in type.
Instead, if we can find a common supertype (using the same rules
as for UNION/CASE type resolution), then the parser automatically
promotes the input values to that type. For example,
"myfunc(anycompatible, anycompatible)" can match a call with one
integer and one bigint argument, with the integer automatically
promoted to bigint. With anyelement in the definition, the user
would have had to cast the integer explicitly.
The new types also provide a second, independent set of type variables
for function matching; thus with "myfunc(anyelement, anyelement,
anycompatible) returns anycompatible" the first two arguments are
constrained to be the same type, but the third can be some other
type, and the result has the type of the third argument. The need
for more than one set of type variables was foreseen back when we
first invented the polymorphic types, but we never did anything
about it.
Pavel Stehule, revised a bit by me
Discussion: https://postgr.es/m/CAFj8pRDna7VqNi8gR+Tt2Ktmz0cq5G93guc3Sbn_NVPLdXAkqA@mail.gmail.com
6 years ago
|
|
|
|| typoid == ANYCOMPATIBLEARRAYOID || typoid == RECORDARRAYOID)
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
*tcategory = JSONTYPE_ARRAY;
|
|
|
|
|
else if (type_is_rowtype(typoid)) /* includes RECORDOID */
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
*tcategory = JSONTYPE_COMPOSITE;
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
/* It's probably the general case ... */
|
|
|
|
|
*tcategory = JSONTYPE_OTHER;
|
|
|
|
|
/* but let's look for a cast to json, if it's not built-in */
|
|
|
|
|
if (typoid >= FirstNormalObjectId)
|
|
|
|
|
{
|
Fix jsonb Unicode escape processing, and in consequence disallow \u0000.
We've been trying to support \u0000 in JSON values since commit
78ed8e03c67d7333, and have introduced increasingly worse hacks to try to
make it work, such as commit 0ad1a816320a2b53. However, it fundamentally
can't work in the way envisioned, because the stored representation looks
the same as for \\u0000 which is not the same thing at all. It's also
entirely bogus to output \u0000 when de-escaped output is called for.
The right way to do this would be to store an actual 0x00 byte, and then
throw error only if asked to produce de-escaped textual output. However,
getting to that point seems likely to take considerable work and may well
never be practical in the 9.4.x series.
To preserve our options for better behavior while getting rid of the nasty
side-effects of 0ad1a816320a2b53, revert that commit in toto and instead
throw error if \u0000 is used in a context where it needs to be de-escaped.
(These are the same contexts where non-ASCII Unicode escapes throw error
if the database encoding isn't UTF8, so this behavior is by no means
without precedent.)
In passing, make both the \u0000 case and the non-ASCII Unicode case report
ERRCODE_UNTRANSLATABLE_CHARACTER / "unsupported Unicode escape sequence"
rather than claiming there's something wrong with the input syntax.
Back-patch to 9.4, where we have to do something because 0ad1a816320a2b53
broke things for many cases having nothing to do with \u0000. 9.3 also has
bogus behavior, but only for that specific escape value, so given the lack
of field complaints it seems better to leave 9.3 alone.
11 years ago
|
|
|
Oid castfunc;
|
|
|
|
|
CoercionPathType ctype;
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
|
|
|
|
|
ctype = find_coercion_pathway(JSONOID, typoid,
|
Fix jsonb Unicode escape processing, and in consequence disallow \u0000.
We've been trying to support \u0000 in JSON values since commit
78ed8e03c67d7333, and have introduced increasingly worse hacks to try to
make it work, such as commit 0ad1a816320a2b53. However, it fundamentally
can't work in the way envisioned, because the stored representation looks
the same as for \\u0000 which is not the same thing at all. It's also
entirely bogus to output \u0000 when de-escaped output is called for.
The right way to do this would be to store an actual 0x00 byte, and then
throw error only if asked to produce de-escaped textual output. However,
getting to that point seems likely to take considerable work and may well
never be practical in the 9.4.x series.
To preserve our options for better behavior while getting rid of the nasty
side-effects of 0ad1a816320a2b53, revert that commit in toto and instead
throw error if \u0000 is used in a context where it needs to be de-escaped.
(These are the same contexts where non-ASCII Unicode escapes throw error
if the database encoding isn't UTF8, so this behavior is by no means
without precedent.)
In passing, make both the \u0000 case and the non-ASCII Unicode case report
ERRCODE_UNTRANSLATABLE_CHARACTER / "unsupported Unicode escape sequence"
rather than claiming there's something wrong with the input syntax.
Back-patch to 9.4, where we have to do something because 0ad1a816320a2b53
broke things for many cases having nothing to do with \u0000. 9.3 also has
bogus behavior, but only for that specific escape value, so given the lack
of field complaints it seems better to leave 9.3 alone.
11 years ago
|
|
|
COERCION_EXPLICIT,
|
|
|
|
|
&castfunc);
|
|
|
|
|
if (ctype == COERCION_PATH_FUNC && OidIsValid(castfunc))
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
{
|
|
|
|
|
*tcategory = JSONTYPE_CAST;
|
|
|
|
|
*outfuncoid = castfunc;
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
/* non builtin type with no cast */
|
|
|
|
|
getTypeOutputInfo(typoid, outfuncoid, &typisvarlena);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
/* any other builtin type */
|
|
|
|
|
getTypeOutputInfo(typoid, outfuncoid, &typisvarlena);
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Turn a Datum into JSON text, appending the string to "result".
 *
 * tcategory and outfuncoid are from a previous call to json_categorize_type,
 * except that if is_null is true then they can be invalid.
 *
 * If key_scalar is true, the value is being printed as a key, so insist
 * it's of an acceptable type, and force it to be quoted.
 */
static void
datum_to_json(Datum val, bool is_null, StringInfo result,
			  JsonTypeCategory tcategory, Oid outfuncoid,
			  bool key_scalar)
{
	char	   *outputstr;
	text	   *jsontext;

	/* Guard against stack overflow from deeply nested arrays/composites */
	check_stack_depth();

	/* callers are expected to ensure that null keys are not passed in */
	Assert(!(key_scalar && is_null));

	/* SQL NULL becomes a JSON null literal, regardless of category */
	if (is_null)
	{
		appendStringInfoString(result, "null");
		return;
	}

	/*
	 * Reject non-scalar categories when a key is requested; JSON object keys
	 * must be strings, so arrays, composites, json, and cast-to-json values
	 * are not acceptable here.
	 */
	if (key_scalar &&
		(tcategory == JSONTYPE_ARRAY ||
		 tcategory == JSONTYPE_COMPOSITE ||
		 tcategory == JSONTYPE_JSON ||
		 tcategory == JSONTYPE_CAST))
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("key value must be scalar, not array, composite, or json")));

	switch (tcategory)
	{
		case JSONTYPE_ARRAY:
			array_to_json_internal(val, result, false);
			break;
		case JSONTYPE_COMPOSITE:
			composite_to_json(val, result, false);
			break;
		case JSONTYPE_BOOL:
			outputstr = DatumGetBool(val) ? "true" : "false";
			/* booleans used as keys must be quoted like any other key */
			if (key_scalar)
				escape_json(result, outputstr);
			else
				appendStringInfoString(result, outputstr);
			break;
		case JSONTYPE_NUMERIC:
			outputstr = OidOutputFunctionCall(outfuncoid, val);

			/*
			 * Don't call escape_json for a non-key if it's a valid JSON
			 * number.  (Values such as NaN or Infinity are not valid JSON
			 * numbers and so get quoted; keys are always quoted.)
			 */
			if (!key_scalar && IsValidJsonNumber(outputstr, strlen(outputstr)))
				appendStringInfoString(result, outputstr);
			else
				escape_json(result, outputstr);
			pfree(outputstr);
			break;
		case JSONTYPE_DATE:
			{
				char		buf[MAXDATELEN + 1];

				JsonEncodeDateTime(buf, val, DATEOID, NULL);
				appendStringInfo(result, "\"%s\"", buf);
			}
			break;
		case JSONTYPE_TIMESTAMP:
			{
				char		buf[MAXDATELEN + 1];

				JsonEncodeDateTime(buf, val, TIMESTAMPOID, NULL);
				appendStringInfo(result, "\"%s\"", buf);
			}
			break;
		case JSONTYPE_TIMESTAMPTZ:
			{
				char		buf[MAXDATELEN + 1];

				/* NULL tzp: render in the session time zone */
				JsonEncodeDateTime(buf, val, TIMESTAMPTZOID, NULL);
				appendStringInfo(result, "\"%s\"", buf);
			}
			break;
		case JSONTYPE_JSON:
			/* JSON and JSONB output will already be escaped */
			outputstr = OidOutputFunctionCall(outfuncoid, val);
			appendStringInfoString(result, outputstr);
			pfree(outputstr);
			break;
		case JSONTYPE_CAST:
			/* outfuncoid refers to a cast function, not an output function */
			jsontext = DatumGetTextPP(OidFunctionCall1(outfuncoid, val));
			outputstr = text_to_cstring(jsontext);
			appendStringInfoString(result, outputstr);
			pfree(outputstr);
			pfree(jsontext);
			break;
		default:
			/* any other type: use its output function and quote the result */
			outputstr = OidOutputFunctionCall(outfuncoid, val);
			escape_json(result, outputstr);
			pfree(outputstr);
			break;
	}
}
|
|
|
|
|
|
|
|
|
|
/*
 * Encode 'value' of datetime type 'typid' into JSON string in ISO format using
 * optionally preallocated buffer 'buf'.  Optional 'tzp' determines time-zone
 * offset (in seconds) in which we want to show timestamptz.
 *
 * Returns 'buf', or a freshly palloc'd buffer of MAXDATELEN + 1 bytes if the
 * caller passed NULL.  Errors out for non-datetime type OIDs and for
 * out-of-range timestamps.
 */
char *
JsonEncodeDateTime(char *buf, Datum value, Oid typid, const int *tzp)
{
	if (!buf)
		buf = palloc(MAXDATELEN + 1);

	switch (typid)
	{
		case DATEOID:
			{
				DateADT		date;
				struct pg_tm tm;

				date = DatumGetDateADT(value);

				/* Same as date_out(), but forcing DateStyle */
				if (DATE_NOT_FINITE(date))
					EncodeSpecialDate(date, buf);
				else
				{
					j2date(date + POSTGRES_EPOCH_JDATE,
						   &(tm.tm_year), &(tm.tm_mon), &(tm.tm_mday));
					EncodeDateOnly(&tm, USE_XSD_DATES, buf);
				}
			}
			break;
		case TIMEOID:
			{
				TimeADT		time = DatumGetTimeADT(value);
				struct pg_tm tt,
						   *tm = &tt;
				fsec_t		fsec;

				/* Same as time_out(), but forcing DateStyle */
				time2tm(time, tm, &fsec);
				EncodeTimeOnly(tm, fsec, false, 0, USE_XSD_DATES, buf);
			}
			break;
		case TIMETZOID:
			{
				TimeTzADT  *time = DatumGetTimeTzADTP(value);
				struct pg_tm tt,
						   *tm = &tt;
				fsec_t		fsec;
				int			tz;

				/* Same as timetz_out(), but forcing DateStyle */
				timetz2tm(time, tm, &fsec, &tz);
				EncodeTimeOnly(tm, fsec, true, tz, USE_XSD_DATES, buf);
			}
			break;
		case TIMESTAMPOID:
			{
				Timestamp	timestamp;
				struct pg_tm tm;
				fsec_t		fsec;

				timestamp = DatumGetTimestamp(value);
				/* Same as timestamp_out(), but forcing DateStyle */
				if (TIMESTAMP_NOT_FINITE(timestamp))
					EncodeSpecialTimestamp(timestamp, buf);
				else if (timestamp2tm(timestamp, NULL, &tm, &fsec, NULL, NULL) == 0)
					EncodeDateTime(&tm, fsec, false, 0, NULL, USE_XSD_DATES, buf);
				else
					ereport(ERROR,
							(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
							 errmsg("timestamp out of range")));
			}
			break;
		case TIMESTAMPTZOID:
			{
				TimestampTz timestamp;
				struct pg_tm tm;
				int			tz;
				fsec_t		fsec;
				const char *tzn = NULL;

				timestamp = DatumGetTimestampTz(value);

				/*
				 * If a time zone is specified, we apply the time-zone shift,
				 * convert timestamptz to pg_tm as if it were without a time
				 * zone, and then use the specified time zone for converting
				 * the timestamp into a string.
				 */
				if (tzp)
				{
					tz = *tzp;
					timestamp -= (TimestampTz) tz * USECS_PER_SEC;
				}

				/*
				 * Same as timestamptz_out(), but forcing DateStyle.  When
				 * tzp was given, the shift above already accounted for the
				 * zone, so we ask timestamp2tm not to compute tz/tzn itself.
				 */
				if (TIMESTAMP_NOT_FINITE(timestamp))
					EncodeSpecialTimestamp(timestamp, buf);
				else if (timestamp2tm(timestamp, tzp ? NULL : &tz, &tm, &fsec,
									  tzp ? NULL : &tzn, NULL) == 0)
				{
					if (tzp)
						tm.tm_isdst = 1;	/* set time-zone presence flag */

					EncodeDateTime(&tm, fsec, true, tz, tzn, USE_XSD_DATES, buf);
				}
				else
					ereport(ERROR,
							(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
							 errmsg("timestamp out of range")));
			}
			break;
		default:
			elog(ERROR, "unknown jsonb value datetime type oid %u", typid);
			return NULL;
	}

	return buf;
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Process a single dimension of an array.
|
|
|
|
|
* If it's the innermost dimension, output the values, otherwise call
|
|
|
|
|
* ourselves recursively to process the next dimension.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
array_dim_to_json(StringInfo result, int dim, int ndims, int *dims, Datum *vals,
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
bool *nulls, int *valcount, JsonTypeCategory tcategory,
|
|
|
|
|
Oid outfuncoid, bool use_line_feeds)
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
const char *sep;
|
|
|
|
|
|
|
|
|
|
Assert(dim < ndims);
|
|
|
|
|
|
|
|
|
|
sep = use_line_feeds ? ",\n " : ",";
|
|
|
|
|
|
|
|
|
|
appendStringInfoChar(result, '[');
|
|
|
|
|
|
|
|
|
|
for (i = 1; i <= dims[dim]; i++)
|
|
|
|
|
{
|
|
|
|
|
if (i > 1)
|
|
|
|
|
appendStringInfoString(result, sep);
|
|
|
|
|
|
|
|
|
|
if (dim + 1 == ndims)
|
|
|
|
|
{
|
|
|
|
|
datum_to_json(vals[*valcount], nulls[*valcount], result, tcategory,
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
outfuncoid, false);
|
|
|
|
|
(*valcount)++;
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
/*
|
|
|
|
|
* Do we want line feeds on inner dimensions of arrays? For now
|
|
|
|
|
* we'll say no.
|
|
|
|
|
*/
|
|
|
|
|
array_dim_to_json(result, dim + 1, ndims, dims, vals, nulls,
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
valcount, tcategory, outfuncoid, false);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
appendStringInfoChar(result, ']');
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Turn an array into JSON.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
array_to_json_internal(Datum array, StringInfo result, bool use_line_feeds)
|
|
|
|
|
{
|
|
|
|
|
ArrayType *v = DatumGetArrayTypeP(array);
|
|
|
|
|
Oid element_type = ARR_ELEMTYPE(v);
|
|
|
|
|
int *dim;
|
|
|
|
|
int ndim;
|
|
|
|
|
int nitems;
|
|
|
|
|
int count = 0;
|
|
|
|
|
Datum *elements;
|
|
|
|
|
bool *nulls;
|
|
|
|
|
int16 typlen;
|
|
|
|
|
bool typbyval;
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
char typalign;
|
|
|
|
|
JsonTypeCategory tcategory;
|
|
|
|
|
Oid outfuncoid;
|
|
|
|
|
|
|
|
|
|
ndim = ARR_NDIM(v);
|
|
|
|
|
dim = ARR_DIMS(v);
|
|
|
|
|
nitems = ArrayGetNItems(ndim, dim);
|
|
|
|
|
|
|
|
|
|
if (nitems <= 0)
|
|
|
|
|
{
|
|
|
|
|
appendStringInfoString(result, "[]");
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
get_typlenbyvalalign(element_type,
|
|
|
|
|
&typlen, &typbyval, &typalign);
|
|
|
|
|
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
json_categorize_type(element_type,
|
|
|
|
|
&tcategory, &outfuncoid);
|
|
|
|
|
|
|
|
|
|
deconstruct_array(v, element_type, typlen, typbyval,
|
|
|
|
|
typalign, &elements, &nulls,
|
|
|
|
|
&nitems);
|
|
|
|
|
|
|
|
|
|
array_dim_to_json(result, 0, ndim, dim, elements, nulls, &count, tcategory,
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
outfuncoid, use_line_feeds);
|
|
|
|
|
|
|
|
|
|
pfree(elements);
|
|
|
|
|
pfree(nulls);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Turn a composite / record into JSON.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
composite_to_json(Datum composite, StringInfo result, bool use_line_feeds)
|
|
|
|
|
{
|
|
|
|
|
HeapTupleHeader td;
|
|
|
|
|
Oid tupType;
|
|
|
|
|
int32 tupTypmod;
|
|
|
|
|
TupleDesc tupdesc;
|
|
|
|
|
HeapTupleData tmptup,
|
|
|
|
|
*tuple;
|
|
|
|
|
int i;
|
|
|
|
|
bool needsep = false;
|
|
|
|
|
const char *sep;
|
|
|
|
|
|
|
|
|
|
sep = use_line_feeds ? ",\n " : ",";
|
|
|
|
|
|
|
|
|
|
td = DatumGetHeapTupleHeader(composite);
|
|
|
|
|
|
|
|
|
|
/* Extract rowtype info and find a tupdesc */
|
|
|
|
|
tupType = HeapTupleHeaderGetTypeId(td);
|
|
|
|
|
tupTypmod = HeapTupleHeaderGetTypMod(td);
|
|
|
|
|
tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
|
|
|
|
|
|
|
|
|
|
/* Build a temporary HeapTuple control structure */
|
|
|
|
|
tmptup.t_len = HeapTupleHeaderGetDatumLength(td);
|
|
|
|
|
tmptup.t_data = td;
|
|
|
|
|
tuple = &tmptup;
|
|
|
|
|
|
|
|
|
|
appendStringInfoChar(result, '{');
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < tupdesc->natts; i++)
|
|
|
|
|
{
|
|
|
|
|
Datum val;
|
|
|
|
|
bool isnull;
|
|
|
|
|
char *attname;
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
JsonTypeCategory tcategory;
|
|
|
|
|
Oid outfuncoid;
|
|
|
|
|
Form_pg_attribute att = TupleDescAttr(tupdesc, i);
|
|
|
|
|
|
|
|
|
|
if (att->attisdropped)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
if (needsep)
|
|
|
|
|
appendStringInfoString(result, sep);
|
|
|
|
|
needsep = true;
|
|
|
|
|
|
|
|
|
|
attname = NameStr(att->attname);
|
|
|
|
|
escape_json(result, attname);
|
|
|
|
|
appendStringInfoChar(result, ':');
|
|
|
|
|
|
|
|
|
|
val = heap_getattr(tuple, i + 1, tupdesc, &isnull);
|
|
|
|
|
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
if (isnull)
|
|
|
|
|
{
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
tcategory = JSONTYPE_NULL;
|
|
|
|
|
outfuncoid = InvalidOid;
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
json_categorize_type(att->atttypid, &tcategory, &outfuncoid);
|
|
|
|
|
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
datum_to_json(val, isnull, result, tcategory, outfuncoid, false);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
appendStringInfoChar(result, '}');
|
|
|
|
|
ReleaseTupleDesc(tupdesc);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
* Append JSON text for "val" to "result".
|
|
|
|
|
*
|
|
|
|
|
* This is just a thin wrapper around datum_to_json. If the same type will be
|
|
|
|
|
* printed many times, avoid using this; better to do the json_categorize_type
|
|
|
|
|
* lookups only once.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
add_json(Datum val, bool is_null, StringInfo result,
|
|
|
|
|
Oid val_type, bool key_scalar)
|
|
|
|
|
{
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
JsonTypeCategory tcategory;
|
|
|
|
|
Oid outfuncoid;
|
|
|
|
|
|
|
|
|
|
if (val_type == InvalidOid)
|
|
|
|
|
ereport(ERROR,
|
|
|
|
|
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
|
|
|
|
errmsg("could not determine input data type")));
|
|
|
|
|
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
if (is_null)
|
|
|
|
|
{
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
tcategory = JSONTYPE_NULL;
|
|
|
|
|
outfuncoid = InvalidOid;
|
|
|
|
|
}
|
|
|
|
|
else
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
json_categorize_type(val_type,
|
|
|
|
|
&tcategory, &outfuncoid);
|
|
|
|
|
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
datum_to_json(val, is_null, result, tcategory, outfuncoid, key_scalar);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* SQL function array_to_json(row)
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
array_to_json(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
Datum array = PG_GETARG_DATUM(0);
|
|
|
|
|
StringInfo result;
|
|
|
|
|
|
|
|
|
|
result = makeStringInfo();
|
|
|
|
|
|
|
|
|
|
array_to_json_internal(array, result, false);
|
|
|
|
|
|
|
|
|
|
PG_RETURN_TEXT_P(cstring_to_text_with_len(result->data, result->len));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* SQL function array_to_json(row, prettybool)
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
array_to_json_pretty(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
Datum array = PG_GETARG_DATUM(0);
|
|
|
|
|
bool use_line_feeds = PG_GETARG_BOOL(1);
|
|
|
|
|
StringInfo result;
|
|
|
|
|
|
|
|
|
|
result = makeStringInfo();
|
|
|
|
|
|
|
|
|
|
array_to_json_internal(array, result, use_line_feeds);
|
|
|
|
|
|
|
|
|
|
PG_RETURN_TEXT_P(cstring_to_text_with_len(result->data, result->len));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* SQL function row_to_json(row)
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
row_to_json(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
Datum array = PG_GETARG_DATUM(0);
|
|
|
|
|
StringInfo result;
|
|
|
|
|
|
|
|
|
|
result = makeStringInfo();
|
|
|
|
|
|
|
|
|
|
composite_to_json(array, result, false);
|
|
|
|
|
|
|
|
|
|
PG_RETURN_TEXT_P(cstring_to_text_with_len(result->data, result->len));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* SQL function row_to_json(row, prettybool)
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
row_to_json_pretty(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
Datum array = PG_GETARG_DATUM(0);
|
|
|
|
|
bool use_line_feeds = PG_GETARG_BOOL(1);
|
|
|
|
|
StringInfo result;
|
|
|
|
|
|
|
|
|
|
result = makeStringInfo();
|
|
|
|
|
|
|
|
|
|
composite_to_json(array, result, use_line_feeds);
|
|
|
|
|
|
|
|
|
|
PG_RETURN_TEXT_P(cstring_to_text_with_len(result->data, result->len));
|
|
|
|
|
}
|
|
|
|
|
|
SQL/JSON: add standard JSON constructor functions
This commit introduces the SQL/JSON standard-conforming constructors for
JSON types:
JSON_ARRAY()
JSON_ARRAYAGG()
JSON_OBJECT()
JSON_OBJECTAGG()
Most of the functionality was already present in PostgreSQL-specific
functions, but these include some new functionality such as the ability
to skip or include NULL values, and to allow duplicate keys or throw
error when they are found, as well as the standard specified syntax to
specify output type and format.
Author: Nikita Glukhov <n.gluhov@postgrespro.ru>
Author: Teodor Sigaev <teodor@sigaev.ru>
Author: Oleg Bartunov <obartunov@gmail.com>
Author: Alexander Korotkov <aekorotkov@gmail.com>
Author: Amit Langote <amitlangote09@gmail.com>
Reviewers have included (in no particular order) Andres Freund, Alexander
Korotkov, Pavel Stehule, Andrew Alsup, Erik Rijkers, Zihong Yu,
Himanshu Upadhyaya, Daniel Gustafsson, Justin Pryzby.
Discussion: https://postgr.es/m/CAF4Au4w2x-5LTnN_bxky-mq4=WOqsGsxSpENCzHRAzSnEd8+WQ@mail.gmail.com
Discussion: https://postgr.es/m/cd0bb935-0158-78a7-08b5-904886deac4b@postgrespro.ru
Discussion: https://postgr.es/m/20220616233130.rparivafipt6doj3@alap3.anarazel.de
Discussion: https://postgr.es/m/abd9b83b-aa66-f230-3d6d-734817f0995d%40postgresql.org
3 years ago
|
|
|
/*
|
|
|
|
|
* Is the given type immutable when coming out of a JSON context?
|
|
|
|
|
*
|
|
|
|
|
* At present, datetimes are all considered mutable, because they
|
|
|
|
|
* depend on timezone. XXX we should also drill down into objects
|
|
|
|
|
* and arrays, but do not.
|
|
|
|
|
*/
|
|
|
|
|
bool
|
|
|
|
|
to_json_is_immutable(Oid typoid)
|
|
|
|
|
{
|
|
|
|
|
JsonTypeCategory tcategory;
|
|
|
|
|
Oid outfuncoid;
|
|
|
|
|
|
|
|
|
|
json_categorize_type(typoid, &tcategory, &outfuncoid);
|
|
|
|
|
|
|
|
|
|
switch (tcategory)
|
|
|
|
|
{
|
|
|
|
|
case JSONTYPE_BOOL:
|
|
|
|
|
case JSONTYPE_JSON:
|
|
|
|
|
case JSONTYPE_NULL:
|
|
|
|
|
return true;
|
|
|
|
|
|
|
|
|
|
case JSONTYPE_DATE:
|
|
|
|
|
case JSONTYPE_TIMESTAMP:
|
|
|
|
|
case JSONTYPE_TIMESTAMPTZ:
|
|
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
case JSONTYPE_ARRAY:
|
|
|
|
|
return false; /* TODO recurse into elements */
|
|
|
|
|
|
|
|
|
|
case JSONTYPE_COMPOSITE:
|
|
|
|
|
return false; /* TODO recurse into fields */
|
|
|
|
|
|
|
|
|
|
case JSONTYPE_NUMERIC:
|
|
|
|
|
case JSONTYPE_CAST:
|
|
|
|
|
case JSONTYPE_OTHER:
|
|
|
|
|
return func_volatile(outfuncoid) == PROVOLATILE_IMMUTABLE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return false; /* not reached */
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* SQL function to_json(anyvalue)
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
to_json(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
Datum val = PG_GETARG_DATUM(0);
|
|
|
|
|
Oid val_type = get_fn_expr_argtype(fcinfo->flinfo, 0);
|
|
|
|
|
StringInfo result;
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
JsonTypeCategory tcategory;
|
|
|
|
|
Oid outfuncoid;
|
|
|
|
|
|
|
|
|
|
if (val_type == InvalidOid)
|
|
|
|
|
ereport(ERROR,
|
|
|
|
|
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
|
|
|
|
errmsg("could not determine input data type")));
|
|
|
|
|
|
Get rid of bogus dependency on typcategory in to_json() and friends.
These functions were relying on typcategory to identify arrays and
composites, which is not reliable and not the normal way to do it.
Using typcategory to identify boolean, numeric types, and json itself is
also pretty questionable, though the code in those cases didn't seem to be
at risk of anything worse than wrong output. Instead, use the standard
lsyscache functions to identify arrays and composites, and rely on a direct
check of the type OID for the other cases.
In HEAD, also be sure to look through domains so that a domain is treated
the same as its base type for conversions to JSON. However, this is a
small behavioral change; given the lack of field complaints, we won't
back-patch it.
In passing, refactor so that there's only one copy of the code that decides
which conversion strategy to apply, not multiple copies that could (and
have) gotten out of sync.
12 years ago
|
|
|
json_categorize_type(val_type,
|
|
|
|
|
&tcategory, &outfuncoid);
|
|
|
|
|
|
|
|
|
|
result = makeStringInfo();
|
|
|
|
|
|
|
|
|
|
datum_to_json(val, false, result, tcategory, outfuncoid, false);
|
|
|
|
|
|
|
|
|
|
PG_RETURN_TEXT_P(cstring_to_text_with_len(result->data, result->len));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * json_agg transition function
 *
 * aggregate input column as a json array value.
 *
 * If absent_on_null is true (json_agg_strict, and JSON_ARRAYAGG with
 * ABSENT ON NULL), NULL inputs are skipped entirely rather than being
 * rendered as JSON null.
 */
static Datum
json_agg_transfn_worker(FunctionCallInfo fcinfo, bool absent_on_null)
{
	MemoryContext aggcontext,
				oldcontext;
	JsonAggState *state;
	Datum		val;

	if (!AggCheckCallContext(fcinfo, &aggcontext))
	{
		/* cannot be called directly because of internal-type argument */
		elog(ERROR, "json_agg_transfn called in non-aggregate context");
	}

	/* A NULL transition value means this is the group's first row */
	if (PG_ARGISNULL(0))
	{
		Oid			arg_type = get_fn_expr_argtype(fcinfo->flinfo, 1);

		if (arg_type == InvalidOid)
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("could not determine input data type")));

		/*
		 * Make this state object in a context where it will persist for the
		 * duration of the aggregate call.  MemoryContextSwitchTo is only
		 * needed the first time, as the StringInfo routines make sure they
		 * use the right context to enlarge the object if necessary.
		 */
		oldcontext = MemoryContextSwitchTo(aggcontext);
		state = (JsonAggState *) palloc(sizeof(JsonAggState));
		state->str = makeStringInfo();
		MemoryContextSwitchTo(oldcontext);

		/* categorize the input type once; it's the same for every row */
		appendStringInfoChar(state->str, '[');
		json_categorize_type(arg_type, &state->val_category,
							 &state->val_output_func);
	}
	else
	{
		state = (JsonAggState *) PG_GETARG_POINTER(0);
	}

	/* in ABSENT ON NULL mode, NULL inputs contribute nothing */
	if (absent_on_null && PG_ARGISNULL(1))
		PG_RETURN_POINTER(state);

	/* len > 1 means something already follows the '[' (not first element) */
	if (state->str->len > 1)
		appendStringInfoString(state->str, ", ");

	/* fast path for NULLs */
	if (PG_ARGISNULL(1))
	{
		datum_to_json((Datum) 0, true, state->str, JSONTYPE_NULL,
					  InvalidOid, false);
		PG_RETURN_POINTER(state);
	}

	val = PG_GETARG_DATUM(1);

	/* add some whitespace if structured type and not first item */
	if (!PG_ARGISNULL(0) && state->str->len > 1 &&
		(state->val_category == JSONTYPE_ARRAY ||
		 state->val_category == JSONTYPE_COMPOSITE))
	{
		appendStringInfoString(state->str, "\n ");
	}

	datum_to_json(val, false, state->str, state->val_category,
				  state->val_output_func, false);

	/*
	 * The transition type for json_agg() is declared to be "internal", which
	 * is a pass-by-value type the same size as a pointer.  So we can safely
	 * pass the JsonAggState pointer through nodeAgg.c's machinations.
	 */
	PG_RETURN_POINTER(state);
}
|
|
|
|
|
|
SQL/JSON: add standard JSON constructor functions
This commit introduces the SQL/JSON standard-conforming constructors for
JSON types:
JSON_ARRAY()
JSON_ARRAYAGG()
JSON_OBJECT()
JSON_OBJECTAGG()
Most of the functionality was already present in PostgreSQL-specific
functions, but these include some new functionality such as the ability
to skip or include NULL values, and to allow duplicate keys or throw
error when they are found, as well as the standard specified syntax to
specify output type and format.
Author: Nikita Glukhov <n.gluhov@postgrespro.ru>
Author: Teodor Sigaev <teodor@sigaev.ru>
Author: Oleg Bartunov <obartunov@gmail.com>
Author: Alexander Korotkov <aekorotkov@gmail.com>
Author: Amit Langote <amitlangote09@gmail.com>
Reviewers have included (in no particular order) Andres Freund, Alexander
Korotkov, Pavel Stehule, Andrew Alsup, Erik Rijkers, Zihong Yu,
Himanshu Upadhyaya, Daniel Gustafsson, Justin Pryzby.
Discussion: https://postgr.es/m/CAF4Au4w2x-5LTnN_bxky-mq4=WOqsGsxSpENCzHRAzSnEd8+WQ@mail.gmail.com
Discussion: https://postgr.es/m/cd0bb935-0158-78a7-08b5-904886deac4b@postgrespro.ru
Discussion: https://postgr.es/m/20220616233130.rparivafipt6doj3@alap3.anarazel.de
Discussion: https://postgr.es/m/abd9b83b-aa66-f230-3d6d-734817f0995d%40postgresql.org
3 years ago
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* json_agg aggregate function
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
json_agg_transfn(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
return json_agg_transfn_worker(fcinfo, false);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* json_agg_strict aggregate function
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
json_agg_strict_transfn(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
return json_agg_transfn_worker(fcinfo, true);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* json_agg final function
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
json_agg_finalfn(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
JsonAggState *state;
|
|
|
|
|
|
|
|
|
|
/* cannot be called directly because of internal-type argument */
|
|
|
|
|
Assert(AggCheckCallContext(fcinfo, NULL));
|
|
|
|
|
|
|
|
|
|
state = PG_ARGISNULL(0) ?
|
|
|
|
|
NULL :
|
|
|
|
|
(JsonAggState *) PG_GETARG_POINTER(0);
|
|
|
|
|
|
|
|
|
|
/* NULL result for no rows in, as is standard with aggregates */
|
|
|
|
|
if (state == NULL)
|
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
|
|
|
|
|
/* Else return state with appropriate array terminator added */
|
|
|
|
|
PG_RETURN_TEXT_P(catenate_stringinfo_string(state->str, "]"));
|
|
|
|
|
}
|
|
|
|
|
|
SQL/JSON: add standard JSON constructor functions
This commit introduces the SQL/JSON standard-conforming constructors for
JSON types:
JSON_ARRAY()
JSON_ARRAYAGG()
JSON_OBJECT()
JSON_OBJECTAGG()
Most of the functionality was already present in PostgreSQL-specific
functions, but these include some new functionality such as the ability
to skip or include NULL values, and to allow duplicate keys or throw
error when they are found, as well as the standard specified syntax to
specify output type and format.
Author: Nikita Glukhov <n.gluhov@postgrespro.ru>
Author: Teodor Sigaev <teodor@sigaev.ru>
Author: Oleg Bartunov <obartunov@gmail.com>
Author: Alexander Korotkov <aekorotkov@gmail.com>
Author: Amit Langote <amitlangote09@gmail.com>
Reviewers have included (in no particular order) Andres Freund, Alexander
Korotkov, Pavel Stehule, Andrew Alsup, Erik Rijkers, Zihong Yu,
Himanshu Upadhyaya, Daniel Gustafsson, Justin Pryzby.
Discussion: https://postgr.es/m/CAF4Au4w2x-5LTnN_bxky-mq4=WOqsGsxSpENCzHRAzSnEd8+WQ@mail.gmail.com
Discussion: https://postgr.es/m/cd0bb935-0158-78a7-08b5-904886deac4b@postgrespro.ru
Discussion: https://postgr.es/m/20220616233130.rparivafipt6doj3@alap3.anarazel.de
Discussion: https://postgr.es/m/abd9b83b-aa66-f230-3d6d-734817f0995d%40postgresql.org
3 years ago
|
|
|
/* Functions implementing hash table for key uniqueness check */
|
|
|
|
|
static uint32
|
|
|
|
|
json_unique_hash(const void *key, Size keysize)
|
|
|
|
|
{
|
|
|
|
|
const JsonUniqueHashEntry *entry = (JsonUniqueHashEntry *) key;
|
|
|
|
|
uint32 hash = hash_bytes_uint32(entry->object_id);
|
|
|
|
|
|
|
|
|
|
hash ^= hash_bytes((const unsigned char *) entry->key, entry->key_len);
|
|
|
|
|
|
|
|
|
|
return DatumGetUInt32(hash);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
json_unique_hash_match(const void *key1, const void *key2, Size keysize)
|
|
|
|
|
{
|
|
|
|
|
const JsonUniqueHashEntry *entry1 = (const JsonUniqueHashEntry *) key1;
|
|
|
|
|
const JsonUniqueHashEntry *entry2 = (const JsonUniqueHashEntry *) key2;
|
|
|
|
|
|
|
|
|
|
if (entry1->object_id != entry2->object_id)
|
|
|
|
|
return entry1->object_id > entry2->object_id ? 1 : -1;
|
|
|
|
|
|
|
|
|
|
if (entry1->key_len != entry2->key_len)
|
|
|
|
|
return entry1->key_len > entry2->key_len ? 1 : -1;
|
|
|
|
|
|
|
|
|
|
return strncmp(entry1->key, entry2->key, entry1->key_len);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Uniqueness detection support.
|
|
|
|
|
*
|
|
|
|
|
* In order to detect uniqueness during building or parsing of a JSON
|
|
|
|
|
* object, we maintain a hash table of key names already seen.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
json_unique_check_init(JsonUniqueCheckState *cxt)
|
|
|
|
|
{
|
|
|
|
|
HASHCTL ctl;
|
|
|
|
|
|
|
|
|
|
memset(&ctl, 0, sizeof(ctl));
|
|
|
|
|
ctl.keysize = sizeof(JsonUniqueHashEntry);
|
|
|
|
|
ctl.entrysize = sizeof(JsonUniqueHashEntry);
|
|
|
|
|
ctl.hcxt = CurrentMemoryContext;
|
|
|
|
|
ctl.hash = json_unique_hash;
|
|
|
|
|
ctl.match = json_unique_hash_match;
|
|
|
|
|
|
|
|
|
|
*cxt = hash_create("json object hashtable",
|
|
|
|
|
32,
|
|
|
|
|
&ctl,
|
|
|
|
|
HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION | HASH_COMPARE);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
json_unique_builder_init(JsonUniqueBuilderState *cxt)
|
|
|
|
|
{
|
|
|
|
|
json_unique_check_init(&cxt->check);
|
|
|
|
|
cxt->mcxt = CurrentMemoryContext;
|
|
|
|
|
cxt->skipped_keys.data = NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static bool
|
|
|
|
|
json_unique_check_key(JsonUniqueCheckState *cxt, const char *key, int object_id)
|
|
|
|
|
{
|
|
|
|
|
JsonUniqueHashEntry entry;
|
|
|
|
|
bool found;
|
|
|
|
|
|
|
|
|
|
entry.key = key;
|
|
|
|
|
entry.key_len = strlen(key);
|
|
|
|
|
entry.object_id = object_id;
|
|
|
|
|
|
|
|
|
|
(void) hash_search(*cxt, &entry, HASH_ENTER, &found);
|
|
|
|
|
|
|
|
|
|
return !found;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* On-demand initialization of a throwaway StringInfo. This is used to
|
|
|
|
|
* read a key name that we don't need to store in the output object, for
|
|
|
|
|
* duplicate key detection when the value is NULL.
|
|
|
|
|
*/
|
|
|
|
|
static StringInfo
|
|
|
|
|
json_unique_builder_get_throwawaybuf(JsonUniqueBuilderState *cxt)
|
|
|
|
|
{
|
|
|
|
|
StringInfo out = &cxt->skipped_keys;
|
|
|
|
|
|
|
|
|
|
if (!out->data)
|
|
|
|
|
{
|
|
|
|
|
MemoryContext oldcxt = MemoryContextSwitchTo(cxt->mcxt);
|
|
|
|
|
|
|
|
|
|
initStringInfo(out);
|
|
|
|
|
MemoryContextSwitchTo(oldcxt);
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
/* Just reset the string to empty */
|
|
|
|
|
out->len = 0;
|
|
|
|
|
|
|
|
|
|
return out;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* json_object_agg transition function.
|
|
|
|
|
*
|
|
|
|
|
* aggregate two input columns as a single json object value.
|
|
|
|
|
*/
|
SQL/JSON: add standard JSON constructor functions
This commit introduces the SQL/JSON standard-conforming constructors for
JSON types:
JSON_ARRAY()
JSON_ARRAYAGG()
JSON_OBJECT()
JSON_OBJECTAGG()
Most of the functionality was already present in PostgreSQL-specific
functions, but these include some new functionality such as the ability
to skip or include NULL values, and to allow duplicate keys or throw
error when they are found, as well as the standard specified syntax to
specify output type and format.
Author: Nikita Glukhov <n.gluhov@postgrespro.ru>
Author: Teodor Sigaev <teodor@sigaev.ru>
Author: Oleg Bartunov <obartunov@gmail.com>
Author: Alexander Korotkov <aekorotkov@gmail.com>
Author: Amit Langote <amitlangote09@gmail.com>
Reviewers have included (in no particular order) Andres Freund, Alexander
Korotkov, Pavel Stehule, Andrew Alsup, Erik Rijkers, Zihong Yu,
Himanshu Upadhyaya, Daniel Gustafsson, Justin Pryzby.
Discussion: https://postgr.es/m/CAF4Au4w2x-5LTnN_bxky-mq4=WOqsGsxSpENCzHRAzSnEd8+WQ@mail.gmail.com
Discussion: https://postgr.es/m/cd0bb935-0158-78a7-08b5-904886deac4b@postgrespro.ru
Discussion: https://postgr.es/m/20220616233130.rparivafipt6doj3@alap3.anarazel.de
Discussion: https://postgr.es/m/abd9b83b-aa66-f230-3d6d-734817f0995d%40postgresql.org
3 years ago
|
|
|
static Datum
json_object_agg_transfn_worker(FunctionCallInfo fcinfo,
							   bool absent_on_null, bool unique_keys)
{
	MemoryContext aggcontext,
				oldcontext;
	JsonAggState *state;
	StringInfo	out;			/* where the key gets written this round */
	Datum		arg;
	bool		skip;			/* true if this NULL value is being dropped */
	int			key_offset;		/* offset of this key within 'out' */

	if (!AggCheckCallContext(fcinfo, &aggcontext))
	{
		/* cannot be called directly because of internal-type argument */
		elog(ERROR, "json_object_agg_transfn called in non-aggregate context");
	}

	/* First call for this group?  Then build the transition state. */
	if (PG_ARGISNULL(0))
	{
		Oid			arg_type;

		/*
		 * Make the StringInfo in a context where it will persist for the
		 * duration of the aggregate call. Switching context is only needed
		 * for this initial step, as the StringInfo and dynahash routines make
		 * sure they use the right context to enlarge the object if necessary.
		 */
		oldcontext = MemoryContextSwitchTo(aggcontext);
		state = (JsonAggState *) palloc(sizeof(JsonAggState));
		state->str = makeStringInfo();
		if (unique_keys)
			json_unique_builder_init(&state->unique_check);
		else
			memset(&state->unique_check, 0, sizeof(state->unique_check));
		MemoryContextSwitchTo(oldcontext);

		/* Determine how to convert the key (argument 1) to JSON */
		arg_type = get_fn_expr_argtype(fcinfo->flinfo, 1);

		if (arg_type == InvalidOid)
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("could not determine data type for argument %d", 1)));

		json_categorize_type(arg_type, &state->key_category,
							 &state->key_output_func);

		/* Likewise for the value (argument 2) */
		arg_type = get_fn_expr_argtype(fcinfo->flinfo, 2);

		if (arg_type == InvalidOid)
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("could not determine data type for argument %d", 2)));

		json_categorize_type(arg_type, &state->val_category,
							 &state->val_output_func);

		appendStringInfoString(state->str, "{ ");
	}
	else
	{
		state = (JsonAggState *) PG_GETARG_POINTER(0);
	}

	/*
	 * Note: since json_object_agg() is declared as taking type "any", the
	 * parser will not do any type conversion on unknown-type literals (that
	 * is, undecorated strings or NULLs). Such values will arrive here as
	 * type UNKNOWN, which fortunately does not matter to us, since
	 * unknownout() works fine.
	 */

	/* Object keys may never be NULL */
	if (PG_ARGISNULL(1))
		ereport(ERROR,
				(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
				 errmsg("null value not allowed for object key")));

	/* Skip null values if absent_on_null */
	skip = absent_on_null && PG_ARGISNULL(2);

	if (skip)
	{
		/*
		 * We got a NULL value and we're not storing those; if we're not
		 * testing key uniqueness, we're done. If we are, use the throwaway
		 * buffer to store the key name so that we can check it.
		 */
		if (!unique_keys)
			PG_RETURN_POINTER(state);

		out = json_unique_builder_get_throwawaybuf(&state->unique_check);
	}
	else
	{
		out = state->str;

		/*
		 * Append comma delimiter only if we have already output some fields
		 * after the initial string "{ ".
		 */
		if (out->len > 2)
			appendStringInfoString(out, ", ");
	}

	arg = PG_GETARG_DATUM(1);

	/* Remember where the key starts so it can be checked for uniqueness */
	key_offset = out->len;

	datum_to_json(arg, false, out, state->key_category,
				  state->key_output_func, true);

	if (unique_keys)
	{
		/*
		 * The key is the JSON-escaped text just appended at key_offset; note
		 * it is NUL-terminated because StringInfo keeps its buffer so.
		 */
		const char *key = &out->data[key_offset];

		if (!json_unique_check_key(&state->unique_check.check, key, 0))
			ereport(ERROR,
					errcode(ERRCODE_DUPLICATE_JSON_OBJECT_KEY_VALUE),
					errmsg("duplicate JSON key %s", key));

		/* Key was unique but value is a skipped NULL: nothing to emit */
		if (skip)
			PG_RETURN_POINTER(state);
	}

	appendStringInfoString(state->str, " : ");

	if (PG_ARGISNULL(2))
		arg = (Datum) 0;
	else
		arg = PG_GETARG_DATUM(2);

	datum_to_json(arg, PG_ARGISNULL(2), state->str, state->val_category,
				  state->val_output_func, false);

	PG_RETURN_POINTER(state);
}
|
|
|
|
|
|
SQL/JSON: add standard JSON constructor functions
This commit introduces the SQL/JSON standard-conforming constructors for
JSON types:
JSON_ARRAY()
JSON_ARRAYAGG()
JSON_OBJECT()
JSON_OBJECTAGG()
Most of the functionality was already present in PostgreSQL-specific
functions, but these include some new functionality such as the ability
to skip or include NULL values, and to allow duplicate keys or throw
error when they are found, as well as the standard specified syntax to
specify output type and format.
Author: Nikita Glukhov <n.gluhov@postgrespro.ru>
Author: Teodor Sigaev <teodor@sigaev.ru>
Author: Oleg Bartunov <obartunov@gmail.com>
Author: Alexander Korotkov <aekorotkov@gmail.com>
Author: Amit Langote <amitlangote09@gmail.com>
Reviewers have included (in no particular order) Andres Freund, Alexander
Korotkov, Pavel Stehule, Andrew Alsup, Erik Rijkers, Zihong Yu,
Himanshu Upadhyaya, Daniel Gustafsson, Justin Pryzby.
Discussion: https://postgr.es/m/CAF4Au4w2x-5LTnN_bxky-mq4=WOqsGsxSpENCzHRAzSnEd8+WQ@mail.gmail.com
Discussion: https://postgr.es/m/cd0bb935-0158-78a7-08b5-904886deac4b@postgrespro.ru
Discussion: https://postgr.es/m/20220616233130.rparivafipt6doj3@alap3.anarazel.de
Discussion: https://postgr.es/m/abd9b83b-aa66-f230-3d6d-734817f0995d%40postgresql.org
3 years ago
|
|
|
/*
|
|
|
|
|
* json_object_agg aggregate function
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
json_object_agg_transfn(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
return json_object_agg_transfn_worker(fcinfo, false, false);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* json_object_agg_strict aggregate function
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
json_object_agg_strict_transfn(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
return json_object_agg_transfn_worker(fcinfo, true, false);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* json_object_agg_unique aggregate function
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
json_object_agg_unique_transfn(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
return json_object_agg_transfn_worker(fcinfo, false, true);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* json_object_agg_unique_strict aggregate function
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
json_object_agg_unique_strict_transfn(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
return json_object_agg_transfn_worker(fcinfo, true, true);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* json_object_agg final function.
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
json_object_agg_finalfn(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
JsonAggState *state;
|
|
|
|
|
|
|
|
|
|
/* cannot be called directly because of internal-type argument */
|
|
|
|
|
Assert(AggCheckCallContext(fcinfo, NULL));
|
|
|
|
|
|
|
|
|
|
state = PG_ARGISNULL(0) ? NULL : (JsonAggState *) PG_GETARG_POINTER(0);
|
|
|
|
|
|
|
|
|
|
/* NULL result for no rows in, as is standard with aggregates */
|
|
|
|
|
if (state == NULL)
|
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
|
|
|
|
|
/* Else return state with appropriate object terminator added */
|
|
|
|
|
PG_RETURN_TEXT_P(catenate_stringinfo_string(state->str, " }"));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Helper function for aggregates: return given StringInfo's contents plus
|
|
|
|
|
* specified trailing string, as a text datum. We need this because aggregate
|
|
|
|
|
* final functions are not allowed to modify the aggregate state.
|
|
|
|
|
*/
|
|
|
|
|
static text *
|
|
|
|
|
catenate_stringinfo_string(StringInfo buffer, const char *addon)
|
|
|
|
|
{
|
|
|
|
|
/* custom version of cstring_to_text_with_len */
|
|
|
|
|
int buflen = buffer->len;
|
|
|
|
|
int addlen = strlen(addon);
|
|
|
|
|
text *result = (text *) palloc(buflen + addlen + VARHDRSZ);
|
|
|
|
|
|
|
|
|
|
SET_VARSIZE(result, buflen + addlen + VARHDRSZ);
|
|
|
|
|
memcpy(VARDATA(result), buffer->data, buflen);
|
|
|
|
|
memcpy(VARDATA(result) + buflen, addon, addlen);
|
|
|
|
|
|
|
|
|
|
return result;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Datum
|
SQL/JSON: add standard JSON constructor functions
This commit introduces the SQL/JSON standard-conforming constructors for
JSON types:
JSON_ARRAY()
JSON_ARRAYAGG()
JSON_OBJECT()
JSON_OBJECTAGG()
Most of the functionality was already present in PostgreSQL-specific
functions, but these include some new functionality such as the ability
to skip or include NULL values, and to allow duplicate keys or throw
error when they are found, as well as the standard specified syntax to
specify output type and format.
Author: Nikita Glukhov <n.gluhov@postgrespro.ru>
Author: Teodor Sigaev <teodor@sigaev.ru>
Author: Oleg Bartunov <obartunov@gmail.com>
Author: Alexander Korotkov <aekorotkov@gmail.com>
Author: Amit Langote <amitlangote09@gmail.com>
Reviewers have included (in no particular order) Andres Freund, Alexander
Korotkov, Pavel Stehule, Andrew Alsup, Erik Rijkers, Zihong Yu,
Himanshu Upadhyaya, Daniel Gustafsson, Justin Pryzby.
Discussion: https://postgr.es/m/CAF4Au4w2x-5LTnN_bxky-mq4=WOqsGsxSpENCzHRAzSnEd8+WQ@mail.gmail.com
Discussion: https://postgr.es/m/cd0bb935-0158-78a7-08b5-904886deac4b@postgrespro.ru
Discussion: https://postgr.es/m/20220616233130.rparivafipt6doj3@alap3.anarazel.de
Discussion: https://postgr.es/m/abd9b83b-aa66-f230-3d6d-734817f0995d%40postgresql.org
3 years ago
|
|
|
json_build_object_worker(int nargs, Datum *args, bool *nulls, Oid *types,
|
|
|
|
|
bool absent_on_null, bool unique_keys)
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
const char *sep = "";
|
|
|
|
|
StringInfo result;
|
SQL/JSON: add standard JSON constructor functions
This commit introduces the SQL/JSON standard-conforming constructors for
JSON types:
JSON_ARRAY()
JSON_ARRAYAGG()
JSON_OBJECT()
JSON_OBJECTAGG()
Most of the functionality was already present in PostgreSQL-specific
functions, but these include some new functionality such as the ability
to skip or include NULL values, and to allow duplicate keys or throw
error when they are found, as well as the standard specified syntax to
specify output type and format.
Author: Nikita Glukhov <n.gluhov@postgrespro.ru>
Author: Teodor Sigaev <teodor@sigaev.ru>
Author: Oleg Bartunov <obartunov@gmail.com>
Author: Alexander Korotkov <aekorotkov@gmail.com>
Author: Amit Langote <amitlangote09@gmail.com>
Reviewers have included (in no particular order) Andres Freund, Alexander
Korotkov, Pavel Stehule, Andrew Alsup, Erik Rijkers, Zihong Yu,
Himanshu Upadhyaya, Daniel Gustafsson, Justin Pryzby.
Discussion: https://postgr.es/m/CAF4Au4w2x-5LTnN_bxky-mq4=WOqsGsxSpENCzHRAzSnEd8+WQ@mail.gmail.com
Discussion: https://postgr.es/m/cd0bb935-0158-78a7-08b5-904886deac4b@postgrespro.ru
Discussion: https://postgr.es/m/20220616233130.rparivafipt6doj3@alap3.anarazel.de
Discussion: https://postgr.es/m/abd9b83b-aa66-f230-3d6d-734817f0995d%40postgresql.org
3 years ago
|
|
|
JsonUniqueBuilderState unique_check;
|
|
|
|
|
|
|
|
|
|
if (nargs % 2 != 0)
|
|
|
|
|
ereport(ERROR,
|
|
|
|
|
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
|
|
|
|
errmsg("argument list must have even number of elements"),
|
|
|
|
|
/* translator: %s is a SQL function name */
|
|
|
|
|
errhint("The arguments of %s must consist of alternating keys and values.",
|
|
|
|
|
"json_build_object()")));
|
|
|
|
|
|
|
|
|
|
result = makeStringInfo();
|
|
|
|
|
|
|
|
|
|
appendStringInfoChar(result, '{');
|
|
|
|
|
|
SQL/JSON: add standard JSON constructor functions
This commit introduces the SQL/JSON standard-conforming constructors for
JSON types:
JSON_ARRAY()
JSON_ARRAYAGG()
JSON_OBJECT()
JSON_OBJECTAGG()
Most of the functionality was already present in PostgreSQL-specific
functions, but these include some new functionality such as the ability
to skip or include NULL values, and to allow duplicate keys or throw
error when they are found, as well as the standard specified syntax to
specify output type and format.
Author: Nikita Glukhov <n.gluhov@postgrespro.ru>
Author: Teodor Sigaev <teodor@sigaev.ru>
Author: Oleg Bartunov <obartunov@gmail.com>
Author: Alexander Korotkov <aekorotkov@gmail.com>
Author: Amit Langote <amitlangote09@gmail.com>
Reviewers have included (in no particular order) Andres Freund, Alexander
Korotkov, Pavel Stehule, Andrew Alsup, Erik Rijkers, Zihong Yu,
Himanshu Upadhyaya, Daniel Gustafsson, Justin Pryzby.
Discussion: https://postgr.es/m/CAF4Au4w2x-5LTnN_bxky-mq4=WOqsGsxSpENCzHRAzSnEd8+WQ@mail.gmail.com
Discussion: https://postgr.es/m/cd0bb935-0158-78a7-08b5-904886deac4b@postgrespro.ru
Discussion: https://postgr.es/m/20220616233130.rparivafipt6doj3@alap3.anarazel.de
Discussion: https://postgr.es/m/abd9b83b-aa66-f230-3d6d-734817f0995d%40postgresql.org
3 years ago
|
|
|
if (unique_keys)
|
|
|
|
|
json_unique_builder_init(&unique_check);
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < nargs; i += 2)
|
|
|
|
|
{
|
SQL/JSON: add standard JSON constructor functions
This commit introduces the SQL/JSON standard-conforming constructors for
JSON types:
JSON_ARRAY()
JSON_ARRAYAGG()
JSON_OBJECT()
JSON_OBJECTAGG()
Most of the functionality was already present in PostgreSQL-specific
functions, but these include some new functionality such as the ability
to skip or include NULL values, and to allow duplicate keys or throw
error when they are found, as well as the standard specified syntax to
specify output type and format.
Author: Nikita Glukhov <n.gluhov@postgrespro.ru>
Author: Teodor Sigaev <teodor@sigaev.ru>
Author: Oleg Bartunov <obartunov@gmail.com>
Author: Alexander Korotkov <aekorotkov@gmail.com>
Author: Amit Langote <amitlangote09@gmail.com>
Reviewers have included (in no particular order) Andres Freund, Alexander
Korotkov, Pavel Stehule, Andrew Alsup, Erik Rijkers, Zihong Yu,
Himanshu Upadhyaya, Daniel Gustafsson, Justin Pryzby.
Discussion: https://postgr.es/m/CAF4Au4w2x-5LTnN_bxky-mq4=WOqsGsxSpENCzHRAzSnEd8+WQ@mail.gmail.com
Discussion: https://postgr.es/m/cd0bb935-0158-78a7-08b5-904886deac4b@postgrespro.ru
Discussion: https://postgr.es/m/20220616233130.rparivafipt6doj3@alap3.anarazel.de
Discussion: https://postgr.es/m/abd9b83b-aa66-f230-3d6d-734817f0995d%40postgresql.org
3 years ago
|
|
|
StringInfo out;
|
|
|
|
|
bool skip;
|
|
|
|
|
int key_offset;
|
|
|
|
|
|
|
|
|
|
/* Skip null values if absent_on_null */
|
|
|
|
|
skip = absent_on_null && nulls[i + 1];
|
|
|
|
|
|
|
|
|
|
if (skip)
|
|
|
|
|
{
|
|
|
|
|
/* If key uniqueness check is needed we must save skipped keys */
|
|
|
|
|
if (!unique_keys)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
out = json_unique_builder_get_throwawaybuf(&unique_check);
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
appendStringInfoString(result, sep);
|
|
|
|
|
sep = ", ";
|
|
|
|
|
out = result;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* process key */
|
|
|
|
|
if (nulls[i])
|
|
|
|
|
ereport(ERROR,
|
SQL/JSON: add standard JSON constructor functions
This commit introduces the SQL/JSON standard-conforming constructors for
JSON types:
JSON_ARRAY()
JSON_ARRAYAGG()
JSON_OBJECT()
JSON_OBJECTAGG()
Most of the functionality was already present in PostgreSQL-specific
functions, but these include some new functionality such as the ability
to skip or include NULL values, and to allow duplicate keys or throw
error when they are found, as well as the standard specified syntax to
specify output type and format.
Author: Nikita Glukhov <n.gluhov@postgrespro.ru>
Author: Teodor Sigaev <teodor@sigaev.ru>
Author: Oleg Bartunov <obartunov@gmail.com>
Author: Alexander Korotkov <aekorotkov@gmail.com>
Author: Amit Langote <amitlangote09@gmail.com>
Reviewers have included (in no particular order) Andres Freund, Alexander
Korotkov, Pavel Stehule, Andrew Alsup, Erik Rijkers, Zihong Yu,
Himanshu Upadhyaya, Daniel Gustafsson, Justin Pryzby.
Discussion: https://postgr.es/m/CAF4Au4w2x-5LTnN_bxky-mq4=WOqsGsxSpENCzHRAzSnEd8+WQ@mail.gmail.com
Discussion: https://postgr.es/m/cd0bb935-0158-78a7-08b5-904886deac4b@postgrespro.ru
Discussion: https://postgr.es/m/20220616233130.rparivafipt6doj3@alap3.anarazel.de
Discussion: https://postgr.es/m/abd9b83b-aa66-f230-3d6d-734817f0995d%40postgresql.org
3 years ago
|
|
|
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
|
|
|
|
|
errmsg("null value not allowed for object key")));
|
|
|
|
|
|
|
|
|
|
/* save key offset before appending it */
|
|
|
|
|
key_offset = out->len;
|
|
|
|
|
|
|
|
|
|
add_json(args[i], false, out, types[i], true);
|
|
|
|
|
|
|
|
|
|
if (unique_keys)
|
|
|
|
|
{
|
|
|
|
|
/* check key uniqueness after key appending */
|
|
|
|
|
const char *key = &out->data[key_offset];
|
|
|
|
|
|
SQL/JSON: add standard JSON constructor functions
This commit introduces the SQL/JSON standard-conforming constructors for
JSON types:
JSON_ARRAY()
JSON_ARRAYAGG()
JSON_OBJECT()
JSON_OBJECTAGG()
Most of the functionality was already present in PostgreSQL-specific
functions, but these include some new functionality such as the ability
to skip or include NULL values, and to allow duplicate keys or throw
error when they are found, as well as the standard specified syntax to
specify output type and format.
Author: Nikita Glukhov <n.gluhov@postgrespro.ru>
Author: Teodor Sigaev <teodor@sigaev.ru>
Author: Oleg Bartunov <obartunov@gmail.com>
Author: Alexander Korotkov <aekorotkov@gmail.com>
Author: Amit Langote <amitlangote09@gmail.com>
Reviewers have included (in no particular order) Andres Freund, Alexander
Korotkov, Pavel Stehule, Andrew Alsup, Erik Rijkers, Zihong Yu,
Himanshu Upadhyaya, Daniel Gustafsson, Justin Pryzby.
Discussion: https://postgr.es/m/CAF4Au4w2x-5LTnN_bxky-mq4=WOqsGsxSpENCzHRAzSnEd8+WQ@mail.gmail.com
Discussion: https://postgr.es/m/cd0bb935-0158-78a7-08b5-904886deac4b@postgrespro.ru
Discussion: https://postgr.es/m/20220616233130.rparivafipt6doj3@alap3.anarazel.de
Discussion: https://postgr.es/m/abd9b83b-aa66-f230-3d6d-734817f0995d%40postgresql.org
3 years ago
|
|
|
if (!json_unique_check_key(&unique_check.check, key, 0))
|
|
|
|
|
ereport(ERROR,
|
|
|
|
|
errcode(ERRCODE_DUPLICATE_JSON_OBJECT_KEY_VALUE),
|
|
|
|
|
errmsg("duplicate JSON key %s", key));
|
|
|
|
|
|
|
|
|
|
if (skip)
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
appendStringInfoString(result, " : ");
|
|
|
|
|
|
|
|
|
|
/* process value */
|
|
|
|
|
add_json(args[i + 1], nulls[i + 1], result, types[i + 1], false);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
appendStringInfoChar(result, '}');
|
|
|
|
|
|
SQL/JSON: add standard JSON constructor functions
This commit introduces the SQL/JSON standard-conforming constructors for
JSON types:
JSON_ARRAY()
JSON_ARRAYAGG()
JSON_OBJECT()
JSON_OBJECTAGG()
Most of the functionality was already present in PostgreSQL-specific
functions, but these include some new functionality such as the ability
to skip or include NULL values, and to allow duplicate keys or throw
error when they are found, as well as the standard specified syntax to
specify output type and format.
Author: Nikita Glukhov <n.gluhov@postgrespro.ru>
Author: Teodor Sigaev <teodor@sigaev.ru>
Author: Oleg Bartunov <obartunov@gmail.com>
Author: Alexander Korotkov <aekorotkov@gmail.com>
Author: Amit Langote <amitlangote09@gmail.com>
Reviewers have included (in no particular order) Andres Freund, Alexander
Korotkov, Pavel Stehule, Andrew Alsup, Erik Rijkers, Zihong Yu,
Himanshu Upadhyaya, Daniel Gustafsson, Justin Pryzby.
Discussion: https://postgr.es/m/CAF4Au4w2x-5LTnN_bxky-mq4=WOqsGsxSpENCzHRAzSnEd8+WQ@mail.gmail.com
Discussion: https://postgr.es/m/cd0bb935-0158-78a7-08b5-904886deac4b@postgrespro.ru
Discussion: https://postgr.es/m/20220616233130.rparivafipt6doj3@alap3.anarazel.de
Discussion: https://postgr.es/m/abd9b83b-aa66-f230-3d6d-734817f0995d%40postgresql.org
3 years ago
|
|
|
return PointerGetDatum(cstring_to_text_with_len(result->data, result->len));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* SQL function json_build_object(variadic "any")
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
json_build_object(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
Datum *args;
|
|
|
|
|
bool *nulls;
|
|
|
|
|
Oid *types;
|
|
|
|
|
|
|
|
|
|
/* build argument values to build the object */
|
|
|
|
|
int nargs = extract_variadic_args(fcinfo, 0, true,
|
|
|
|
|
&args, &types, &nulls);
|
|
|
|
|
|
|
|
|
|
if (nargs < 0)
|
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
|
|
|
|
|
PG_RETURN_DATUM(json_build_object_worker(nargs, args, nulls, types, false, false));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* degenerate case of json_build_object where it gets 0 arguments.
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
json_build_object_noargs(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
PG_RETURN_TEXT_P(cstring_to_text_with_len("{}", 2));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Datum
|
SQL/JSON: add standard JSON constructor functions
This commit introduces the SQL/JSON standard-conforming constructors for
JSON types:
JSON_ARRAY()
JSON_ARRAYAGG()
JSON_OBJECT()
JSON_OBJECTAGG()
Most of the functionality was already present in PostgreSQL-specific
functions, but these include some new functionality such as the ability
to skip or include NULL values, and to allow duplicate keys or throw
error when they are found, as well as the standard specified syntax to
specify output type and format.
Author: Nikita Glukhov <n.gluhov@postgrespro.ru>
Author: Teodor Sigaev <teodor@sigaev.ru>
Author: Oleg Bartunov <obartunov@gmail.com>
Author: Alexander Korotkov <aekorotkov@gmail.com>
Author: Amit Langote <amitlangote09@gmail.com>
Reviewers have included (in no particular order) Andres Freund, Alexander
Korotkov, Pavel Stehule, Andrew Alsup, Erik Rijkers, Zihong Yu,
Himanshu Upadhyaya, Daniel Gustafsson, Justin Pryzby.
Discussion: https://postgr.es/m/CAF4Au4w2x-5LTnN_bxky-mq4=WOqsGsxSpENCzHRAzSnEd8+WQ@mail.gmail.com
Discussion: https://postgr.es/m/cd0bb935-0158-78a7-08b5-904886deac4b@postgrespro.ru
Discussion: https://postgr.es/m/20220616233130.rparivafipt6doj3@alap3.anarazel.de
Discussion: https://postgr.es/m/abd9b83b-aa66-f230-3d6d-734817f0995d%40postgresql.org
3 years ago
|
|
|
json_build_array_worker(int nargs, Datum *args, bool *nulls, Oid *types,
|
|
|
|
|
bool absent_on_null)
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
const char *sep = "";
|
|
|
|
|
StringInfo result;
|
|
|
|
|
|
|
|
|
|
result = makeStringInfo();
|
|
|
|
|
|
|
|
|
|
appendStringInfoChar(result, '[');
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < nargs; i++)
|
|
|
|
|
{
|
SQL/JSON: add standard JSON constructor functions
This commit introduces the SQL/JSON standard-conforming constructors for
JSON types:
JSON_ARRAY()
JSON_ARRAYAGG()
JSON_OBJECT()
JSON_OBJECTAGG()
Most of the functionality was already present in PostgreSQL-specific
functions, but these include some new functionality such as the ability
to skip or include NULL values, and to allow duplicate keys or throw
error when they are found, as well as the standard specified syntax to
specify output type and format.
Author: Nikita Glukhov <n.gluhov@postgrespro.ru>
Author: Teodor Sigaev <teodor@sigaev.ru>
Author: Oleg Bartunov <obartunov@gmail.com>
Author: Alexander Korotkov <aekorotkov@gmail.com>
Author: Amit Langote <amitlangote09@gmail.com>
Reviewers have included (in no particular order) Andres Freund, Alexander
Korotkov, Pavel Stehule, Andrew Alsup, Erik Rijkers, Zihong Yu,
Himanshu Upadhyaya, Daniel Gustafsson, Justin Pryzby.
Discussion: https://postgr.es/m/CAF4Au4w2x-5LTnN_bxky-mq4=WOqsGsxSpENCzHRAzSnEd8+WQ@mail.gmail.com
Discussion: https://postgr.es/m/cd0bb935-0158-78a7-08b5-904886deac4b@postgrespro.ru
Discussion: https://postgr.es/m/20220616233130.rparivafipt6doj3@alap3.anarazel.de
Discussion: https://postgr.es/m/abd9b83b-aa66-f230-3d6d-734817f0995d%40postgresql.org
3 years ago
|
|
|
if (absent_on_null && nulls[i])
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
appendStringInfoString(result, sep);
|
|
|
|
|
sep = ", ";
|
|
|
|
|
add_json(args[i], nulls[i], result, types[i], false);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
appendStringInfoChar(result, ']');
|
|
|
|
|
|
SQL/JSON: add standard JSON constructor functions
This commit introduces the SQL/JSON standard-conforming constructors for
JSON types:
JSON_ARRAY()
JSON_ARRAYAGG()
JSON_OBJECT()
JSON_OBJECTAGG()
Most of the functionality was already present in PostgreSQL-specific
functions, but these include some new functionality such as the ability
to skip or include NULL values, and to allow duplicate keys or throw
error when they are found, as well as the standard specified syntax to
specify output type and format.
Author: Nikita Glukhov <n.gluhov@postgrespro.ru>
Author: Teodor Sigaev <teodor@sigaev.ru>
Author: Oleg Bartunov <obartunov@gmail.com>
Author: Alexander Korotkov <aekorotkov@gmail.com>
Author: Amit Langote <amitlangote09@gmail.com>
Reviewers have included (in no particular order) Andres Freund, Alexander
Korotkov, Pavel Stehule, Andrew Alsup, Erik Rijkers, Zihong Yu,
Himanshu Upadhyaya, Daniel Gustafsson, Justin Pryzby.
Discussion: https://postgr.es/m/CAF4Au4w2x-5LTnN_bxky-mq4=WOqsGsxSpENCzHRAzSnEd8+WQ@mail.gmail.com
Discussion: https://postgr.es/m/cd0bb935-0158-78a7-08b5-904886deac4b@postgrespro.ru
Discussion: https://postgr.es/m/20220616233130.rparivafipt6doj3@alap3.anarazel.de
Discussion: https://postgr.es/m/abd9b83b-aa66-f230-3d6d-734817f0995d%40postgresql.org
3 years ago
|
|
|
return PointerGetDatum(cstring_to_text_with_len(result->data, result->len));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* SQL function json_build_array(variadic "any")
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
json_build_array(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
Datum *args;
|
|
|
|
|
bool *nulls;
|
|
|
|
|
Oid *types;
|
|
|
|
|
|
|
|
|
|
/* build argument values to build the object */
|
|
|
|
|
int nargs = extract_variadic_args(fcinfo, 0, true,
|
|
|
|
|
&args, &types, &nulls);
|
|
|
|
|
|
|
|
|
|
if (nargs < 0)
|
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
|
|
|
|
|
PG_RETURN_DATUM(json_build_array_worker(nargs, args, nulls, types, false));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* degenerate case of json_build_array where it gets 0 arguments.
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
json_build_array_noargs(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
PG_RETURN_TEXT_P(cstring_to_text_with_len("[]", 2));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* SQL function json_object(text[])
|
|
|
|
|
*
|
|
|
|
|
* take a one or two dimensional array of text as key/value pairs
|
|
|
|
|
* for a json object.
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
json_object(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
ArrayType *in_array = PG_GETARG_ARRAYTYPE_P(0);
|
|
|
|
|
int ndims = ARR_NDIM(in_array);
|
|
|
|
|
StringInfoData result;
|
|
|
|
|
Datum *in_datums;
|
|
|
|
|
bool *in_nulls;
|
|
|
|
|
int in_count,
|
|
|
|
|
count,
|
|
|
|
|
i;
|
|
|
|
|
text *rval;
|
|
|
|
|
char *v;
|
|
|
|
|
|
|
|
|
|
switch (ndims)
|
|
|
|
|
{
|
|
|
|
|
case 0:
|
|
|
|
|
PG_RETURN_DATUM(CStringGetTextDatum("{}"));
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case 1:
|
|
|
|
|
if ((ARR_DIMS(in_array)[0]) % 2)
|
|
|
|
|
ereport(ERROR,
|
|
|
|
|
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
|
|
|
|
|
errmsg("array must have even number of elements")));
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case 2:
|
|
|
|
|
if ((ARR_DIMS(in_array)[1]) != 2)
|
|
|
|
|
ereport(ERROR,
|
|
|
|
|
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
|
|
|
|
|
errmsg("array must have two columns")));
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
|
ereport(ERROR,
|
|
|
|
|
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
|
|
|
|
|
errmsg("wrong number of array subscripts")));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
deconstruct_array_builtin(in_array, TEXTOID, &in_datums, &in_nulls, &in_count);
|
|
|
|
|
|
|
|
|
|
count = in_count / 2;
|
|
|
|
|
|
|
|
|
|
initStringInfo(&result);
|
|
|
|
|
|
|
|
|
|
appendStringInfoChar(&result, '{');
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < count; ++i)
|
|
|
|
|
{
|
|
|
|
|
if (in_nulls[i * 2])
|
|
|
|
|
ereport(ERROR,
|
|
|
|
|
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
|
|
|
|
|
errmsg("null value not allowed for object key")));
|
|
|
|
|
|
|
|
|
|
v = TextDatumGetCString(in_datums[i * 2]);
|
|
|
|
|
if (i > 0)
|
|
|
|
|
appendStringInfoString(&result, ", ");
|
|
|
|
|
escape_json(&result, v);
|
|
|
|
|
appendStringInfoString(&result, " : ");
|
|
|
|
|
pfree(v);
|
|
|
|
|
if (in_nulls[i * 2 + 1])
|
|
|
|
|
appendStringInfoString(&result, "null");
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
v = TextDatumGetCString(in_datums[i * 2 + 1]);
|
|
|
|
|
escape_json(&result, v);
|
|
|
|
|
pfree(v);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
appendStringInfoChar(&result, '}');
|
|
|
|
|
|
|
|
|
|
pfree(in_datums);
|
|
|
|
|
pfree(in_nulls);
|
|
|
|
|
|
|
|
|
|
rval = cstring_to_text_with_len(result.data, result.len);
|
|
|
|
|
pfree(result.data);
|
|
|
|
|
|
|
|
|
|
PG_RETURN_TEXT_P(rval);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* SQL function json_object(text[], text[])
|
|
|
|
|
*
|
|
|
|
|
* take separate key and value arrays of text to construct a json object
|
|
|
|
|
* pairwise.
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
json_object_two_arg(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
ArrayType *key_array = PG_GETARG_ARRAYTYPE_P(0);
|
|
|
|
|
ArrayType *val_array = PG_GETARG_ARRAYTYPE_P(1);
|
|
|
|
|
int nkdims = ARR_NDIM(key_array);
|
|
|
|
|
int nvdims = ARR_NDIM(val_array);
|
|
|
|
|
StringInfoData result;
|
|
|
|
|
Datum *key_datums,
|
|
|
|
|
*val_datums;
|
|
|
|
|
bool *key_nulls,
|
|
|
|
|
*val_nulls;
|
|
|
|
|
int key_count,
|
|
|
|
|
val_count,
|
|
|
|
|
i;
|
|
|
|
|
text *rval;
|
|
|
|
|
char *v;
|
|
|
|
|
|
|
|
|
|
if (nkdims > 1 || nkdims != nvdims)
|
|
|
|
|
ereport(ERROR,
|
|
|
|
|
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
|
|
|
|
|
errmsg("wrong number of array subscripts")));
|
|
|
|
|
|
|
|
|
|
if (nkdims == 0)
|
|
|
|
|
PG_RETURN_DATUM(CStringGetTextDatum("{}"));
|
|
|
|
|
|
|
|
|
|
deconstruct_array_builtin(key_array, TEXTOID, &key_datums, &key_nulls, &key_count);
|
|
|
|
|
deconstruct_array_builtin(val_array, TEXTOID, &val_datums, &val_nulls, &val_count);
|
|
|
|
|
|
|
|
|
|
if (key_count != val_count)
|
|
|
|
|
ereport(ERROR,
|
|
|
|
|
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
|
|
|
|
|
errmsg("mismatched array dimensions")));
|
|
|
|
|
|
|
|
|
|
initStringInfo(&result);
|
|
|
|
|
|
|
|
|
|
appendStringInfoChar(&result, '{');
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < key_count; ++i)
|
|
|
|
|
{
|
|
|
|
|
if (key_nulls[i])
|
|
|
|
|
ereport(ERROR,
|
|
|
|
|
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
|
|
|
|
|
errmsg("null value not allowed for object key")));
|
|
|
|
|
|
|
|
|
|
v = TextDatumGetCString(key_datums[i]);
|
|
|
|
|
if (i > 0)
|
|
|
|
|
appendStringInfoString(&result, ", ");
|
|
|
|
|
escape_json(&result, v);
|
|
|
|
|
appendStringInfoString(&result, " : ");
|
|
|
|
|
pfree(v);
|
|
|
|
|
if (val_nulls[i])
|
|
|
|
|
appendStringInfoString(&result, "null");
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
v = TextDatumGetCString(val_datums[i]);
|
|
|
|
|
escape_json(&result, v);
|
|
|
|
|
pfree(v);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
appendStringInfoChar(&result, '}');
|
|
|
|
|
|
|
|
|
|
pfree(key_datums);
|
|
|
|
|
pfree(key_nulls);
|
|
|
|
|
pfree(val_datums);
|
|
|
|
|
pfree(val_nulls);
|
|
|
|
|
|
|
|
|
|
rval = cstring_to_text_with_len(result.data, result.len);
|
|
|
|
|
pfree(result.data);
|
|
|
|
|
|
|
|
|
|
PG_RETURN_TEXT_P(rval);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Produce a JSON string literal, properly escaping characters in the text.
|
|
|
|
|
*/
|
|
|
|
|
void
|
|
|
|
|
escape_json(StringInfo buf, const char *str)
|
|
|
|
|
{
|
|
|
|
|
const char *p;
|
|
|
|
|
|
|
|
|
|
appendStringInfoCharMacro(buf, '"');
|
|
|
|
|
for (p = str; *p; p++)
|
|
|
|
|
{
|
|
|
|
|
switch (*p)
|
|
|
|
|
{
|
|
|
|
|
case '\b':
|
|
|
|
|
appendStringInfoString(buf, "\\b");
|
|
|
|
|
break;
|
|
|
|
|
case '\f':
|
|
|
|
|
appendStringInfoString(buf, "\\f");
|
|
|
|
|
break;
|
|
|
|
|
case '\n':
|
|
|
|
|
appendStringInfoString(buf, "\\n");
|
|
|
|
|
break;
|
|
|
|
|
case '\r':
|
|
|
|
|
appendStringInfoString(buf, "\\r");
|
|
|
|
|
break;
|
|
|
|
|
case '\t':
|
|
|
|
|
appendStringInfoString(buf, "\\t");
|
|
|
|
|
break;
|
|
|
|
|
case '"':
|
|
|
|
|
appendStringInfoString(buf, "\\\"");
|
|
|
|
|
break;
|
|
|
|
|
case '\\':
|
Fix jsonb Unicode escape processing, and in consequence disallow \u0000.
We've been trying to support \u0000 in JSON values since commit
78ed8e03c67d7333, and have introduced increasingly worse hacks to try to
make it work, such as commit 0ad1a816320a2b53. However, it fundamentally
can't work in the way envisioned, because the stored representation looks
the same as for \\u0000 which is not the same thing at all. It's also
entirely bogus to output \u0000 when de-escaped output is called for.
The right way to do this would be to store an actual 0x00 byte, and then
throw error only if asked to produce de-escaped textual output. However,
getting to that point seems likely to take considerable work and may well
never be practical in the 9.4.x series.
To preserve our options for better behavior while getting rid of the nasty
side-effects of 0ad1a816320a2b53, revert that commit in toto and instead
throw error if \u0000 is used in a context where it needs to be de-escaped.
(These are the same contexts where non-ASCII Unicode escapes throw error
if the database encoding isn't UTF8, so this behavior is by no means
without precedent.)
In passing, make both the \u0000 case and the non-ASCII Unicode case report
ERRCODE_UNTRANSLATABLE_CHARACTER / "unsupported Unicode escape sequence"
rather than claiming there's something wrong with the input syntax.
Back-patch to 9.4, where we have to do something because 0ad1a816320a2b53
broke things for many cases having nothing to do with \u0000. 9.3 also has
bogus behavior, but only for that specific escape value, so given the lack
of field complaints it seems better to leave 9.3 alone.
11 years ago
|
|
|
appendStringInfoString(buf, "\\\\");
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
if ((unsigned char) *p < ' ')
|
|
|
|
|
appendStringInfo(buf, "\\u%04x", (int) *p);
|
|
|
|
|
else
|
|
|
|
|
appendStringInfoCharMacro(buf, *p);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
appendStringInfoCharMacro(buf, '"');
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* SQL function json_typeof(json) -> text
|
|
|
|
|
*
|
|
|
|
|
* Returns the type of the outermost JSON value as TEXT. Possible types are
|
|
|
|
|
* "object", "array", "string", "number", "boolean", and "null".
|
|
|
|
|
*
|
|
|
|
|
* Performs a single call to json_lex() to get the first token of the supplied
|
|
|
|
|
* value. This initial token uniquely determines the value's type. As our
|
|
|
|
|
* input must already have been validated by json_in() or json_recv(), the
|
|
|
|
|
* initial token should never be JSON_TOKEN_OBJECT_END, JSON_TOKEN_ARRAY_END,
|
|
|
|
|
* JSON_TOKEN_COLON, JSON_TOKEN_COMMA, or JSON_TOKEN_END.
|
|
|
|
|
*/
|
|
|
|
|
Datum
|
|
|
|
|
json_typeof(PG_FUNCTION_ARGS)
|
|
|
|
|
{
|
|
|
|
|
text *json;
|
|
|
|
|
|
|
|
|
|
JsonLexContext *lex;
|
IS JSON predicate
This patch intrdocuces the SQL standard IS JSON predicate. It operates
on text and bytea values representing JSON as well as on the json and
jsonb types. Each test has an IS and IS NOT variant. The tests are:
IS JSON [VALUE]
IS JSON ARRAY
IS JSON OBJECT
IS JSON SCALAR
IS JSON WITH | WITHOUT UNIQUE KEYS
These are mostly self-explanatory, but note that IS JSON WITHOUT UNIQUE
KEYS is true whenever IS JSON is true, and IS JSON WITH UNIQUE KEYS is
true whenever IS JSON is true except it IS JSON OBJECT is true and there
are duplicate keys (which is never the case when applied to jsonb values).
Nikita Glukhov
Reviewers have included (in no particular order) Andres Freund, Alexander
Korotkov, Pavel Stehule, Andrew Alsup, Erik Rijkers, Zihong Yu,
Himanshu Upadhyaya, Daniel Gustafsson, Justin Pryzby.
Discussion: https://postgr.es/m/cd0bb935-0158-78a7-08b5-904886deac4b@postgrespro.ru
4 years ago
|
|
|
JsonTokenType tok;
|
|
|
|
|
char *type;
|
|
|
|
|
JsonParseErrorType result;
|
Introduce jsonb, a structured format for storing json.
The new format accepts exactly the same data as the json type. However, it is
stored in a format that does not require reparsing the orgiginal text in order
to process it, making it much more suitable for indexing and other operations.
Insignificant whitespace is discarded, and the order of object keys is not
preserved. Neither are duplicate object keys kept - the later value for a given
key is the only one stored.
The new type has all the functions and operators that the json type has,
with the exception of the json generation functions (to_json, json_agg etc.)
and with identical semantics. In addition, there are operator classes for
hash and btree indexing, and two classes for GIN indexing, that have no
equivalent in the json type.
This feature grew out of previous work by Oleg Bartunov and Teodor Sigaev, which
was intended to provide similar facilities to a nested hstore type, but which
in the end proved to have some significant compatibility issues.
Authors: Oleg Bartunov, Teodor Sigaev, Peter Geoghegan and Andrew Dunstan.
Review: Andres Freund
12 years ago
|
|
|
|
|
|
|
|
json = PG_GETARG_TEXT_PP(0);
|
|
|
|
|
lex = makeJsonLexContext(json, false);
|
IS JSON predicate
This patch intrdocuces the SQL standard IS JSON predicate. It operates
on text and bytea values representing JSON as well as on the json and
jsonb types. Each test has an IS and IS NOT variant. The tests are:
IS JSON [VALUE]
IS JSON ARRAY
IS JSON OBJECT
IS JSON SCALAR
IS JSON WITH | WITHOUT UNIQUE KEYS
These are mostly self-explanatory, but note that IS JSON WITHOUT UNIQUE
KEYS is true whenever IS JSON is true, and IS JSON WITH UNIQUE KEYS is
true whenever IS JSON is true except it IS JSON OBJECT is true and there
are duplicate keys (which is never the case when applied to jsonb values).
Nikita Glukhov
Reviewers have included (in no particular order) Andres Freund, Alexander
Korotkov, Pavel Stehule, Andrew Alsup, Erik Rijkers, Zihong Yu,
Himanshu Upadhyaya, Daniel Gustafsson, Justin Pryzby.
Discussion: https://postgr.es/m/cd0bb935-0158-78a7-08b5-904886deac4b@postgrespro.ru
4 years ago
|
|
|
|
|
|
|
|
/* Lex exactly one token from the input and check its type. */
|
|
|
|
|
result = json_lex(lex);
|
|
|
|
|
if (result != JSON_SUCCESS)
|
|
|
|
|
json_errsave_error(result, lex, NULL);
|
|
|
|
|
tok = lex->token_type;
|
|
|
|
|
switch (tok)
|
|
|
|
|
{
|
|
|
|
|
case JSON_TOKEN_OBJECT_START:
|
|
|
|
|
type = "object";
|
|
|
|
|
break;
|
|
|
|
|
case JSON_TOKEN_ARRAY_START:
|
|
|
|
|
type = "array";
|
|
|
|
|
break;
|
|
|
|
|
case JSON_TOKEN_STRING:
|
|
|
|
|
type = "string";
|
|
|
|
|
break;
|
|
|
|
|
case JSON_TOKEN_NUMBER:
|
|
|
|
|
type = "number";
|
|
|
|
|
break;
|
|
|
|
|
case JSON_TOKEN_TRUE:
|
|
|
|
|
case JSON_TOKEN_FALSE:
|
|
|
|
|
type = "boolean";
|
|
|
|
|
break;
|
|
|
|
|
case JSON_TOKEN_NULL:
|
|
|
|
|
type = "null";
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
elog(ERROR, "unexpected json token: %d", tok);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
PG_RETURN_TEXT_P(cstring_to_text(type));
|
|
|
|
|
}
|