/*-------------------------------------------------------------------------
 *
 * makefuncs.c
 *	  creator functions for various nodes. The functions here are for the
 *	  most frequently created nodes.
 *
 * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/nodes/makefuncs.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "catalog/pg_class.h"
#include "catalog/pg_type.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "utils/lsyscache.h"

/*
 * makeA_Expr -
 *	  makes an A_Expr node
 */
A_Expr *
makeA_Expr(A_Expr_Kind kind, List *name,
		   Node *lexpr, Node *rexpr, int location)
{
	A_Expr	   *a = makeNode(A_Expr);

	a->kind = kind;
	a->name = name;
	a->lexpr = lexpr;
	a->rexpr = rexpr;
	a->location = location;
	return a;
}

/*
 * makeSimpleA_Expr -
 *	  As above, given a simple (unqualified) operator name
 */
A_Expr *
makeSimpleA_Expr(A_Expr_Kind kind, char *name,
				 Node *lexpr, Node *rexpr, int location)
{
	A_Expr	   *a = makeNode(A_Expr);

	a->kind = kind;
	a->name = list_make1(makeString((char *) name));
	a->lexpr = lexpr;
	a->rexpr = rexpr;
	a->location = location;
	return a;
}
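
/*
 * Editor's illustrative sketch (not part of the original file): a minimal
 * use of makeSimpleA_Expr, building the raw parse tree for "x < 10" from
 * two previously built parse nodes.  The MAKEFUNCS_EXAMPLES guard and the
 * example_* name are hypothetical, added only for illustration.
 */
#ifdef MAKEFUNCS_EXAMPLES
static A_Expr *
example_less_than(Node *colref, Node *lit)
{
	/* AEXPR_OP with the unqualified operator name "<" */
	return makeSimpleA_Expr(AEXPR_OP, "<", colref, lit, -1);
}
#endif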

/*
 * makeVar -
 *	  creates a Var node
 */
Var *
makeVar(Index varno,
		AttrNumber varattno,
		Oid vartype,
		int32 vartypmod,
		Oid varcollid,
		Index varlevelsup)
{
	Var		   *var = makeNode(Var);

	var->varno = varno;
	var->varattno = varattno;
	var->vartype = vartype;
	var->vartypmod = vartypmod;
	var->varcollid = varcollid;
	var->varlevelsup = varlevelsup;

	/*
	 * Only a few callers need to make Var nodes with varnosyn/varattnosyn
	 * different from varno/varattno.  We don't provide separate arguments for
	 * them, but just initialize them to the given varno/varattno.  This
	 * reduces code clutter and chance of error for most callers.
	 */
	var->varnosyn = varno;
	var->varattnosyn = varattno;

	/* Likewise, we just set location to "unknown" here */
	var->location = -1;

	return var;
}
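
/*
 * Editor's illustrative sketch (not part of the original file): a Var
 * referencing the second column of the first range-table entry at the
 * current query level.  The MAKEFUNCS_EXAMPLES guard and the example_*
 * name are hypothetical.
 */
#ifdef MAKEFUNCS_EXAMPLES
static Var *
example_simple_var(void)
{
	return makeVar(1,			/* varno: first RTE */
				   2,			/* varattno: second column */
				   INT4OID,		/* vartype */
				   -1,			/* vartypmod: default */
				   InvalidOid,	/* varcollid: not collatable */
				   0);			/* varlevelsup: same query level */
}
#endif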

/*
 * makeVarFromTargetEntry -
 *	  convenience function to create a same-level Var node from a
 *	  TargetEntry
 */
Var *
makeVarFromTargetEntry(Index varno,
					   TargetEntry *tle)
{
	return makeVar(varno,
				   tle->resno,
				   exprType((Node *) tle->expr),
				   exprTypmod((Node *) tle->expr),
				   exprCollation((Node *) tle->expr),
				   0);
}

/*
 * makeWholeRowVar -
 *	  creates a Var node representing a whole row of the specified RTE
 *
 * A whole-row reference is a Var with varno set to the correct range
 * table entry, and varattno == 0 to signal that it references the whole
 * tuple.  (Use of zero here is unclean, since it could easily be confused
 * with error cases, but it's not worth changing now.)  The vartype indicates
 * a rowtype; either a named composite type, or a domain over a named
 * composite type (only possible if the RTE is a function returning that),
 * or RECORD.  This function encapsulates the logic for determining the
 * correct rowtype OID to use.
 *
 * If allowScalar is true, then for the case where the RTE is a single function
 * returning a non-composite result type, we produce a normal Var referencing
 * the function's result directly, instead of the single-column composite
 * value that the whole-row notation might otherwise suggest.
 */
Var *
makeWholeRowVar(RangeTblEntry *rte,
				Index varno,
				Index varlevelsup,
				bool allowScalar)
{
	Var		   *result;
	Oid			toid;
	Node	   *fexpr;

	switch (rte->rtekind)
	{
		case RTE_RELATION:
			/* relation: the rowtype is a named composite type */
			toid = get_rel_type_id(rte->relid);
			if (!OidIsValid(toid))
				elog(ERROR, "could not find type OID for relation %u",
					 rte->relid);
			result = makeVar(varno,
							 InvalidAttrNumber,
							 toid,
							 -1,
							 InvalidOid,
							 varlevelsup);
			break;

		case RTE_FUNCTION:

			/*
			 * If there's more than one function, or ordinality is requested,
			 * force a RECORD result, since there's certainly more than one
			 * column involved and it can't be a known named type.
			 */
			if (rte->funcordinality || list_length(rte->functions) != 1)
			{
				/* always produces an anonymous RECORD result */
				result = makeVar(varno,
								 InvalidAttrNumber,
								 RECORDOID,
								 -1,
								 InvalidOid,
								 varlevelsup);
				break;
			}

			fexpr = ((RangeTblFunction *) linitial(rte->functions))->funcexpr;
			toid = exprType(fexpr);
			if (type_is_rowtype(toid))
			{
				/* func returns composite; same as relation case */
				result = makeVar(varno,
								 InvalidAttrNumber,
								 toid,
								 -1,
								 InvalidOid,
								 varlevelsup);
			}
			else if (allowScalar)
			{
				/* func returns scalar; just return its output as-is */
				result = makeVar(varno,
								 1,
								 toid,
								 -1,
								 exprCollation(fexpr),
								 varlevelsup);
			}
			else
			{
				/* func returns scalar, but we want a composite result */
				result = makeVar(varno,
								 InvalidAttrNumber,
								 RECORDOID,
								 -1,
								 InvalidOid,
								 varlevelsup);
			}
			break;

		default:

			/*
			 * RTE is a join, subselect, tablefunc, or VALUES.  We represent
			 * this as a whole-row Var of RECORD type. (Note that in most
			 * cases the Var will be expanded to a RowExpr during planning,
			 * but that is not our concern here.)
			 */
			result = makeVar(varno,
							 InvalidAttrNumber,
							 RECORDOID,
							 -1,
							 InvalidOid,
							 varlevelsup);
			break;
	}

	return result;
}
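
/*
 * Editor's illustrative sketch (not part of the original file): building a
 * whole-row reference such as the "t" in "SELECT t FROM t", given the RTE
 * and its range-table index.  The MAKEFUNCS_EXAMPLES guard and the
 * example_* name are hypothetical.
 */
#ifdef MAKEFUNCS_EXAMPLES
static Var *
example_whole_row(RangeTblEntry *rte, Index rtindex)
{
	/* allowScalar = false: always produce a composite-typed Var */
	return makeWholeRowVar(rte, rtindex, 0, false);
}
#endif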

/*
 * makeTargetEntry -
 *	  creates a TargetEntry node
 */
TargetEntry *
makeTargetEntry(Expr *expr,
				AttrNumber resno,
				char *resname,
				bool resjunk)
{
	TargetEntry *tle = makeNode(TargetEntry);

	tle->expr = expr;
	tle->resno = resno;
	tle->resname = resname;

	/*
	 * We always set these fields to 0. If the caller wants to change them he
	 * must do so explicitly.  Few callers do that, so omitting these
	 * arguments reduces the chance of error.
	 */
	tle->ressortgroupref = 0;
	tle->resorigtbl = InvalidOid;
	tle->resorigcol = 0;

	tle->resjunk = resjunk;

	return tle;
}
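
/*
 * Editor's illustrative sketch (not part of the original file): wrapping an
 * expression as the first, user-visible (non-junk) output column of a
 * target list.  The MAKEFUNCS_EXAMPLES guard and the example_* name are
 * hypothetical.
 */
#ifdef MAKEFUNCS_EXAMPLES
static TargetEntry *
example_tlist_entry(Expr *expr)
{
	return makeTargetEntry(expr, 1, pstrdup("col1"), false);
}
#endif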

/*
 * flatCopyTargetEntry -
 *	  duplicate a TargetEntry, but don't copy substructure
 *
 * This is commonly used when we just want to modify the resno or substitute
 * a new expression.
 */
TargetEntry *
flatCopyTargetEntry(TargetEntry *src_tle)
{
	TargetEntry *tle = makeNode(TargetEntry);

	Assert(IsA(src_tle, TargetEntry));
	memcpy(tle, src_tle, sizeof(TargetEntry));
	return tle;
}

/*
 * makeFromExpr -
 *	  creates a FromExpr node
 */
FromExpr *
makeFromExpr(List *fromlist, Node *quals)
{
	FromExpr   *f = makeNode(FromExpr);

	f->fromlist = fromlist;
	f->quals = quals;
	return f;
}

/*
 * makeConst -
 *	  creates a Const node
 */
Const *
makeConst(Oid consttype,
		  int32 consttypmod,
		  Oid constcollid,
		  int constlen,
		  Datum constvalue,
		  bool constisnull,
		  bool constbyval)
{
	Const	   *cnst = makeNode(Const);

	/*
	 * If it's a varlena value, force it to be in non-expanded (non-toasted)
	 * format; this avoids any possible dependency on external values and
	 * improves consistency of representation, which is important for equal().
	 */
	if (!constisnull && constlen == -1)
		constvalue = PointerGetDatum(PG_DETOAST_DATUM(constvalue));

	cnst->consttype = consttype;
	cnst->consttypmod = consttypmod;
	cnst->constcollid = constcollid;
	cnst->constlen = constlen;
	cnst->constvalue = constvalue;
	cnst->constisnull = constisnull;
	cnst->constbyval = constbyval;
	cnst->location = -1;		/* "unknown" */

	return cnst;
}
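
/*
 * Editor's illustrative sketch (not part of the original file): the integer
 * literal 42 as a pass-by-value, 4-byte int4 Const.  The MAKEFUNCS_EXAMPLES
 * guard and the example_* name are hypothetical.
 */
#ifdef MAKEFUNCS_EXAMPLES
static Const *
example_int4_const(void)
{
	return makeConst(INT4OID, -1, InvalidOid, sizeof(int32),
					 Int32GetDatum(42), false, true);
}
#endif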

/*
 * makeNullConst -
 *	  creates a Const node representing a NULL of the specified type/typmod
 *
 * This is a convenience routine that just saves a lookup of the type's
 * storage properties.
 */
Const *
makeNullConst(Oid consttype, int32 consttypmod, Oid constcollid)
{
	int16		typLen;
	bool		typByVal;

	get_typlenbyval(consttype, &typLen, &typByVal);
	return makeConst(consttype,
					 consttypmod,
					 constcollid,
					 (int) typLen,
					 (Datum) 0,
					 true,
					 typByVal);
}

/*
 * makeBoolConst -
 *	  creates a Const node representing a boolean value (can be NULL too)
 */
Node *
makeBoolConst(bool value, bool isnull)
{
	/* note that pg_type.h hardwires size of bool as 1 ... duplicate it */
	return (Node *) makeConst(BOOLOID, -1, InvalidOid, 1,
							  BoolGetDatum(value), isnull, true);
}

/*
 * makeBoolExpr -
 *	  creates a BoolExpr node
 */
Expr *
makeBoolExpr(BoolExprType boolop, List *args, int location)
{
	BoolExpr   *b = makeNode(BoolExpr);

	b->boolop = boolop;
	b->args = args;
	b->location = location;

	return (Expr *) b;
}
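
/*
 * Editor's illustrative sketch (not part of the original file): ANDing two
 * already-transformed boolean expressions at parse time.  The
 * MAKEFUNCS_EXAMPLES guard and the example_* name are hypothetical.
 */
#ifdef MAKEFUNCS_EXAMPLES
static Expr *
example_and_expr(Expr *a, Expr *b)
{
	return makeBoolExpr(AND_EXPR, list_make2(a, b), -1);
}
#endif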

/*
 * makeAlias -
 *	  creates an Alias node
 *
 * NOTE: the given name is copied, but the colnames list (if any) isn't.
 */
Alias *
makeAlias(const char *aliasname, List *colnames)
{
	Alias	   *a = makeNode(Alias);

	a->aliasname = pstrdup(aliasname);
	a->colnames = colnames;

	return a;
}

/*
 * makeRelabelType -
 *	  creates a RelabelType node
 */
RelabelType *
makeRelabelType(Expr *arg, Oid rtype, int32 rtypmod, Oid rcollid,
				CoercionForm rformat)
{
	RelabelType *r = makeNode(RelabelType);

	r->arg = arg;
	r->resulttype = rtype;
	r->resulttypmod = rtypmod;
	r->resultcollid = rcollid;
	r->relabelformat = rformat;
	r->location = -1;

	return r;
}

/*
 * makeRangeVar -
 *	  creates a RangeVar node (rather oversimplified case)
 */
RangeVar *
makeRangeVar(char *schemaname, char *relname, int location)
{
	RangeVar   *r = makeNode(RangeVar);

	r->catalogname = NULL;
	r->schemaname = schemaname;
	r->relname = relname;
	r->inh = true;
	r->relpersistence = RELPERSISTENCE_PERMANENT;
	r->alias = NULL;
	r->location = location;

	return r;
}
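
/*
 * Editor's illustrative sketch (not part of the original file): a RangeVar
 * naming the relation "public.my_table" with no known parse location.  The
 * MAKEFUNCS_EXAMPLES guard, the example_* name, and the table name are
 * hypothetical.
 */
#ifdef MAKEFUNCS_EXAMPLES
static RangeVar *
example_rangevar(void)
{
	return makeRangeVar("public", "my_table", -1);
}
#endif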

/*
 * makeTypeName -
 *	  build a TypeName node for an unqualified name.
 *
 * typmod is defaulted, but can be changed later by caller.
 */
TypeName *
makeTypeName(char *typnam)
{
	return makeTypeNameFromNameList(list_make1(makeString(typnam)));
}

/*
 * makeTypeNameFromNameList -
 *	  build a TypeName node for a String list representing a qualified name.
 *
 * typmod is defaulted, but can be changed later by caller.
 */
TypeName *
makeTypeNameFromNameList(List *names)
{
	TypeName   *n = makeNode(TypeName);

	n->names = names;
	n->typmods = NIL;
	n->typemod = -1;
	n->location = -1;
	return n;
}
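
/*
 * Editor's illustrative sketch (not part of the original file): the
 * qualified type name "pg_catalog.int4" as a TypeName.  The
 * MAKEFUNCS_EXAMPLES guard and the example_* name are hypothetical.
 */
#ifdef MAKEFUNCS_EXAMPLES
static TypeName *
example_qualified_typename(void)
{
	return makeTypeNameFromNameList(list_make2(makeString("pg_catalog"),
											   makeString("int4")));
}
#endif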

/*
 * makeTypeNameFromOid -
 *	  build a TypeName node to represent a type already known by OID/typmod.
 */
TypeName *
makeTypeNameFromOid(Oid typeOid, int32 typmod)
{
	TypeName   *n = makeNode(TypeName);

	n->typeOid = typeOid;
	n->typemod = typmod;
	n->location = -1;
	return n;
}

/*
 * makeColumnDef -
 *	  build a ColumnDef node to represent a simple column definition.
 *
 * Type and collation are specified by OID.
 * Other properties are all basic to start with.
 */
ColumnDef *
makeColumnDef(const char *colname, Oid typeOid, int32 typmod, Oid collOid)
{
	ColumnDef  *n = makeNode(ColumnDef);

	n->colname = pstrdup(colname);
	n->typeName = makeTypeNameFromOid(typeOid, typmod);
	n->inhcount = 0;
	n->is_local = true;
	n->is_not_null = false;
	n->is_from_type = false;
	n->storage = 0;
	n->raw_default = NULL;
	n->cooked_default = NULL;
	n->collClause = NULL;
	n->collOid = collOid;
	n->constraints = NIL;
	n->fdwoptions = NIL;
	n->location = -1;

	return n;
}
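
/*
 * Editor's illustrative sketch (not part of the original file): a column
 * "id" of type int8 with default typmod and no explicit collation.  The
 * MAKEFUNCS_EXAMPLES guard, the example_* name, and the column name are
 * hypothetical.
 */
#ifdef MAKEFUNCS_EXAMPLES
static ColumnDef *
example_coldef(void)
{
	return makeColumnDef("id", INT8OID, -1, InvalidOid);
}
#endif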

/*
 * makeFuncExpr -
 *	  build an expression tree representing a function call.
 *
 * The argument expressions must have been transformed already.
 */
FuncExpr *
makeFuncExpr(Oid funcid, Oid rettype, List *args,
			 Oid funccollid, Oid inputcollid, CoercionForm fformat)
{
	FuncExpr   *funcexpr;

	funcexpr = makeNode(FuncExpr);
	funcexpr->funcid = funcid;
	funcexpr->funcresulttype = rettype;
	funcexpr->funcretset = false;	/* only allowed case here */
	funcexpr->funcvariadic = false; /* only allowed case here */
	funcexpr->funcformat = fformat;
	funcexpr->funccollid = funccollid;
	funcexpr->inputcollid = inputcollid;
	funcexpr->args = args;
	funcexpr->location = -1;

	return funcexpr;
}
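
/*
 * Editor's illustrative sketch (not part of the original file): wrapping one
 * transformed argument in a FuncExpr.  funcid and rettype are assumed to
 * have been resolved earlier (makeFuncExpr does no catalog lookups of its
 * own), and COERCE_EXPLICIT_CALL marks an ordinary call.  The
 * MAKEFUNCS_EXAMPLES guard and the example_* name are hypothetical.
 */
#ifdef MAKEFUNCS_EXAMPLES
static FuncExpr *
example_funcexpr(Oid funcid, Oid rettype, Expr *arg)
{
	return makeFuncExpr(funcid, rettype, list_make1(arg),
						InvalidOid, InvalidOid, COERCE_EXPLICIT_CALL);
}
#endif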

/*
 * makeDefElem -
 *	  build a DefElem node
 *
 * This is sufficient for the "typical" case with an unqualified option name
 * and no special action.
 */
DefElem *
makeDefElem(char *name, Node *arg, int location)
{
	DefElem    *res = makeNode(DefElem);

	res->defnamespace = NULL;
	res->defname = name;
	res->arg = arg;
	res->defaction = DEFELEM_UNSPEC;
	res->location = location;

	return res;
}
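
/*
 * Editor's illustrative sketch (not part of the original file): the storage
 * option "fillfactor=70" as a DefElem, as it might appear after parsing a
 * WITH (...) clause.  The MAKEFUNCS_EXAMPLES guard and the example_* name
 * are hypothetical.
 */
#ifdef MAKEFUNCS_EXAMPLES
static DefElem *
example_defelem(void)
{
	return makeDefElem("fillfactor", (Node *) makeInteger(70), -1);
}
#endif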

/*
 * makeDefElemExtended -
 *	  build a DefElem node with all fields available to be specified
 */
DefElem *
makeDefElemExtended(char *nameSpace, char *name, Node *arg,
					DefElemAction defaction, int location)
{
	DefElem    *res = makeNode(DefElem);

	res->defnamespace = nameSpace;
	res->defname = name;
	res->arg = arg;
	res->defaction = defaction;
	res->location = location;

	return res;
}

/*
 * makeFuncCall -
 *
 * Initialize a FuncCall struct with the information every caller must
 * supply.  Any non-default parameters have to be inserted by the caller.
 */
FuncCall *
makeFuncCall(List *name, List *args, int location)
{
	FuncCall   *n = makeNode(FuncCall);

	n->funcname = name;
	n->args = args;
	n->agg_order = NIL;
	n->agg_filter = NULL;
	n->agg_within_group = false;
	n->agg_star = false;
	n->agg_distinct = false;
	n->func_variadic = false;
	n->over = NULL;
	n->location = location;
	return n;
}
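
/*
 * Editor's illustrative sketch (not part of the original file): the raw
 * parse node for "now()", a call with an unqualified name and no arguments.
 * The MAKEFUNCS_EXAMPLES guard and the example_* name are hypothetical.
 */
#ifdef MAKEFUNCS_EXAMPLES
static FuncCall *
example_funccall(void)
{
	return makeFuncCall(list_make1(makeString("now")), NIL, -1);
}
#endif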

/*
 * make_opclause
 *    Creates an operator clause given its operator info, left operand
 *    and right operand (pass NULL to create single-operand clause),
 *    and collation info.
 */
Expr *
make_opclause(Oid opno, Oid opresulttype, bool opretset,
              Expr *leftop, Expr *rightop,
              Oid opcollid, Oid inputcollid)
{
    OpExpr     *expr = makeNode(OpExpr);

    expr->opno = opno;
    expr->opfuncid = InvalidOid;
    expr->opresulttype = opresulttype;
    expr->opretset = opretset;
    expr->opcollid = opcollid;
    expr->inputcollid = inputcollid;
    if (rightop)
        expr->args = list_make2(leftop, rightop);
    else
        expr->args = list_make1(leftop);
    expr->location = -1;
    return (Expr *) expr;
}
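
/*
 * Example (illustrative sketch, not part of the original file): building
 * the boolean clause "intcol = 42" with make_opclause.  Assumes
 * catalog/pg_operator.h is also included, for Int4EqualOperator; the
 * Var's varno/varattno values are hypothetical placeholders for a real
 * range-table reference.
 */
static Expr *
example_int4_equality(void)
{
    Var        *lhs = makeVar(1, 1, INT4OID, -1, InvalidOid, 0);
    Const      *rhs = makeConst(INT4OID, -1, InvalidOid, sizeof(int32),
                                Int32GetDatum(42), false, true);

    /* int4 "=" never returns a set; its result type is boolean */
    return make_opclause(Int4EqualOperator, BOOLOID, false,
                         (Expr *) lhs, (Expr *) rhs,
                         InvalidOid, InvalidOid);
}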

/*
 * make_andclause
 *
 * Creates an 'and' clause given a list of its subclauses.
 */
Expr *
make_andclause(List *andclauses)
{
    BoolExpr   *expr = makeNode(BoolExpr);

    expr->boolop = AND_EXPR;
    expr->args = andclauses;
    expr->location = -1;
    return (Expr *) expr;
}

/*
 * make_orclause
 *
 * Creates an 'or' clause given a list of its subclauses.
 */
Expr *
make_orclause(List *orclauses)
{
    BoolExpr   *expr = makeNode(BoolExpr);

    expr->boolop = OR_EXPR;
    expr->args = orclauses;
    expr->location = -1;
    return (Expr *) expr;
}

/*
 * make_notclause
 *
 * Create a 'not' clause given the expression to be negated.
 */
Expr *
make_notclause(Expr *notclause)
{
    BoolExpr   *expr = makeNode(BoolExpr);

    expr->boolop = NOT_EXPR;
    expr->args = list_make1(notclause);
    expr->location = -1;
    return (Expr *) expr;
}
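
/*
 * Example (illustrative sketch, not part of the original file): composing
 * the three constructors above to build "NOT (a AND (b OR c))".  The
 * parameters a, b and c stand for any previously built boolean Exprs.
 */
static Expr *
example_boolean_composition(Expr *a, Expr *b, Expr *c)
{
    Expr       *b_or_c = make_orclause(list_make2(b, c));
    Expr       *a_and_rest = make_andclause(list_make2(a, b_or_c));

    return make_notclause(a_and_rest);
}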

/*
 * make_and_qual
 *
 * Variant of make_andclause for ANDing two qual conditions together.
 * Qual conditions have the property that a NULL nodetree is interpreted
 * as 'true'.
 *
 * NB: this makes no attempt to preserve AND/OR flatness; so it should not
 * be used on a qual that has already been run through prepqual.c.
 */
Node *
make_and_qual(Node *qual1, Node *qual2)
{
    if (qual1 == NULL)
        return qual2;
    if (qual2 == NULL)
        return qual1;
    return (Node *) make_andclause(list_make2(qual1, qual2));
}
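
/*
 * Example (illustrative sketch, not part of the original file): because a
 * NULL qual means 'true', make_and_qual lets a caller accumulate
 * conditions starting from NULL without special-casing the first one.
 */
static Node *
example_accumulate_quals(List *conditions)
{
    Node       *qual = NULL;    /* empty qual == TRUE */
    ListCell   *lc;

    foreach(lc, conditions)
        qual = make_and_qual(qual, (Node *) lfirst(lc));

    return qual;                /* still NULL if the list was empty */
}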

/*
 * The planner and executor usually represent qualification expressions
 * as lists of boolean expressions with implicit AND semantics.
 *
 * These functions convert between an AND-semantics expression list and the
 * ordinary representation of a boolean expression.
 *
 * Note that an empty list is considered equivalent to TRUE.
 */
Expr *
make_ands_explicit(List *andclauses)
{
    if (andclauses == NIL)
        return (Expr *) makeBoolConst(true, false);
    else if (list_length(andclauses) == 1)
        return (Expr *) linitial(andclauses);
    else
        return make_andclause(andclauses);
}

List *
make_ands_implicit(Expr *clause)
{
    /*
     * NB: because the parser sets the qual field to NULL in a query that has
     * no WHERE clause, we must consider a NULL input clause as TRUE, even
     * though one might more reasonably think it FALSE.
     */
    if (clause == NULL)
        return NIL;             /* NULL -> NIL list == TRUE */
    else if (is_andclause(clause))
        return ((BoolExpr *) clause)->args;
    else if (IsA(clause, Const) &&
             !((Const *) clause)->constisnull &&
             DatumGetBool(((Const *) clause)->constvalue))
        return NIL;             /* constant TRUE input -> NIL list */
    else
        return list_make1(clause);
}
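
/*
 * Example (illustrative sketch, not part of the original file): the two
 * conversions treat an empty list and constant TRUE as equivalent, so
 * they round-trip through each other.
 */
static void
example_ands_round_trip(void)
{
    /* NIL list -> constant TRUE expression ... */
    Expr       *t = make_ands_explicit(NIL);

    /* ... and constant TRUE -> NIL list again */
    Assert(make_ands_implicit(t) == NIL);
}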

/*
 * makeIndexInfo
 *    create an IndexInfo node
 */
IndexInfo *
makeIndexInfo(int numattrs, int numkeyattrs, Oid amoid, List *expressions,
              List *predicates, bool unique, bool isready, bool concurrent)
{
    IndexInfo  *n = makeNode(IndexInfo);

    n->ii_NumIndexAttrs = numattrs;
    n->ii_NumIndexKeyAttrs = numkeyattrs;
    Assert(n->ii_NumIndexKeyAttrs != 0);
    Assert(n->ii_NumIndexKeyAttrs <= n->ii_NumIndexAttrs);
    n->ii_Unique = unique;
    n->ii_ReadyForInserts = isready;
    n->ii_Concurrent = concurrent;

    /* expressions */
    n->ii_Expressions = expressions;
    n->ii_ExpressionsState = NIL;

    /* predicates */
    n->ii_Predicate = predicates;
    n->ii_PredicateState = NULL;

    /* exclusion constraints */
    n->ii_ExclusionOps = NULL;
    n->ii_ExclusionProcs = NULL;
    n->ii_ExclusionStrats = NULL;

    /* speculative inserts */
    n->ii_UniqueOps = NULL;
    n->ii_UniqueProcs = NULL;
    n->ii_UniqueStrats = NULL;

    /* initialize index-build state to default */
    n->ii_BrokenHotChain = false;
    n->ii_ParallelWorkers = 0;

    /* set up for possible use by index AM */
    n->ii_Am = amoid;
    n->ii_AmCache = NULL;
    n->ii_Context = CurrentMemoryContext;

    return n;
}
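
/*
 * Example (illustrative sketch, not part of the original file): describing
 * a unique two-column btree index whose second column is INCLUDE-only, so
 * only the first column counts as a key column.  BTREE_AM_OID would come
 * from catalog/pg_am_d.h, which this file does not otherwise include.
 */
static IndexInfo *
example_btree_index_info(void)
{
    return makeIndexInfo(2,     /* two indexed columns in total */
                         1,     /* ... of which one is a key column */
                         BTREE_AM_OID,
                         NIL,   /* plain columns, no expressions */
                         NIL,   /* no partial-index predicate */
                         true,  /* unique */
                         true,  /* ready for inserts */
                         false);    /* not a concurrent build */
}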

/*
 * makeGroupingSet
 *    create a GroupingSet node
 */
GroupingSet *
makeGroupingSet(GroupingSetKind kind, List *content, int location)
{
    GroupingSet *n = makeNode(GroupingSet);

    n->kind = kind;
    n->content = content;
    n->location = location;
    return n;
}
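
/*
 * Example (illustrative sketch, not part of the original file): the node
 * the parser would build for "ROLLUP (a, b)".  The columns list stands
 * for the grammar output for the two column references; -1 means
 * "location unknown".
 */
static GroupingSet *
example_rollup_node(List *columns)
{
    return makeGroupingSet(GROUPING_SET_ROLLUP, columns, -1);
}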

/*
 * makeVacuumRelation -
 *    create a VacuumRelation node
 */
VacuumRelation *
makeVacuumRelation(RangeVar *relation, Oid oid, List *va_cols)
{
    VacuumRelation *v = makeNode(VacuumRelation);

    v->relation = relation;
    v->oid = oid;
    v->va_cols = va_cols;
    return v;
}
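
/*
 * Example (illustrative sketch, not part of the original file): the
 * VacuumRelation a utility statement might carry for "ANALYZE mytab (a)".
 * The relation is identified by name only, so the OID is left invalid
 * for the vacuum code to resolve later.
 */
static VacuumRelation *
example_analyze_one_column(void)
{
    RangeVar   *rel = makeRangeVar(NULL, "mytab", -1);
    List       *cols = list_make1(makeString("a"));

    return makeVacuumRelation(rel, InvalidOid, cols);
}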