/*-------------------------------------------------------------------------
 *
 * portal.h
 *	  POSTGRES portal definitions.
 *
 * A portal is an abstraction which represents the execution state of
 * a running or runnable query.  Portals support both SQL-level CURSORs
 * and protocol-level portals.
 *
 * Scrolling (nonsequential access) and suspension of execution are allowed
 * only for portals that contain a single SELECT-type query.  We do not want
 * to let the client suspend an update-type query partway through!  Because
 * the query rewriter does not allow arbitrary ON SELECT rewrite rules,
 * only queries that were originally update-type could produce multiple
 * parse/plan trees; so the restriction to a single query is not a problem
 * in practice.
 *
 * For SQL cursors, we support three kinds of scroll behavior:
 *
 * (1) Neither NO SCROLL nor SCROLL was specified: to remain backward
 *	   compatible, we allow backward fetches here, unless it would
 *	   impose additional runtime overhead to do so.
 *
 * (2) NO SCROLL was specified: don't allow any backward fetches.
 *
 * (3) SCROLL was specified: allow all kinds of backward fetches, even
 *	   if we need to take a performance hit to do so.  (The planner sticks
 *	   a Materialize node atop the query plan if needed.)
 *
 * Case #1 is converted to #2 or #3 by looking at the query itself and
 * determining if scrollability can be supported without additional
 * overhead.
 *
 * Protocol-level portals have no nonsequential-fetch API and so the
 * distinction doesn't matter for them.  They are always initialized
 * to look like NO SCROLL cursors.
 *
 *
 * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * $PostgreSQL: pgsql/src/include/utils/portal.h,v 1.56 2005/06/17 22:32:50 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
#ifndef PORTAL_H
#define PORTAL_H

#include "executor/execdesc.h"
#include "nodes/memnodes.h"
#include "utils/resowner.h"
#include "utils/tuplestore.h"
/*
 * We have several execution strategies for Portals, depending on what
 * query or queries are to be executed.  (Note: in all cases, a Portal
 * executes just a single source-SQL query, and thus produces just a
 * single result from the user's viewpoint.  However, the rule rewriter
 * may expand the single source query to zero or many actual queries.)
 *
 * PORTAL_ONE_SELECT: the portal contains one single SELECT query.  We run
 * the Executor incrementally as results are demanded.  This strategy also
 * supports holdable cursors (the Executor results can be dumped into a
 * tuplestore for access after transaction completion).
 *
 * PORTAL_UTIL_SELECT: the portal contains a utility statement that returns
 * a SELECT-like result (for example, EXPLAIN or SHOW).  On first execution,
 * we run the statement and dump its results into the portal tuplestore;
 * the results are then returned to the client as demanded.
 *
 * PORTAL_MULTI_QUERY: all other cases.  Here, we do not support partial
 * execution: the portal's queries will be run to completion on first call.
 */
/* Execution strategy for a Portal; see the discussion above. */
typedef enum PortalStrategy
{
	PORTAL_ONE_SELECT,			/* one single SELECT query; Executor is run
								 * incrementally as results are demanded */
	PORTAL_UTIL_SELECT,			/* utility statement returning a SELECT-like
								 * result (e.g. EXPLAIN, SHOW); results are
								 * dumped into the portal tuplestore */
	PORTAL_MULTI_QUERY			/* all other cases; queries are run to
								 * completion on first call */
} PortalStrategy;
/*
 * A portal is always in one of these states.  It is possible to transit
 * from ACTIVE back to READY if the query is not run to completion;
 * otherwise we never back up in status.
 */
typedef enum PortalStatus
{
	PORTAL_NEW,					/* in process of creation */
	PORTAL_READY,				/* PortalStart complete, can run it */
	PORTAL_ACTIVE,				/* portal is running (can't delete it) */
	PORTAL_DONE,				/* portal is finished (don't re-run it) */
	PORTAL_FAILED				/* portal got error (can't re-run it) */
} PortalStatus;
/*
|
|
|
|
|
* Note: typedef Portal is declared in tcop/dest.h as
|
|
|
|
|
* typedef struct PortalData *Portal;
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
typedef struct PortalData
|
|
|
|
|
{
|
|
|
|
|
/* Bookkeeping data */
|
|
|
|
|
const char *name; /* portal's name */
|
|
|
|
|
MemoryContext heap; /* subsidiary memory for portal */
|
|
|
|
|
ResourceOwner resowner; /* resources owned by portal */
|
|
|
|
|
void (*cleanup) (Portal portal); /* cleanup hook */
|
|
|
|
|
SubTransactionId createSubid; /* the ID of the creating subxact */
|
|
|
|
|
/*
|
|
|
|
|
* if createSubid is InvalidSubTransactionId, the portal is held over
|
|
|
|
|
* from a previous transaction
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
/* The query or queries the portal will execute */
|
|
|
|
|
const char *sourceText; /* text of query, if known (may be NULL) */
|
|
|
|
|
const char *commandTag; /* command tag for original query */
|
|
|
|
|
List *parseTrees; /* parse tree(s) */
|
|
|
|
|
List *planTrees; /* plan tree(s) */
|
|
|
|
|
MemoryContext queryContext; /* where the above trees live */
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Note: queryContext effectively identifies which prepared statement
|
|
|
|
|
* the portal depends on, if any. The queryContext is *not* owned by
|
|
|
|
|
* the portal and is not to be deleted by portal destruction. (But
|
|
|
|
|
* for a cursor it is the same as "heap", and that context is deleted
|
|
|
|
|
* by portal destruction.)
|
|
|
|
|
*/
|
|
|
|
|
ParamListInfo portalParams; /* params to pass to query */
|
|
|
|
|
|
|
|
|
|
/* Features/options */
|
|
|
|
|
PortalStrategy strategy; /* see above */
|
|
|
|
|
int cursorOptions; /* DECLARE CURSOR option bits */
|
|
|
|
|
|
|
|
|
|
/* Status data */
|
|
|
|
|
PortalStatus status; /* see above */
|
|
|
|
|
bool portalUtilReady; /* PortalRunUtility complete? */
|
|
|
|
|
|
|
|
|
|
/* If not NULL, Executor is active; call ExecutorEnd eventually: */
|
|
|
|
|
QueryDesc *queryDesc; /* info needed for executor invocation */
|
|
|
|
|
|
|
|
|
|
/* If portal returns tuples, this is their tupdesc: */
|
|
|
|
|
TupleDesc tupDesc; /* descriptor for result tuples */
|
|
|
|
|
/* and these are the format codes to use for the columns: */
|
|
|
|
|
int16 *formats; /* a format code for each column */
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Where we store tuples for a held cursor or a PORTAL_UTIL_SELECT
|
|
|
|
|
* query. (A cursor held past the end of its transaction no longer has
|
|
|
|
|
* any active executor state.)
|
|
|
|
|
*/
|
|
|
|
|
Tuplestorestate *holdStore; /* store for holdable cursors */
|
|
|
|
|
MemoryContext holdContext; /* memory containing holdStore */
|
This patch implements holdable cursors, following the proposal
(materialization into a tuple store) discussed on pgsql-hackers earlier.
I've updated the documentation and the regression tests.
Notes on the implementation:
- I needed to change the tuple store API slightly -- it assumes that it
won't be used to hold data across transaction boundaries, so the temp
files that it uses for on-disk storage are automatically reclaimed at
end-of-transaction. I added a flag to tuplestore_begin_heap() to control
this behavior. Is changing the tuple store API in this fashion OK?
- in order to store executor results in a tuple store, I added a new
CommandDest. This works well for the most part, with one exception: the
current DestFunction API doesn't provide enough information to allow the
Executor to store results into an arbitrary tuple store (where the
particular tuple store to use is chosen by the call site of
ExecutorRun). To workaround this, I've temporarily hacked up a solution
that works, but is not ideal: since the receiveTuple DestFunction is
passed the portal name, we can use that to lookup the Portal data
structure for the cursor and then use that to get at the tuple store the
Portal is using. This unnecessarily ties the Portal code with the
tupleReceiver code, but it works...
The proper fix for this is probably to change the DestFunction API --
Tom suggested passing the full QueryDesc to the receiveTuple function.
In that case, callers of ExecutorRun could "subclass" QueryDesc to add
any additional fields that their particular CommandDest needed to get
access to. This approach would work, but I'd like to think about it for
a little bit longer before deciding which route to go. In the mean time,
the code works fine, so I don't think a fix is urgent.
- (semi-related) I added a NO SCROLL keyword to DECLARE CURSOR, and
adjusted the behavior of SCROLL in accordance with the discussion on
-hackers.
- (unrelated) Cleaned up some SGML markup in sql.sgml, copy.sgml
Neil Conway
23 years ago
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* atStart, atEnd and portalPos indicate the current cursor position.
|
|
|
|
|
* portalPos is zero before the first row, N after fetching N'th row
|
|
|
|
|
* of query. After we run off the end, portalPos = # of rows in
|
|
|
|
|
* query, and atEnd is true. If portalPos overflows, set posOverflow
|
|
|
|
|
* (this causes us to stop relying on its value for navigation). Note
|
|
|
|
|
* that atStart implies portalPos == 0, but not the reverse (portalPos
|
|
|
|
|
* could have overflowed).
|
|
|
|
|
*/
|
|
|
|
|
bool atStart;
|
|
|
|
|
bool atEnd;
|
|
|
|
|
bool posOverflow;
|
|
|
|
|
long portalPos;
|
|
|
|
|
} PortalData;
|
|
|
|
|
|
|
|
|
|
/*
 * PortalIsValid
 *		True iff portal is valid (i.e., non-NULL pointer).
 */
#define PortalIsValid(p) PointerIsValid(p)
/*
 * Access macros for Portal ... use these in preference to field access.
 */
#define PortalGetQueryDesc(portal)	((portal)->queryDesc)
#define PortalGetHeapMemory(portal) ((portal)->heap)
/* Prototypes for functions in utils/mmgr/portalmem.c */
|
|
|
|
|
extern void EnablePortalManager(void);
|
|
|
|
|
extern bool CommitHoldablePortals(void);
|
|
|
|
|
extern bool PrepareHoldablePortals(void);
|
|
|
|
|
extern void AtCommit_Portals(void);
|
|
|
|
|
extern void AtAbort_Portals(void);
|
|
|
|
|
extern void AtCleanup_Portals(void);
|
|
|
|
|
extern void AtSubCommit_Portals(SubTransactionId mySubid,
|
|
|
|
|
SubTransactionId parentSubid,
|
|
|
|
|
ResourceOwner parentXactOwner);
|
|
|
|
|
extern void AtSubAbort_Portals(SubTransactionId mySubid,
|
|
|
|
|
SubTransactionId parentSubid,
|
|
|
|
|
ResourceOwner parentXactOwner);
|
|
|
|
|
extern void AtSubCleanup_Portals(SubTransactionId mySubid);
|
|
|
|
|
extern Portal CreatePortal(const char *name, bool allowDup, bool dupSilent);
|
|
|
|
|
extern Portal CreateNewPortal(void);
|
|
|
|
|
extern void PortalDrop(Portal portal, bool isTopCommit);
|
|
|
|
|
extern void DropDependentPortals(MemoryContext queryContext);
|
|
|
|
|
extern Portal GetPortalByName(const char *name);
|
|
|
|
|
extern void PortalDefineQuery(Portal portal,
|
|
|
|
|
const char *sourceText,
|
|
|
|
|
const char *commandTag,
|
|
|
|
|
List *parseTrees,
|
|
|
|
|
List *planTrees,
|
|
|
|
|
MemoryContext queryContext);
|
|
|
|
|
extern void PortalCreateHoldStore(Portal portal);
|
|
|
|
|
|
|
|
|
|
#endif /* PORTAL_H */
|