@@ -283,9 +283,9 @@ static char *convertRegProcReference(const char *proc);
 static char *getFormattedOperatorName(const char *oproid);
 static char *convertTSFunction(Archive *fout, Oid funcOid);
 static const char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
-static void getBlobs(Archive *fout);
-static void dumpBlob(Archive *fout, const BlobInfo *binfo);
-static int	dumpBlobs(Archive *fout, const void *arg);
+static void getLOs(Archive *fout);
+static void dumpLO(Archive *fout, const LoInfo *binfo);
+static int	dumpLOs(Archive *fout, const void *arg);
 static void dumpPolicy(Archive *fout, const PolicyInfo *polinfo);
 static void dumpPublication(Archive *fout, const PublicationInfo *pubinfo);
 static void dumpPublicationTable(Archive *fout, const PublicationRelInfo *pubrinfo);
@@ -356,7 +356,9 @@ main(int argc, char **argv)
 	static struct option long_options[] = {
 		{"data-only", no_argument, NULL, 'a'},
 		{"blobs", no_argument, NULL, 'b'},
+		{"large-objects", no_argument, NULL, 'b'},
 		{"no-blobs", no_argument, NULL, 'B'},
+		{"no-large-objects", no_argument, NULL, 'B'},
 		{"clean", no_argument, NULL, 'c'},
 		{"create", no_argument, NULL, 'C'},
 		{"dbname", required_argument, NULL, 'd'},
@@ -460,12 +462,12 @@ main(int argc, char **argv)
 				dopt.dataOnly = true;
 				break;
 
-			case 'b':			/* Dump blobs */
-				dopt.outputBlobs = true;
+			case 'b':			/* Dump LOs */
+				dopt.outputLOs = true;
 				break;
 
-			case 'B':			/* Don't dump blobs */
-				dopt.dontOutputBlobs = true;
+			case 'B':			/* Don't dump LOs */
+				dopt.dontOutputLOs = true;
 				break;
 
 			case 'c':			/* clean (i.e., drop) schema prior to create */
@@ -841,16 +843,16 @@ main(int argc, char **argv)
 	}
 
 	/*
-	 * Dumping blobs is the default for dumps where an inclusion switch is not
-	 * used (an "include everything" dump).  -B can be used to exclude blobs
-	 * from those dumps.  -b can be used to include blobs even when an
+	 * Dumping LOs is the default for dumps where an inclusion switch is not
+	 * used (an "include everything" dump).  -B can be used to exclude LOs
+	 * from those dumps.  -b can be used to include LOs even when an
 	 * inclusion switch is used.
 	 *
-	 * -s means "schema only" and blobs are data, not schema, so we never
-	 * include blobs when -s is used.
+	 * -s means "schema only" and LOs are data, not schema, so we never
+	 * include LOs when -s is used.
 	 */
-	if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputBlobs)
-		dopt.outputBlobs = true;
+	if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputLOs)
+		dopt.outputLOs = true;
 
 	/*
 	 * Collect role names so we can map object owner OIDs to names.
@@ -875,15 +877,15 @@ main(int argc, char **argv)
 		getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
 
 	/*
-	 * In binary-upgrade mode, we do not have to worry about the actual blob
+	 * In binary-upgrade mode, we do not have to worry about the actual LO
 	 * data or the associated metadata that resides in the pg_largeobject and
 	 * pg_largeobject_metadata tables, respectively.
 	 *
-	 * However, we do need to collect blob information as there may be
-	 * comments or other information on blobs that we do need to dump out.
+	 * However, we do need to collect LO information as there may be
+	 * comments or other information on LOs that we do need to dump out.
 	 */
-	if (dopt.outputBlobs || dopt.binary_upgrade)
-		getBlobs(fout);
+	if (dopt.outputLOs || dopt.binary_upgrade)
+		getLOs(fout);
 
 	/*
 	 * Collect dependency data to assist in ordering the objects.
@@ -1036,8 +1038,10 @@ help(const char *progname)
 	printf(_("\nOptions controlling the output content:\n"));
 	printf(_("  -a, --data-only              dump only the data, not the schema\n"));
-	printf(_("  -b, --blobs                  include large objects in dump\n"));
-	printf(_("  -B, --no-blobs               exclude large objects in dump\n"));
+	printf(_("  -b, --large-objects, --blobs\n"
+			 "                               include large objects in dump\n"));
+	printf(_("  -B, --no-large-objects, --no-blobs\n"
+			 "                               exclude large objects in dump\n"));
 	printf(_("  -c, --clean                  clean (drop) database objects before recreating\n"));
 	printf(_("  -C, --create                 include commands to create database in dump\n"));
 	printf(_("  -e, --extension=PATTERN      dump the specified extension(s) only\n"));
 	printf(_("  -E, --encoding=ENCODING      dump the data in encoding ENCODING\n"));
@@ -3409,16 +3413,16 @@ dumpSearchPath(Archive *AH)
 }
 
 /*
- * getBlobs:
+ * getLOs:
  *	Collect schema-level data about large objects
  */
 static void
-getBlobs(Archive *fout)
+getLOs(Archive *fout)
 {
 	DumpOptions *dopt = fout->dopt;
-	PQExpBuffer blobQry = createPQExpBuffer();
-	BlobInfo   *binfo;
-	DumpableObject *bdata;
+	PQExpBuffer loQry = createPQExpBuffer();
+	LoInfo	   *loinfo;
+	DumpableObject *lodata;
 	PGresult   *res;
 	int			ntups;
 	int			i;
@@ -3429,13 +3433,13 @@ getBlobs(Archive *fout)
 	pg_log_info("reading large objects");
 
-	/* Fetch BLOB OIDs, and owner/ACL data */
-	appendPQExpBufferStr(blobQry,
+	/* Fetch LO OIDs, and owner/ACL data */
+	appendPQExpBufferStr(loQry,
 						 "SELECT oid, lomowner, lomacl, "
 						 "acldefault('L', lomowner) AS acldefault "
 						 "FROM pg_largeobject_metadata");
 
-	res = ExecuteSqlQuery(fout, blobQry->data, PGRES_TUPLES_OK);
+	res = ExecuteSqlQuery(fout, loQry->data, PGRES_TUPLES_OK);
 
 	i_oid = PQfnumber(res, "oid");
 	i_lomowner = PQfnumber(res, "lomowner");
 	i_lomacl = PQfnumber(res, "lomacl");
@@ -3445,40 +3449,40 @@ getBlobs(Archive *fout)
 	ntups = PQntuples(res);
 
 	/*
-	 * Each large object has its own BLOB archive entry.
+	 * Each large object has its own "BLOB" archive entry.
 	 */
-	binfo = (BlobInfo *) pg_malloc(ntups * sizeof(BlobInfo));
+	loinfo = (LoInfo *) pg_malloc(ntups * sizeof(LoInfo));
 
 	for (i = 0; i < ntups; i++)
 	{
-		binfo[i].dobj.objType = DO_BLOB;
-		binfo[i].dobj.catId.tableoid = LargeObjectRelationId;
-		binfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
-		AssignDumpId(&binfo[i].dobj);
+		loinfo[i].dobj.objType = DO_LARGE_OBJECT;
+		loinfo[i].dobj.catId.tableoid = LargeObjectRelationId;
+		loinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
+		AssignDumpId(&loinfo[i].dobj);
 
-		binfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
-		binfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_lomacl));
-		binfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
-		binfo[i].dacl.privtype = 0;
-		binfo[i].dacl.initprivs = NULL;
-		binfo[i].rolname = getRoleName(PQgetvalue(res, i, i_lomowner));
+		loinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
+		loinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_lomacl));
+		loinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
+		loinfo[i].dacl.privtype = 0;
+		loinfo[i].dacl.initprivs = NULL;
+		loinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_lomowner));
 
-		/* Blobs have data */
-		binfo[i].dobj.components |= DUMP_COMPONENT_DATA;
+		/* LOs have data */
+		loinfo[i].dobj.components |= DUMP_COMPONENT_DATA;
 
-		/* Mark whether blob has an ACL */
+		/* Mark whether LO has an ACL */
 		if (!PQgetisnull(res, i, i_lomacl))
-			binfo[i].dobj.components |= DUMP_COMPONENT_ACL;
+			loinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
 
 		/*
-		 * In binary-upgrade mode for blobs, we do *not* dump out the blob
+		 * In binary-upgrade mode for LOs, we do *not* dump out the LO
 		 * data, as it will be copied by pg_upgrade, which simply copies the
 		 * pg_largeobject table. We *do* however dump out anything but the
 		 * data, as pg_upgrade copies just pg_largeobject, but not
 		 * pg_largeobject_metadata, after the dump is restored.
 		 */
 		if (dopt->binary_upgrade)
-			binfo[i].dobj.dump &= ~DUMP_COMPONENT_DATA;
+			loinfo[i].dobj.dump &= ~DUMP_COMPONENT_DATA;
 	}
 
 	/*
@@ -3487,77 +3491,77 @@ getBlobs(Archive *fout)
 	 */
 	if (ntups > 0)
 	{
-		bdata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
-		bdata->objType = DO_BLOB_DATA;
-		bdata->catId = nilCatalogId;
-		AssignDumpId(bdata);
-		bdata->name = pg_strdup("BLOBS");
-		bdata->components |= DUMP_COMPONENT_DATA;
+		lodata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
+		lodata->objType = DO_LARGE_OBJECT_DATA;
+		lodata->catId = nilCatalogId;
+		AssignDumpId(lodata);
+		lodata->name = pg_strdup("BLOBS");
+		lodata->components |= DUMP_COMPONENT_DATA;
 	}
 
 	PQclear(res);
-	destroyPQExpBuffer(blobQry);
+	destroyPQExpBuffer(loQry);
 }
 
 /*
- * dumpBlob
+ * dumpLO
  *
  * dump the definition (metadata) of the given large object
  */
 static void
-dumpBlob(Archive *fout, const BlobInfo *binfo)
+dumpLO(Archive *fout, const LoInfo *loinfo)
 {
 	PQExpBuffer cquery = createPQExpBuffer();
 	PQExpBuffer dquery = createPQExpBuffer();
 
 	appendPQExpBuffer(cquery,
 					  "SELECT pg_catalog.lo_create('%s');\n",
-					  binfo->dobj.name);
+					  loinfo->dobj.name);
 
 	appendPQExpBuffer(dquery,
 					  "SELECT pg_catalog.lo_unlink('%s');\n",
-					  binfo->dobj.name);
+					  loinfo->dobj.name);
 
-	if (binfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
-		ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId,
-					 ARCHIVE_OPTS(.tag = binfo->dobj.name,
-								  .owner = binfo->rolname,
+	if (loinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
+		ArchiveEntry(fout, loinfo->dobj.catId, loinfo->dobj.dumpId,
+					 ARCHIVE_OPTS(.tag = loinfo->dobj.name,
+								  .owner = loinfo->rolname,
 								  .description = "BLOB",
 								  .section = SECTION_PRE_DATA,
 								  .createStmt = cquery->data,
 								  .dropStmt = dquery->data));
 
 	/* Dump comment if any */
-	if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
-		dumpComment(fout, "LARGE OBJECT", binfo->dobj.name,
-					NULL, binfo->rolname,
-					binfo->dobj.catId, 0, binfo->dobj.dumpId);
+	if (loinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
+		dumpComment(fout, "LARGE OBJECT", loinfo->dobj.name,
+					NULL, loinfo->rolname,
+					loinfo->dobj.catId, 0, loinfo->dobj.dumpId);
 
 	/* Dump security label if any */
-	if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
-		dumpSecLabel(fout, "LARGE OBJECT", binfo->dobj.name,
-					 NULL, binfo->rolname,
-					 binfo->dobj.catId, 0, binfo->dobj.dumpId);
+	if (loinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
+		dumpSecLabel(fout, "LARGE OBJECT", loinfo->dobj.name,
+					 NULL, loinfo->rolname,
+					 loinfo->dobj.catId, 0, loinfo->dobj.dumpId);
 
 	/* Dump ACL if any */
-	if (binfo->dobj.dump & DUMP_COMPONENT_ACL)
-		dumpACL(fout, binfo->dobj.dumpId, InvalidDumpId, "LARGE OBJECT",
-				binfo->dobj.name, NULL,
-				NULL, binfo->rolname, &binfo->dacl);
+	if (loinfo->dobj.dump & DUMP_COMPONENT_ACL)
+		dumpACL(fout, loinfo->dobj.dumpId, InvalidDumpId, "LARGE OBJECT",
+				loinfo->dobj.name, NULL,
+				NULL, loinfo->rolname, &loinfo->dacl);
 
 	destroyPQExpBuffer(cquery);
 	destroyPQExpBuffer(dquery);
 }
 
 /*
- * dumpBlobs:
+ * dumpLOs:
  *	dump the data contents of all large objects
  */
 static int
-dumpBlobs(Archive *fout, const void *arg)
+dumpLOs(Archive *fout, const void *arg)
 {
-	const char *blobQry;
-	const char *blobFetchQry;
+	const char *loQry;
+	const char *loFetchQry;
 	PGconn	   *conn = GetConnection(fout);
 	PGresult   *res;
 	char		buf[LOBBUFSIZE];
@@ -3568,38 +3572,38 @@ dumpBlobs(Archive *fout, const void *arg)
 	pg_log_info("saving large objects");
 
 	/*
-	 * Currently, we re-fetch all BLOB OIDs using a cursor.  Consider scanning
+	 * Currently, we re-fetch all LO OIDs using a cursor.  Consider scanning
 	 * the already-in-memory dumpable objects instead...
 	 */
-	blobQry =
-		"DECLARE bloboid CURSOR FOR "
+	loQry =
+		"DECLARE looid CURSOR FOR "
 		"SELECT oid FROM pg_largeobject_metadata ORDER BY 1";
 
-	ExecuteSqlStatement(fout, blobQry);
+	ExecuteSqlStatement(fout, loQry);
 
 	/* Command to fetch from cursor */
-	blobFetchQry = "FETCH 1000 IN bloboid";
+	loFetchQry = "FETCH 1000 IN looid";
 
 	do
 	{
 		/* Do a fetch */
-		res = ExecuteSqlQuery(fout, blobFetchQry, PGRES_TUPLES_OK);
+		res = ExecuteSqlQuery(fout, loFetchQry, PGRES_TUPLES_OK);
 
 		/* Process the tuples, if any */
 		ntups = PQntuples(res);
 		for (i = 0; i < ntups; i++)
 		{
-			Oid			blobOid;
+			Oid			loOid;
 			int			loFd;
 
-			blobOid = atooid(PQgetvalue(res, i, 0));
-			/* Open the BLOB */
-			loFd = lo_open(conn, blobOid, INV_READ);
+			loOid = atooid(PQgetvalue(res, i, 0));
+			/* Open the LO */
+			loFd = lo_open(conn, loOid, INV_READ);
 			if (loFd == -1)
 				pg_fatal("could not open large object %u: %s",
-						 blobOid, PQerrorMessage(conn));
+						 loOid, PQerrorMessage(conn));
 
-			StartBlob(fout, blobOid);
+			StartLO(fout, loOid);
 
 			/* Now read it in chunks, sending data to archive */
 			do
@@ -3607,14 +3611,14 @@ dumpBlobs(Archive *fout, const void *arg)
 				cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
 				if (cnt < 0)
 					pg_fatal("error reading large object %u: %s",
-							 blobOid, PQerrorMessage(conn));
+							 loOid, PQerrorMessage(conn));
 
 				WriteData(fout, buf, cnt);
 			} while (cnt > 0);
 
 			lo_close(conn, loFd);
 
-			EndBlob(fout, blobOid);
+			EndLO(fout, loOid);
 		}
 
 		PQclear(res);
@@ -9483,7 +9487,7 @@ dumpCommentExtended(Archive *fout, const char *type,
 	if (dopt->no_comments)
 		return;
 
-	/* Comments are schema not data ... except blob comments are data */
+	/* Comments are schema not data ... except LO comments are data */
 	if (strcmp(type, "LARGE OBJECT") != 0)
 	{
 		if (dopt->dataOnly)
@@ -9491,7 +9495,7 @@ dumpCommentExtended(Archive *fout, const char *type,
 	}
 	else
 	{
-		/* We do dump blob comments in binary-upgrade mode */
+		/* We do dump LO comments in binary-upgrade mode */
 		if (dopt->schemaOnly && !dopt->binary_upgrade)
 			return;
 	}
@@ -9971,10 +9975,10 @@ dumpDumpableObject(Archive *fout, DumpableObject *dobj)
 		case DO_DEFAULT_ACL:
 			dumpDefaultACL(fout, (const DefaultACLInfo *) dobj);
 			break;
-		case DO_BLOB:
-			dumpBlob(fout, (const BlobInfo *) dobj);
+		case DO_LARGE_OBJECT:
+			dumpLO(fout, (const LoInfo *) dobj);
 			break;
-		case DO_BLOB_DATA:
+		case DO_LARGE_OBJECT_DATA:
 			if (dobj->dump & DUMP_COMPONENT_DATA)
 			{
 				TocEntry   *te;
@@ -9983,19 +9987,19 @@ dumpDumpableObject(Archive *fout, DumpableObject *dobj)
 								  ARCHIVE_OPTS(.tag = dobj->name,
 											   .description = "BLOBS",
 											   .section = SECTION_DATA,
-											   .dumpFn = dumpBlobs));
+											   .dumpFn = dumpLOs));
 
 				/*
 				 * Set the TocEntry's dataLength in case we are doing a
 				 * parallel dump and want to order dump jobs by table size.
 				 * (We need some size estimate for every TocEntry with a
 				 * DataDumper function.)  We don't currently have any cheap
-				 * way to estimate the size of blobs, but it doesn't matter;
+				 * way to estimate the size of LOs, but it doesn't matter;
 				 * let's just set the size to a large value so parallel dumps
-				 * will launch this job first.  If there's lots of blobs, we
+				 * will launch this job first.  If there's lots of LOs, we
 				 * win, and if there aren't, we don't lose much.  (If you want
 				 * to improve on this, really what you should be thinking
-				 * about is allowing blob dumping to be parallelized, not just
+				 * about is allowing LO dumping to be parallelized, not just
 				 * getting a smarter estimate for the single TOC entry.)
 				 */
 				te->dataLength = INT_MAX;
@@ -14467,7 +14471,7 @@ dumpACL(Archive *fout, DumpId objDumpId, DumpId altDumpId,
 	if (dopt->aclsSkip)
 		return InvalidDumpId;
 
-	/* --data-only skips ACLs *except* BLOB ACLs */
+	/* --data-only skips ACLs *except* large object ACLs */
 	if (dopt->dataOnly && strcmp(type, "LARGE OBJECT") != 0)
 		return InvalidDumpId;
 
@@ -14589,7 +14593,7 @@ dumpSecLabel(Archive *fout, const char *type, const char *name,
 	if (dopt->no_security_labels)
 		return;
 
-	/* Security labels are schema not data ... except blob labels are data */
+	/* Security labels are schema not data ... except large object labels are data */
 	if (strcmp(type, "LARGE OBJECT") != 0)
 	{
 		if (dopt->dataOnly)
@@ -14597,7 +14601,7 @@ dumpSecLabel(Archive *fout, const char *type, const char *name,
 	}
 	else
 	{
-		/* We do dump blob security labels in binary-upgrade mode */
+		/* We do dump large object security labels in binary-upgrade mode */
 		if (dopt->schemaOnly && !dopt->binary_upgrade)
 			return;
 	}
@@ -17945,13 +17949,13 @@ addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
 			case DO_FDW:
 			case DO_FOREIGN_SERVER:
 			case DO_TRANSFORM:
-			case DO_BLOB:
+			case DO_LARGE_OBJECT:
 				/* Pre-data objects: must come before the pre-data boundary */
 				addObjectDependency(preDataBound, dobj->dumpId);
 				break;
 			case DO_TABLE_DATA:
 			case DO_SEQUENCE_SET:
-			case DO_BLOB_DATA:
+			case DO_LARGE_OBJECT_DATA:
 				/* Data objects: must come between the boundaries */
 				addObjectDependency(dobj, preDataBound->dumpId);
 				addObjectDependency(postDataBound, dobj->dumpId);