Post-PG 10 beta1 pgindent run

perltidy run not included.
Bruce Momjian
parent 8a94332478
commit a6fd7b7a5f
Files changed (changed lines per file in parentheses):

  1. contrib/bloom/blinsert.c (10)
  2. contrib/dblink/dblink.c (15)
  3. contrib/pgcrypto/openssl.c (6)
  4. contrib/pgcrypto/pgcrypto.c (4)
  5. contrib/pgrowlocks/pgrowlocks.c (5)
  6. contrib/postgres_fdw/deparse.c (8)
  7. contrib/postgres_fdw/postgres_fdw.c (8)
  8. src/backend/access/brin/brin.c (3)
  9. src/backend/access/gin/ginvacuum.c (14)
  10. src/backend/access/hash/hash.c (26)
  11. src/backend/access/hash/hash_xlog.c (10)
  12. src/backend/access/hash/hashinsert.c (29)
  13. src/backend/access/hash/hashpage.c (38)
  14. src/backend/access/hash/hashutil.c (11)
  15. src/backend/access/heap/heapam.c (21)
  16. src/backend/access/nbtree/nbtree.c (10)
  17. src/backend/access/spgist/spginsert.c (8)
  18. src/backend/access/transam/clog.c (8)
  19. src/backend/access/transam/commit_ts.c (8)
  20. src/backend/access/transam/subtrans.c (12)
  21. src/backend/access/transam/twophase.c (47)
  22. src/backend/access/transam/xact.c (7)
  23. src/backend/access/transam/xlog.c (88)
  24. src/backend/access/transam/xlogfuncs.c (3)
  25. src/backend/access/transam/xloginsert.c (17)
  26. src/backend/access/transam/xlogreader.c (4)
  27. src/backend/access/transam/xlogutils.c (39)
  28. src/backend/catalog/dependency.c (6)
  29. src/backend/catalog/heap.c (8)
  30. src/backend/catalog/pg_depend.c (6)
  31. src/backend/catalog/pg_subscription.c (7)
  32. src/backend/commands/dbcommands.c (3)
  33. src/backend/commands/dropcmds.c (3)
  34. src/backend/commands/foreigncmds.c (4)
  35. src/backend/commands/publicationcmds.c (5)
  36. src/backend/commands/subscriptioncmds.c (45)
  37. src/backend/commands/tablecmds.c (14)
  38. src/backend/commands/trigger.c (3)
  39. src/backend/executor/execAmi.c (1)
  40. src/backend/executor/execParallel.c (6)
  41. src/backend/executor/execReplication.c (8)
  42. src/backend/executor/nodeAppend.c (4)
  43. src/backend/executor/nodeBitmapHeapscan.c (5)
  44. src/backend/executor/nodeGatherMerge.c (18)
  45. src/backend/executor/nodeMergeAppend.c (4)
  46. src/backend/executor/nodeModifyTable.c (10)
  47. src/backend/executor/nodeTableFuncscan.c (4)
  48. src/backend/libpq/auth.c (12)
  49. src/backend/libpq/crypt.c (5)
  50. src/backend/libpq/hba.c (5)
  51. src/backend/nodes/nodeFuncs.c (3)
  52. src/backend/nodes/tidbitmap.c (4)
  53. src/backend/optimizer/path/allpaths.c (1)
  54. src/backend/optimizer/path/costsize.c (4)
  55. src/backend/optimizer/path/indxpath.c (4)
  56. src/backend/optimizer/plan/planner.c (18)
  57. src/backend/optimizer/plan/setrefs.c (5)
  58. src/backend/optimizer/prep/prepunion.c (11)
  59. src/backend/optimizer/util/relnode.c (6)
  60. src/backend/parser/parse_clause.c (6)
  61. src/backend/parser/parse_relation.c (1)
  62. src/backend/parser/parse_utilcmd.c (54)
  63. src/backend/postmaster/bgwriter.c (4)
  64. src/backend/postmaster/pgstat.c (1)
  65. src/backend/postmaster/postmaster.c (13)
  66. src/backend/replication/basebackup.c (14)
  67. src/backend/replication/libpqwalreceiver/libpqwalreceiver.c (10)
  68. src/backend/replication/logical/launcher.c (13)
  69. src/backend/replication/logical/logicalfuncs.c (18)
  70. src/backend/replication/logical/relation.c (11)
  71. src/backend/replication/logical/snapbuild.c (23)
  72. src/backend/replication/logical/tablesync.c (59)
  73. src/backend/replication/logical/worker.c (81)
  74. src/backend/replication/pgoutput/pgoutput.c (22)
  75. src/backend/replication/slot.c (4)
  76. src/backend/replication/slotfuncs.c (10)
  77. src/backend/replication/syncrep.c (18)
  78. src/backend/replication/walreceiver.c (11)
  79. src/backend/replication/walsender.c (61)
  80. src/backend/statistics/extended_stats.c (4)
  81. src/backend/statistics/mvdistinct.c (8)
  82. src/backend/storage/file/fd.c (4)
  83. src/backend/storage/lmgr/condition_variable.c (4)
  84. src/backend/tcop/utility.c (2)
  85. src/backend/tsearch/to_tsany.c (12)
  86. src/backend/utils/adt/cash.c (6)
  87. src/backend/utils/adt/dbsize.c (8)
  88. src/backend/utils/adt/formatting.c (51)
  89. src/backend/utils/adt/jsonfuncs.c (52)
  90. src/backend/utils/adt/like.c (6)
  91. src/backend/utils/adt/pg_locale.c (14)
  92. src/backend/utils/adt/selfuncs.c (26)
  93. src/backend/utils/adt/txid.c (8)
  94. src/backend/utils/adt/varlena.c (19)
  95. src/backend/utils/adt/xml.c (5)
  96. src/backend/utils/cache/inval.c (15)
  97. src/backend/utils/cache/relcache.c (14)
  98. src/backend/utils/mb/conv.c (1)
  99. src/backend/utils/misc/backend_random.c (4)
  100. src/backend/utils/sort/tuplesort.c (4)
Some files were not shown because too many files have changed in this diff.
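Every hunk below is a mechanical re-indentation or comment re-wrap of the kind pgindent produces; none of it changes behavior. As a minimal, hypothetical sketch (not code from this commit), the layout conventions being enforced look roughly like this:

/*
 * Hypothetical sketch only -- not part of this commit.  It illustrates the
 * layout a pgindent run enforces: tab-aligned declarator names and block
 * comments re-flowed so the text fills each line out to the target width
 * (the wording itself is untouched, only the wrap points move).
 */
static int
example_layout(int a, int b)
{
	int			sum = a + b;	/* declaration names are tab-aligned */
	int			twice;

	/*
	 * Comment text is re-flowed to fill each line up to the target width
	 * rather than breaking early, exactly like the hunks in this commit.
	 */
	twice = sum * 2;
	return twice;
}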

contrib/bloom/blinsert.c

@@ -165,11 +165,11 @@ blbuildempty(Relation index)
 	BloomFillMetapage(index, metapage);
 
 	/*
-	 * Write the page and log it. It might seem that an immediate sync
-	 * would be sufficient to guarantee that the file exists on disk, but
-	 * recovery itself might remove it while replaying, for example, an
-	 * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record. Therefore, we
-	 * need this even when wal_level=minimal.
+	 * Write the page and log it. It might seem that an immediate sync would
+	 * be sufficient to guarantee that the file exists on disk, but recovery
+	 * itself might remove it while replaying, for example, an
+	 * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record. Therefore, we need
+	 * this even when wal_level=minimal.
 	 */
 	PageSetChecksumInplace(metapage, BLOOM_METAPAGE_BLKNO);
 	smgrwrite(index->rd_smgr, INIT_FORKNUM, BLOOM_METAPAGE_BLKNO,

contrib/dblink/dblink.c

@@ -152,16 +152,19 @@ xpstrdup(const char *in)
 	return pstrdup(in);
 }
 
-static void pg_attribute_noreturn()
+static void
+pg_attribute_noreturn()
 dblink_res_internalerror(PGconn *conn, PGresult *res, const char *p2)
 {
 	char	   *msg = pchomp(PQerrorMessage(conn));
+
 	if (res)
 		PQclear(res);
 	elog(ERROR, "%s: %s", p2, msg);
 }
 
-static void pg_attribute_noreturn()
+static void
+pg_attribute_noreturn()
 dblink_conn_not_avail(const char *conname)
 {
 	if (conname)

@@ -201,6 +204,7 @@ dblink_get_conn(char *conname_or_str,
 	if (PQstatus(conn) == CONNECTION_BAD)
 	{
 		char	   *msg = pchomp(PQerrorMessage(conn));
+
 		PQfinish(conn);
 		ereport(ERROR,
 				(errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),

@@ -223,6 +227,7 @@ static PGconn *
 dblink_get_named_conn(const char *conname)
 {
 	remoteConn *rconn = getConnectionByName(conname);
+
 	if (rconn)
 		return rconn->conn;

@@ -2699,9 +2704,9 @@ dblink_res_error(PGconn *conn, const char *conname, PGresult *res,
 		message_context = xpstrdup(pg_diag_context);
 
 	/*
-	 * If we don't get a message from the PGresult, try the PGconn. This
-	 * is needed because for connection-level failures, PQexec may just
-	 * return NULL, not a PGresult at all.
+	 * If we don't get a message from the PGresult, try the PGconn. This is
+	 * needed because for connection-level failures, PQexec may just return
+	 * NULL, not a PGresult at all.
 	 */
 	if (message_primary == NULL)
 		message_primary = pchomp(PQerrorMessage(conn));

contrib/pgcrypto/openssl.c

@@ -706,13 +706,15 @@ static const struct ossl_cipher ossl_cast_cbc = {
 static const struct ossl_cipher ossl_aes_ecb = {
 	ossl_aes_ecb_init,
-	NULL,						/* EVP_aes_XXX_ecb(), determined in init function */
+	NULL,						/* EVP_aes_XXX_ecb(), determined in init
+								 * function */
 	128 / 8, 256 / 8
 };
 
 static const struct ossl_cipher ossl_aes_cbc = {
 	ossl_aes_cbc_init,
-	NULL,						/* EVP_aes_XXX_cbc(), determined in init function */
+	NULL,						/* EVP_aes_XXX_cbc(), determined in init
+								 * function */
 	128 / 8, 256 / 8
 };

contrib/pgcrypto/pgcrypto.c

@@ -454,8 +454,8 @@ pg_random_uuid(PG_FUNCTION_ARGS)
 	uint8	   *buf = (uint8 *) palloc(UUID_LEN);
 
 	/*
-	 * Generate random bits. pg_backend_random() will do here, we don't
-	 * promis UUIDs to be cryptographically random, when built with
+	 * Generate random bits. pg_backend_random() will do here, we don't promis
+	 * UUIDs to be cryptographically random, when built with
 	 * --disable-strong-random.
 	 */
 	if (!pg_backend_random((char *) buf, UUID_LEN))

contrib/pgrowlocks/pgrowlocks.c

@@ -99,7 +99,10 @@ pgrowlocks(PG_FUNCTION_ARGS)
 	relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
 	rel = heap_openrv(relrv, AccessShareLock);
 
-	/* check permissions: must have SELECT on table or be in pg_stat_scan_tables */
+	/*
+	 * check permissions: must have SELECT on table or be in
+	 * pg_stat_scan_tables
+	 */
 	aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
 								  ACL_SELECT);
 	if (aclresult != ACLCHECK_OK)

contrib/postgres_fdw/deparse.c

@@ -1017,8 +1017,8 @@ deparseSelectSql(List *tlist, bool is_subquery, List **retrieved_attrs,
 	{
 		/*
 		 * For a relation that is deparsed as a subquery, emit expressions
-		 * specified in the relation's reltarget. Note that since this is
-		 * for the subquery, no need to care about *retrieved_attrs.
+		 * specified in the relation's reltarget. Note that since this is for
+		 * the subquery, no need to care about *retrieved_attrs.
 		 */
 		deparseSubqueryTargetList(context);
 	}

@@ -2189,8 +2189,8 @@ deparseVar(Var *node, deparse_expr_cxt *context)
 	/*
 	 * If the Var belongs to the foreign relation that is deparsed as a
-	 * subquery, use the relation and column alias to the Var provided
-	 * by the subquery, instead of the remote name.
+	 * subquery, use the relation and column alias to the Var provided by the
+	 * subquery, instead of the remote name.
 	 */
 	if (is_subquery_var(node, context->scanrel, &relno, &colno))
 	{

contrib/postgres_fdw/postgres_fdw.c

@@ -4170,8 +4170,8 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype,
 	fpinfo->jointype = jointype;
 
 	/*
-	 * By default, both the input relations are not required to be deparsed
-	 * as subqueries, but there might be some relations covered by the input
+	 * By default, both the input relations are not required to be deparsed as
+	 * subqueries, but there might be some relations covered by the input
 	 * relations that are required to be deparsed as subqueries, so save the
 	 * relids of those relations for later use by the deparser.
 	 */

@@ -4227,8 +4227,8 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype,
 		case JOIN_FULL:
 
 			/*
-			 * In this case, if any of the input relations has conditions,
-			 * we need to deparse that relation as a subquery so that the
+			 * In this case, if any of the input relations has conditions, we
+			 * need to deparse that relation as a subquery so that the
 			 * conditions can be evaluated before the join. Remember it in
 			 * the fpinfo of this relation so that the deparser can take
 			 * appropriate action. Also, save the relids of base relations

src/backend/access/brin/brin.c

@@ -977,7 +977,8 @@ brin_desummarize_range(PG_FUNCTION_ARGS)
 						RelationGetRelationName(indexRel))));
 
 	/* the revmap does the hard work */
-	do {
+	do
+	{
 		done = brinRevmapDesummarizeRange(indexRel, heapBlk);
 	}
 	while (!done);

src/backend/access/gin/ginvacuum.c

@@ -140,9 +140,9 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
 	 * exclusive cleanup lock. This guarantees that no insertions currently
 	 * happen in this subtree. Caller also acquire Exclusive lock on deletable
 	 * page and is acquiring and releasing exclusive lock on left page before.
-	 * Left page was locked and released. Then parent and this page are locked.
-	 * We acquire left page lock here only to mark page dirty after changing
-	 * right pointer.
+	 * Left page was locked and released. Then parent and this page are
+	 * locked. We acquire left page lock here only to mark page dirty after
+	 * changing right pointer.
 	 */
 	lBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, leftBlkno,
 								 RBM_NORMAL, gvs->strategy);

@@ -354,8 +354,8 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
 		BlockNumber *children = palloc(sizeof(BlockNumber) * (maxoff + 1));
 
 		/*
-		 * Read all children BlockNumbers.
-		 * Not sure it is safe if there are many concurrent vacuums.
+		 * Read all children BlockNumbers. Not sure it is safe if there are
+		 * many concurrent vacuums.
 		 */
 
 		for (i = FirstOffsetNumber; i <= maxoff; i++)

@@ -380,8 +380,8 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
 	vacuum_delay_point();
 
 	/*
-	 * All subtree is empty - just return TRUE to indicate that parent must
-	 * do a cleanup. Unless we are ROOT an there is way to go upper.
+	 * All subtree is empty - just return TRUE to indicate that parent
+	 * must do a cleanup. Unless we are ROOT an there is way to go upper.
 	 */
 
 	if (hasEmptyChild && !hasNonEmptyChild && !isRoot)

src/backend/access/hash/hash.c

@@ -333,12 +333,12 @@ hashgettuple(IndexScanDesc scan, ScanDirection dir)
 		if (scan->kill_prior_tuple)
 		{
 			/*
-			 * Yes, so remember it for later. (We'll deal with all such
-			 * tuples at once right after leaving the index page or at
-			 * end of scan.) In case if caller reverses the indexscan
-			 * direction it is quite possible that the same item might
-			 * get entered multiple times. But, we don't detect that;
-			 * instead, we just forget any excess entries.
+			 * Yes, so remember it for later. (We'll deal with all such tuples
+			 * at once right after leaving the index page or at end of scan.)
+			 * In case if caller reverses the indexscan direction it is quite
+			 * possible that the same item might get entered multiple times.
+			 * But, we don't detect that; instead, we just forget any excess
+			 * entries.
 			 */
 			if (so->killedItems == NULL)
 				so->killedItems = palloc(MaxIndexTuplesPerPage *

@@ -477,9 +477,8 @@ hashrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
 	Relation	rel = scan->indexRelation;
 
 	/*
-	 * Before leaving current page, deal with any killed items.
-	 * Also, ensure that we acquire lock on current page before
-	 * calling _hash_kill_items.
+	 * Before leaving current page, deal with any killed items. Also, ensure
+	 * that we acquire lock on current page before calling _hash_kill_items.
 	 */
 	if (so->numKilled > 0)
 	{

@@ -516,9 +515,8 @@ hashendscan(IndexScanDesc scan)
 	Relation	rel = scan->indexRelation;
 
 	/*
-	 * Before leaving current page, deal with any killed items.
-	 * Also, ensure that we acquire lock on current page before
-	 * calling _hash_kill_items.
+	 * Before leaving current page, deal with any killed items. Also, ensure
+	 * that we acquire lock on current page before calling _hash_kill_items.
 	 */
 	if (so->numKilled > 0)
 	{

@@ -889,8 +887,8 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
 		/*
 		 * Let us mark the page as clean if vacuum removes the DEAD tuples
-		 * from an index page. We do this by clearing LH_PAGE_HAS_DEAD_TUPLES
-		 * flag.
+		 * from an index page. We do this by clearing
+		 * LH_PAGE_HAS_DEAD_TUPLES flag.
 		 */
 		if (tuples_removed && *tuples_removed > 0 &&
 			H_HAS_DEAD_TUPLES(opaque))

src/backend/access/hash/hash_xlog.c

@@ -984,9 +984,9 @@ hash_xlog_vacuum_get_latestRemovedXid(XLogReaderState *record)
 		return latestRemovedXid;
 
 	/*
-	 * Check if WAL replay has reached a consistent database state. If not,
-	 * we must PANIC. See the definition of btree_xlog_delete_get_latestRemovedXid
-	 * for more details.
+	 * Check if WAL replay has reached a consistent database state. If not, we
+	 * must PANIC. See the definition of
+	 * btree_xlog_delete_get_latestRemovedXid for more details.
 	 */
 	if (!reachedConsistency)
 		elog(PANIC, "hash_xlog_vacuum_get_latestRemovedXid: cannot operate with inconsistent data");

@@ -1146,8 +1146,8 @@ hash_xlog_vacuum_one_page(XLogReaderState *record)
 	}
 
 	/*
-	 * Mark the page as not containing any LP_DEAD items. See comments
-	 * in _hash_vacuum_one_page() for details.
+	 * Mark the page as not containing any LP_DEAD items. See comments in
+	 * _hash_vacuum_one_page() for details.
 	 */
 	pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
 	pageopaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;

src/backend/access/hash/hashinsert.c

@@ -63,8 +63,8 @@ restart_insert:
 	/*
 	 * Read the metapage. We don't lock it yet; HashMaxItemSize() will
-	 * examine pd_pagesize_version, but that can't change so we can examine
-	 * it without a lock.
+	 * examine pd_pagesize_version, but that can't change so we can examine it
+	 * without a lock.
 	 */
 	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_NOLOCK, LH_META_PAGE);
 	metapage = BufferGetPage(metabuf);

@@ -126,10 +126,9 @@ restart_insert:
 			BlockNumber nextblkno;
 
 			/*
-			 * Check if current page has any DEAD tuples. If yes,
-			 * delete these tuples and see if we can get a space for
-			 * the new item to be inserted before moving to the next
-			 * page in the bucket chain.
+			 * Check if current page has any DEAD tuples. If yes, delete these
+			 * tuples and see if we can get a space for the new item to be
+			 * inserted before moving to the next page in the bucket chain.
 			 */
 			if (H_HAS_DEAD_TUPLES(pageopaque))
 			{

@@ -360,8 +359,7 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
 	if (ndeletable > 0)
 	{
 		/*
-		 * Write-lock the meta page so that we can decrement
-		 * tuple count.
+		 * Write-lock the meta page so that we can decrement tuple count.
 		 */
 		LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);

@@ -374,8 +372,8 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
 		 * Mark the page as not containing any LP_DEAD items. This is not
 		 * certainly true (there might be some that have recently been marked,
 		 * but weren't included in our target-item list), but it will almost
-		 * always be true and it doesn't seem worth an additional page scan
-		 * to check it. Remember that LH_PAGE_HAS_DEAD_TUPLES is only a hint
+		 * always be true and it doesn't seem worth an additional page scan to
+		 * check it. Remember that LH_PAGE_HAS_DEAD_TUPLES is only a hint
 		 * anyway.
 		 */
 		pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);

@@ -401,9 +399,9 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
 		XLogRegisterData((char *) &xlrec, SizeOfHashVacuumOnePage);
 
 		/*
-		 * We need the target-offsets array whether or not we store the whole
-		 * buffer, to allow us to find the latestRemovedXid on a standby
-		 * server.
+		 * We need the target-offsets array whether or not we store the
+		 * whole buffer, to allow us to find the latestRemovedXid on a
+		 * standby server.
 		 */
 		XLogRegisterData((char *) deletable,
 						 ndeletable * sizeof(OffsetNumber));

@@ -417,9 +415,10 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
 	}
 
 	END_CRIT_SECTION();
 
 	/*
-	 * Releasing write lock on meta page as we have updated
-	 * the tuple count.
+	 * Releasing write lock on meta page as we have updated the tuple
+	 * count.
 	 */
 	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
 }

src/backend/access/hash/hashpage.c

@@ -177,8 +177,8 @@ _hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag,
 	pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
 
 	/*
-	 * Set hasho_prevblkno with current hashm_maxbucket. This value will
-	 * be used to validate cached HashMetaPageData. See
+	 * Set hasho_prevblkno with current hashm_maxbucket. This value will be
+	 * used to validate cached HashMetaPageData. See
 	 * _hash_getbucketbuf_from_hashkey().
 	 */
 	pageopaque->hasho_prevblkno = max_bucket;

@@ -509,8 +509,8 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
 	 * Choose the number of initial bucket pages to match the fill factor
 	 * given the estimated number of tuples. We round up the result to the
 	 * total number of buckets which has to be allocated before using its
-	 * _hashm_spare element. However always force at least 2 bucket pages.
-	 * The upper limit is determined by considerations explained in
+	 * _hashm_spare element. However always force at least 2 bucket pages. The
+	 * upper limit is determined by considerations explained in
 	 * _hash_expandtable().
 	 */
 	dnumbuckets = num_tuples / ffactor;

@@ -568,8 +568,8 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
 	metap->hashm_maxbucket = num_buckets - 1;
 
 	/*
-	 * Set highmask as next immediate ((2 ^ x) - 1), which should be sufficient
-	 * to cover num_buckets.
+	 * Set highmask as next immediate ((2 ^ x) - 1), which should be
+	 * sufficient to cover num_buckets.
 	 */
 	metap->hashm_highmask = (1 << (_hash_log2(num_buckets + 1))) - 1;
 	metap->hashm_lowmask = (metap->hashm_highmask >> 1);

@@ -748,8 +748,8 @@ restart_expand:
 	{
 		/*
 		 * Copy bucket mapping info now; refer to the comment in code below
-		 * where we copy this information before calling _hash_splitbucket
-		 * to see why this is okay.
+		 * where we copy this information before calling _hash_splitbucket to
+		 * see why this is okay.
 		 */
 		maxbucket = metap->hashm_maxbucket;
 		highmask = metap->hashm_highmask;

@@ -792,8 +792,7 @@ restart_expand:
 		 * We treat allocation of buckets as a separate WAL-logged action.
 		 * Even if we fail after this operation, won't leak bucket pages;
 		 * rather, the next split will consume this space. In any case, even
-		 * without failure we don't use all the space in one split
-		 * operation.
+		 * without failure we don't use all the space in one split operation.
 		 */
 		buckets_to_add = _hash_get_totalbuckets(spare_ndx) - new_bucket;
 		if (!_hash_alloc_buckets(rel, start_nblkno, buckets_to_add))

@@ -870,10 +869,9 @@ restart_expand:
 	/*
 	 * Mark the old bucket to indicate that split is in progress. (At
-	 * operation end, we will clear the split-in-progress flag.) Also,
-	 * for a primary bucket page, hasho_prevblkno stores the number of
-	 * buckets that existed as of the last split, so we must update that
-	 * value here.
+	 * operation end, we will clear the split-in-progress flag.) Also, for a
+	 * primary bucket page, hasho_prevblkno stores the number of buckets that
+	 * existed as of the last split, so we must update that value here.
 	 */
 	oopaque->hasho_flag |= LH_BUCKET_BEING_SPLIT;
 	oopaque->hasho_prevblkno = maxbucket;

@@ -1008,8 +1006,8 @@ _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
 	/*
 	 * Initialize the page. Just zeroing the page won't work; see
-	 * _hash_freeovflpage for similar usage. We take care to make the
-	 * special space valid for the benefit of tools such as pageinspect.
+	 * _hash_freeovflpage for similar usage. We take care to make the special
+	 * space valid for the benefit of tools such as pageinspect.
 	 */
 	_hash_pageinit(page, BLCKSZ);

@@ -1479,10 +1477,10 @@ _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
 		char	   *cache = NULL;
 
 		/*
-		 * It's important that we don't set rd_amcache to an invalid
-		 * value. Either MemoryContextAlloc or _hash_getbuf could fail,
-		 * so don't install a pointer to the newly-allocated storage in the
-		 * actual relcache entry until both have succeeeded.
+		 * It's important that we don't set rd_amcache to an invalid value.
+		 * Either MemoryContextAlloc or _hash_getbuf could fail, so don't
+		 * install a pointer to the newly-allocated storage in the actual
+		 * relcache entry until both have succeeeded.
 		 */
 		if (rel->rd_amcache == NULL)
 			cache = MemoryContextAlloc(rel->rd_indexcxt,

src/backend/access/hash/hashutil.c

@@ -531,7 +531,8 @@ _hash_kill_items(IndexScanDesc scan)
 	HashScanOpaque so = (HashScanOpaque) scan->opaque;
 	Page		page;
 	HashPageOpaque opaque;
-	OffsetNumber offnum, maxoff;
+	OffsetNumber offnum,
+				maxoff;
 	int			numKilled = so->numKilled;
 	int			i;
 	bool		killedsomething = false;

@@ -540,8 +541,8 @@ _hash_kill_items(IndexScanDesc scan)
 	Assert(so->killedItems != NULL);
 
 	/*
-	 * Always reset the scan state, so we don't look for same
-	 * items on other pages.
+	 * Always reset the scan state, so we don't look for same items on other
+	 * pages.
 	 */
 	so->numKilled = 0;

@@ -570,8 +571,8 @@ _hash_kill_items(IndexScanDesc scan)
 	}
 
 	/*
-	 * Since this can be redone later if needed, mark as dirty hint.
-	 * Whenever we mark anything LP_DEAD, we also set the page's
+	 * Since this can be redone later if needed, mark as dirty hint. Whenever
+	 * we mark anything LP_DEAD, we also set the page's
 	 * LH_PAGE_HAS_DEAD_TUPLES flag, which is likewise just a hint.
 	 */
 	if (killedsomething)

src/backend/access/heap/heapam.c

@@ -3518,10 +3518,10 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 	 *
 	 * For HOT considerations, this is wasted effort if we fail to update or
 	 * have to put the new tuple on a different page. But we must compute the
-	 * list before obtaining buffer lock --- in the worst case, if we are doing
-	 * an update on one of the relevant system catalogs, we could deadlock if
-	 * we try to fetch the list later. In any case, the relcache caches the
-	 * data so this is usually pretty cheap.
+	 * list before obtaining buffer lock --- in the worst case, if we are
+	 * doing an update on one of the relevant system catalogs, we could
+	 * deadlock if we try to fetch the list later. In any case, the relcache
+	 * caches the data so this is usually pretty cheap.
 	 *
 	 * We also need columns used by the replica identity and columns that are
 	 * considered the "key" of rows in the table.

@@ -3540,15 +3540,16 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 	page = BufferGetPage(buffer);
 	interesting_attrs = NULL;
 
 	/*
 	 * If the page is already full, there is hardly any chance of doing a HOT
 	 * update on this page. It might be wasteful effort to look for index
-	 * column updates only to later reject HOT updates for lack of space in the
-	 * same page. So we be conservative and only fetch hot_attrs if the page is
-	 * not already full. Since we are already holding a pin on the buffer,
-	 * there is no chance that the buffer can get cleaned up concurrently and
-	 * even if that was possible, in the worst case we lose a chance to do a
-	 * HOT update.
+	 * column updates only to later reject HOT updates for lack of space in
+	 * the same page. So we be conservative and only fetch hot_attrs if the
+	 * page is not already full. Since we are already holding a pin on the
+	 * buffer, there is no chance that the buffer can get cleaned up
+	 * concurrently and even if that was possible, in the worst case we lose a
+	 * chance to do a HOT update.
 	 */
 	if (!PageIsFull(page))
 	{

src/backend/access/nbtree/nbtree.c

@@ -289,11 +289,11 @@ btbuildempty(Relation index)
 	_bt_initmetapage(metapage, P_NONE, 0);
 
 	/*
-	 * Write the page and log it. It might seem that an immediate sync
-	 * would be sufficient to guarantee that the file exists on disk, but
-	 * recovery itself might remove it while replaying, for example, an
-	 * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record. Therefore, we
-	 * need this even when wal_level=minimal.
+	 * Write the page and log it. It might seem that an immediate sync would
+	 * be sufficient to guarantee that the file exists on disk, but recovery
+	 * itself might remove it while replaying, for example, an
+	 * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record. Therefore, we need
+	 * this even when wal_level=minimal.
 	 */
 	PageSetChecksumInplace(metapage, BTREE_METAPAGE);
 	smgrwrite(index->rd_smgr, INIT_FORKNUM, BTREE_METAPAGE,

src/backend/access/spgist/spginsert.c

@@ -164,10 +164,10 @@ spgbuildempty(Relation index)
 	/*
 	 * Write the page and log it unconditionally. This is important
-	 * particularly for indexes created on tablespaces and databases
-	 * whose creation happened after the last redo pointer as recovery
-	 * removes any of their existing content when the corresponding
-	 * create records are replayed.
+	 * particularly for indexes created on tablespaces and databases whose
+	 * creation happened after the last redo pointer as recovery removes any
+	 * of their existing content when the corresponding create records are
+	 * replayed.
 	 */
 	PageSetChecksumInplace(page, SPGIST_METAPAGE_BLKNO);
 	smgrwrite(index->rd_smgr, INIT_FORKNUM, SPGIST_METAPAGE_BLKNO,

src/backend/access/transam/clog.c

@@ -683,10 +683,10 @@ TruncateCLOG(TransactionId oldestXact, Oid oldestxid_datoid)
 									ShmemVariableCache->oldestXid));
 
 	/*
-	 * Write XLOG record and flush XLOG to disk. We record the oldest xid we're
-	 * keeping information about here so we can ensure that it's always ahead
-	 * of clog truncation in case we crash, and so a standby finds out the new
-	 * valid xid before the next checkpoint.
+	 * Write XLOG record and flush XLOG to disk. We record the oldest xid
+	 * we're keeping information about here so we can ensure that it's always
+	 * ahead of clog truncation in case we crash, and so a standby finds out
+	 * the new valid xid before the next checkpoint.
 	 */
 	WriteTruncateXlogRec(cutoffPage, oldestXact, oldestxid_datoid);

src/backend/access/transam/commit_ts.c

@@ -748,8 +748,8 @@ ShutdownCommitTs(void)
 	SimpleLruFlush(CommitTsCtl, false);
 
 	/*
-	 * fsync pg_commit_ts to ensure that any files flushed previously are durably
-	 * on disk.
+	 * fsync pg_commit_ts to ensure that any files flushed previously are
+	 * durably on disk.
 	 */
 	fsync_fname("pg_commit_ts", true);
 }

@@ -764,8 +764,8 @@ CheckPointCommitTs(void)
 	SimpleLruFlush(CommitTsCtl, true);
 
 	/*
-	 * fsync pg_commit_ts to ensure that any files flushed previously are durably
-	 * on disk.
+	 * fsync pg_commit_ts to ensure that any files flushed previously are
+	 * durably on disk.
 	 */
 	fsync_fname("pg_commit_ts", true);
 }

src/backend/access/transam/subtrans.c

@@ -87,9 +87,9 @@ SubTransSetParent(TransactionId xid, TransactionId parent)
 	ptr += entryno;
 
 	/*
-	 * It's possible we'll try to set the parent xid multiple times
-	 * but we shouldn't ever be changing the xid from one valid xid
-	 * to another valid xid, which would corrupt the data structure.
+	 * It's possible we'll try to set the parent xid multiple times but we
+	 * shouldn't ever be changing the xid from one valid xid to another valid
+	 * xid, which would corrupt the data structure.
 	 */
 	if (*ptr != parent)
 	{

@@ -162,9 +162,9 @@ SubTransGetTopmostTransaction(TransactionId xid)
 		parentXid = SubTransGetParent(parentXid);
 
 		/*
-		 * By convention the parent xid gets allocated first, so should
-		 * always precede the child xid. Anything else points to a corrupted
-		 * data structure that could lead to an infinite loop, so exit.
+		 * By convention the parent xid gets allocated first, so should always
+		 * precede the child xid. Anything else points to a corrupted data
+		 * structure that could lead to an infinite loop, so exit.
 		 */
 		if (!TransactionIdPrecedes(parentXid, previousXid))
 			elog(ERROR, "pg_subtrans contains invalid entry: xid %u points to parent xid %u",

src/backend/access/transam/twophase.c

@@ -1675,7 +1675,10 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
 	LWLockAcquire(TwoPhaseStateLock, LW_SHARED);
 	for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
 	{
-		/* Note that we are using gxact not pgxact so this works in recovery also */
+		/*
+		 * Note that we are using gxact not pgxact so this works in recovery
+		 * also
+		 */
 		GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
 
 		if ((gxact->valid || gxact->inredo) &&

@@ -1920,13 +1923,13 @@ RecoverPreparedTransactions(void)
 		xid = gxact->xid;
 
 		/*
-		 * Reconstruct subtrans state for the transaction --- needed
-		 * because pg_subtrans is not preserved over a restart. Note that
-		 * we are linking all the subtransactions directly to the
-		 * top-level XID; there may originally have been a more complex
-		 * hierarchy, but there's no need to restore that exactly.
-		 * It's possible that SubTransSetParent has been set before, if
-		 * the prepared transaction generated xid assignment records.
+		 * Reconstruct subtrans state for the transaction --- needed because
+		 * pg_subtrans is not preserved over a restart. Note that we are
+		 * linking all the subtransactions directly to the top-level XID;
+		 * there may originally have been a more complex hierarchy, but
+		 * there's no need to restore that exactly. It's possible that
+		 * SubTransSetParent has been set before, if the prepared transaction
+		 * generated xid assignment records.
 		 */
 		buf = ProcessTwoPhaseBuffer(xid,
 									gxact->prepare_start_lsn,

@@ -1949,9 +1952,8 @@ RecoverPreparedTransactions(void)
 		bufptr += MAXALIGN(hdr->ninvalmsgs * sizeof(SharedInvalidationMessage));
 
 		/*
-		 * Recreate its GXACT and dummy PGPROC. But, check whether
-		 * it was added in redo and already has a shmem entry for
-		 * it.
+		 * Recreate its GXACT and dummy PGPROC. But, check whether it was
+		 * added in redo and already has a shmem entry for it.
 		 */
 		LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
 		MarkAsPreparingGuts(gxact, xid, gid,

@@ -1980,9 +1982,8 @@ RecoverPreparedTransactions(void)
 		StandbyReleaseLockTree(xid, hdr->nsubxacts, subxids);
 
 		/*
-		 * We're done with recovering this transaction. Clear
-		 * MyLockedGxact, like we do in PrepareTransaction() during normal
-		 * operation.
+		 * We're done with recovering this transaction. Clear MyLockedGxact,
+		 * like we do in PrepareTransaction() during normal operation.
 		 */
 		PostPrepare_Twophase();

@@ -2098,8 +2099,8 @@ ProcessTwoPhaseBuffer(TransactionId xid,
 	}
 
 	/*
-	 * Examine subtransaction XIDs ... they should all follow main
-	 * XID, and they may force us to advance nextXid.
+	 * Examine subtransaction XIDs ... they should all follow main XID, and
+	 * they may force us to advance nextXid.
 	 */
 	subxids = (TransactionId *) (buf +
 								 MAXALIGN(sizeof(TwoPhaseFileHeader)) +

@@ -2175,8 +2176,9 @@ RecordTransactionCommitPrepared(TransactionId xid,
 	MyPgXact->delayChkpt = true;
 
 	/*
-	 * Emit the XLOG commit record. Note that we mark 2PC commits as potentially
-	 * having AccessExclusiveLocks since we don't know whether or not they do.
+	 * Emit the XLOG commit record. Note that we mark 2PC commits as
+	 * potentially having AccessExclusiveLocks since we don't know whether or
+	 * not they do.
 	 */
 	recptr = XactLogCommitRecord(committs,
 								 nchildren, children, nrels, rels,

@@ -2260,8 +2262,9 @@ RecordTransactionAbortPrepared(TransactionId xid,
 	START_CRIT_SECTION();
 
 	/*
-	 * Emit the XLOG commit record. Note that we mark 2PC aborts as potentially
-	 * having AccessExclusiveLocks since we don't know whether or not they do.
+	 * Emit the XLOG commit record. Note that we mark 2PC aborts as
+	 * potentially having AccessExclusiveLocks since we don't know whether or
+	 * not they do.
 	 */
 	recptr = XactLogAbortRecord(GetCurrentTimestamp(),
 								nchildren, children,

@@ -2315,8 +2318,8 @@ PrepareRedoAdd(char *buf, XLogRecPtr start_lsn, XLogRecPtr end_lsn)
  *
  * This creates a gxact struct and puts it into the active array.
  *
- * In redo, this struct is mainly used to track PREPARE/COMMIT entries
- * in shared memory. Hence, we only fill up the bare minimum contents here.
+ * In redo, this struct is mainly used to track PREPARE/COMMIT entries in
+ * shared memory. Hence, we only fill up the bare minimum contents here.
  * The gxact also gets marked with gxact->inredo set to true to indicate
  * that it got added in the redo phase
  */

src/backend/access/transam/xact.c

@@ -2641,7 +2641,8 @@ CleanupTransaction(void)
 	 * do abort cleanup processing
 	 */
 	AtCleanup_Portals();		/* now safe to release portal memory */
-	AtEOXact_Snapshot(false, true); /* and release the transaction's snapshots */
+	AtEOXact_Snapshot(false, true); /* and release the transaction's
+									 * snapshots */
 	CurrentResourceOwner = NULL;	/* and resource owner */
 
 	if (TopTransactionResourceOwner)

@@ -5646,8 +5647,8 @@ xact_redo(XLogReaderState *record)
 	else if (info == XLOG_XACT_PREPARE)
 	{
 		/*
-		 * Store xid and start/end pointers of the WAL record in
-		 * TwoPhaseState gxact entry.
+		 * Store xid and start/end pointers of the WAL record in TwoPhaseState
+		 * gxact entry.
 		 */
 		PrepareRedoAdd(XLogRecGetData(record),
 					   record->ReadRecPtr,

src/backend/access/transam/xlog.c

@@ -550,13 +550,12 @@ typedef struct XLogCtlInsert
 	bool		fullPageWrites;
 
 	/*
-	 * exclusiveBackupState indicates the state of an exclusive backup
-	 * (see comments of ExclusiveBackupState for more details).
-	 * nonExclusiveBackups is a counter indicating the number of streaming
-	 * base backups currently in progress. forcePageWrites is set to true
-	 * when either of these is non-zero. lastBackupStart is the latest
-	 * checkpoint redo location used as a starting point for an online
-	 * backup.
+	 * exclusiveBackupState indicates the state of an exclusive backup (see
+	 * comments of ExclusiveBackupState for more details). nonExclusiveBackups
+	 * is a counter indicating the number of streaming base backups currently
+	 * in progress. forcePageWrites is set to true when either of these is
+	 * non-zero. lastBackupStart is the latest checkpoint redo location used
+	 * as a starting point for an online backup.
 	 */
 	ExclusiveBackupState exclusiveBackupState;
 	int			nonExclusiveBackups;

@@ -1405,7 +1404,8 @@ checkXLogConsistency(XLogReaderState *record)
 		/*
 		 * If the block LSN is already ahead of this WAL record, we can't
-		 * expect contents to match. This can happen if recovery is restarted.
+		 * expect contents to match. This can happen if recovery is
+		 * restarted.
 		 */
 		if (PageGetLSN(replay_image_masked) > record->EndRecPtr)
 			continue;

@@ -4975,10 +4975,10 @@ BootStrapXLOG(void)
 	sysidentifier |= getpid() & 0xFFF;
 
 	/*
-	 * Generate a random nonce. This is used for authentication requests
-	 * that will fail because the user does not exist. The nonce is used to
-	 * create a genuine-looking password challenge for the non-existent user,
-	 * in lieu of an actual stored password.
+	 * Generate a random nonce. This is used for authentication requests that
+	 * will fail because the user does not exist. The nonce is used to create
+	 * a genuine-looking password challenge for the non-existent user, in lieu
+	 * of an actual stored password.
 	 */
 	if (!pg_backend_random(mock_auth_nonce, MOCK_AUTH_NONCE_LEN))
 		ereport(PANIC,

@@ -6352,8 +6352,8 @@ StartupXLOG(void)
 	xlogreader->system_identifier = ControlFile->system_identifier;
 
 	/*
-	 * Allocate pages dedicated to WAL consistency checks, those had better
-	 * be aligned.
+	 * Allocate pages dedicated to WAL consistency checks, those had better be
+	 * aligned.
 	 */
 	replay_image_masked = (char *) palloc(BLCKSZ);
 	master_image_masked = (char *) palloc(BLCKSZ);

@@ -6687,21 +6687,21 @@ StartupXLOG(void)
 	/*
 	 * Copy any missing timeline history files between 'now' and the recovery
-	 * target timeline from archive to pg_wal. While we don't need those
-	 * files ourselves - the history file of the recovery target timeline
-	 * covers all the previous timelines in the history too - a cascading
-	 * standby server might be interested in them. Or, if you archive the WAL
-	 * from this server to a different archive than the master, it'd be good
-	 * for all the history files to get archived there after failover, so that
-	 * you can use one of the old timelines as a PITR target. Timeline history
-	 * files are small, so it's better to copy them unnecessarily than not
-	 * copy them and regret later.
+	 * target timeline from archive to pg_wal. While we don't need those files
+	 * ourselves - the history file of the recovery target timeline covers all
+	 * the previous timelines in the history too - a cascading standby server
+	 * might be interested in them. Or, if you archive the WAL from this
+	 * server to a different archive than the master, it'd be good for all the
+	 * history files to get archived there after failover, so that you can use
+	 * one of the old timelines as a PITR target. Timeline history files are
+	 * small, so it's better to copy them unnecessarily than not copy them and
+	 * regret later.
 	 */
 	restoreTimeLineHistoryFiles(ThisTimeLineID, recoveryTargetTLI);
 
 	/*
-	 * Before running in recovery, scan pg_twophase and fill in its status
-	 * to be able to work on entries generated by redo. Doing a scan before
+	 * Before running in recovery, scan pg_twophase and fill in its status to
+	 * be able to work on entries generated by redo. Doing a scan before
 	 * taking any recovery action has the merit to discard any 2PC files that
 	 * are newer than the first record to replay, saving from any conflicts at
 	 * replay. This avoids as well any subsequent scans when doing recovery

@@ -9645,6 +9645,7 @@ xlog_redo(XLogReaderState *record)
 			MultiXactAdvanceOldest(checkPoint.oldestMulti,
 								   checkPoint.oldestMultiDB);
+
 			/*
 			 * No need to set oldestClogXid here as well; it'll be set when we
 			 * redo an xl_clog_truncate if it changed since initialization.

@@ -10238,8 +10239,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
 	if (exclusive)
 	{
 		/*
-		 * At first, mark that we're now starting an exclusive backup,
-		 * to ensure that there are no other sessions currently running
+		 * At first, mark that we're now starting an exclusive backup, to
+		 * ensure that there are no other sessions currently running
 		 * pg_start_backup() or pg_stop_backup().
 		 */
 		if (XLogCtl->Insert.exclusiveBackupState != EXCLUSIVE_BACKUP_NONE)

@@ -10505,8 +10506,9 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
 		{
 			/*
 			 * Check for existing backup label --- implies a backup is already
-			 * running. (XXX given that we checked exclusiveBackupState above,
-			 * maybe it would be OK to just unlink any such label file?)
+			 * running. (XXX given that we checked exclusiveBackupState
+			 * above, maybe it would be OK to just unlink any such label
+			 * file?)
 			 */
 			if (stat(BACKUP_LABEL_FILE, &stat_buf) != 0)
 			{

@@ -10727,8 +10729,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
 	if (exclusive)
 	{
 		/*
-		 * At first, mark that we're now stopping an exclusive backup,
-		 * to ensure that there are no other sessions currently running
+		 * At first, mark that we're now stopping an exclusive backup, to
+		 * ensure that there are no other sessions currently running
 		 * pg_start_backup() or pg_stop_backup().
 		 */
 		WALInsertLockAcquireExclusive();

@@ -10790,8 +10792,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
 		durable_unlink(BACKUP_LABEL_FILE, ERROR);
 
 		/*
-		 * Remove tablespace_map file if present, it is created only if there
-		 * are tablespaces.
+		 * Remove tablespace_map file if present, it is created only if
+		 * there are tablespaces.
 		 */
 		durable_unlink(TABLESPACE_MAP, DEBUG1);
 	}

@@ -10978,9 +10980,9 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
 	 * archived before returning. If archiving isn't enabled, the required WAL
 	 * needs to be transported via streaming replication (hopefully with
 	 * wal_keep_segments set high enough), or some more exotic mechanism like
-	 * polling and copying files from pg_wal with script. We have no
-	 * knowledge of those mechanisms, so it's up to the user to ensure that he
-	 * gets all the required WAL.
+	 * polling and copying files from pg_wal with script. We have no knowledge
+	 * of those mechanisms, so it's up to the user to ensure that he gets all
+	 * the required WAL.
 	 *
 	 * We wait until both the last WAL file filled during backup and the
 	 * history file have been archived, and assume that the alphabetic sorting

@@ -10990,8 +10992,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
 	 * We wait forever, since archive_command is supposed to work and we
 	 * assume the admin wanted his backup to work completely. If you don't
 	 * wish to wait, then either waitforarchive should be passed in as false,
-	 * or you can set statement_timeout. Also, some notices are
-	 * issued to clue in anyone who might be doing this interactively.
+	 * or you can set statement_timeout. Also, some notices are issued to
+	 * clue in anyone who might be doing this interactively.
 	 */
 	if (waitforarchive && XLogArchivingActive())
 	{

@@ -11717,8 +11719,8 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
 					 * little chance that the problem will just go away, but
 					 * PANIC is not good for availability either, especially
 					 * in hot standby mode. So, we treat that the same as
-					 * disconnection, and retry from archive/pg_wal again.
-					 * The WAL in the archive should be identical to what was
+					 * disconnection, and retry from archive/pg_wal again. The
+					 * WAL in the archive should be identical to what was
 					 * streamed, so it's unlikely that it helps, but one can
 					 * hope...
 					 */

@@ -11881,9 +11883,9 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
 					 * not open already. Also read the timeline history
 					 * file if we haven't initialized timeline history
 					 * yet; it should be streamed over and present in
-					 * pg_wal by now. Use XLOG_FROM_STREAM so that
-					 * source info is set correctly and XLogReceiptTime
-					 * isn't changed.
+					 * pg_wal by now. Use XLOG_FROM_STREAM so that source
+					 * info is set correctly and XLogReceiptTime isn't
+					 * changed.
 					 */
 					if (readFile < 0)
 					{

src/backend/access/transam/xlogfuncs.c

@@ -156,7 +156,8 @@ pg_stop_backup(PG_FUNCTION_ARGS)
 	 * Exclusive backups were typically started in a different connection, so
 	 * don't try to verify that status of backup is set to
 	 * SESSION_BACKUP_EXCLUSIVE in this function. Actual verification that an
-	 * exclusive backup is in fact running is handled inside do_pg_stop_backup.
+	 * exclusive backup is in fact running is handled inside
+	 * do_pg_stop_backup.
 	 */
 	stoppoint = do_pg_stop_backup(NULL, true, NULL);

src/backend/access/transam/xloginsert.c

@@ -507,10 +507,10 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
 	hdr_rdt.data = hdr_scratch;
 
 	/*
-	 * Enforce consistency checks for this record if user is looking for
-	 * it. Do this before at the beginning of this routine to give the
-	 * possibility for callers of XLogInsert() to pass XLR_CHECK_CONSISTENCY
-	 * directly for a record.
+	 * Enforce consistency checks for this record if user is looking for it.
+	 * Do this before at the beginning of this routine to give the possibility
+	 * for callers of XLogInsert() to pass XLR_CHECK_CONSISTENCY directly for
+	 * a record.
 	 */
 	if (wal_consistency_checking[rmid])
 		info |= XLR_CHECK_CONSISTENCY;

@@ -576,9 +576,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
 			bkpb.fork_flags |= BKPBLOCK_WILL_INIT;
 
 		/*
-		 * If needs_backup is true or WAL checking is enabled for
-		 * current resource manager, log a full-page write for the current
-		 * block.
+		 * If needs_backup is true or WAL checking is enabled for current
+		 * resource manager, log a full-page write for the current block.
 		 */
 		include_image = needs_backup || (info & XLR_CHECK_CONSISTENCY) != 0;

@@ -645,8 +644,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
 			bimg.bimg_info = (cbimg.hole_length == 0) ? 0 : BKPIMAGE_HAS_HOLE;
 
 			/*
-			 * If WAL consistency checking is enabled for the resource manager of
-			 * this WAL record, a full-page image is included in the record
+			 * If WAL consistency checking is enabled for the resource manager
+			 * of this WAL record, a full-page image is included in the record
 			 * for the block modified. During redo, the full-page is replayed
 			 * only if BKPIMAGE_APPLY is set.
 			 */

src/backend/access/transam/xlogreader.c

@@ -892,8 +892,8 @@ XLogFindNextRecord(XLogReaderState *state, XLogRecPtr RecPtr)
 	 * that, except when caller has explicitly specified the offset that
 	 * falls somewhere there or when we are skipping multi-page
 	 * continuation record. It doesn't matter though because
-	 * ReadPageInternal() is prepared to handle that and will read at least
-	 * short page-header worth of data
+	 * ReadPageInternal() is prepared to handle that and will read at
+	 * least short page-header worth of data
 	 */
 	targetRecOff = tmpRecPtr % XLOG_BLCKSZ;

@@ -805,11 +805,12 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa
Assert(state->readLen == 0 || state->readLen <= XLOG_BLCKSZ);
/*
* If the desired page is currently read in and valid, we have nothing to
* do.
*
* The caller should've ensured that it didn't previously advance readOff
* past the valid limit of this timeline, so it doesn't matter if the
* current TLI has since become historical.
*/
if (lastReadPage == wantPage &&
state->readLen != 0 &&
@@ -819,8 +820,8 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa
/*
* If we're reading from the current timeline, it hasn't become historical
* and the page we're reading is after the last page read, we can again
* just carry on. (Seeking backwards requires a check to make sure the
* older page isn't on a prior timeline).
*
* ThisTimeLineID might've become historical since we last looked, but the
* caller is required not to read past the flush limit it saw at the time
@@ -835,8 +836,8 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa
/*
* If we're just reading pages from a previously validated historical
* timeline and the timeline we're reading from is valid until the end of
* the current segment we can just keep reading.
*/
if (state->currTLIValidUntil != InvalidXLogRecPtr &&
state->currTLI != ThisTimeLineID &&
@@ -845,10 +846,10 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa
return;
/*
* If we reach this point we're either looking up a page for random
* access, the current timeline just became historical, or we're reading
* from a new segment containing a timeline switch. In all cases we need
* to determine the newest timeline on the segment.
*
* If it's the current timeline we can just keep reading from here unless
* we detect a timeline switch that makes the current timeline historical.
@@ -867,7 +868,10 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa
Assert(wantPage / XLogSegSize == endOfSegment / XLogSegSize);
/*
* Find the timeline of the last LSN on the segment containing
* wantPage.
*/
state->currTLI = tliOfPointInHistory(endOfSegment, timelineHistory);
state->currTLIValidUntil = tliSwitchPoint(state->currTLI, timelineHistory,
&state->nextTLI);
@@ -929,8 +933,8 @@ read_local_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr,
*
* We have to do it each time through the loop because if we're in
* recovery as a cascading standby, the current timeline might've
* become historical. We can't rely on RecoveryInProgress() because in
* a standby configuration like
*
* A => B => C
*
@@ -938,12 +942,13 @@ read_local_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr,
* timeline will change while we remain in recovery.
*
* We can't just keep reading from the old timeline as the last WAL
* archive in the timeline will get renamed to .partial by
* StartupXLOG().
*
* If that happens after our caller updated ThisTimeLineID but before
* we actually read the xlog page, we might still try to read from the
* old (now renamed) segment and fail. There's not much we can do
* about this, but it can only happen when we're a leaf of a cascading
* standby whose master gets promoted while we're decoding, so a
* one-off ERROR isn't too bad.
*/
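
The timeline logic above keeps deciding which page and segment a given LSN falls on. A minimal standalone sketch of that arithmetic, using assumed sizes (8 kB pages, 16 MB segments) rather than values read from a running server:

#include <stdint.h>
#include <stdio.h>

#define BLCKSZ 8192u                    /* assumed WAL page size */
#define SEGSZ  (16u * 1024 * 1024)      /* assumed WAL segment size */

int
main(void)
{
	uint64_t	lsn = 0x1A2B3C4DULL;               /* hypothetical record pointer */
	uint64_t	page_start = lsn - (lsn % BLCKSZ); /* start of the containing page */
	uint64_t	seg_no = lsn / SEGSZ;              /* segment holding the LSN */
	uint64_t	seg_end = (seg_no + 1) * SEGSZ;    /* first byte past that segment */

	printf("page start %llu, segment %llu, segment ends at %llu\n",
		   (unsigned long long) page_start,
		   (unsigned long long) seg_no,
		   (unsigned long long) seg_end);
	return 0;
}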

@@ -1125,8 +1125,10 @@ doDeletion(const ObjectAddress *object, int flags)
heap_drop_with_catalog(object->objectId);
}
/*
* for a sequence, in addition to dropping the heap, also
* delete pg_sequence tuple
*/
if (relKind == RELKIND_SEQUENCE)
DeleteSequenceTuple(object->objectId);
break;

@@ -1762,10 +1762,10 @@ heap_drop_with_catalog(Oid relid)
/*
* To drop a partition safely, we must grab exclusive lock on its parent,
* because another backend might be about to execute a query on the parent
* table. If it relies on previously cached partition descriptor, then it
* could attempt to access the just-dropped relation as its partition. We
* must therefore take a table lock strong enough to prevent all queries
* on the table from proceeding until we commit and send out a
* shared-cache-inval notice that will make them update their index lists.
*/
tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));

@@ -577,9 +577,9 @@ getOwnedSequences(Oid relid, AttrNumber attnum)
Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup);
/*
* We assume any auto or internal dependency of a sequence on a column
* must be what we are looking for. (We need the relkind test because
* indexes can also have auto dependencies on columns.)
*/
if (deprec->classid == RelationRelationId &&
deprec->objsubid == 0 &&

@@ -207,7 +207,8 @@ static List *
textarray_to_stringlist(ArrayType *textarray)
{
Datum *elems;
int nelems,
i;
List *res = NIL;
deconstruct_array(textarray,
@@ -248,8 +249,8 @@ SetSubscriptionRelState(Oid subid, Oid relid, char state,
ObjectIdGetDatum(subid));
/*
* If the record for given table does not exist yet create new record,
* otherwise update the existing one.
*/
if (!HeapTupleIsValid(tup))
{

@@ -2134,7 +2134,8 @@ dbase_redo(XLogReaderState *record)
* which can happen in some cases.
*
* This will lock out walsenders trying to connect to db-specific
* slots for logical decoding too, so it's safe for us to drop
* slots.
*/
LockSharedObjectForSession(DatabaseRelationId, xlrec->db_id, 0, AccessExclusiveLock);
ResolveRecoveryConflictWithDatabase(xlrec->db_id);

@@ -328,6 +328,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
case OBJECT_FUNCTION:
{
ObjectWithArgs *owa = castNode(ObjectWithArgs, object);

if (!schema_does_not_exist_skipping(owa->objname, &msg, &name) &&
!type_in_list_does_not_exist_skipping(owa->objargs, &msg, &name))
{
@@ -340,6 +341,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
case OBJECT_AGGREGATE:
{
ObjectWithArgs *owa = castNode(ObjectWithArgs, object);

if (!schema_does_not_exist_skipping(owa->objname, &msg, &name) &&
!type_in_list_does_not_exist_skipping(owa->objargs, &msg, &name))
{
@@ -352,6 +354,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
case OBJECT_OPERATOR:
{
ObjectWithArgs *owa = castNode(ObjectWithArgs, object);

if (!schema_does_not_exist_skipping(owa->objname, &msg, &name) &&
!type_in_list_does_not_exist_skipping(owa->objargs, &msg, &name))
{

@@ -878,8 +878,8 @@ CreateForeignServer(CreateForeignServerStmt *stmt)
ownerId = GetUserId();
/*
* Check that there is no other foreign server by this name. Do nothing if
* IF NOT EXISTS was enforced.
*/
if (GetForeignServerByName(stmt->servername, true) != NULL)
{

@@ -358,6 +358,7 @@ AlterPublicationTables(AlterPublicationStmt *stmt, Relation rel,
{
Relation oldrel = heap_open(oldrelid,
ShareUpdateExclusiveLock);

delrels = lappend(delrels, oldrel);
}
}
@@ -366,8 +367,8 @@ AlterPublicationTables(AlterPublicationStmt *stmt, Relation rel,
PublicationDropTables(pubid, delrels, true);
/*
* Don't bother calculating the difference for adding, we'll catch and
* skip existing ones when doing catalog update.
*/
PublicationAddTables(pubid, rels, true, stmt);

@@ -200,8 +200,8 @@ parse_subscription_options(List *options, bool *connect, bool *enabled_given,
}
/*
* Do additional checking for disallowed combination when slot_name = NONE
* was used.
*/
if (slot_name && *slot_name_given && !*slot_name)
{
@@ -431,9 +431,9 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel)
(errmsg("synchronized table states")));
/*
* If requested, create permanent slot for the subscription. We
* won't use the initial snapshot for anything, so no need to
* export it.
*/
if (create_slot)
{
@@ -505,24 +505,24 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data)
subrel_states = GetSubscriptionRelations(sub->oid);
/*
* Build qsorted array of local table oids for faster lookup. This can
* potentially contain all tables in the database so speed of lookup is
* important.
*/
subrel_local_oids = palloc(list_length(subrel_states) * sizeof(Oid));
off = 0;
foreach(lc, subrel_states)
{
SubscriptionRelState *relstate = (SubscriptionRelState *) lfirst(lc);

subrel_local_oids[off++] = relstate->relid;
}
qsort(subrel_local_oids, list_length(subrel_states),
sizeof(Oid), oid_cmp);
/*
* Walk over the remote tables and try to match them to locally known
* tables. If the table is not known locally create a new state for it.
*
* Also builds array of local oids of remote tables for the next step.
*/
@@ -556,8 +556,8 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data)
}
/*
* Next remove state for tables we should not care about anymore using the
* data we collected above
*/
qsort(pubrel_local_oids, list_length(pubrel_names),
sizeof(Oid), oid_cmp);
@@ -796,9 +796,8 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
StringInfoData cmd;
/*
* Lock pg_subscription with AccessExclusiveLock to ensure that the
* launcher doesn't restart new worker during dropping the subscription
*/
rel = heap_open(SubscriptionRelationId, AccessExclusiveLock);
@@ -833,8 +832,8 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
InvokeObjectDropHook(SubscriptionRelationId, subid, 0);
/*
* Lock the subscription so nobody else can do anything with it (including
* the replication workers).
*/
LockSharedObject(SubscriptionRelationId, subid, 0, AccessExclusiveLock);
@@ -895,7 +894,10 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
if (originid != InvalidRepOriginId)
replorigin_drop(originid);
/*
* If there is no slot associated with the subscription, we can finish
* here.
*/
if (!slotname)
{
heap_close(rel, NoLock);
@@ -903,8 +905,8 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
}
/*
* Otherwise drop the replication slot at the publisher node using the
* replication connection.
*/
load_file("libpqwalreceiver", false);
@@ -923,6 +925,7 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
PG_TRY();
{
WalRcvExecResult *res;

res = walrcv_exec(wrconn, cmd.data, 0, NULL);
if (res->status != WALRCV_OK_COMMAND)
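
AlterSubscription_refresh above sorts the local table OIDs once so that later membership checks stay cheap. A standalone sketch of that qsort-then-bsearch pattern; the Oid typedef and oid_cmp here are stand-ins, not the server's definitions:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint32_t Oid;

static int
oid_cmp(const void *a, const void *b)
{
	Oid			oa = *(const Oid *) a;
	Oid			ob = *(const Oid *) b;

	return (oa > ob) - (oa < ob);
}

int
main(void)
{
	Oid			local[] = {40123, 16384, 24576, 18001};	/* made-up relation OIDs */
	size_t		n = sizeof(local) / sizeof(local[0]);
	Oid			key = 24576;

	qsort(local, n, sizeof(Oid), oid_cmp);	/* sort once ... */
	if (bsearch(&key, local, n, sizeof(Oid), oid_cmp) != NULL)	/* ... then O(log n) lookups */
		printf("%u is already known locally\n", (unsigned) key);
	return 0;
}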

@@ -643,8 +643,8 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId,
descriptor->tdhasoid = (localHasOids || parentOidCount > 0);
/*
* If a partitioned table doesn't have the system OID column, then none of
* its partitions should have it.
*/
if (stmt->partbound && parentOidCount == 0 && localHasOids)
ereport(ERROR,
@@ -1112,9 +1112,9 @@ RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid, Oid oldRelOid,
}
/*
* Similarly, if we previously locked some other partition's heap, and the
* name we're looking up no longer refers to that relation, release the
* now-useless lock.
*/
if (relOid != oldRelOid && OidIsValid(state->partParentOid))
{
@@ -5589,8 +5589,8 @@ static void
ATPrepDropNotNull(Relation rel, bool recurse, bool recursing)
{
/*
* If the parent is a partitioned table, like check constraints, we do not
* support removing the NOT NULL while partitions exist.
*/
if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
{

@@ -3412,7 +3412,8 @@ typedef struct AfterTriggersData
AfterTriggerEventList events; /* deferred-event list */
int query_depth; /* current query list index */
AfterTriggerEventList *query_stack; /* events pending from each query */
Tuplestorestate **fdw_tuplestores; /* foreign tuples for one row from
* each query */
Tuplestorestate **old_tuplestores; /* all old tuples from each query */
Tuplestorestate **new_tuplestores; /* all new tuples from each query */
int maxquerydepth; /* allocated len of above array */

@@ -415,6 +415,7 @@ ExecSupportsMarkRestore(Path *pathnode)
case T_CustomScan:
{
CustomPath *customPath = castNode(CustomPath, pathnode);

if (customPath->flags & CUSTOMPATH_SUPPORT_MARK_RESTORE)
return true;
return false;

@@ -608,9 +608,9 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,
/*
* Also store the per-worker detail.
*
* Worker instrumentation should be allocated in the same context as the
* regular instrumentation information, which is the per-query context.
* Switch into per-query memory context.
*/
oldcontext = MemoryContextSwitchTo(planstate->state->es_query_cxt);
ibytes = mul_size(instrumentation->num_workers, sizeof(Instrumentation));

@@ -152,8 +152,8 @@ retry:
snap.xmin : snap.xmax;
/*
* If the tuple is locked, wait for locking transaction to finish and
* retry.
*/
if (TransactionIdIsValid(xwait))
{
@@ -299,8 +299,8 @@ retry:
snap.xmin : snap.xmax;
/*
* If the tuple is locked, wait for locking transaction to finish and
* retry.
*/
if (TransactionIdIsValid(xwait))
{

@@ -129,8 +129,8 @@ ExecInitAppend(Append *node, EState *estate, int eflags)
Assert(!(eflags & EXEC_FLAG_MARK));
/*
* Lock the non-leaf tables in the partition tree controlled by this node.
* It's a no-op for non-partitioned parent tables.
*/
ExecLockNonLeafAppendTables(node->partitioned_rels, estate);

@@ -506,8 +506,9 @@ BitmapAdjustPrefetchIterator(BitmapHeapScanState *node,
* In case of shared mode, we can not ensure that the current
* blockno of the main iterator and that of the prefetch iterator
* are same. It's possible that whatever blockno we are
* prefetching will be processed by another process. Therefore,
* we don't validate the blockno here as we do in non-parallel
* case.
*/
if (prefetch_iterator)
tbm_shared_iterate(prefetch_iterator);

@@ -230,17 +230,17 @@ ExecGatherMerge(GatherMergeState *node)
ResetExprContext(econtext);
/*
* Get next tuple, either from one of our workers, or by running the plan
* ourselves.
*/
slot = gather_merge_getnext(node);
if (TupIsNull(slot))
return NULL;
/*
* form the result tuple using ExecProject(), and return it --- unless the
* projection produces an empty set, in which case we must loop back
* around for another tuple
*/
econtext->ecxt_outertuple = slot;
return ExecProject(node->ps.ps_ProjInfo);
@@ -534,8 +534,8 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
HeapTuple tup = NULL;
/*
* If we're being asked to generate a tuple from the leader, then we just
* call ExecProcNode as normal to produce one.
*/
if (gm_state->nreaders == reader)
{
@@ -582,8 +582,8 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
&tuple_buffer->done));
/*
* Attempt to read more tuples in nowait mode and store them in the
* tuple array.
*/
if (HeapTupleIsValid(tup))
form_tuple_array(gm_state, reader);

@@ -72,8 +72,8 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags)
Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
/*
* Lock the non-leaf tables in the partition tree controlled by this node.
* It's a no-op for non-partitioned parent tables.
*/
ExecLockNonLeafAppendTables(node->partitioned_rels, estate);

@@ -1815,11 +1815,11 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
}
/*
* Build WITH CHECK OPTION constraints for each leaf partition rel. Note
* that we didn't build the withCheckOptionList for each partition within
* the planner, but simple translation of the varattnos for each partition
* will suffice. This only occurs for the INSERT case; UPDATE/DELETE
* cases are handled above.
*/
if (node->withCheckOptionLists != NIL && mtstate->mt_num_partitions > 0)
{

@@ -443,8 +443,8 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext)
ExecClearTuple(tstate->ss.ss_ScanTupleSlot);
/*
* Obtain the value of each column for this row, installing them into
* the slot; then add the tuple to the tuplestore.
*/
for (colno = 0; colno < natts; colno++)
{

@@ -757,10 +757,10 @@ CheckPWChallengeAuth(Port *port, char **logdetail)
* If the user does not exist, or has no password or it's expired, we
* still go through the motions of authentication, to avoid revealing to
* the client that the user didn't exist. If 'md5' is allowed, we choose
* whether to use 'md5' or 'scram-sha-256' authentication based on current
* password_encryption setting. The idea is that most genuine users
* probably have a password of that type, and if we pretend that this user
* had a password of that type, too, it "blends in" best.
*/
if (!shadow_pass)
pwtype = Password_encryption;
@@ -770,8 +770,8 @@ CheckPWChallengeAuth(Port *port, char **logdetail)
/*
* If 'md5' authentication is allowed, decide whether to perform 'md5' or
* 'scram-sha-256' authentication based on the type of password the user
* has. If it's an MD5 hash, we must do MD5 authentication, and if it's a
* SCRAM verifier, we must do SCRAM authentication.
*
* If MD5 authentication is not allowed, always use SCRAM. If the user
* had an MD5 password, CheckSCRAMAuth() will fail.
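
A standalone sketch of the decision the CheckPWChallengeAuth comments describe: pick md5 or scram-sha-256 from the stored (or pretended) password type. The enum and function names below are illustrative only, not the server's:

#include <stdio.h>

typedef enum {PW_PLAINTEXT, PW_MD5, PW_SCRAM} PwType;

static const char *
choose_challenge(int md5_allowed, PwType stored)
{
	if (!md5_allowed)
		return "scram-sha-256";		/* md5 not allowed: always SCRAM */
	return (stored == PW_MD5) ? "md5" : "scram-sha-256";
}

int
main(void)
{
	printf("%s\n", choose_challenge(1, PW_MD5));	/* md5 */
	printf("%s\n", choose_challenge(1, PW_SCRAM));	/* scram-sha-256 */
	printf("%s\n", choose_challenge(0, PW_MD5));	/* scram-sha-256; such a login later fails */
	return 0;
}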

@@ -122,8 +122,8 @@ encrypt_password(PasswordType target_type, const char *role,
if (guessed_type != PASSWORD_TYPE_PLAINTEXT)
{
/*
* Cannot convert an already-encrypted password from one format to
* another, so return it as it is.
*/
return pstrdup(password);
}
@@ -274,6 +274,7 @@ plain_crypt_verify(const char *role, const char *shadow_pass,
break;
case PASSWORD_TYPE_PLAINTEXT:

/*
* We never store passwords in plaintext, so this shouldn't
* happen.

@@ -617,7 +617,10 @@ check_db(const char *dbname, const char *role, Oid roleid, List *tokens)
tok = lfirst(cell);
if (am_walsender && !am_db_walsender)
{
/*
* physical replication walsender connections can only match
* replication keyword
*/
if (token_is_keyword(tok, "replication"))
return true;
}

@@ -1129,7 +1129,8 @@ exprSetCollation(Node *expr, Oid collation)
Assert(!OidIsValid(collation)); /* result is always boolean */
break;
case T_NextValueExpr:
Assert(!OidIsValid(collation)); /* result is always an integer
* type */
break;
default:
elog(ERROR, "unrecognized node type: %d", (int) nodeTag(expr));

@@ -905,8 +905,8 @@ tbm_prepare_shared_iterate(TIDBitmap *tbm)
/*
* For every shared iterator, referring to pagetable and iterator array,
* increase the refcount by 1 so that while freeing the shared iterator we
* don't free pagetable and iterator array until its refcount becomes 0.
*/
if (ptbase != NULL)
pg_atomic_add_fetch_u32(&ptbase->refcount, 1);
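
A generic illustration of the refcounting rule this comment describes: each shared iterator takes a reference, and the shared structure is freed only when the count reaches zero. C11 atomics stand in here for the server's pg_atomic_* API:

#include <stdatomic.h>
#include <stdlib.h>

typedef struct SharedTable
{
	atomic_uint	refcount;
	int			payload;			/* stand-in for the page table contents */
} SharedTable;

static void
table_ref(SharedTable *t)
{
	atomic_fetch_add(&t->refcount, 1);
}

static void
table_unref(SharedTable *t)
{
	if (atomic_fetch_sub(&t->refcount, 1) == 1)	/* we held the last reference */
		free(t);
}

int
main(void)
{
	SharedTable *t = malloc(sizeof(SharedTable));

	atomic_init(&t->refcount, 1);	/* creator's reference */
	table_ref(t);					/* an iterator attaches */
	table_unref(t);					/* the iterator detaches */
	table_unref(t);					/* creator detaches; table is freed */
	return 0;
}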

@@ -648,6 +648,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
return;
case RTE_NAMEDTUPLESTORE:

/*
* tuplestore cannot be shared, at least without more
* infrastructure to support that.

@@ -664,8 +664,8 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
{
/*
* For index only scans compute workers based on number of index pages
* fetched; the number of heap pages we fetch might be so small as to
* effectively rule out parallelism, which we don't want to do.
*/
if (indexonly)
rand_heap_pages = -1;

@@ -1073,8 +1073,8 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel,
true);
/*
* if, after costing the path, we find that it's not worth using
* parallel workers, just free it.
*/
if (ipath->path.parallel_workers > 0)
add_partial_path(rel, (Path *) ipath);

@@ -1102,10 +1102,10 @@ inheritance_planner(PlannerInfo *root)
/*
* If the parent RTE is a partitioned table, we should use that as the
* nominal relation, because the RTEs added for partitioned tables
* (including the root parent) as child members of the inheritance set do
* not appear anywhere else in the plan. The situation is exactly the
* opposite in the case of non-partitioned inheritance parent as described
* below.
*/
parent_rte = rt_fetch(parentRTindex, root->parse->rtable);
if (parent_rte->relkind == RELKIND_PARTITIONED_TABLE)
@@ -1278,9 +1278,9 @@ inheritance_planner(PlannerInfo *root)
* is used elsewhere in the plan, so using the original parent RTE
* would give rise to confusing use of multiple aliases in EXPLAIN
* output for what the user will think is the "same" table. OTOH,
* it's not a problem in the partitioned inheritance case, because the
* duplicate child RTE added for the parent does not appear anywhere
* else in the plan tree.
*/
if (nominalRelation < 0)
nominalRelation = appinfo->child_relid;
@@ -4336,8 +4336,8 @@ consider_groupingsets_paths(PlannerInfo *root,
/*
* We treat this as a knapsack problem: the knapsack capacity
* represents work_mem, the item weights are the estimated memory
* usage of the hashtables needed to implement a single rollup,
* and we really ought to use the cost saving as the item value;
* however, currently the costs assigned to sort nodes don't
* reflect the comparison costs well, and so we treat all items as
* of equal value (each rollup we hash instead saves us one sort).
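
To make the knapsack analogy concrete, here is a tiny generic 0/1 knapsack in the equal-value form the comment describes (maximize the number of rollups whose hashtables fit the capacity). The weights and capacity are made-up numbers, not planner estimates:

#include <stdio.h>

#define CAP 10						/* stand-in for work_mem */

int
main(void)
{
	int			weight[] = {6, 4, 3, 5};	/* per-rollup hashtable size estimates */
	int			nitems = 4;
	int			best[CAP + 1] = {0};		/* best[c] = max rollups fitting in capacity c */

	for (int i = 0; i < nitems; i++)
		for (int c = CAP; c >= weight[i]; c--)
			if (best[c - weight[i]] + 1 > best[c])
				best[c] = best[c - weight[i]] + 1;

	printf("can hash %d rollups within capacity %d\n", best[CAP], CAP);
	return 0;
}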

@@ -883,8 +883,9 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
* If the main target relation is a partitioned table, the
* following list contains the RT indexes of partitioned child
* relations including the root, which are not included in the
* above list. We also keep RT indexes of the roots
* separately to be identitied as such during the executor
* initialization.
*/
if (splan->partitioned_rels != NIL)
{

@@ -1555,9 +1555,10 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti)
newrc->waitPolicy = oldrc->waitPolicy;
/*
* We mark RowMarks for partitioned child tables as parent
* RowMarks so that the executor ignores them (except their
* existence means that the child tables be locked using
* appropriate mode).
*/
newrc->isParent = (childrte->relkind == RELKIND_PARTITIONED_TABLE);
@@ -1593,8 +1594,8 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti)
* parent RT index to the list of RT indexes of its partitioned child
* tables. When creating an Append or a ModifyTable path for the parent,
* we copy the child RT index list verbatim to the path so that it could
* be carried over to the executor so that the latter could identify the
* partitioned child tables.
*/
if (partitioned_child_rels != NIL)
{

@@ -149,9 +149,9 @@ build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent)
/*
* Pass top parent's relids down the inheritance hierarchy. If the parent
* has top_parent_relids set, it's a direct or an indirect child of the
* top parent indicated by top_parent_relids. By extension this child is
* also an indirect child of that parent.
*/
if (parent)
{

@@ -792,9 +792,8 @@ transformRangeTableFunc(ParseState *pstate, RangeTableFunc *rtf)
makeString(pstrdup(rawc->colname)));
/*
* Determine the type and typmod for the new column. FOR ORDINALITY
* columns are INTEGER per spec; the others are user-specified.
*/
if (rawc->for_ordinality)
{
@@ -1050,7 +1049,6 @@ transformRangeTableSample(ParseState *pstate, RangeTableSample *rts)
static RangeTblEntry *
getRTEForSpecialRelationTypes(ParseState *pstate, RangeVar *rv)
{
CommonTableExpr *cte;
Index levelsup;
RangeTblEntry *rte = NULL;

@@ -1164,6 +1164,7 @@ parserOpenTable(ParseState *pstate, const RangeVar *relation, int lockmode)
*/
if (get_visible_ENR_metadata(pstate->p_queryEnv, relation->relname))
rel = NULL;

/*
* An unqualified name might have been meant as a reference to
* some not-yet-in-scope CTE. The bare "does not exist" message

@@ -378,12 +378,12 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
* used by pg_dump. Else, generate a name.
*
* Although we use ChooseRelationName, it's not guaranteed that the
* selected sequence name won't conflict; given sufficiently long field
* names, two different serial columns in the same table could be assigned
* the same sequence name, and we'd not notice since we aren't creating
* the sequence quite yet. In practice this seems quite unlikely to be a
* problem, especially since few people would need two serial columns in
* one table.
*/
foreach(option, seqoptions)
@@ -403,6 +403,7 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
if (nameEl)
{
RangeVar *rv = makeRangeVarFromNameList(castNode(List, nameEl->arg));

snamespace = rv->schemaname;
sname = rv->relname;
seqoptions = list_delete_ptr(seqoptions, nameEl);
@@ -429,14 +430,14 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
cxt->relation->relname, column->colname)));
/*
* Build a CREATE SEQUENCE command to create the sequence object, and add
* it to the list of things to be done before this CREATE/ALTER TABLE.
*/
seqstmt = makeNode(CreateSeqStmt);
seqstmt->for_identity = for_identity;
seqstmt->sequence = makeRangeVar(snamespace, sname, -1);
seqstmt->options = seqoptions;
/*
* If a sequence data type was specified, add it to the options. Prepend
* to the list rather than append; in case a user supplied their own AS
@@ -448,11 +449,11 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
seqstmt->options);
/*
* If this is ALTER ADD COLUMN, make sure the sequence will be owned by
* the table's owner. The current user might be someone else (perhaps a
* superuser, or someone who's only a member of the owning role), but the
* SEQUENCE OWNED BY mechanisms will bleat unless table and sequence have
* exactly the same owning role.
*/
if (cxt->rel)
seqstmt->ownerId = cxt->rel->rd_rel->relowner;
@@ -462,9 +463,9 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
cxt->blist = lappend(cxt->blist, seqstmt);
/*
* Build an ALTER SEQUENCE ... OWNED BY command to mark the sequence as
* owned by this column, and add it to the list of things to be done after
* this CREATE/ALTER TABLE.
*/
altseqstmt = makeNode(AlterSeqStmt);
altseqstmt->sequence = makeRangeVar(snamespace, sname, -1);
@@ -2766,7 +2767,11 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
* change the data type of the sequence.
*/
attnum = get_attnum(relid, cmd->name);

/*
* if attribute not found, something will error about it
* later
*/
if (attnum != InvalidAttrNumber && get_attidentity(relid, attnum))
{
Oid seq_relid = getOwnedSequence(relid, attnum);
@@ -2796,7 +2801,11 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
cmd->def = (Node *) newdef;
attnum = get_attnum(relid, cmd->name);

/*
* if attribute not found, something will error about it
* later
*/
if (attnum != InvalidAttrNumber)
generateSerialExtraStmts(&cxt, newdef,
get_atttype(relid, attnum),
@@ -2854,8 +2863,11 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
cxt.alist = lappend(cxt.alist, seqstmt);
}
}

/*
* If column was not found or was not an identity column,
* we just let the ALTER TABLE command error out later.
*/
cmd->def = (Node *) newdef;
newcmds = lappend(newcmds, cmd);

@@ -310,8 +310,8 @@ BackgroundWriterMain(void)
* check whether there has been any WAL inserted since the last time
* we've logged a running xacts.
*
* We do this logging in the bgwriter as it is the only process that
* is run regularly and returns to its mainloop all the time. E.g.
* Checkpointer, when active, is barely ever in its mainloop and thus
* makes it hard to log regularly.
*/

@@ -4061,6 +4061,7 @@ pgstat_get_backend_desc(BackendType backendType)
return backendDesc;
}

/* ------------------------------------------------------------
* Local support functions follow
* ------------------------------------------------------------

@@ -5149,11 +5149,12 @@ RandomCancelKey(int32 *cancel_key)
#ifdef HAVE_STRONG_RANDOM
return pg_strong_random((char *) cancel_key, sizeof(int32));
#else

/*
* If built with --disable-strong-random, use plain old erand48.
*
* We cannot use pg_backend_random() in postmaster, because it stores its
* state in shared memory.
*/
static unsigned short seed[3];
@@ -5348,10 +5349,10 @@ StartAutovacuumWorker(void)
if (canAcceptConnections() == CAC_OK)
{
/*
* Compute the cancel key that will be assigned to this session. We
* probably don't need cancel keys for autovac workers, but we'd
* better have something random in the field to prevent unfriendly
* people from sending cancels to them.
*/
if (!RandomCancelKey(&MyCancelKey))
{
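
A standalone sketch of the erand48-family fallback described above for producing a 32-bit cancel key when no strong random source is available. Seeding once from the clock and calling plain jrand48 are simplifications here; the server seeds and wraps this differently:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int
main(void)
{
	static unsigned short seed[3];	/* 48-bit PRNG state, kept across calls */
	uint64_t	now = (uint64_t) time(NULL);
	int32_t		cancel_key;

	seed[0] = (unsigned short) now;	/* crude one-time seeding from the clock */
	seed[1] = (unsigned short) (now >> 16);
	seed[2] = (unsigned short) (now >> 32);

	cancel_key = (int32_t) jrand48(seed);	/* 32 pseudo-random bits */
	printf("cancel key: %d\n", cancel_key);
	return 0;
}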

@@ -106,15 +106,15 @@ static const char *excludeDirContents[] =
{
/*
* Skip temporary statistics files. PG_STAT_TMP_DIR must be skipped even
* when stats_temp_directory is set because PGSS_TEXT_FILE is always
* created there.
*/
PG_STAT_TMP_DIR,
/*
* It is generally not useful to backup the contents of this directory
* even if the intention is to restore to another master. See backup.sgml
* for a more detailed description.
*/
"pg_replslot",
@@ -404,8 +404,8 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
qsort(walFiles, nWalFiles, sizeof(char *), compareWalFileNames);
/*
* There must be at least one xlog file in the pg_wal directory, since
* we are doing backup-including-xlog.
*/
if (nWalFiles < 1)
ereport(ERROR,
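
perform_base_backup above sorts the collected WAL file names so the segments can be sent in order. The server's compareWalFileNames does roughly this (it actually skips the timeline part of each name); a standalone sketch with made-up names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int
compare_names(const void *a, const void *b)
{
	return strcmp(*(const char *const *) a, *(const char *const *) b);
}

int
main(void)
{
	const char *walFiles[] = {
		"000000010000000000000003",
		"000000010000000000000001",
		"000000010000000000000002",
	};
	int			nWalFiles = 3;

	qsort(walFiles, nWalFiles, sizeof(char *), compare_names);
	for (int i = 0; i < nWalFiles; i++)
		printf("%s\n", walFiles[i]);	/* ascending = WAL order */
	return 0;
}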

@@ -435,8 +435,8 @@ libpqrcv_endstreaming(WalReceiverConn *conn, TimeLineID *next_tli)
* next timeline's ID, or just CommandComplete if the server was shut
* down.
*
* If we had not yet received CopyDone from the backend, PGRES_COPY_OUT is
* also possible in case we aborted the copy in mid-stream.
*/
res = PQgetResult(conn->streamConn);
if (PQresultStatus(res) == PGRES_TUPLES_OK)
@@ -545,9 +545,9 @@ libpqrcv_PQexec(PGconn *streamConn, const char *query)
/*
* PQexec() silently discards any prior query results on the connection.
* This is not required for this function as it's expected that the caller
* (which is this library in all cases) will behave correctly and we don't
* have to be backwards compatible with old libpq.
*/
/*

@@ -233,6 +233,7 @@ logicalrep_worker_find(Oid subid, Oid relid, bool only_running)
for (i = 0; i < max_logical_replication_workers; i++)
{
LogicalRepWorker *w = &LogicalRepCtx->workers[i];

if (w->in_use && w->subid == subid && w->relid == relid &&
(!only_running || w->proc))
{
@@ -660,6 +661,7 @@ logicalrep_sync_worker_count(Oid subid)
for (i = 0; i < max_logical_replication_workers; i++)
{
LogicalRepWorker *w = &LogicalRepCtx->workers[i];

if (w->subid == subid && OidIsValid(w->relid))
res++;
}
@@ -864,9 +866,9 @@ ApplyLauncherMain(Datum main_arg)
{
/*
* The wait in previous cycle was interrupted in less than
* wal_retrieve_retry_interval since last worker was started, this
* usually means crash of the worker, so we should retry in
* wal_retrieve_retry_interval again.
*/
wait_time = wal_retrieve_retry_interval;
}
@@ -992,7 +994,10 @@ pg_stat_get_subscription(PG_FUNCTION_ARGS)
tuplestore_putvalues(tupstore, tupdesc, values, nulls);
/*
* If only a single subscription was requested, and we found it,
* break.
*/
if (OidIsValid(subid))
break;
}

@@ -328,17 +328,19 @@ pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool confirm, bool bin
if (ctx->reader->EndRecPtr != InvalidXLogRecPtr && confirm)
{
LogicalConfirmReceivedLocation(ctx->reader->EndRecPtr);

/*
* If only the confirmed_flush_lsn has changed the slot won't get
* marked as dirty by the above. Callers on the walsender
* interface are expected to keep track of their own progress and
* don't need it written out. But SQL-interface users cannot
* specify their own start positions and it's harder for them to
* keep track of their progress, so we should make more of an
* effort to save it for them.
*
* Dirty the slot so it's written out at the next checkpoint.
* We'll still lose its position on crash, as documented, but it's
* better than always losing the position even on clean restart.
*/
ReplicationSlotMarkDirty();
}

@@ -246,6 +246,7 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
TupleDesc desc;
LogicalRepRelation *remoterel;
MemoryContext oldctx;

remoterel = &entry->remoterel;
/* Try to find and lock the relation by name. */
@@ -265,8 +266,8 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
/*
* Build the mapping of local attribute numbers to remote attribute
* numbers and validate that we don't miss any replicated columns as
* that would result in potentially unwanted data loss.
*/
desc = RelationGetDescr(entry->localrel);
oldctx = MemoryContextSwitchTo(LogicalRepRelMapContext);
@@ -278,6 +279,7 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
{
int attnum = logicalrep_rel_att_by_name(remoterel,
NameStr(desc->attrs[i]->attname));

entry->attrmap[i] = attnum;
if (attnum >= 0)
found++;
@@ -299,8 +301,8 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
* but in the opposite scenario it will.
*
* Don't throw any error here just mark the relation entry as not
* updatable, as replica identity is only for updates and deletes but
* inserts can be replicated even without it.
*/
entry->updatable = true;
idkey = RelationGetIndexAttrBitmap(entry->localrel,
@@ -310,6 +312,7 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
{
idkey = RelationGetIndexAttrBitmap(entry->localrel,
INDEX_ATTR_BITMAP_PRIMARY_KEY);

/*
* If no replica identity index and no PK, the published table
* must have replica identity FULL.

@@ -986,6 +986,7 @@ SnapBuildCommitTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid,
if (NormalTransactionIdFollows(subxid, xmax))
xmax = subxid;
}

/*
 * If we're forcing timetravel we also need visibility information
 * about subtransaction, so keep track of subtransaction's state, even
@@ -1031,8 +1032,8 @@ SnapBuildCommitTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid,
/*
 * Adjust xmax of the snapshot builder, we only do that for committed,
 * catalog modifying, transactions, everything else isn't interesting for
 * us since we'll never look at the respective rows.
 */
if (needs_timetravel &&
(!TransactionIdIsValid(builder->xmax) ||
@@ -1130,8 +1131,8 @@ SnapBuildProcessRunningXacts(SnapBuild *builder, XLogRecPtr lsn, xl_running_xact
running->oldestRunningXid);
/*
 * Increase shared memory limits, so vacuum can work on tuples we
 * prevented from being pruned till now.
 */
LogicalIncreaseXminForSlot(lsn, running->oldestRunningXid);
@@ -1271,6 +1272,7 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
/* there won't be any state to cleanup */
return false;
}

/*
 * c) transition from START to BUILDING_SNAPSHOT.
 *
@@ -1308,6 +1310,7 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
SnapBuildWaitSnapshot(running, running->nextXid);
}

/*
 * c) transition from BUILDING_SNAPSHOT to FULL_SNAPSHOT.
 *
@@ -1331,6 +1334,7 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
SnapBuildWaitSnapshot(running, running->nextXid);
}

/*
 * c) transition from FULL_SNAPSHOT to CONSISTENT.
 *
@@ -1383,9 +1387,9 @@ SnapBuildWaitSnapshot(xl_running_xacts *running, TransactionId cutoff)
TransactionId xid = running->xids[off];
/*
 * Upper layers should prevent that we ever need to wait on ourselves.
 * Check anyway, since failing to do so would either result in an
 * endless wait or an Assert() failure.
 */
if (TransactionIdIsCurrentTransactionId(xid))
elog(ERROR, "waiting for ourselves");
@@ -1864,8 +1868,9 @@ CheckPointSnapBuild(void)
char path[MAXPGPATH + 21];
/*
 * We start off with a minimum of the last redo pointer. No new
 * replication slot will start before that, so that's a safe upper bound
 * for removal.
 */
redo = GetRedoRecPtr();

@@ -113,7 +113,8 @@ StringInfo copybuf = NULL;
/*
 * Exit routine for synchronization worker.
 */
static void
pg_attribute_noreturn()
finish_sync_worker(void)
{
/*
@@ -324,6 +325,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
last_start_times = hash_create("Logical replication table sync worker start times",
256, &ctl, HASH_ELEM | HASH_BLOBS);
}

/*
 * Clean up the hash table when we're done with all tables (just to
 * release the bit of memory).
@@ -342,9 +344,9 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
if (rstate->state == SUBREL_STATE_SYNCDONE)
{
/*
 * Apply has caught up to the position where the table sync has
 * finished. Time to mark the table as ready so that apply will
 * just continue to replicate it normally.
 */
if (current_lsn >= rstate->lsn)
{
@@ -376,6 +378,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
SpinLockRelease(&syncworker->relmutex);
}
else

/*
 * If no sync worker for this table yet, count running sync
 * workers for this subscription, while we have the lock, for
@@ -398,12 +401,12 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
 *
 * b) Apply is behind the table sync: We tell the table sync
 * to mark the table as SYNCDONE and finish.
 *
 * c) Apply and table sync are at the same position: We tell
 * table sync to mark the table as READY and finish.
 *
 * In any case we'll need to wait for table sync to change the
 * state in catalog and only then continue ourselves.
 */
if (current_lsn > rstate->lsn)
{
@@ -427,16 +430,15 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
logicalrep_worker_wakeup_ptr(syncworker);
/*
 * Enter busy loop and wait for synchronization status change.
 */
wait_for_sync_status_change(rstate->relid, rstate->state);
}
/*
 * If there is no sync worker registered for the table and there
 * is some free sync worker slot, start new sync worker for the
 * table.
 */
else if (!syncworker && nsyncworkers < max_sync_workers_per_subscription)
{
@@ -818,24 +820,23 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
pgstat_report_stat(false);
/*
 * We want to do the table data sync in single transaction.
 */
StartTransactionCommand();
/*
 * Use standard write lock here. It might be better to
 * disallow access to table while it's being synchronized. But
 * we don't want to block the main apply process from working
 * and it has to open relation in RowExclusiveLock when
 * remapping remote relation id to local one.
 */
rel = heap_open(MyLogicalRepWorker->relid, RowExclusiveLock);
/*
 * Create temporary slot for the sync process. We do this
 * inside transaction so that we can use the snapshot made by
 * the slot to get existing data.
 */
res = walrcv_exec(wrconn,
"BEGIN READ ONLY ISOLATION LEVEL "
@@ -849,10 +850,10 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
/*
 * Create new temporary logical decoding slot.
 *
 * We'll use slot for data copy so make sure the snapshot is
 * used for the transaction, that way the COPY will get data
 * that is consistent with the lsn used by the slot to start
 * decoding.
 */
walrcv_create_slot(wrconn, slotname, true,
CRS_USE_SNAPSHOT, origin_startpos);
@@ -872,8 +873,8 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
CommandCounterIncrement();
/*
 * We are done with the initial data synchronization, update
 * the state.
 */
SpinLockAcquire(&MyLogicalRepWorker->relmutex);
MyLogicalRepWorker->relstate = SUBREL_STATE_SYNCWAIT;
@@ -881,8 +882,8 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
SpinLockRelease(&MyLogicalRepWorker->relmutex);
/*
 * Wait for main apply worker to either tell us to catchup or
 * that we are done.
 */
wait_for_sync_status_change(MyLogicalRepWorker->relid,
MyLogicalRepWorker->relstate);

@@ -476,8 +476,8 @@ static void
apply_handle_origin(StringInfo s)
{
/*
 * ORIGIN message can only come inside remote transaction and before any
 * actual writes.
 */
if (!in_remote_transaction ||
(IsTransactionState() && !am_tablesync_worker()))
@@ -607,8 +607,8 @@ check_relation_updatable(LogicalRepRelMapEntry *rel)
return;
/*
 * We are in error mode so it's fine this is somewhat slow. It's better to
 * give user correct error.
 */
if (OidIsValid(GetRelationIdentityOrPK(rel->localrel)))
{
@@ -685,8 +685,8 @@ apply_handle_update(StringInfo s)
MemoryContextSwitchTo(oldctx);
/*
 * Try to find tuple using either replica identity index, primary key or
 * if needed, sequential scan.
 */
idxoid = GetRelationIdentityOrPK(rel->localrel);
Assert(OidIsValid(idxoid) ||
@@ -802,8 +802,8 @@ apply_handle_delete(StringInfo s)
MemoryContextSwitchTo(oldctx);
/*
 * Try to find tuple using either replica identity index, primary key or
 * if needed, sequential scan.
 */
idxoid = GetRelationIdentityOrPK(rel->localrel);
Assert(OidIsValid(idxoid) ||
@@ -995,8 +995,8 @@ static void
LogicalRepApplyLoop(XLogRecPtr last_received)
{
/*
 * Init the ApplyMessageContext which we clean up after each replication
 * protocol message.
 */
ApplyMessageContext = AllocSetContextCreate(ApplyContext,
"ApplyMessageContext",
@@ -1108,7 +1108,8 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
{
/*
 * If we didn't get any transactions for a while there might be
 * unconsumed invalidation messages in the queue, consume them
 * now.
 */
AcceptInvalidationMessages();
if (!MySubscriptionValid)
@@ -1126,6 +1127,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
if (endofstream)
{
TimeLineID tli;

walrcv_endstreaming(wrconn, &tli);
break;
}
@@ -1152,19 +1154,18 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
if (rc & WL_TIMEOUT)
{
/*
 * We didn't receive anything new. If we haven't heard anything
 * from the server for more than wal_receiver_timeout / 2, ping
 * the server. Also, if it's been longer than
 * wal_receiver_status_interval since the last update we sent,
 * send a status update to the master anyway, to report any
 * progress in applying WAL.
 */
bool requestReply = false;
/*
 * Check if time since last receive from standby has reached the
 * configured limit.
 */
if (wal_receiver_timeout > 0)
{
@@ -1180,8 +1181,8 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
(errmsg("terminating logical replication worker due to timeout")));
/*
 * We didn't receive anything new, for half of receiver
 * replication timeout. Ping the server.
 */
if (!ping_sent)
{
@@ -1237,8 +1238,8 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
get_flush_position(&writepos, &flushpos, &have_pending_txes);
/*
 * No outstanding transactions to flush, we can report the latest received
 * position. This is important for synchronous replication.
 */
if (!have_pending_txes)
flushpos = writepos = recvpos;
@@ -1263,6 +1264,7 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
if (!reply_message)
{
MemoryContext oldctx = MemoryContextSwitchTo(ApplyContext);

reply_message = makeStringInfo();
MemoryContextSwitchTo(oldctx);
}
@@ -1317,9 +1319,8 @@ reread_subscription(void)
newsub = GetSubscription(MyLogicalRepWorker->subid, true);
/*
 * Exit if the subscription was removed. This normally should not happen
 * as the worker gets killed during DROP SUBSCRIPTION.
 */
if (!newsub)
{
@@ -1333,9 +1334,8 @@ reread_subscription(void)
}
/*
 * Exit if the subscription was disabled. This normally should not happen
 * as the worker gets killed during ALTER SUBSCRIPTION ... DISABLE.
 */
if (!newsub->enabled)
{
@@ -1349,8 +1349,8 @@ reread_subscription(void)
}
/*
 * Exit if connection string was changed. The launcher will start new
 * worker.
 */
if (strcmp(newsub->conninfo, MySubscription->conninfo) != 0)
{
@@ -1382,8 +1382,8 @@ reread_subscription(void)
Assert(newsub->slotname);
/*
 * We need to make new connection to new slot if slot name has changed so
 * exit here as well if that's the case.
 */
if (strcmp(newsub->slotname, MySubscription->slotname) != 0)
{
@@ -1397,8 +1397,8 @@ reread_subscription(void)
}
/*
 * Exit if publication list was changed. The launcher will start new
 * worker.
 */
if (!equal(newsub->publications, MySubscription->publications))
{
@@ -1570,9 +1570,8 @@ ApplyWorkerMain(Datum main_arg)
(errmsg("could not connect to the publisher: %s", err)));
/*
 * We don't really use the output identify_system for anything but it
 * does some initializations on the upstream so let's still call it.
 */
(void) walrcv_identify_system(wrconn, &startpointTLI,
&server_version);
@@ -1580,8 +1579,8 @@ ApplyWorkerMain(Datum main_arg)
}
/*
 * Setup callback for syscache so that we know when something changes in
 * the subscription relation state.
 */
CacheRegisterSyscacheCallback(SUBSCRIPTIONRELMAP,
invalidate_syncing_table_states,

@@ -417,9 +417,8 @@ publication_invalidation_cb(Datum arg, int cacheid, uint32 hashvalue)
publications_valid = false;
/*
 * Also invalidate per-relation cache so that next time the filtering info
 * is checked it will be updated with the new publication settings.
 */
rel_sync_cache_publication_cb(arg, cacheid, hashvalue);
}
@@ -499,9 +498,9 @@ get_rel_sync_entry(PGOutputData *data, Oid relid)
}
/*
 * Build publication cache. We can't use one provided by relcache as
 * relcache considers all publications given relation is in, but here
 * we only need to consider ones that the subscriber requested.
 */
entry->pubactions.pubinsert = entry->pubactions.pubupdate =
entry->pubactions.pubdelete = false;
@@ -558,15 +557,14 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid)
 * safe point.
 *
 * Getting invalidations for relations that aren't in the table is
 * entirely normal, since there's no way to unregister for an invalidation
 * event. So we don't care if it's found or not.
 */
entry = (RelationSyncEntry *) hash_search(RelationSyncCache, &relid,
HASH_FIND, NULL);
/*
 * Reset schema sent status as the relation definition may have changed.
 */
if (entry != NULL)
entry->schema_sent = false;
@@ -590,8 +588,8 @@ rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue)
return;
/*
 * There is no way to find which entry in our cache the hash belongs to so
 * mark the whole cache as invalid.
 */
hash_seq_init(&status, RelationSyncCache);
while ((entry = (RelationSyncEntry *) hash_seq_search(&status)) != NULL)

@@ -502,8 +502,8 @@ ReplicationSlotDropPtr(ReplicationSlot *slot)
/*
 * Rename the slot directory on disk, so that we'll no longer recognize
 * this as a valid slot. Note that if this fails, we've got to mark the
 * slot inactive before bailing out. If we're dropping an ephemeral or a
 * temporary slot, we better never fail hard as the caller won't expect
 * the slot to survive and this might get called during error handling.
 */
if (rename(path, tmppath) == 0)

@@ -119,11 +119,11 @@ pg_create_logical_replication_slot(PG_FUNCTION_ARGS)
/*
 * Acquire a logical decoding slot, this will check for conflicting names.
 * Initially create persistent slot as ephemeral - that allows us to
 * nicely handle errors during initialization because it'll get dropped if
 * this transaction fails. We'll make it persistent at the end. Temporary
 * slots can be created as temporary from beginning as they get dropped on
 * error as well.
 */
ReplicationSlotCreate(NameStr(*name), true,
temporary ? RS_TEMPORARY : RS_EPHEMERAL);

@@ -542,9 +542,9 @@ SyncRepGetSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
 * oldest ones among sync standbys. In a quorum-based, they are the Nth
 * latest ones.
 *
 * SyncRepGetNthLatestSyncRecPtr() also can calculate the oldest
 * positions. But we use SyncRepGetOldestSyncRecPtr() for that calculation
 * because it's a bit more efficient.
 *
 * XXX If the numbers of current and requested sync standbys are the same,
 * we can use SyncRepGetOldestSyncRecPtr() to calculate the synced
@@ -575,8 +575,8 @@ SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
ListCell   *cell;
/*
 * Scan through all sync standbys and calculate the oldest Write, Flush
 * and Apply positions.
 */
foreach(cell, sync_standbys)
{
@@ -730,8 +730,8 @@ SyncRepGetSyncStandbysQuorum(bool *am_sync)
continue;
/*
 * Consider this standby as a candidate for quorum sync standbys and
 * append it to the result.
 */
result = lappend_int(result, i);
if (am_sync != NULL && walsnd == MyWalSnd)
@@ -955,8 +955,8 @@ SyncRepGetStandbyPriority(void)
return 0;
/*
 * In quorum-based sync replication, all the standbys in the list have the
 * same priority, one.
 */
return (SyncRepConfig->syncrep_method == SYNC_REP_PRIORITY) ? priority : 1;
}

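The syncrep.c hunks above keep restating one selection rule: in priority-based sync replication the synced position is the oldest (minimum) position among the chosen standbys, while in quorum-based replication it is the Nth latest one. A minimal standalone sketch of that rule, with plain uint64_t values standing in for XLogRecPtr and qsort standing in for the server's internal machinery (the helper names here are invented for illustration, they are not the backend's functions):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Descending comparator, so the latest positions come first. */
static int
cmp_desc(const void *a, const void *b)
{
	uint64_t	x = *(const uint64_t *) a;
	uint64_t	y = *(const uint64_t *) b;

	return (x < y) ? 1 : (x > y) ? -1 : 0;
}

/* Priority mode: the synced position is the oldest (minimum) one. */
static uint64_t
oldest_position(const uint64_t *pos, int n)
{
	uint64_t	min = pos[0];

	for (int i = 1; i < n; i++)
		if (pos[i] < min)
			min = pos[i];
	return min;
}

/* Quorum mode: the synced position is the Nth latest one. */
static uint64_t
nth_latest_position(uint64_t *pos, int n, int nth)
{
	qsort(pos, n, sizeof(uint64_t), cmp_desc);
	return pos[nth - 1];
}

int
main(void)
{
	uint64_t	flush[] = {1200, 900, 1500};

	printf("priority (oldest): %llu\n",
		   (unsigned long long) oldest_position(flush, 3));
	printf("quorum, N=2 (2nd latest): %llu\n",
		   (unsigned long long) nth_latest_position(flush, 3, 2));
	return 0;
}

With three standbys at 1200, 900 and 1500, priority mode reports 900 and a two-standby quorum reports 1200, which is exactly the asymmetry the comment describes.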
@@ -1176,9 +1176,12 @@ XLogWalRcvSendHSFeedback(bool immed)
{
TimestampTz now;
TransactionId nextXid;
uint32		xmin_epoch,
			catalog_xmin_epoch;
TransactionId xmin,
			catalog_xmin;
static TimestampTz sendTime = 0;

/* initially true so we always send at least one feedback message */
static bool master_has_standby_xmin = true;
@@ -1211,8 +1214,8 @@ XLogWalRcvSendHSFeedback(bool immed)
 *
 * Bailing out here also ensures that we don't send feedback until we've
 * read our own replication slot state, so we don't tell the master to
 * discard needed xmin or catalog_xmin from any slots that may exist on
 * this replica.
 */
if (!HotStandbyActive())
return;

@@ -580,8 +580,8 @@ StartReplication(StartReplicationCmd *cmd)
sendTimeLineIsHistoric = true;
/*
 * Check that the timeline the client requested exists, and the
 * requested start location is on that timeline.
 */
timeLineHistory = readTimeLineHistory(ThisTimeLineID);
switchpoint = tliSwitchPoint(cmd->timeline, timeLineHistory,
@@ -599,8 +599,8 @@ StartReplication(StartReplicationCmd *cmd)
 * request to start replication from the beginning of the WAL
 * segment that contains switchpoint, but on the new timeline, so
 * that it doesn't end up with a partial segment. If you ask for
 * too old a starting point, you'll get an error later when we
 * fail to find the requested WAL segment in pg_wal.
 *
 * XXX: we could be more strict here and only allow a startpoint
 * that's older than the switchpoint, if it's still in the same
@@ -717,9 +717,9 @@ StartReplication(StartReplicationCmd *cmd)
MemSet(nulls, false, sizeof(nulls));
/*
 * Need a tuple descriptor representing two columns. int8 may seem
 * like a surprising data type for this, but in theory int4 would not
 * be wide enough for this, as TimeLineID is unsigned.
 */
tupdesc = CreateTemplateTupleDesc(2, false);
TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "next_tli",
@@ -1255,8 +1255,8 @@ WalSndUpdateProgress(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId
TimestampTz now = GetCurrentTimestamp();
/*
 * Track lag no more than once per WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS to
 * avoid flooding the lag tracker when we commit frequently.
 */
#define WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS 1000
if (!TimestampDifferenceExceeds(sendTime, now,
@@ -1474,8 +1474,8 @@ exec_replication_command(const char *cmd_string)
SnapBuildClearExportedSnapshot();
/*
 * For aborted transactions, don't allow anything except pure SQL, the
 * exec_simple_query() will handle it correctly.
 */
if (IsAbortedTransactionBlockState() && !IsA(cmd_node, SQLCmd))
ereport(ERROR,
@@ -1974,8 +1974,8 @@ ProcessStandbyHSFeedbackMessage(void)
 *
 * If we're using a replication slot we reserve the xmin via that,
 * otherwise via the walsender's PGXACT entry. We can only track the
 * catalog xmin separately when using a slot, so we store the least of the
 * two provided when not using a slot.
 *
 * XXX: It might make sense to generalize the ephemeral slot concept and
 * always use the slot mechanism to handle the feedback xmin.
@@ -2155,8 +2155,8 @@ WalSndLoop(WalSndSendDataCallback send_data)
}
/*
 * At the reception of SIGUSR2, switch the WAL sender to the
 * stopping state.
 */
if (got_SIGUSR2)
WalSndSetState(WALSNDSTATE_STOPPING);
@@ -2588,18 +2588,18 @@ XLogSendPhysical(void)
 * it seems good enough to capture the time here. We should reach this
 * after XLogFlush() runs WalSndWakeupProcessRequests(), and although that
 * may take some time, we read the WAL flush pointer and take the time
 * very close to together here so that we'll get a later position if it is
 * still moving.
 *
 * Because LagTrackerWriter ignores samples when the LSN hasn't advanced,
 * this gives us a cheap approximation for the WAL flush time for this
 * LSN.
 *
 * Note that the LSN is not necessarily the LSN for the data contained in
 * the present message; it's the end of the WAL, which might be further
 * ahead. All the lag tracking machinery cares about is finding out when
 * that arbitrary LSN is eventually reported as written, flushed and
 * applied, so that it can measure the elapsed time.
 */
LagTrackerWrite(SendRqstPtr, GetCurrentTimestamp());
@@ -2758,8 +2758,8 @@ XLogSendLogical(void)
if (record != NULL)
{
/*
 * Note the lack of any call to LagTrackerWrite() which is handled by
 * WalSndUpdateProgress which is called by output plugin through
 * logical decoding write api.
 */
LogicalDecodingProcessRecord(logical_decoding_ctx, logical_decoding_ctx->reader);
@@ -2805,9 +2805,8 @@ WalSndDone(WalSndSendDataCallback send_data)
/*
 * To figure out whether all WAL has successfully been replicated, check
 * flush location if valid, write otherwise. Tools like pg_receivewal will
 * usually (unless in synchronous mode) return an invalid flush location.
 */
replicatedPtr = XLogRecPtrIsInvalid(MyWalSnd->flush) ?
MyWalSnd->write : MyWalSnd->flush;
@@ -3448,11 +3447,11 @@ LagTrackerRead(int head, XLogRecPtr lsn, TimestampTz now)
/*
 * We didn't cross a time. If there is a future sample that we
 * haven't reached yet, and we've already reached at least one sample,
 * let's interpolate the local flushed time. This is mainly useful
 * for reporting a completely stuck apply position as having
 * increasing lag, since otherwise we'd have to wait for it to
 * eventually start moving again and cross one of our samples before
 * we can show the lag increasing.
 */
if (LagTracker.read_heads[head] != LagTracker.write_head &&
LagTracker.last_read[head].time != 0)

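The LagTrackerRead() comment in the last hunk describes estimating when a standby-reported position was flushed locally by interpolating between two buffered (LSN, timestamp) samples, so a stuck apply position still shows growing lag. A rough standalone illustration of that linear interpolation; the struct and function names here are invented for the sketch and are not the walsender's own:

#include <stdint.h>
#include <stdio.h>

/* One buffered sample: a WAL position and the local time it was flushed. */
typedef struct
{
	uint64_t	lsn;
	double		time;			/* seconds on any monotonic clock */
} LagSample;

/*
 * Estimate when the given position was flushed locally by interpolating
 * linearly between the sample just below it and the sample just above it.
 */
static double
interpolate_flush_time(LagSample prev, LagSample next, uint64_t lsn)
{
	double		fraction;

	if (next.lsn == prev.lsn)
		return prev.time;		/* degenerate interval */
	fraction = (double) (lsn - prev.lsn) / (double) (next.lsn - prev.lsn);
	return prev.time + fraction * (next.time - prev.time);
}

int
main(void)
{
	LagSample	prev = {.lsn = 1000, .time = 10.0};
	LagSample	next = {.lsn = 2000, .time = 30.0};

	/* A position three quarters of the way through the interval. */
	printf("estimated flush time: %.1f\n",
		   interpolate_flush_time(prev, next, 1750));
	return 0;
}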
@@ -90,8 +90,8 @@ BuildRelationExtStatistics(Relation onerel, double totalrows,
ListCell   *lc2;
/*
 * Check if we can build these stats based on the column analyzed. If
 * not, report this fact (except in autovacuum) and move on.
 */
stats = lookup_var_attr_stats(onerel, stat->columns,
natts, vacattrstats);

@@ -279,8 +279,8 @@ statext_ndistinct_deserialize(bytea *data)
VARSIZE_ANY_EXHDR(data), minimum_size)));
/*
 * Allocate space for the ndistinct items (no space for each item's
 * attnos: those live in bitmapsets allocated separately)
 */
ndistinct = palloc0(MAXALIGN(SizeOfMVNDistinct) +
(ndist.nitems * sizeof(MVNDistinctItem)));
@@ -449,8 +449,8 @@ ndistinct_for_combination(double totalrows, int numrows, HeapTuple *rows,
}
/*
 * For each dimension, set up sort-support and fill in the values from the
 * sample data.
 */
for (i = 0; i < k; i++)
{

@@ -685,8 +685,8 @@ durable_unlink(const char *fname, int elevel)
}
/*
 * To guarantee that the removal of the file is persistent, fsync its
 * parent directory.
 */
if (fsync_parent_path(fname, elevel) != 0)
return -1;

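The durable_unlink() comment relies on the usual POSIX pattern: removing a directory entry only becomes crash-safe once the containing directory itself has been fsync'ed. A bare-bones sketch of that pattern, with most error reporting trimmed; this is an illustrative helper, not the backend's fd.c code:

#include <fcntl.h>
#include <libgen.h>
#include <string.h>
#include <unistd.h>

/* Remove a file and fsync its parent directory so the removal persists. */
static int
durable_unlink_sketch(const char *fname)
{
	char		buf[4096];
	char	   *dir;
	int			fd;

	if (unlink(fname) != 0)
		return -1;

	/* dirname() may modify its argument, so work on a copy. */
	strncpy(buf, fname, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	dir = dirname(buf);

	fd = open(dir, O_RDONLY);
	if (fd < 0)
		return -1;
	if (fsync(fd) != 0)
	{
		close(fd);
		return -1;
	}
	return close(fd);
}

int
main(void)
{
	return durable_unlink_sketch("/tmp/some_scratch_file") == 0 ? 0 : 1;
}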
@@ -218,8 +218,8 @@ ConditionVariableBroadcast(ConditionVariable *cv)
/*
 * Let's just do this the dumbest way possible. We could try to dequeue
 * all the sleepers at once to save spinlock cycles, but it's a bit hard
 * to get that right in the face of possible sleep cancelations, and we
 * don't want to loop holding the mutex.
 */
while (ConditionVariableSignal(cv))
++nwoken;

@@ -1452,6 +1452,7 @@ ProcessUtilitySlow(ParseState *pstate,
break;
case T_RefreshMatViewStmt:

/*
 * REFRESH CONCURRENTLY executes some DDL commands internally.
 * Inhibit DDL command collection here to avoid those commands
@@ -1610,6 +1611,7 @@ ProcessUtilitySlow(ParseState *pstate,
case T_AlterPublicationStmt:
AlterPublication((AlterPublicationStmt *) parsetree);

/*
 * AlterPublication calls EventTriggerCollectSimpleCommand
 * directly

@@ -284,8 +284,10 @@ jsonb_to_tsvector_byid(PG_FUNCTION_ARGS)
if (state.result == NULL)
{
/*
 * There weren't any string elements in jsonb, so wee need to return
 * an empty vector
 */
if (prs->words != NULL)
pfree(prs->words);
@@ -328,8 +330,10 @@ json_to_tsvector_byid(PG_FUNCTION_ARGS)
PG_FREE_IF_COPY(json, 1);
if (state.result == NULL)
{
/*
 * There weren't any string elements in json, so wee need to return an
 * empty vector
 */
if (prs->words != NULL)
pfree(prs->words);

@@ -279,8 +279,10 @@ cash_in(PG_FUNCTION_ARGS)
"money", str)));
}
/*
 * If the value is supposed to be positive, flip the sign, but check for
 * the most negative number.
 */
if (sgn > 0)
{
result = -value;

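The cash_in() comment is about a classic two's-complement pitfall: the magnitude is accumulated as a negative number and negated at the end, except that negating the most negative value overflows. A tiny standalone illustration of the guard; this is a simplified sketch, not the money parser itself:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Negate a value that was accumulated as a non-positive number, refusing
 * the one case that cannot be represented: -INT64_MIN.
 */
static bool
negate_checked(int64_t value, int64_t *result)
{
	if (value == INT64_MIN)
		return false;			/* would overflow */
	*result = -value;
	return true;
}

int
main(void)
{
	int64_t		out;

	if (negate_checked(-12345, &out))
		printf("ok: %" PRId64 "\n", out);
	if (!negate_checked(INT64_MIN, &out))
		printf("rejected INT64_MIN as expected\n");
	return 0;
}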
@@ -90,8 +90,8 @@ calculate_database_size(Oid dbOid)
AclResult	aclresult;
/*
 * User must have connect privilege for target database or be a member of
 * pg_read_all_stats
 */
aclresult = pg_database_aclcheck(dbOid, GetUserId(), ACL_CONNECT);
if (aclresult != ACLCHECK_OK &&
@@ -180,8 +180,8 @@ calculate_tablespace_size(Oid tblspcOid)
/*
 * User must be a member of pg_read_all_stats or have CREATE privilege for
 * target tablespace, either explicitly granted or implicitly because it
 * is default for current database.
 */
if (tblspcOid != MyDatabaseTableSpace &&
!is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_STATS))

@@ -1592,7 +1592,10 @@ str_tolower(const char *buff, size_t nbytes, Oid collid)
workspace[curr_char] = towlower(workspace[curr_char]);
}
/*
 * Make result large enough; case change might change number
 * of bytes
 */
result_size = curr_char * pg_database_encoding_max_length() + 1;
result = palloc(result_size);
@@ -1607,11 +1610,11 @@ str_tolower(const char *buff, size_t nbytes, Oid collid)
result = pnstrdup(buff, nbytes);
/*
 * Note: we assume that tolower_l() will not be so broken as
 * to need an isupper_l() guard test. When using the default
 * collation, we apply the traditional Postgres behavior that
 * forces ASCII-style treatment of I/i, but in non-default
 * collations you get exactly what the collation says.
 */
for (p = result; *p; p++)
{
@@ -1672,7 +1675,8 @@ str_toupper(const char *buff, size_t nbytes, Oid collid)
#ifdef USE_ICU
if (mylocale && mylocale->provider == COLLPROVIDER_ICU)
{
int32_t		len_uchar,
			len_conv;
UChar	   *buff_uchar;
UChar	   *buff_conv;
@@ -1711,7 +1715,10 @@ str_toupper(const char *buff, size_t nbytes, Oid collid)
workspace[curr_char] = towupper(workspace[curr_char]);
}
/*
 * Make result large enough; case change might change number
 * of bytes
 */
result_size = curr_char * pg_database_encoding_max_length() + 1;
result = palloc(result_size);
@@ -1726,11 +1733,11 @@ str_toupper(const char *buff, size_t nbytes, Oid collid)
result = pnstrdup(buff, nbytes);
/*
 * Note: we assume that toupper_l() will not be so broken as
 * to need an islower_l() guard test. When using the default
 * collation, we apply the traditional Postgres behavior that
 * forces ASCII-style treatment of I/i, but in non-default
 * collations you get exactly what the collation says.
 */
for (p = result; *p; p++)
{
@@ -1792,7 +1799,8 @@ str_initcap(const char *buff, size_t nbytes, Oid collid)
#ifdef USE_ICU
if (mylocale && mylocale->provider == COLLPROVIDER_ICU)
{
int32_t		len_uchar,
			len_conv;
UChar	   *buff_uchar;
UChar	   *buff_conv;
@@ -1843,7 +1851,10 @@ str_initcap(const char *buff, size_t nbytes, Oid collid)
}
}
/*
 * Make result large enough; case change might change number
 * of bytes
 */
result_size = curr_char * pg_database_encoding_max_length() + 1;
result = palloc(result_size);
@@ -1858,11 +1869,11 @@ str_initcap(const char *buff, size_t nbytes, Oid collid)
result = pnstrdup(buff, nbytes);
/*
 * Note: we assume that toupper_l()/tolower_l() will not be so
 * broken as to need guard tests. When using the default
 * collation, we apply the traditional Postgres behavior that
 * forces ASCII-style treatment of I/i, but in non-default
 * collations you get exactly what the collation says.
 */
for (p = result; *p; p++)
{

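The str_tolower()/str_toupper()/str_initcap() hunks keep repeating one sizing rule: after a case change the byte length of a string can grow, so the output buffer is sized as the character count times the encoding's maximum bytes per character, plus a terminator. A small locale-level analogue of that rule using MB_CUR_MAX; this is illustrative only, the server uses pg_database_encoding_max_length() and palloc() instead:

#include <locale.h>
#include <stdio.h>
#include <stdlib.h>
#include <wchar.h>
#include <wctype.h>

/* Lower-case a multibyte string into a worst-case-sized buffer. */
static char *
tolower_mb(const char *input)
{
	size_t		nchars = mbstowcs(NULL, input, 0);
	wchar_t    *ws;
	char	   *result;
	size_t		result_size;

	if (nchars == (size_t) -1)
		return NULL;

	ws = malloc((nchars + 1) * sizeof(wchar_t));
	mbstowcs(ws, input, nchars + 1);
	for (size_t i = 0; i < nchars; i++)
		ws[i] = towlower(ws[i]);

	/* Case change might change the number of bytes, so size for the worst. */
	result_size = nchars * MB_CUR_MAX + 1;
	result = malloc(result_size);
	wcstombs(result, ws, result_size);

	free(ws);
	return result;
}

int
main(void)
{
	char	   *s;

	setlocale(LC_ALL, "");
	s = tolower_mb("MiXeD Case");
	if (s)
	{
		printf("%s\n", s);
		free(s);
	}
	return 0;
}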
@@ -57,8 +57,8 @@ typedef struct OkeysState
typedef struct IterateJsonStringValuesState
{
JsonLexContext *lex;
JsonIterateStringValuesAction action;	/* an action that will be
										 * applied to each json value */
void *action_state; /* any necessary context for iteration */
} IterateJsonStringValuesState;
@@ -67,8 +67,8 @@ typedef struct TransformJsonStringValuesState
{
JsonLexContext *lex;
StringInfo strval; /* resulting json */
JsonTransformStringValuesAction action;	/* an action that will be
											 * applied to each json value */
void *action_state; /* any necessary context for transformation */
} TransformJsonStringValuesState;
@@ -163,8 +163,8 @@ typedef struct ArrayIOData
typedef struct CompositeIOData
{
/*
 * We use pointer to a RecordIOData here because variable-length struct
 * RecordIOData can't be used directly in ColumnIOData.io union
 */
RecordIOData *record_io; /* metadata cache for populate_record() */
TupleDesc tupdesc; /* cached tuple descriptor */
@@ -203,7 +203,8 @@ struct ColumnIOData
ArrayIOData array;
CompositeIOData composite;
DomainIOData domain;
} io;	/* metadata cache for various column type
		 * categories */
};
/* structure to cache record metadata needed for populate_record() */
@@ -257,7 +258,8 @@ typedef struct PopulateArrayState
JsonLexContext *lex; /* json lexer */
PopulateArrayContext *ctx; /* context */
char *element_start; /* start of the current array element */
char *element_scalar;	/* current array element token if it is a
						 * scalar */
JsonTokenType element_type; /* current array element type */
} PopulateArrayState;
@@ -2567,9 +2569,9 @@ populate_array_dim_jsonb(PopulateArrayContext *ctx, /* context */
tok = JsonbIteratorNext(&it, &val, true);
/*
 * If the number of dimensions is not yet known and we have found end of
 * the array, or the first child element is not an array, then assign the
 * number of dimensions now.
 */
if (ctx->ndims <= 0 &&
(tok == WJB_END_ARRAY ||
@@ -2585,8 +2587,8 @@ populate_array_dim_jsonb(PopulateArrayContext *ctx, /* context */
while (tok == WJB_ELEM)
{
/*
 * Recurse only if the dimensions of dimensions is still unknown or if
 * it is not the innermost dimension.
 */
if (ctx->ndims > 0 && ndim >= ctx->ndims)
populate_array_element(ctx, ndim, &jsv);
@@ -2750,8 +2752,8 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv)
jsv->val.json.type == JSON_TOKEN_STRING)
{
/*
 * Add quotes around string value (should be already escaped) if
 * converting to json/jsonb.
 */
if (len < 0)
@@ -2780,6 +2782,7 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv)
if (typid == JSONBOID)
{
Jsonb *jsonb = JsonbValueToJsonb(jbv); /* directly use jsonb */

return JsonbGetDatum(jsonb);
}
/* convert jsonb to string for typio call */
@@ -2790,6 +2793,7 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv)
 * to json string, preserving quotes around top-level strings.
 */
Jsonb *jsonb = JsonbValueToJsonb(jbv);

str = JsonbToCString(NULL, &jsonb->root, VARSIZE(jsonb));
}
else if (jbv->type == jbvString) /* quotes are stripped */
@@ -3017,9 +3021,9 @@ populate_record(TupleDesc tupdesc,
int i;
/*
 * if the input json is empty, we can only skip the rest if we were passed
 * in a non-null record, since otherwise there may be issues with domain
 * nulls.
 */
if (defaultval && JsObjectIsEmpty(obj))
return defaultval;
@@ -3210,7 +3214,8 @@ populate_record_worker(FunctionCallInfo fcinfo, const char *funcname,
jsv.val.json.str = VARDATA_ANY(json);
jsv.val.json.len = VARSIZE_ANY_EXHDR(json);
jsv.val.json.type = JSON_TOKEN_INVALID; /* not used in
										 * populate_composite() */
}
else
{
@@ -4837,6 +4842,7 @@ static void
iterate_string_values_scalar(void *state, char *token, JsonTokenType tokentype)
{
IterateJsonStringValuesState *_state = (IterateJsonStringValuesState *) state;

if (tokentype == JSON_TOKEN_STRING)
(*_state->action) (_state->action_state, token, strlen(token));
}
@@ -4852,7 +4858,8 @@ transform_jsonb_string_values(Jsonb *jsonb, void *action_state,
JsonTransformStringValuesAction transform_action)
{
JsonbIterator *it;
JsonbValue	v,
		   *res = NULL;
JsonbIteratorToken type;
JsonbParseState *st = NULL;
text *out;
@@ -4928,6 +4935,7 @@ static void
transform_string_values_object_start(void *state)
{
TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state;

appendStringInfoCharMacro(_state->strval, '{');
}
@@ -4935,6 +4943,7 @@ static void
transform_string_values_object_end(void *state)
{
TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state;

appendStringInfoCharMacro(_state->strval, '}');
}
@@ -4942,6 +4951,7 @@ static void
transform_string_values_array_start(void *state)
{
TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state;

appendStringInfoCharMacro(_state->strval, '[');
}
@@ -4949,6 +4959,7 @@ static void
transform_string_values_array_end(void *state)
{
TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state;

appendStringInfoCharMacro(_state->strval, ']');
}
@@ -4985,6 +4996,7 @@ transform_string_values_scalar(void *state, char *token, JsonTokenType tokentype
if (tokentype == JSON_TOKEN_STRING)
{
text *out = (*_state->action) (_state->action_state, token, strlen(token));

escape_json(_state->strval, text_to_cstring(out));
}
else

@@ -189,9 +189,9 @@ Generic_Text_IC_like(text *str, text *pat, Oid collation)
/*
 * For efficiency reasons, in the single byte case we don't call lower()
 * on the pattern and text, but instead call SB_lower_char on each
 * character. In the multi-byte case we don't have much choice :-(. Also,
 * ICU does not support single-character case folding, so we go the long
 * way.
 */
if (pg_database_encoding_max_length() > 1 || (locale && locale->provider == COLLPROVIDER_ICU))

@@ -1330,9 +1330,9 @@ pg_newlocale_from_collation(Oid collid)
 #else
 			/*
-			 * XXX The _create_locale() API doesn't appear to support this.
-			 * Could perhaps be worked around by changing pg_locale_t to
-			 * contain two separate fields.
+			 * XXX The _create_locale() API doesn't appear to support
+			 * this. Could perhaps be worked around by changing
+			 * pg_locale_t to contain two separate fields.
 			 */
 			ereport(ERROR,
 					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -1382,9 +1382,11 @@ pg_newlocale_from_collation(Oid collid)
 		actual_versionstr = get_collation_actual_version(collform->collprovider, collcollate);
 		if (!actual_versionstr)
 		{
-			/* This could happen when specifying a version in CREATE
-			 * COLLATION for a libc locale, or manually creating a mess
-			 * in the catalogs. */
+			/*
+			 * This could happen when specifying a version in CREATE
+			 * COLLATION for a libc locale, or manually creating a mess in
+			 * the catalogs.
+			 */
 			ereport(ERROR,
 					(errmsg("collation \"%s\" has no actual version, but a version was specified",
 							NameStr(collform->collname))));

@@ -3364,8 +3364,8 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
 			List	   *relvarinfos = NIL;
 
 			/*
-			 * Split the list of varinfos in two - one for the current rel,
-			 * one for remaining Vars on other rels.
+			 * Split the list of varinfos in two - one for the current rel, one
+			 * for remaining Vars on other rels.
 			 */
 			relvarinfos = lcons(varinfo1, relvarinfos);
 			for_each_cell(l, lnext(list_head(varinfos)))
@@ -3388,9 +3388,9 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
 			 * Get the numdistinct estimate for the Vars of this rel. We
 			 * iteratively search for multivariate n-distinct with maximum number
 			 * of vars; assuming that each var group is independent of the others,
-			 * we multiply them together. Any remaining relvarinfos after
-			 * no more multivariate matches are found are assumed independent too,
-			 * so their individual ndistinct estimates are multiplied also.
+			 * we multiply them together. Any remaining relvarinfos after no more
+			 * multivariate matches are found are assumed independent too, so
+			 * their individual ndistinct estimates are multiplied also.
 			 *
 			 * While iterating, count how many separate numdistinct values we
 			 * apply. We apply a fudge factor below, but only if we multiplied
@@ -7766,8 +7766,8 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
 	 *
 	 * Because we can use all index quals equally when scanning, we can use
 	 * the largest correlation (in absolute value) among columns used by the
-	 * query. Start at zero, the worst possible case. If we cannot find
-	 * any correlation statistics, we will keep it as 0.
+	 * query. Start at zero, the worst possible case. If we cannot find any
+	 * correlation statistics, we will keep it as 0.
 	 */
 	*indexCorrelation = 0;
 
@@ -7816,8 +7816,8 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
 			(*get_index_stats_hook) (root, index->indexoid, attnum, &vardata))
 		{
 			/*
-			 * The hook took control of acquiring a stats tuple. If it did
-			 * supply a tuple, it'd better have supplied a freefunc.
+			 * The hook took control of acquiring a stats tuple. If it
+			 * did supply a tuple, it'd better have supplied a freefunc.
 			 */
 			if (HeapTupleIsValid(vardata.statsTuple) &&
 				!vardata.freefunc)
@@ -7872,8 +7872,8 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
 
 	/*
 	 * Now estimate the number of ranges that we'll touch by using the
-	 * indexCorrelation from the stats. Careful not to divide by zero
-	 * (note we're using the absolute value of the correlation).
+	 * indexCorrelation from the stats. Careful not to divide by zero (note
+	 * we're using the absolute value of the correlation).
 	 */
 	if (*indexCorrelation < 1.0e-10)
 		estimatedRanges = indexRanges;
@@ -7888,8 +7888,8 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
 	*indexSelectivity = selec;
 
 	/*
-	 * Compute the index qual costs, much as in genericcostestimate, to add
-	 * to the index costs.
+	 * Compute the index qual costs, much as in genericcostestimate, to add to
+	 * the index costs.
 	 */
 	qual_arg_cost = other_operands_eval_cost(root, qinfos) +
 		orderby_operands_eval_cost(root, path);

@@ -147,8 +147,8 @@ TransactionIdInRecentPast(uint64 xid_with_epoch, TransactionId *extracted_xid)
 	/*
 	 * If the transaction ID has wrapped around, it's definitely too old to
 	 * determine the commit status. Otherwise, we can compare it to
-	 * ShmemVariableCache->oldestClogXid to determine whether the relevant CLOG
-	 * entry is guaranteed to still exist.
+	 * ShmemVariableCache->oldestClogXid to determine whether the relevant
+	 * CLOG entry is guaranteed to still exist.
 	 */
 	if (xid_epoch + 1 < now_epoch
 		|| (xid_epoch + 1 == now_epoch && xid < now_epoch_last_xid)
@@ -770,8 +770,8 @@ txid_status(PG_FUNCTION_ARGS)
 		 * it's aborted if it isn't committed and is older than our
 		 * snapshot xmin.
 		 *
-		 * Otherwise it must be in-progress (or have been at the time
-		 * we checked commit/abort status).
+		 * Otherwise it must be in-progress (or have been at the time we
+		 * checked commit/abort status).
 		 */
 		if (TransactionIdPrecedes(xid, GetActiveSnapshot()->xmin))
 			status = gettext_noop("aborted");

@@ -1557,8 +1557,10 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid)
 		else
 #endif
 		{
-			int32_t		ulen1, ulen2;
-			UChar	   *uchar1, *uchar2;
+			int32_t		ulen1,
+						ulen2;
+			UChar	   *uchar1,
+					   *uchar2;
 
 			ulen1 = icu_to_uchar(&uchar1, arg1, len1);
 			ulen2 = icu_to_uchar(&uchar2, arg2, len2);
@@ -2141,8 +2143,10 @@ varstrfastcmp_locale(Datum x, Datum y, SortSupport ssup)
 		else
 #endif
 		{
-			int32_t		ulen1, ulen2;
-			UChar	   *uchar1, *uchar2;
+			int32_t		ulen1,
+						ulen2;
+			UChar	   *uchar1,
+					   *uchar2;
 
 			ulen1 = icu_to_uchar(&uchar1, a1p, len1);
 			ulen2 = icu_to_uchar(&uchar2, a2p, len2);
@@ -2300,8 +2304,11 @@ varstr_abbrev_convert(Datum original, SortSupport ssup)
 		}
 		memcpy(sss->buf1, authoritative_data, len);
-		/* Just like strcoll(), strxfrm() expects a NUL-terminated string.
-		 * Not necessary for ICU, but doesn't hurt. */
+
+		/*
+		 * Just like strcoll(), strxfrm() expects a NUL-terminated string. Not
+		 * necessary for ICU, but doesn't hurt.
+		 */
 		sss->buf1[len] = '\0';
 		sss->last_len1 = len;

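The varstr_cmp and varstrfastcmp_locale hunks above only re-wrap the ICU-path declarations, but the pattern they sit in is worth spelling out: convert both UTF-8 strings to UChar arrays, then let a collator compare them. The sketch below shows that pattern with plain ICU calls (ucol_open, u_strFromUTF8, ucol_strcoll) rather than PostgreSQL's icu_to_uchar wrapper; the fixed-size buffers and the "en" locale are illustrative assumptions, not taken from this commit.

    #include <stdio.h>
    #include <unicode/ucol.h>
    #include <unicode/ustring.h>

    /* Compare two UTF-8 strings with an ICU collator; returns <0, 0, or >0. */
    static int
    icu_compare_utf8(UCollator *coll, const char *s1, const char *s2)
    {
        UChar       u1[256];
        UChar       u2[256];
        int32_t     ulen1,
                    ulen2;
        UErrorCode  status = U_ZERO_ERROR;

        /* Convert both inputs from UTF-8 to UTF-16 (UChar). */
        u_strFromUTF8(u1, 256, &ulen1, s1, -1, &status);
        u_strFromUTF8(u2, 256, &ulen2, s2, -1, &status);
        if (U_FAILURE(status))
            return 0;           /* a real caller would report the error */

        return (int) ucol_strcoll(coll, u1, ulen1, u2, ulen2);
    }

    int
    main(void)
    {
        UErrorCode  status = U_ZERO_ERROR;
        UCollator  *coll = ucol_open("en", &status);    /* "en" is only an example */

        if (U_FAILURE(status))
            return 1;
        printf("%d\n", icu_compare_utf8(coll, "apple", "banana"));
        ucol_close(coll);
        return 0;
    }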
@@ -4518,9 +4518,8 @@ XmlTableGetValue(TableFuncScanState *state, int colnum,
 				 * This line ensure mapping of empty tags to PostgreSQL
 				 * value. Usually we would to map a empty tag to empty
 				 * string. But this mapping can create empty string when
-				 * user doesn't expect it - when empty tag is enforced
-				 * by libxml2 - when user uses a text() function for
-				 * example.
+				 * user doesn't expect it - when empty tag is enforced by
+				 * libxml2 - when user uses a text() function for example.
 				 */
 				cstr = "";
 			}

@@ -386,10 +386,9 @@ AddRelcacheInvalidationMessage(InvalidationListHeader *hdr,
 	SharedInvalidationMessage msg;
 
 	/*
-	 * Don't add a duplicate item.
-	 * We assume dbId need not be checked because it will never change.
-	 * InvalidOid for relId means all relations so we don't need to add
-	 * individual ones when it is present.
+	 * Don't add a duplicate item. We assume dbId need not be checked because
+	 * it will never change. InvalidOid for relId means all relations so we
+	 * don't need to add individual ones when it is present.
 	 */
 	ProcessMessageList(hdr->rclist,
 					   if (msg->rc.id == SHAREDINVALRELCACHE_ID &&
@@ -523,8 +522,8 @@ RegisterRelcacheInvalidation(Oid dbId, Oid relId)
 
 	/*
 	 * If the relation being invalidated is one of those cached in the local
-	 * relcache init file, mark that we need to zap that file at commit.
-	 * Same is true when we are invalidating whole relcache.
+	 * relcache init file, mark that we need to zap that file at commit. Same
+	 * is true when we are invalidating whole relcache.
 	 */
 	if (OidIsValid(dbId) &&
 		(RelationIdIsInInitFile(relId) || relId == InvalidOid))
@@ -1139,8 +1138,8 @@ CacheInvalidateHeapTuple(Relation relation,
 									 RegisterCatcacheInvalidation);
 
 	/*
-	 * Now, is this tuple one of the primary definers of a relcache entry?
-	 * See comments in file header for deeper explanation.
+	 * Now, is this tuple one of the primary definers of a relcache entry? See
+	 * comments in file header for deeper explanation.
 	 *
 	 * Note we ignore newtuple here; we assume an update cannot move a tuple
 	 * from being part of one relcache entry to being part of another.

@@ -4504,7 +4504,10 @@ RelationGetStatExtList(Relation relation)
 	 */
 	result = NIL;
 
-	/* Prepare to scan pg_statistic_ext for entries having stxrelid = this rel. */
+	/*
+	 * Prepare to scan pg_statistic_ext for entries having stxrelid = this
+	 * rel.
+	 */
 	ScanKeyInit(&skey,
 				Anum_pg_statistic_ext_stxrelid,
 				BTEqualStrategyNumber, F_OIDEQ,
@@ -4603,9 +4606,10 @@ RelationSetIndexList(Relation relation, List *indexIds, Oid oidIndex)
 	list_free(relation->rd_indexlist);
 	relation->rd_indexlist = indexIds;
 	relation->rd_oidindex = oidIndex;
+
 	/*
-	 * For the moment, assume the target rel hasn't got a pk or replica
-	 * index. We'll load them on demand in the API that wraps access to them.
+	 * For the moment, assume the target rel hasn't got a pk or replica index.
+	 * We'll load them on demand in the API that wraps access to them.
 	 */
 	relation->rd_pkindex = InvalidOid;
 	relation->rd_replidindex = InvalidOid;
@@ -5200,8 +5204,8 @@ GetRelationPublicationActions(Relation relation)
 	ReleaseSysCache(tup);
 
 	/*
-	 * If we know everything is replicated, there is no point to check
-	 * for other publications.
+	 * If we know everything is replicated, there is no point to check for
+	 * other publications.
 	 */
 	if (pubactions->pubinsert && pubactions->pubupdate &&
 		pubactions->pubdelete)

@@ -608,6 +608,7 @@ UtfToLocal(const unsigned char *utf, int len,
 		if (map)
 		{
 			uint32		converted = pg_mb_radix_conv(map, l, b1, b2, b3, b4);
+
 			if (converted)
 			{
 				iso = store_coded_char(iso, converted);

@@ -124,8 +124,8 @@ pg_backend_random(char *dst, int len)
 		BackendRandomShmem->seed[2] = (unsigned short) (now.tv_usec >> 16);
 
 		/*
-		 * Mix in the cancel key, generated by the postmaster. This adds
-		 * what little entropy the postmaster had to the seed.
+		 * Mix in the cancel key, generated by the postmaster. This adds what
+		 * little entropy the postmaster had to the seed.
 		 */
 		BackendRandomShmem->seed[0] ^= (MyCancelKey);
 		BackendRandomShmem->seed[1] ^= (MyCancelKey >> 16);

@@ -2327,8 +2327,8 @@ tuplesort_merge_order(int64 allowedMem)
 	 * which in turn can cause the same sort to need more runs, which makes
 	 * merging slower even if it can still be done in a single pass. Also,
 	 * high order merges are quite slow due to CPU cache effects; it can be
-	 * faster to pay the I/O cost of a polyphase merge than to perform a single
-	 * merge pass across many hundreds of tapes.
+	 * faster to pay the I/O cost of a polyphase merge than to perform a
+	 * single merge pass across many hundreds of tapes.
 	 */
 	mOrder = Max(mOrder, MINORDER);
 	mOrder = Min(mOrder, MAXORDER);

Some files were not shown because too many files have changed in this diff.
