Post-feature-freeze pgindent run.

Discussion: https://postgr.es/m/15719.1523984266@sss.pgh.pa.us
Tom Lane 7 years ago
parent f83bf385c1
commit bdf46af748
contrib/amcheck/verify_nbtree.c | 164
contrib/btree_gin/btree_gin.c | 11
contrib/cube/cube.c | 14
contrib/jsonb_plperl/jsonb_plperl.c | 4
contrib/jsonb_plpython/jsonb_plpython.c | 11
contrib/pg_trgm/trgm_gist.c | 6
contrib/pg_trgm/trgm_op.c | 32
contrib/postgres_fdw/postgres_fdw.c | 4
contrib/tcn/tcn.c | 2
contrib/test_decoding/test_decoding.c | 6
src/backend/access/brin/brin.c | 2
src/backend/access/common/heaptuple.c | 4
src/backend/access/common/indextuple.c | 4
src/backend/access/common/reloptions.c | 8
src/backend/access/gin/ginbtree.c | 12
src/backend/access/gin/gindatapage.c | 4
src/backend/access/gin/ginget.c | 16
src/backend/access/gin/gininsert.c | 10
src/backend/access/gist/gist.c | 8
src/backend/access/heap/heapam.c | 42
src/backend/access/nbtree/nbtinsert.c | 55
src/backend/access/nbtree/nbtpage.c | 8
src/backend/access/nbtree/nbtree.c | 22
src/backend/access/nbtree/nbtsort.c | 20
src/backend/access/nbtree/nbtutils.c | 37
src/backend/access/spgist/spgdoinsert.c | 11
src/backend/access/spgist/spgvalidate.c | 8
src/backend/access/transam/twophase.c | 6
src/backend/access/transam/xact.c | 10
src/backend/access/transam/xlog.c | 39
src/backend/catalog/aclchk.c | 17
src/backend/catalog/dependency.c | 6
src/backend/catalog/index.c | 52
src/backend/catalog/objectaddress.c | 10
src/backend/catalog/partition.c | 2
src/backend/catalog/pg_constraint.c | 34
src/backend/catalog/pg_inherits.c | 2
src/backend/commands/alter.c | 2
src/backend/commands/cluster.c | 4
src/backend/commands/copy.c | 2
src/backend/commands/event_trigger.c | 6
src/backend/commands/functioncmds.c | 2
src/backend/commands/indexcmds.c | 75
src/backend/commands/lockcmds.c | 2
src/backend/commands/policy.c | 2
src/backend/commands/portalcmds.c | 6
src/backend/commands/statscmds.c | 7
src/backend/commands/tablecmds.c | 83
src/backend/commands/trigger.c | 13
src/backend/executor/execExprInterp.c | 2
src/backend/executor/execMain.c | 1
src/backend/executor/execProcnode.c | 7
src/backend/executor/execTuples.c | 2
src/backend/executor/nodeAgg.c | 2
src/backend/executor/nodeGather.c | 2
src/backend/executor/nodeGatherMerge.c | 2
src/backend/executor/nodeHashjoin.c | 3
src/backend/executor/nodeMergejoin.c | 3
src/backend/executor/nodeModifyTable.c | 10
src/backend/executor/nodeSamplescan.c | 4
src/backend/executor/nodeSort.c | 4
src/backend/executor/nodeSubplan.c | 2
src/backend/executor/nodeValuesscan.c | 4
src/backend/jit/llvm/llvmjit_expr.c | 4
src/backend/lib/bloomfilter.c | 3
src/backend/libpq/be-secure-common.c | 2
src/backend/libpq/be-secure-openssl.c | 5
src/backend/nodes/bitmapset.c | 1
src/backend/nodes/read.c | 6
src/backend/optimizer/path/allpaths.c | 17
src/backend/optimizer/path/indxpath.c | 3
src/backend/optimizer/path/joinrels.c | 14
src/backend/optimizer/plan/planner.c | 8
src/backend/optimizer/prep/prepunion.c | 6
src/backend/optimizer/util/plancat.c | 6
src/backend/parser/analyze.c | 2
src/backend/parser/parse_utilcmd.c | 30
src/backend/partitioning/partprune.c | 20
src/backend/port/win32_shmem.c | 8
src/backend/replication/basebackup.c | 39
src/backend/replication/libpqwalreceiver/libpqwalreceiver.c | 6
src/backend/replication/logical/logical.c | 2
src/backend/replication/logical/proto.c | 4
src/backend/replication/logical/reorderbuffer.c | 52
src/backend/replication/logical/worker.c | 22
src/backend/replication/pgoutput/pgoutput.c | 4
src/backend/replication/slotfuncs.c | 4
src/backend/replication/walreceiver.c | 4
src/backend/replication/walsender.c | 8
src/backend/storage/file/buffile.c | 6
src/backend/storage/ipc/shm_mq.c | 7
src/backend/tcop/utility.c | 10
src/backend/tsearch/to_tsany.c | 2
src/backend/utils/adt/amutils.c | 14
src/backend/utils/adt/formatting.c | 2
src/backend/utils/adt/geo_spgist.c | 10
src/backend/utils/adt/jsonb.c | 17
src/backend/utils/adt/jsonfuncs.c | 30
src/backend/utils/adt/tsquery.c | 38
src/backend/utils/cache/relcache.c | 25
Some files were not shown because too many files have changed in this diff.
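Nearly every hunk below is the same mechanical transformation: pgindent re-wraps block comments to the project's 78-column limit and re-aligns variable declarations into the tab-based column layout. A hypothetical before/after pair (not taken from this commit) illustrates both changes:

/* Before pgindent: an over-long comment line and an unaligned declaration */
/*
 * This comment line runs past the 78-column limit that pgindent enforces for block comments.
 */
int    counter = 0;

/* After pgindent: the comment is re-wrapped and the declaration re-aligned */
/*
 * This comment line runs past the 78-column limit that pgindent enforces
 * for block comments.
 */
int			counter = 0;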

@@ -227,12 +227,12 @@ bt_index_check_internal(Oid indrelid, bool parentcheck, bool heapallindexed)
* with heap relation locked first to prevent deadlocking). In hot
* standby mode this will raise an error when parentcheck is true.
*
* There is no need for the usual indcheckxmin usability horizon test here,
* even in the heapallindexed case, because index undergoing verification
* only needs to have entries for a new transaction snapshot. (If this is
* a parentcheck verification, there is no question about committed or
* recently dead heap tuples lacking index entries due to concurrent
* activity.)
* There is no need for the usual indcheckxmin usability horizon test
* here, even in the heapallindexed case, because index undergoing
* verification only needs to have entries for a new transaction snapshot.
* (If this is a parentcheck verification, there is no question about
* committed or recently dead heap tuples lacking index entries due to
* concurrent activity.)
*/
indrel = index_open(indrelid, lockmode);
@@ -366,8 +366,8 @@ bt_check_every_level(Relation rel, Relation heaprel, bool readonly,
* index fingerprinting should have reached all tuples returned by
* IndexBuildHeapScan().
*
* In readonly case, we also check for problems with missing downlinks.
* A second Bloom filter is used for this.
* In readonly case, we also check for problems with missing
* downlinks. A second Bloom filter is used for this.
*/
if (!state->readonly)
{
@@ -378,13 +378,13 @@ bt_check_every_level(Relation rel, Relation heaprel, bool readonly,
* READ COMMITTED mode. A new snapshot is guaranteed to have all
* the entries it requires in the index.
*
* We must defend against the possibility that an old xact snapshot
* was returned at higher isolation levels when that snapshot is
* not safe for index scans of the target index. This is possible
* when the snapshot sees tuples that are before the index's
* indcheckxmin horizon. Throwing an error here should be very
* rare. It doesn't seem worth using a secondary snapshot to avoid
* this.
* We must defend against the possibility that an old xact
* snapshot was returned at higher isolation levels when that
* snapshot is not safe for index scans of the target index. This
* is possible when the snapshot sees tuples that are before the
* index's indcheckxmin horizon. Throwing an error here should be
* very rare. It doesn't seem worth using a secondary snapshot to
* avoid this.
*/
if (IsolationUsesXactSnapshot() && rel->rd_index->indcheckxmin &&
!TransactionIdPrecedes(HeapTupleHeaderGetXmin(rel->rd_indextuple->t_data),
@@ -396,13 +396,13 @@ bt_check_every_level(Relation rel, Relation heaprel, bool readonly,
}
else
{
int64 total_pages;
int64 total_pages;
/*
* Extra readonly downlink check.
*
* In readonly case, we know that there cannot be a concurrent page
* split or a concurrent page deletion, which gives us the
* In readonly case, we know that there cannot be a concurrent
* page split or a concurrent page deletion, which gives us the
* opportunity to verify that every non-ignorable page had a
* downlink one level up. We must be tolerant of interrupted page
* splits and page deletions, though. This is taken care of in
@@ -491,9 +491,9 @@ bt_check_every_level(Relation rel, Relation heaprel, bool readonly,
}
/*
* Create our own scan for IndexBuildHeapScan(), rather than getting it
* to do so for us. This is required so that we can actually use the
* MVCC snapshot registered earlier in !readonly case.
* Create our own scan for IndexBuildHeapScan(), rather than getting
* it to do so for us. This is required so that we can actually use
* the MVCC snapshot registered earlier in !readonly case.
*
* Note that IndexBuildHeapScan() calls heap_endscan() for us.
*/
@@ -607,10 +607,10 @@ bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level)
{
/*
* Since there cannot be a concurrent VACUUM operation in readonly
* mode, and since a page has no links within other pages (siblings
* and parent) once it is marked fully deleted, it should be
* impossible to land on a fully deleted page in readonly mode.
* See bt_downlink_check() for further details.
* mode, and since a page has no links within other pages
* (siblings and parent) once it is marked fully deleted, it
* should be impossible to land on a fully deleted page in
* readonly mode. See bt_downlink_check() for further details.
*
* The bt_downlink_check() P_ISDELETED() check is repeated here so
* that pages that are only reachable through sibling links get
@@ -799,8 +799,8 @@ bt_target_page_check(BtreeCheckState *state)
P_ISLEAF(topaque) ? "leaf" : "internal", state->targetblock);
/*
* Check the number of attributes in high key. Note, rightmost page doesn't
* contain a high key, so nothing to check
* Check the number of attributes in high key. Note, rightmost page
* doesn't contain a high key, so nothing to check
*/
if (!P_RIGHTMOST(topaque) &&
!_bt_check_natts(state->rel, state->target, P_HIKEY))
@@ -845,8 +845,8 @@ bt_target_page_check(BtreeCheckState *state)
/*
* lp_len should match the IndexTuple reported length exactly, since
* lp_len is completely redundant in indexes, and both sources of tuple
* length are MAXALIGN()'d. nbtree does not use lp_len all that
* lp_len is completely redundant in indexes, and both sources of
* tuple length are MAXALIGN()'d. nbtree does not use lp_len all that
* frequently, and is surprisingly tolerant of corrupt lp_len fields.
*/
if (tupsize != ItemIdGetLength(itemid))
@@ -1441,13 +1441,13 @@ bt_downlink_check(BtreeCheckState *state, BlockNumber childblock,
static void
bt_downlink_missing_check(BtreeCheckState *state)
{
BTPageOpaque topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
ItemId itemid;
IndexTuple itup;
Page child;
BTPageOpaque copaque;
uint32 level;
BlockNumber childblk;
BTPageOpaque topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
ItemId itemid;
IndexTuple itup;
Page child;
BTPageOpaque copaque;
uint32 level;
BlockNumber childblk;
Assert(state->heapallindexed && state->readonly);
Assert(!P_IGNORE(topaque));
@@ -1462,14 +1462,15 @@ bt_downlink_missing_check(BtreeCheckState *state)
* page split in passing, when it notices that the left sibling page is
* P_INCOMPLETE_SPLIT().
*
* In general, VACUUM is not prepared for there to be no downlink to a page
* that it deletes. This is the main reason why the lack of a downlink can
* be reported as corruption here. It's not obvious that an invalid
* missing downlink can result in wrong answers to queries, though, since
* index scans that land on the child may end up consistently moving right.
* The handling of concurrent page splits (and page deletions) within
* _bt_moveright() cannot distinguish inconsistencies that last for a
* moment from inconsistencies that are permanent and irrecoverable.
* In general, VACUUM is not prepared for there to be no downlink to a
* page that it deletes. This is the main reason why the lack of a
* downlink can be reported as corruption here. It's not obvious that an
* invalid missing downlink can result in wrong answers to queries,
* though, since index scans that land on the child may end up
* consistently moving right. The handling of concurrent page splits (and
* page deletions) within _bt_moveright() cannot distinguish
* inconsistencies that last for a moment from inconsistencies that are
* permanent and irrecoverable.
*
* VACUUM isn't even prepared to delete pages that have no downlink due to
* an incomplete page split, but it can detect and reason about that case
@@ -1498,8 +1499,8 @@ bt_downlink_missing_check(BtreeCheckState *state)
/*
* Target is probably the "top parent" of a multi-level page deletion.
* We'll need to descend the subtree to make sure that descendant pages are
* consistent with that, though.
* We'll need to descend the subtree to make sure that descendant pages
* are consistent with that, though.
*
* If the target page (which must be non-ignorable) is a leaf page, then
* clearly it can't be the top parent. The lack of a downlink is probably
@@ -1562,14 +1563,14 @@ bt_downlink_missing_check(BtreeCheckState *state)
* bt_downlink_check() does not visit pages reachable through negative
* infinity items. Besides, bt_downlink_check() is unwilling to descend
* multiple levels. (The similar bt_downlink_check() P_ISDELETED() check
* within bt_check_level_from_leftmost() won't reach the page either, since
* the leaf's live siblings should have their sibling links updated to
* bypass the deletion target page when it is marked fully dead.)
* within bt_check_level_from_leftmost() won't reach the page either,
* since the leaf's live siblings should have their sibling links updated
* to bypass the deletion target page when it is marked fully dead.)
*
* If this error is raised, it might be due to a previous multi-level page
* deletion that failed to realize that it wasn't yet safe to mark the leaf
* page as fully dead. A "dangling downlink" will still remain when this
* happens. The fact that the dangling downlink's page (the leaf's
* deletion that failed to realize that it wasn't yet safe to mark the
* leaf page as fully dead. A "dangling downlink" will still remain when
* this happens. The fact that the dangling downlink's page (the leaf's
* parent/ancestor page) lacked a downlink is incidental.
*/
if (P_ISDELETED(copaque))
@@ -1583,14 +1584,14 @@ bt_downlink_missing_check(BtreeCheckState *state)
(uint32) state->targetlsn)));
/*
* Iff leaf page is half-dead, its high key top parent link should point to
* what VACUUM considered to be the top parent page at the instant it was
* interrupted. Provided the high key link actually points to the target
* page, the missing downlink we detected is consistent with there having
* been an interrupted multi-level page deletion. This means that the
* subtree with the target page at its root (a page deletion chain) is in a
* consistent state, enabling VACUUM to resume deleting the entire chain
* the next time it encounters the half-dead leaf page.
* Iff leaf page is half-dead, its high key top parent link should point
* to what VACUUM considered to be the top parent page at the instant it
* was interrupted. Provided the high key link actually points to the
* target page, the missing downlink we detected is consistent with there
* having been an interrupted multi-level page deletion. This means that
* the subtree with the target page at its root (a page deletion chain) is
* in a consistent state, enabling VACUUM to resume deleting the entire
* chain the next time it encounters the half-dead leaf page.
*/
if (P_ISHALFDEAD(copaque) && !P_RIGHTMOST(copaque))
{
@@ -1681,16 +1682,17 @@ bt_tuple_present_callback(Relation index, HeapTuple htup, Datum *values,
* are assumed immutable. While the LP_DEAD bit is mutable in leaf pages,
* that's ItemId metadata, which was not fingerprinted. (There will often
* be some dead-to-everyone IndexTuples fingerprinted by the Bloom filter,
* but we only try to detect the absence of needed tuples, so that's okay.)
* but we only try to detect the absence of needed tuples, so that's
* okay.)
*
* Note that we rely on deterministic index_form_tuple() TOAST compression.
* If index_form_tuple() was ever enhanced to compress datums out-of-line,
* or otherwise varied when or how compression was applied, our assumption
* would break, leading to false positive reports of corruption. It's also
* possible that non-pivot tuples could in the future have alternative
* equivalent representations (e.g. by using the INDEX_ALT_TID_MASK bit).
* For now, we don't decompress/normalize toasted values as part of
* fingerprinting.
* Note that we rely on deterministic index_form_tuple() TOAST
* compression. If index_form_tuple() was ever enhanced to compress datums
* out-of-line, or otherwise varied when or how compression was applied,
* our assumption would break, leading to false positive reports of
* corruption. It's also possible that non-pivot tuples could in the
* future have alternative equivalent representations (e.g. by using the
* INDEX_ALT_TID_MASK bit). For now, we don't decompress/normalize toasted
* values as part of fingerprinting.
*/
itup = index_form_tuple(RelationGetDescr(index), values, isnull);
itup->t_tid = htup->t_self;
@@ -1905,19 +1907,19 @@ palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum)
* Sanity checks for number of items on page.
*
* As noted at the beginning of _bt_binsrch(), an internal page must have
* children, since there must always be a negative infinity downlink (there
* may also be a highkey). In the case of non-rightmost leaf pages, there
* must be at least a highkey.
* children, since there must always be a negative infinity downlink
* (there may also be a highkey). In the case of non-rightmost leaf
* pages, there must be at least a highkey.
*
* This is correct when pages are half-dead, since internal pages are never
* half-dead, and leaf pages must have a high key when half-dead (the
* rightmost page can never be deleted). It's also correct with fully
* deleted pages: _bt_unlink_halfdead_page() doesn't change anything about
* the target page other than setting the page as fully dead, and setting
* its xact field. In particular, it doesn't change the sibling links in
* the deletion target itself, since they're required when index scans land
* on the deletion target, and then need to move right (or need to move
* left, in the case of backward index scans).
* This is correct when pages are half-dead, since internal pages are
* never half-dead, and leaf pages must have a high key when half-dead
* (the rightmost page can never be deleted). It's also correct with
* fully deleted pages: _bt_unlink_halfdead_page() doesn't change anything
* about the target page other than setting the page as fully dead, and
* setting its xact field. In particular, it doesn't change the sibling
* links in the deletion target itself, since they're required when index
* scans land on the deletion target, and then need to move right (or need
* to move left, in the case of backward index scans).
*/
maxoffset = PageGetMaxOffsetNumber(page);
if (maxoffset > MaxIndexTuplesPerPage)

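The heapallindexed machinery described in the amcheck comments above boils down to two passes over PostgreSQL's lib/bloomfilter.h API: fingerprint every index tuple while walking the tree, then probe the filter once per tuple rebuilt from the heap. A condensed, hedged sketch of that flow follows; it is not the committed code (real amcheck threads this through BtreeCheckState and IndexBuildHeapScan), but the bloom_add_element()/bloom_lacks_element() calls are the actual primitives.

#include "postgres.h"
#include "access/itup.h"
#include "lib/bloomfilter.h"

/* Sketch: fingerprint an index tuple encountered during the index walk */
static void
fingerprint_index_tuple(bloom_filter *filter, IndexTuple itup)
{
	bloom_add_element(filter, (unsigned char *) itup, IndexTupleSize(itup));
}

/* Sketch: probe with a tuple rebuilt from the heap during the heap scan */
static void
probe_heap_tuple(bloom_filter *filter, IndexTuple rebuilt)
{
	/*
	 * A hash collision can mask corruption, but a lacks-element result is
	 * definitive: the tuple was never fingerprinted, so the index entry is
	 * missing.
	 */
	if (bloom_lacks_element(filter, (unsigned char *) rebuilt,
							IndexTupleSize(rebuilt)))
		elog(ERROR, "heap tuple lacks matching index tuple");
}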
@@ -483,8 +483,12 @@ GIN_SUPPORT(anyenum, false, leftmostvalue_enum, gin_enum_cmp)
static Datum
leftmostvalue_uuid(void)
{
/* palloc0 will create the UUID with all zeroes: "00000000-0000-0000-0000-000000000000" */
pg_uuid_t *retval = (pg_uuid_t *) palloc0(sizeof(pg_uuid_t));
/*
* palloc0 will create the UUID with all zeroes:
* "00000000-0000-0000-0000-000000000000"
*/
pg_uuid_t *retval = (pg_uuid_t *) palloc0(sizeof(pg_uuid_t));
return UUIDPGetDatum(retval);
}
@@ -493,7 +497,8 @@ GIN_SUPPORT(uuid, false, leftmostvalue_uuid, uuid_cmp)
static Datum
leftmostvalue_name(void)
{
NameData* result = (NameData *) palloc0(NAMEDATALEN);
NameData *result = (NameData *) palloc0(NAMEDATALEN);
return NameGetDatum(result);
}

@@ -1361,9 +1361,10 @@ g_cube_distance(PG_FUNCTION_ARGS)
if (coord <= 2 * DIM(cube))
{
/* dimension index */
int index = (coord - 1) / 2;
int index = (coord - 1) / 2;
/* whether this is upper bound (lower bound otherwise) */
bool upper = ((coord - 1) % 2 == 1);
bool upper = ((coord - 1) % 2 == 1);
if (IS_POINT(cube))
{
@@ -1596,9 +1597,10 @@ cube_coord_llur(PG_FUNCTION_ARGS)
if (coord <= 2 * DIM(cube))
{
/* dimension index */
int index = (coord - 1) / 2;
int index = (coord - 1) / 2;
/* whether this is upper bound (lower bound otherwise) */
bool upper = ((coord - 1) % 2 == 1);
bool upper = ((coord - 1) % 2 == 1);
if (IS_POINT(cube))
{
@@ -1615,8 +1617,8 @@ cube_coord_llur(PG_FUNCTION_ARGS)
else
{
/*
* Return zero if coordinate is out of bound. That reproduces logic of
* how cubes with low dimension number are expanded during GiST
* Return zero if coordinate is out of bound. That reproduces logic
* of how cubes with low dimension number are expanded during GiST
* indexing.
*/
result = 0.0;

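The (coord - 1) / 2 and (coord - 1) % 2 arithmetic in the cube hunks above packs each dimension's lower and upper bounds into consecutive 1-based coordinate numbers. A standalone illustration of that mapping (illustrative program, not part of the commit):

#include <stdio.h>
#include <stdbool.h>

int
main(void)
{
	/* For a 2-D cube, coord runs 1..4: dim 0 lower/upper, dim 1 lower/upper */
	for (int coord = 1; coord <= 4; coord++)
	{
		int		index = (coord - 1) / 2;		/* 0-based dimension */
		bool	upper = ((coord - 1) % 2 == 1); /* odd offsets are upper bounds */

		printf("coord %d -> dimension %d, %s bound\n",
			   coord, index, upper ? "upper" : "lower");
	}
	return 0;
}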
@@ -18,7 +18,7 @@ static SV *Jsonb_to_SV(JsonbContainer *jsonb);
static JsonbValue *SV_to_JsonbValue(SV *obj, JsonbParseState **ps, bool is_elem);
static SV *
static SV *
JsonbValue_to_SV(JsonbValue *jbv)
{
dTHX;
@@ -33,6 +33,7 @@ JsonbValue_to_SV(JsonbValue *jbv)
char *str = DatumGetCString(DirectFunctionCall1(numeric_out,
NumericGetDatum(jbv->val.numeric)));
SV *result = newSVnv(SvNV(cstr2sv(str)));
pfree(str);
return result;
}
@@ -42,6 +43,7 @@ JsonbValue_to_SV(JsonbValue *jbv)
char *str = pnstrdup(jbv->val.string.val,
jbv->val.string.len);
SV *result = cstr2sv(str);
pfree(str);
return result;
}

@@ -25,7 +25,7 @@ static PyObject *decimal_constructor;
static PyObject *PLyObject_FromJsonbContainer(JsonbContainer *jsonb);
static JsonbValue *PLyObject_ToJsonbValue(PyObject *obj,
JsonbParseState **jsonb_state, bool is_elem);
JsonbParseState **jsonb_state, bool is_elem);
#if PY_MAJOR_VERSION >= 3
typedef PyObject *(*PLyUnicode_FromStringAndSize_t)
@@ -373,10 +373,11 @@ PLyObject_ToJsonbValue(PyObject *obj, JsonbParseState **jsonb_state, bool is_ele
out->type = jbvNull;
else if (PyString_Check(obj) || PyUnicode_Check(obj))
PLyString_ToJsonbValue(obj, out);
/*
* PyNumber_Check() returns true for booleans, so boolean check should come
* first.
*/
/*
* PyNumber_Check() returns true for booleans, so boolean check should
* come first.
*/
else if (PyBool_Check(obj))
{
out = palloc(sizeof(JsonbValue));

@@ -292,7 +292,11 @@ gtrgm_consistent(PG_FUNCTION_ARGS)
case SimilarityStrategyNumber:
case WordSimilarityStrategyNumber:
case StrictWordSimilarityStrategyNumber:
/* Similarity search is exact. (Strict) word similarity search is inexact */
/*
* Similarity search is exact. (Strict) word similarity search is
* inexact
*/
*recheck = (strategy != SimilarityStrategyNumber);
nlimit = index_strategy_get_limit(strategy);

@@ -48,14 +48,14 @@ typedef struct
/* Trigram bound type */
typedef uint8 TrgmBound;
#define TRGM_BOUND_LEFT 0x01 /* trigram is left bound of word */
#define TRGM_BOUND_RIGHT 0x02 /* trigram is right bound of word */
#define TRGM_BOUND_LEFT 0x01 /* trigram is left bound of word */
#define TRGM_BOUND_RIGHT 0x02 /* trigram is right bound of word */
/* Word similarity flags */
#define WORD_SIMILARITY_CHECK_ONLY 0x01 /* only check existence of similar
* search pattern in text */
#define WORD_SIMILARITY_STRICT 0x02 /* force bounds of extent to match
* word bounds */
#define WORD_SIMILARITY_CHECK_ONLY 0x01 /* only check existence of similar
* search pattern in text */
#define WORD_SIMILARITY_STRICT 0x02 /* force bounds of extent to match
* word bounds */
/*
* Module load callback
@@ -144,7 +144,7 @@ index_strategy_get_limit(StrategyNumber strategy)
break;
}
return 0.0; /* keep compiler quiet */
return 0.0; /* keep compiler quiet */
}
/*
@@ -496,13 +496,13 @@ iterate_word_similarity(int *trg2indexes,
/* Select appropriate threshold */
threshold = (flags & WORD_SIMILARITY_STRICT) ?
strict_word_similarity_threshold :
word_similarity_threshold;
strict_word_similarity_threshold :
word_similarity_threshold;
/*
* Consider first trigram as initial lower bound for strict word similarity,
* or initialize it later with first trigram present for plain word
* similarity.
* Consider first trigram as initial lower bound for strict word
* similarity, or initialize it later with first trigram present for plain
* word similarity.
*/
lower = (flags & WORD_SIMILARITY_STRICT) ? 0 : -1;
@@ -533,7 +533,7 @@ iterate_word_similarity(int *trg2indexes,
* plain word similarity
*/
if ((flags & WORD_SIMILARITY_STRICT) ? (bounds[i] & TRGM_BOUND_RIGHT)
: found[trgindex])
: found[trgindex])
{
int prev_lower,
tmp_ulen2,
@@ -597,8 +597,8 @@ iterate_word_similarity(int *trg2indexes,
smlr_max = Max(smlr_max, smlr_cur);
/*
* if we only check that word similarity is greater than
* threshold we do not need to calculate a maximum similarity.
* if we only check that word similarity is greater than threshold
* we do not need to calculate a maximum similarity.
*/
if ((flags & WORD_SIMILARITY_CHECK_ONLY) && smlr_max >= threshold)
break;
@@ -653,7 +653,7 @@ calc_word_similarity(char *str1, int slen1, char *str2, int slen2,
ulen1;
int *trg2indexes;
float4 result;
TrgmBound *bounds;
TrgmBound *bounds;
protect_out_of_mem(slen1 + slen2);

@@ -4918,8 +4918,8 @@ add_paths_with_pathkeys_for_rel(PlannerInfo *root, RelOptInfo *rel,
&rows, &width, &startup_cost, &total_cost);
/*
* The EPQ path must be at least as well sorted as the path itself,
* in case it gets used as input to a mergejoin.
* The EPQ path must be at least as well sorted as the path itself, in
* case it gets used as input to a mergejoin.
*/
sorted_epq_path = epq_path;
if (sorted_epq_path != NULL &&

@@ -138,7 +138,7 @@ triggered_change_notification(PG_FUNCTION_ARGS)
/* we're only interested if it is the primary key and valid */
if (index->indisprimary && IndexIsValid(index))
{
int indnkeyatts = index->indnkeyatts;
int indnkeyatts = index->indnkeyatts;
if (indnkeyatts > 0)
{

@@ -53,9 +53,9 @@ static void pg_decode_change(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn, Relation rel,
ReorderBufferChange *change);
static void pg_decode_truncate(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn,
int nrelations, Relation relations[],
ReorderBufferChange *change);
ReorderBufferTXN *txn,
int nrelations, Relation relations[],
ReorderBufferChange *change);
static bool pg_decode_filter(LogicalDecodingContext *ctx,
RepOriginId origin_id);
static void pg_decode_message(LogicalDecodingContext *ctx,

@@ -189,7 +189,7 @@ brininsert(Relation idxRel, Datum *values, bool *nulls,
NULL, BUFFER_LOCK_SHARE, NULL);
if (!lastPageTuple)
{
bool recorded;
bool recorded;
recorded = AutoVacuumRequestWork(AVW_BRINSummarizeRange,
RelationGetRelid(idxRel),

@@ -1685,8 +1685,8 @@ slot_getsomeattrs(TupleTableSlot *slot, int attnum)
attno = slot->tts_nvalid;
/*
* If tuple doesn't have all the atts indicated by attnum, read the
* rest as NULLs or missing values
* If tuple doesn't have all the atts indicated by attnum, read the rest
* as NULLs or missing values
*/
if (attno < attnum)
slot_getmissingattrs(slot, attno, attnum);

@@ -489,8 +489,8 @@ index_truncate_tuple(TupleDesc sourceDescriptor, IndexTuple source,
Assert(IndexTupleSize(truncated) <= IndexTupleSize(source));
/*
* Cannot leak memory here, TupleDescCopy() doesn't allocate any
* inner structure, so, plain pfree() should clean all allocated memory
* Cannot leak memory here, TupleDescCopy() doesn't allocate any inner
* structure, so, plain pfree() should clean all allocated memory
*/
pfree(truncdesc);

@@ -1495,9 +1495,9 @@ index_reloptions(amoptions_function amoptions, Datum reloptions, bool validate)
bytea *
index_generic_reloptions(Datum reloptions, bool validate)
{
int numoptions;
int numoptions;
GenericIndexOpts *idxopts;
relopt_value *options;
relopt_value *options;
static const relopt_parse_elt tab[] = {
{"recheck_on_update", RELOPT_TYPE_BOOL, offsetof(GenericIndexOpts, recheck_on_update)}
};
@@ -1512,12 +1512,12 @@ index_generic_reloptions(Datum reloptions, bool validate)
idxopts = allocateReloptStruct(sizeof(GenericIndexOpts), options, numoptions);
fillRelOptions((void *)idxopts, sizeof(GenericIndexOpts), options, numoptions,
fillRelOptions((void *) idxopts, sizeof(GenericIndexOpts), options, numoptions,
validate, tab, lengthof(tab));
pfree(options);
return (bytea*) idxopts;
return (bytea *) idxopts;
}
/*

@@ -521,12 +521,12 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
{
PredicateLockPageSplit(btree->index,
BufferGetBlockNumber(stack->buffer),
BufferGetBlockNumber(lbuffer));
BufferGetBlockNumber(stack->buffer),
BufferGetBlockNumber(lbuffer));
PredicateLockPageSplit(btree->index,
BufferGetBlockNumber(stack->buffer),
BufferGetBlockNumber(rbuffer));
BufferGetBlockNumber(stack->buffer),
BufferGetBlockNumber(rbuffer));
}
}
@@ -543,8 +543,8 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
{
PredicateLockPageSplit(btree->index,
BufferGetBlockNumber(stack->buffer),
BufferGetBlockNumber(rbuffer));
BufferGetBlockNumber(stack->buffer),
BufferGetBlockNumber(rbuffer));
}
}

@@ -1812,8 +1812,8 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
blkno = BufferGetBlockNumber(buffer);
/*
* Copy a predicate lock from entry tree leaf (containing posting list)
* to posting tree.
* Copy a predicate lock from entry tree leaf (containing posting list) to
* posting tree.
*/
PredicateLockPageSplit(index, BufferGetBlockNumber(entrybuffer), blkno);

@@ -42,11 +42,11 @@ static void
GinPredicateLockPage(Relation index, BlockNumber blkno, Snapshot snapshot)
{
/*
* When fast update is on then no need in locking pages, because we
* anyway need to lock the whole index.
* When fast update is on then no need in locking pages, because we anyway
* need to lock the whole index.
*/
if (!GinGetUseFastUpdate(index))
PredicateLockPage(index, blkno, snapshot);
PredicateLockPage(index, blkno, snapshot);
}
/*
@@ -426,8 +426,8 @@ restartScanEntry:
entry->buffer = stack->buffer;
/*
* Predicate lock visited posting tree page, following pages
* will be locked by moveRightIfItNeeded or entryLoadMoreItems
* Predicate lock visited posting tree page, following pages will
* be locked by moveRightIfItNeeded or entryLoadMoreItems
*/
GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
@@ -1779,9 +1779,9 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
UnlockReleaseBuffer(metabuffer);
/*
* If fast update is enabled, we acquire a predicate lock on the entire
* relation as fast update postpones the insertion of tuples into index
* structure due to which we can't detect rw conflicts.
* If fast update is enabled, we acquire a predicate lock on the
* entire relation as fast update postpones the insertion of tuples
* into index structure due to which we can't detect rw conflicts.
*/
if (GinGetUseFastUpdate(scan->indexRelation))
PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);

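The locking rule spelled out in these ginget.c comments is a granularity switch: with fastupdate on, every reader and writer touches the shared pending list, so only a relation-level predicate lock is meaningful; otherwise page-level locks suffice. A hedged sketch of that choice — PredicateLockRelation(), PredicateLockPage() and GinGetUseFastUpdate() are the real primitives, while the blkno variable is assumed from context:

/* Sketch only: choose predicate-lock granularity based on fastupdate */
if (GinGetUseFastUpdate(scan->indexRelation))
	PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
else
	PredicateLockPage(scan->indexRelation, blkno, scan->xs_snapshot);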
@@ -519,12 +519,12 @@ gininsert(Relation index, Datum *values, bool *isnull,
/*
* With fastupdate on each scan and each insert begin with access to
* pending list, so it effectively lock entire index. In this case
* we acquire predicate lock and check for conflicts over index relation,
* pending list, so it effectively lock entire index. In this case we
* acquire predicate lock and check for conflicts over index relation,
* and hope that it will reduce locking overhead.
*
* Do not use GinCheckForSerializableConflictIn() here, because
* it will do nothing (it does actual work only with fastupdate off).
* Do not use GinCheckForSerializableConflictIn() here, because it
* will do nothing (it does actual work only with fastupdate off).
* Check for conflicts for entire index.
*/
CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
@@ -539,7 +539,7 @@ gininsert(Relation index, Datum *values, bool *isnull,
}
else
{
GinStatsData stats;
GinStatsData stats;
/*
* Fastupdate is off but if pending list isn't empty then we need to

@@ -341,8 +341,8 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate,
ptr->page = BufferGetPage(ptr->buffer);
ptr->block.blkno = BufferGetBlockNumber(ptr->buffer);
PredicateLockPageSplit(rel,
BufferGetBlockNumber(buffer),
BufferGetBlockNumber(ptr->buffer));
BufferGetBlockNumber(buffer),
BufferGetBlockNumber(ptr->buffer));
}
/*
@@ -1220,8 +1220,8 @@ gistinserttuples(GISTInsertState *state, GISTInsertStack *stack,
bool is_split;
/*
* Check for any rw conflicts (in serialisation isolation level)
* just before we intend to modify the page
* Check for any rw conflicts (in serialisation isolation level) just
* before we intend to modify the page
*/
CheckForSerializableConflictIn(state->r, NULL, stack->buffer);

@@ -3460,7 +3460,7 @@ simple_heap_delete(Relation relation, ItemPointer tid)
result = heap_delete(relation, tid,
GetCurrentCommandId(true), InvalidSnapshot,
true /* wait for commit */ ,
&hufd, false /* changingPart */);
&hufd, false /* changingPart */ );
switch (result)
{
case HeapTupleSelfUpdated:
@@ -4483,29 +4483,31 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
* functional index. Compare the new and old values of the indexed
* expression to see if we are able to use a HOT update or not.
*/
static bool ProjIndexIsUnchanged(Relation relation, HeapTuple oldtup, HeapTuple newtup)
static bool
ProjIndexIsUnchanged(Relation relation, HeapTuple oldtup, HeapTuple newtup)
{
ListCell *l;
List *indexoidlist = RelationGetIndexList(relation);
EState *estate = CreateExecutorState();
ExprContext *econtext = GetPerTupleExprContext(estate);
ListCell *l;
List *indexoidlist = RelationGetIndexList(relation);
EState *estate = CreateExecutorState();
ExprContext *econtext = GetPerTupleExprContext(estate);
TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(relation));
bool equals = true;
Datum old_values[INDEX_MAX_KEYS];
bool old_isnull[INDEX_MAX_KEYS];
Datum new_values[INDEX_MAX_KEYS];
bool new_isnull[INDEX_MAX_KEYS];
int indexno = 0;
bool equals = true;
Datum old_values[INDEX_MAX_KEYS];
bool old_isnull[INDEX_MAX_KEYS];
Datum new_values[INDEX_MAX_KEYS];
bool new_isnull[INDEX_MAX_KEYS];
int indexno = 0;
econtext->ecxt_scantuple = slot;
foreach(l, indexoidlist)
{
if (bms_is_member(indexno, relation->rd_projidx))
{
Oid indexOid = lfirst_oid(l);
Relation indexDesc = index_open(indexOid, AccessShareLock);
Oid indexOid = lfirst_oid(l);
Relation indexDesc = index_open(indexOid, AccessShareLock);
IndexInfo *indexInfo = BuildIndexInfo(indexDesc);
int i;
int i;
ResetExprContext(econtext);
ExecStoreTuple(oldtup, slot, InvalidBuffer, false);
@@ -4532,6 +4534,7 @@ static bool ProjIndexIsUnchanged(Relation relation, HeapTuple oldtup, HeapTuple
else if (!old_isnull[i])
{
Form_pg_attribute att = TupleDescAttr(RelationGetDescr(indexDesc), i);
if (!datumIsEqual(old_values[i], new_values[i], att->attbyval, att->attlen))
{
equals = false;
@@ -6533,8 +6536,8 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
/*
* This old multi cannot possibly have members still running, but
* verify just in case. If it was a locker only, it can be removed
* without any further consideration; but if it contained an update, we
* might need to preserve it.
* without any further consideration; but if it contained an update,
* we might need to preserve it.
*/
if (MultiXactIdIsRunning(multi,
HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
@@ -6681,8 +6684,8 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
else
{
/*
* Not in progress, not committed -- must be aborted or crashed;
* we can ignore it.
* Not in progress, not committed -- must be aborted or
* crashed; we can ignore it.
*/
}
@@ -9275,6 +9278,7 @@ heap_redo(XLogReaderState *record)
heap_xlog_update(record, false);
break;
case XLOG_HEAP_TRUNCATE:
/*
* TRUNCATE is a no-op because the actions are already logged as
* SMGR WAL records. TRUNCATE WAL record only exists for logical

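The per-attribute comparison at the heart of the ProjIndexIsUnchanged() hunk above reduces to a NULLness check followed by datumIsEqual(). A condensed sketch of that inner step, with variable names taken from the hunk and the loop plumbing omitted:

/* Sketch: decide whether an indexed expression's value changed */
if (old_isnull[i] != new_isnull[i])
	equals = false;			/* NULL flag flipped: definitely changed */
else if (!old_isnull[i] &&
		 !datumIsEqual(old_values[i], new_values[i],
					   att->attbyval, att->attlen))
	equals = false;			/* both non-NULL, raw datums differ */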
@@ -132,31 +132,31 @@ _bt_doinsert(Relation rel, IndexTuple itup,
* rightmost leaf, has enough free space to accommodate a new entry and
* the insertion key is strictly greater than the first key in this page,
* then we can safely conclude that the new key will be inserted in the
* cached block. So we simply search within the cached block and insert the
* key at the appropriate location. We call it a fastpath.
* cached block. So we simply search within the cached block and insert
* the key at the appropriate location. We call it a fastpath.
*
* Testing has revealed, though, that the fastpath can result in increased
* contention on the exclusive-lock on the rightmost leaf page. So we
* conditionally check if the lock is available. If it's not available then
* we simply abandon the fastpath and take the regular path. This makes
* sense because unavailability of the lock also signals that some other
* backend might be concurrently inserting into the page, thus reducing our
* chances to finding an insertion place in this page.
* conditionally check if the lock is available. If it's not available
* then we simply abandon the fastpath and take the regular path. This
* makes sense because unavailability of the lock also signals that some
* other backend might be concurrently inserting into the page, thus
* reducing our chances to finding an insertion place in this page.
*/
top:
fastpath = false;
offset = InvalidOffsetNumber;
if (RelationGetTargetBlock(rel) != InvalidBlockNumber)
{
Size itemsz;
Page page;
BTPageOpaque lpageop;
Size itemsz;
Page page;
BTPageOpaque lpageop;
/*
* Conditionally acquire exclusive lock on the buffer before doing any
* checks. If we don't get the lock, we simply follow slowpath. If we
* do get the lock, this ensures that the index state cannot change, as
* far as the rightmost part of the index is concerned.
* do get the lock, this ensures that the index state cannot change,
* as far as the rightmost part of the index is concerned.
*/
buf = ReadBuffer(rel, RelationGetTargetBlock(rel));
@@ -173,8 +173,8 @@ top:
/*
* Check if the page is still the rightmost leaf page, has enough
* free space to accommodate the new tuple, and the insertion
* scan key is strictly greater than the first key on the page.
* free space to accommodate the new tuple, and the insertion scan
* key is strictly greater than the first key on the page.
*/
if (P_ISLEAF(lpageop) && P_RIGHTMOST(lpageop) &&
!P_IGNORE(lpageop) &&
@@ -207,8 +207,8 @@ top:
ReleaseBuffer(buf);
/*
* If someone's holding a lock, it's likely to change anyway,
* so don't try again until we get an updated rightmost leaf.
* If someone's holding a lock, it's likely to change anyway, so
* don't try again until we get an updated rightmost leaf.
*/
RelationSetTargetBlock(rel, InvalidBlockNumber);
}
@@ -882,22 +882,22 @@ _bt_insertonpg(Relation rel,
Buffer rbuf;
/*
* If we're here then a pagesplit is needed. We should never reach here
* if we're using the fastpath since we should have checked for all the
* required conditions, including the fact that this page has enough
* freespace. Note that this routine can in theory deal with the
* situation where a NULL stack pointer is passed (that's what would
* happen if the fastpath is taken), like it does during crash
* If we're here then a pagesplit is needed. We should never reach
* here if we're using the fastpath since we should have checked for
* all the required conditions, including the fact that this page has
* enough freespace. Note that this routine can in theory deal with
* the situation where a NULL stack pointer is passed (that's what
* would happen if the fastpath is taken), like it does during crash
* recovery. But that path is much slower, defeating the very purpose
* of the optimization. The following assertion should protect us from
* any future code changes that invalidate those assumptions.
* of the optimization. The following assertion should protect us
* from any future code changes that invalidate those assumptions.
*
* Note that whenever we fail to take the fastpath, we clear the
* cached block. Checking for a valid cached block at this point is
* enough to decide whether we're in a fastpath or not.
*/
Assert(!(P_ISLEAF(lpageop) &&
BlockNumberIsValid(RelationGetTargetBlock(rel))));
BlockNumberIsValid(RelationGetTargetBlock(rel))));
/* Choose the split point */
firstright = _bt_findsplitloc(rel, page,
@@ -936,7 +936,7 @@ _bt_insertonpg(Relation rel,
BTMetaPageData *metad = NULL;
OffsetNumber itup_off;
BlockNumber itup_blkno;
BlockNumber cachedBlock = InvalidBlockNumber;
BlockNumber cachedBlock = InvalidBlockNumber;
itup_off = newitemoff;
itup_blkno = BufferGetBlockNumber(buf);
@@ -1093,7 +1093,8 @@ _bt_insertonpg(Relation rel,
* We do this after dropping locks on all buffers. So the information
* about whether the insertion block is still the rightmost block or
* not may have changed in between. But we will deal with that during
* next insert operation. No special care is required while setting it.
* next insert operation. No special care is required while setting
* it.
*/
if (BlockNumberIsValid(cachedBlock) &&
_bt_getrootheight(rel) >= BTREE_FASTPATH_MIN_LEVEL)

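The fastpath contract described in the _bt_doinsert() comments above hinges on ConditionalLockBuffer(): never wait for the cached rightmost leaf, and drop the cache on contention so the next insert re-descends from the root. A hedged fragment of that gate, simplified from the hunk (declarations and the actual insertion logic omitted):

/* Sketch: opportunistic lock on the cached rightmost leaf */
buf = ReadBuffer(rel, RelationGetTargetBlock(rel));
if (ConditionalLockBuffer(buf))
{
	/* got the lock: re-verify rightmost/space/key conditions, then insert */
}
else
{
	/* contention implies concurrent inserts; forget the cached block */
	ReleaseBuffer(buf);
	RelationSetTargetBlock(rel, InvalidBlockNumber);
}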
@@ -155,11 +155,11 @@ void
_bt_update_meta_cleanup_info(Relation rel, TransactionId oldestBtpoXact,
float8 numHeapTuples)
{
Buffer metabuf;
Page metapg;
Buffer metabuf;
Page metapg;
BTMetaPageData *metad;
bool needsRewrite = false;
XLogRecPtr recptr;
bool needsRewrite = false;
XLogRecPtr recptr;
/* read the metapage and check if it needs rewrite */
metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);

@@ -785,10 +785,10 @@ _bt_parallel_advance_array_keys(IndexScanDesc scan)
static bool
_bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
{
Buffer metabuf;
Page metapg;
Buffer metabuf;
Page metapg;
BTMetaPageData *metad;
bool result = false;
bool result = false;
metabuf = _bt_getbuf(info->index, BTREE_METAPAGE, BT_READ);
metapg = BufferGetPage(metabuf);
@@ -814,8 +814,8 @@ _bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
}
else
{
StdRdOptions *relopts;
float8 cleanup_scale_factor;
StdRdOptions *relopts;
float8 cleanup_scale_factor;
/*
* If table receives large enough amount of insertions and no cleanup
@@ -825,14 +825,14 @@ _bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
*/
relopts = (StdRdOptions *) info->index->rd_options;
cleanup_scale_factor = (relopts &&
relopts->vacuum_cleanup_index_scale_factor >= 0)
? relopts->vacuum_cleanup_index_scale_factor
: vacuum_cleanup_index_scale_factor;
relopts->vacuum_cleanup_index_scale_factor >= 0)
? relopts->vacuum_cleanup_index_scale_factor
: vacuum_cleanup_index_scale_factor;
if (cleanup_scale_factor < 0 ||
metad->btm_last_cleanup_num_heap_tuples < 0 ||
info->num_heap_tuples > (1.0 + cleanup_scale_factor) *
metad->btm_last_cleanup_num_heap_tuples)
metad->btm_last_cleanup_num_heap_tuples)
result = true;
}
@@ -862,7 +862,7 @@ btbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
/* The ENSURE stuff ensures we clean up shared memory on failure */
PG_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel));
{
TransactionId oldestBtpoXact;
TransactionId oldestBtpoXact;
cycleid = _bt_start_vacuum(rel);
@@ -907,7 +907,7 @@ btvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
*/
if (stats == NULL)
{
TransactionId oldestBtpoXact;
TransactionId oldestBtpoXact;
/* Check if we need a cleanup */
if (!_bt_vacuum_needs_cleanup(info))

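The _bt_vacuum_needs_cleanup() condition reindented above is easiest to read with concrete numbers. A worked example with assumed, illustrative values:

/*
 * Illustrative numbers only: with vacuum_cleanup_index_scale_factor = 0.1
 * and 1000000 heap tuples recorded in the metapage at the previous cleanup,
 * a stats-only vacuum triggers a fresh index scan only once the heap grows
 * past the threshold; a negative factor or negative stored count (e.g. a
 * pre-upgrade metapage) forces the scan unconditionally.
 */
double	cleanup_scale_factor = 0.1;
double	last_cleanup_tuples = 1000000.0;
double	threshold = (1.0 + cleanup_scale_factor) * last_cleanup_tuples;
/* threshold == 1100000: below it, btvacuumcleanup skips the index scan */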
@@ -897,10 +897,10 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
/*
* Truncate any non-key attributes from high key on leaf level
* (i.e. truncate on leaf level if we're building an INCLUDE
* index). This is only done at the leaf level because
* downlinks in internal pages are either negative infinity
* items, or get their contents from copying from one level
* down. See also: _bt_split().
* index). This is only done at the leaf level because downlinks
* in internal pages are either negative infinity items, or get
* their contents from copying from one level down. See also:
* _bt_split().
*
* Since the truncated tuple is probably smaller than the
* original, it cannot just be copied in place (besides, we want
@@ -908,11 +908,11 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
* original high key, and add our own truncated high key at the
* same offset.
*
* Note that the page layout won't be changed very much. oitup
* is already located at the physical beginning of tuple space,
* so we only shift the line pointer array back and forth, and
* overwrite the latter portion of the space occupied by the
* original tuple. This is fairly cheap.
* Note that the page layout won't be changed very much. oitup is
* already located at the physical beginning of tuple space, so we
* only shift the line pointer array back and forth, and overwrite
* the latter portion of the space occupied by the original tuple.
* This is fairly cheap.
*/
truncated = _bt_nonkey_truncate(wstate->index, oitup);
truncsz = IndexTupleSize(truncated);
@@ -978,7 +978,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
*/
if (last_off == P_HIKEY)
{
BTPageOpaque npageop;
BTPageOpaque npageop;
Assert(state->btps_minkey == NULL);

@@ -2101,12 +2101,12 @@ btproperty(Oid index_oid, int attno,
IndexTuple
_bt_nonkey_truncate(Relation rel, IndexTuple itup)
{
int nkeyattrs = IndexRelationGetNumberOfKeyAttributes(rel);
IndexTuple truncated;
int nkeyattrs = IndexRelationGetNumberOfKeyAttributes(rel);
IndexTuple truncated;
/*
* We should only ever truncate leaf index tuples, which must have both key
* and non-key attributes. It's never okay to truncate a second time.
* We should only ever truncate leaf index tuples, which must have both
* key and non-key attributes. It's never okay to truncate a second time.
*/
Assert(BTreeTupleGetNAtts(itup, rel) ==
IndexRelationGetNumberOfAttributes(rel));
@@ -2133,10 +2133,10 @@ _bt_nonkey_truncate(Relation rel, IndexTuple itup)
bool
_bt_check_natts(Relation rel, Page page, OffsetNumber offnum)
{
int16 natts = IndexRelationGetNumberOfAttributes(rel);
int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
IndexTuple itup;
int16 natts = IndexRelationGetNumberOfAttributes(rel);
int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
IndexTuple itup;
/*
* We cannot reliably test a deleted or half-deleted page, since they have
@@ -2147,6 +2147,7 @@ _bt_check_natts(Relation rel, Page page, OffsetNumber offnum)
Assert(offnum >= FirstOffsetNumber &&
offnum <= PageGetMaxOffsetNumber(page));
/*
* Mask allocated for number of keys in index tuple must be able to fit
* maximum possible number of index attributes
@@ -2178,29 +2179,29 @@ _bt_check_natts(Relation rel, Page page, OffsetNumber offnum)
return BTreeTupleGetNAtts(itup, rel) == nkeyatts;
}
}
else /* !P_ISLEAF(opaque) */
else /* !P_ISLEAF(opaque) */
{
if (offnum == P_FIRSTDATAKEY(opaque))
{
/*
* The first tuple on any internal page (possibly the first after
* its high key) is its negative infinity tuple. Negative infinity
* tuples are always truncated to zero attributes. They are a
* particular kind of pivot tuple.
* its high key) is its negative infinity tuple. Negative
* infinity tuples are always truncated to zero attributes. They
* are a particular kind of pivot tuple.
*
* The number of attributes won't be explicitly represented if the
* negative infinity tuple was generated during a page split that
* occurred with a version of Postgres before v11. There must be a
* problem when there is an explicit representation that is
* occurred with a version of Postgres before v11. There must be
* a problem when there is an explicit representation that is
* non-zero, or when there is no explicit representation and the
* tuple is evidently not a pre-pg_upgrade tuple.
*
* Prior to v11, downlinks always had P_HIKEY as their offset. Use
* that to decide if the tuple is a pre-v11 tuple.
* Prior to v11, downlinks always had P_HIKEY as their offset.
* Use that to decide if the tuple is a pre-v11 tuple.
*/
return BTreeTupleGetNAtts(itup, rel) == 0 ||
((itup->t_info & INDEX_ALT_TID_MASK) == 0 &&
ItemPointerGetOffsetNumber(&(itup->t_tid)) == P_HIKEY);
((itup->t_info & INDEX_ALT_TID_MASK) == 0 &&
ItemPointerGetOffsetNumber(&(itup->t_tid)) == P_HIKEY);
}
else
{

@@ -1908,11 +1908,12 @@ spgdoinsert(Relation index, SpGistState *state,
/*
* Prepare the leaf datum to insert.
*
* If an optional "compress" method is provided, then call it to form
* the leaf datum from the input datum. Otherwise store the input datum as
* is. Since we don't use index_form_tuple in this AM, we have to make sure
* value to be inserted is not toasted; FormIndexDatum doesn't guarantee
* that. But we assume the "compress" method to return an untoasted value.
* If an optional "compress" method is provided, then call it to form the
* leaf datum from the input datum. Otherwise store the input datum as
* is. Since we don't use index_form_tuple in this AM, we have to make
* sure value to be inserted is not toasted; FormIndexDatum doesn't
* guarantee that. But we assume the "compress" method to return an
* untoasted value.
*/
if (!isnull)
{

@@ -53,7 +53,7 @@ spgvalidate(Oid opclassoid)
OpFamilyOpFuncGroup *opclassgroup;
int i;
ListCell *lc;
spgConfigIn configIn;
spgConfigIn configIn;
spgConfigOut configOut;
Oid configOutLefttype = InvalidOid;
Oid configOutRighttype = InvalidOid;
@@ -119,9 +119,9 @@ spgvalidate(Oid opclassoid)
configOutRighttype = procform->amprocrighttype;
/*
* When leaf and attribute types are the same, compress function
* is not required and we set corresponding bit in functionset
* for later group consistency check.
* When leaf and attribute types are the same, compress
* function is not required and we set corresponding bit in
* functionset for later group consistency check.
*/
if (!OidIsValid(configOut.leafType) ||
configOut.leafType == configIn.attType)

@@ -913,7 +913,7 @@ typedef struct TwoPhaseFileHeader
bool initfileinval; /* does relcache init file need invalidation? */
uint16 gidlen; /* length of the GID - GID follows the header */
XLogRecPtr origin_lsn; /* lsn of this record at origin node */
TimestampTz origin_timestamp; /* time of prepare at origin node */
TimestampTz origin_timestamp; /* time of prepare at origin node */
} TwoPhaseFileHeader;
/*
@@ -1065,7 +1065,7 @@ EndPrepare(GlobalTransaction gxact)
{
TwoPhaseFileHeader *hdr;
StateFileChunk *record;
bool replorigin;
bool replorigin;
/* Add the end sentinel to the list of 2PC records */
RegisterTwoPhaseRecord(TWOPHASE_RM_END_ID, 0,
@@ -1317,7 +1317,7 @@ void
ParsePrepareRecord(uint8 info, char *xlrec, xl_xact_parsed_prepare *parsed)
{
TwoPhaseFileHeader *hdr;
char *bufptr;
char *bufptr;
hdr = (TwoPhaseFileHeader *) xlrec;
bufptr = xlrec + MAXALIGN(sizeof(TwoPhaseFileHeader));

@@ -3267,8 +3267,8 @@ bool
IsInTransactionBlock(bool isTopLevel)
{
/*
* Return true on same conditions that would make PreventInTransactionBlock
* error out
* Return true on same conditions that would make
* PreventInTransactionBlock error out
*/
if (IsTransactionBlock())
return true;
@@ -5448,9 +5448,9 @@ XactLogAbortRecord(TimestampTz abort_time,
}
/* dump transaction origin information only for abort prepared */
if ( (replorigin_session_origin != InvalidRepOriginId) &&
TransactionIdIsValid(twophase_xid) &&
XLogLogicalInfoActive())
if ((replorigin_session_origin != InvalidRepOriginId) &&
TransactionIdIsValid(twophase_xid) &&
XLogLogicalInfoActive())
{
xl_xinfo.xinfo |= XACT_XINFO_HAS_ORIGIN;

@@ -10656,10 +10656,9 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
* Mark that start phase has correctly finished for an exclusive backup.
* Session-level locks are updated as well to reflect that state.
*
* Note that CHECK_FOR_INTERRUPTS() must not occur while updating
* backup counters and session-level lock. Otherwise they can be
* updated inconsistently, and which might cause do_pg_abort_backup()
* to fail.
* Note that CHECK_FOR_INTERRUPTS() must not occur while updating backup
* counters and session-level lock. Otherwise they can be updated
* inconsistently, and which might cause do_pg_abort_backup() to fail.
*/
if (exclusive)
{
@@ -10904,11 +10903,11 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
/*
* Clean up session-level lock.
*
* You might think that WALInsertLockRelease() can be called
* before cleaning up session-level lock because session-level
* lock doesn't need to be protected with WAL insertion lock.
* But since CHECK_FOR_INTERRUPTS() can occur in it,
* session-level lock must be cleaned up before it.
* You might think that WALInsertLockRelease() can be called before
* cleaning up session-level lock because session-level lock doesn't need
* to be protected with WAL insertion lock. But since
* CHECK_FOR_INTERRUPTS() can occur in it, session-level lock must be
* cleaned up before it.
*/
sessionBackupState = SESSION_BACKUP_NONE;
@@ -11042,6 +11041,7 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
(uint32) (startpoint >> 32), (uint32) startpoint, startxlogfilename);
fprintf(fp, "STOP WAL LOCATION: %X/%X (file %s)\n",
(uint32) (stoppoint >> 32), (uint32) stoppoint, stopxlogfilename);
/*
* Transfer remaining lines including label and start timeline to
* history file.
@@ -11259,7 +11259,8 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
bool *backupFromStandby)
{
char startxlogfilename[MAXFNAMELEN];
TimeLineID tli_from_walseg, tli_from_file;
TimeLineID tli_from_walseg,
tli_from_file;
FILE *lfp;
char ch;
char backuptype[20];
@@ -11322,13 +11323,13 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
}
/*
* Parse START TIME and LABEL. Those are not mandatory fields for
* recovery but checking for their presence is useful for debugging
* and the next sanity checks. Cope also with the fact that the
* result buffers have a pre-allocated size, hence if the backup_label
* file has been generated with strings longer than the maximum assumed
* here an incorrect parsing happens. That's fine as only minor
* consistency checks are done afterwards.
* Parse START TIME and LABEL. Those are not mandatory fields for recovery
* but checking for their presence is useful for debugging and the next
* sanity checks. Cope also with the fact that the result buffers have a
* pre-allocated size, hence if the backup_label file has been generated
* with strings longer than the maximum assumed here an incorrect parsing
* happens. That's fine as only minor consistency checks are done
* afterwards.
*/
if (fscanf(lfp, "START TIME: %127[^\n]\n", backuptime) == 1)
ereport(DEBUG1,
@@ -11341,8 +11342,8 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
backuplabel, BACKUP_LABEL_FILE)));
/*
* START TIMELINE is new as of 11. Its parsing is not mandatory, still
* use it as a sanity check if present.
* START TIMELINE is new as of 11. Its parsing is not mandatory, still use
* it as a sanity check if present.
*/
if (fscanf(lfp, "START TIMELINE: %u\n", &tli_from_file) == 1)
{

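The "pre-allocated size" caveat in the read_backup_label() comment above is carried by the scanf-family width specifiers: %127[^\n] stops after 127 bytes, so an over-long label merely fails the later sanity checks instead of overflowing the buffer. A minimal standalone illustration, with the buffer size assumed to mirror the backend's:

#include <stdio.h>

int
main(void)
{
	char	backuptime[128];

	/* the width cap (127) leaves room for the terminating NUL */
	if (sscanf("START TIME: 2018-04-26 12:00:00 UTC\n",
			   "START TIME: %127[^\n]", backuptime) == 1)
		printf("parsed: %s\n", backuptime);
	return 0;
}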
@@ -446,6 +446,7 @@ ExecuteGrantStmt(GrantStmt *stmt)
switch (stmt->objtype)
{
case OBJECT_TABLE:
/*
* Because this might be a sequence, we test both relation and
* sequence bits, and later do a more limited test when we know
@@ -3458,7 +3459,7 @@ aclcheck_error(AclResult aclerr, ObjectType objtype,
case OBJECT_VIEW:
msg = gettext_noop("permission denied for view %s");
break;
/* these currently aren't used */
/* these currently aren't used */
case OBJECT_ACCESS_METHOD:
case OBJECT_AMOP:
case OBJECT_AMPROC:
@@ -3583,11 +3584,13 @@ aclcheck_error(AclResult aclerr, ObjectType objtype,
case OBJECT_TSDICTIONARY:
msg = gettext_noop("must be owner of text search dictionary %s");
break;
/*
* Special cases: For these, the error message talks about
* "relation", because that's where the ownership is
* attached. See also check_object_ownership().
*/
/*
* Special cases: For these, the error message talks
* about "relation", because that's where the
* ownership is attached. See also
* check_object_ownership().
*/
case OBJECT_COLUMN:
case OBJECT_POLICY:
case OBJECT_RULE:
@@ -3595,7 +3598,7 @@ aclcheck_error(AclResult aclerr, ObjectType objtype,
case OBJECT_TRIGGER:
msg = gettext_noop("must be owner of relation %s");
break;
/* these currently aren't used */
/* these currently aren't used */
case OBJECT_ACCESS_METHOD:
case OBJECT_AMOP:
case OBJECT_AMPROC:

@@ -631,9 +631,9 @@ findDependentObjects(const ObjectAddress *object,
* transform this deletion request into a delete of this
* owning object.
*
* For INTERNAL_AUTO dependencies, we don't enforce this;
* in other words, we don't follow the links back to the
* owning object.
* For INTERNAL_AUTO dependencies, we don't enforce this; in
* other words, we don't follow the links back to the owning
* object.
*/
if (foundDep->deptype == DEPENDENCY_INTERNAL_AUTO)
break;

@@ -377,7 +377,7 @@ ConstructTupleDescriptor(Relation heapRelation,
to->attislocal = true;
to->attinhcount = 0;
to->attcollation = (i < numkeyatts) ?
collationObjectId[i] : InvalidOid;
collationObjectId[i] : InvalidOid;
}
else
{
@ -414,7 +414,7 @@ ConstructTupleDescriptor(Relation heapRelation,
to->atttypmod = exprTypmod(indexkey);
to->attislocal = true;
to->attcollation = (i < numkeyatts) ?
collationObjectId[i] : InvalidOid;
collationObjectId[i] : InvalidOid;
ReleaseSysCache(tuple);
@ -1023,21 +1023,21 @@ index_create(Relation heapRelation,
}
localaddr = index_constraint_create(heapRelation,
indexRelationId,
parentConstraintId,
indexInfo,
indexRelationName,
constraintType,
constr_flags,
allow_system_table_mods,
is_internal);
indexRelationId,
parentConstraintId,
indexInfo,
indexRelationName,
constraintType,
constr_flags,
allow_system_table_mods,
is_internal);
if (constraintId)
*constraintId = localaddr.objectId;
}
else
{
bool have_simple_col = false;
DependencyType deptype;
DependencyType deptype;
deptype = OidIsValid(parentIndexRelid) ? DEPENDENCY_INTERNAL_AUTO : DEPENDENCY_AUTO;
@ -1340,12 +1340,12 @@ index_constraint_create(Relation heapRelation,
recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
/*
* Also, if this is a constraint on a partition, mark it as depending
* on the constraint in the parent.
* Also, if this is a constraint on a partition, mark it as depending on
* the constraint in the parent.
*/
if (OidIsValid(parentConstraintId))
{
ObjectAddress parentConstr;
ObjectAddress parentConstr;
ObjectAddressSet(parentConstr, ConstraintRelationId, parentConstraintId);
recordDependencyOn(&referenced, &parentConstr, DEPENDENCY_INTERNAL_AUTO);
@ -1822,7 +1822,7 @@ CompareIndexInfo(IndexInfo *info1, IndexInfo *info2,
Oid *opfamilies1, Oid *opfamilies2,
AttrNumber *attmap, int maplen)
{
int i;
int i;
if (info1->ii_Unique != info2->ii_Unique)
return false;
@ -1854,7 +1854,7 @@ CompareIndexInfo(IndexInfo *info1, IndexInfo *info2,
/* ignore expressions at this stage */
if ((info1->ii_IndexAttrNumbers[i] != InvalidAttrNumber) &&
(attmap[info2->ii_IndexAttrNumbers[i] - 1] !=
info1->ii_IndexAttrNumbers[i]))
info1->ii_IndexAttrNumbers[i]))
return false;
/* collation and opfamily is not valid for including columns */
@ -1875,8 +1875,8 @@ CompareIndexInfo(IndexInfo *info1, IndexInfo *info2,
return false;
if (info1->ii_Expressions != NIL)
{
bool found_whole_row;
Node *mapped;
bool found_whole_row;
Node *mapped;
mapped = map_variable_attnos((Node *) info2->ii_Expressions,
1, 0, attmap, maplen,
@ -1899,8 +1899,8 @@ CompareIndexInfo(IndexInfo *info1, IndexInfo *info2,
return false;
if (info1->ii_Predicate != NULL)
{
bool found_whole_row;
Node *mapped;
bool found_whole_row;
Node *mapped;
mapped = map_variable_attnos((Node *) info2->ii_Predicate,
1, 0, attmap, maplen,
@ -2105,11 +2105,11 @@ index_update_stats(Relation rel,
* It is safe to use a non-transactional update even though our
* transaction could still fail before committing. Setting relhasindex
* true is safe even if there are no indexes (VACUUM will eventually fix
* it). And of course the new relpages and
* reltuples counts are correct regardless. However, we don't want to
* change relpages (or relallvisible) if the caller isn't providing an
* updated reltuples count, because that would bollix the
* reltuples/relpages ratio which is what's really important.
* it). And of course the new relpages and reltuples counts are correct
* regardless. However, we don't want to change relpages (or
* relallvisible) if the caller isn't providing an updated reltuples
* count, because that would bollix the reltuples/relpages ratio which is
* what's really important.
*/
pg_class = heap_open(RelationRelationId, RowExclusiveLock);
@ -4136,7 +4136,7 @@ RestoreReindexState(void *reindexstate)
{
SerializedReindexState *sistate = (SerializedReindexState *) reindexstate;
int c = 0;
MemoryContext oldcontext;
MemoryContext oldcontext;
currentlyReindexedHeap = sistate->currentlyReindexedHeap;
currentlyReindexedIndex = sistate->currentlyReindexedIndex;

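CompareIndexInfo above matches index columns through attmap, an array that translates one relation's attribute numbers into the other's: attmap[child_attno - 1] yields the corresponding parent attno. A toy sketch of that convention, with illustrative numbers only:

#include <stdbool.h>
#include <stdio.h>

typedef short AttrNumber;		/* stand-in for the PostgreSQL typedef */

/* does child_attno translate to the expected parent_attno? */
static bool
attno_matches(const AttrNumber *attmap, int maplen,
			  AttrNumber child_attno, AttrNumber parent_attno)
{
	if (child_attno < 1 || child_attno > maplen)
		return false;
	return attmap[child_attno - 1] == parent_attno;
}

int
main(void)
{
	/* child column 1 maps to parent column 2 and vice versa (swapped) */
	AttrNumber	attmap[] = {2, 1};

	printf("%d\n", attno_matches(attmap, 2, 2, 1));	/* 1: matches */
	printf("%d\n", attno_matches(attmap, 2, 1, 1));	/* 0: differs */
	return 0;
}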
@ -2062,8 +2062,8 @@ pg_get_object_address(PG_FUNCTION_ARGS)
}
/*
* get_object_address is pretty sensitive to the length of its input lists;
* check that they're what it wants.
* get_object_address is pretty sensitive to the length of its input
* lists; check that they're what it wants.
*/
switch (type)
{
@ -5130,7 +5130,11 @@ get_relkind_objtype(char relkind)
return OBJECT_MATVIEW;
case RELKIND_FOREIGN_TABLE:
return OBJECT_FOREIGN_TABLE;
/* other relkinds are not supported here because they don't map to OBJECT_* values */
/*
* other relkinds are not supported here because they don't map to
* OBJECT_* values
*/
default:
elog(ERROR, "unexpected relkind: %d", relkind);
return 0;

@ -205,7 +205,7 @@ map_partition_varattnos(List *expr, int fromrel_varno,
bool
has_partition_attrs(Relation rel, Bitmapset *attnums, bool *used_in_expr)
{
PartitionKey key;
PartitionKey key;
int partnatts;
List *partexprs;
ListCell *partexprs_item;

@ -419,8 +419,8 @@ CloneForeignKeyConstraints(Oid parentId, Oid relationId, List **cloned)
Relation pg_constraint;
Relation parentRel;
Relation rel;
ScanKeyData key;
SysScanDesc scan;
ScanKeyData key;
SysScanDesc scan;
TupleDesc tupdesc;
HeapTuple tuple;
AttrNumber *attmap;
@ -448,7 +448,7 @@ CloneForeignKeyConstraints(Oid parentId, Oid relationId, List **cloned)
while ((tuple = systable_getnext(scan)) != NULL)
{
Form_pg_constraint constrForm = (Form_pg_constraint) GETSTRUCT(tuple);
Form_pg_constraint constrForm = (Form_pg_constraint) GETSTRUCT(tuple);
AttrNumber conkey[INDEX_MAX_KEYS];
AttrNumber mapped_conkey[INDEX_MAX_KEYS];
AttrNumber confkey[INDEX_MAX_KEYS];
@ -573,8 +573,8 @@ CloneForeignKeyConstraints(Oid parentId, Oid relationId, List **cloned)
nelem,
nelem,
InvalidOid, /* not a domain constraint */
constrForm->conindid, /* same index */
constrForm->confrelid, /* same foreign rel */
constrForm->conindid, /* same index */
constrForm->confrelid, /* same foreign rel */
confkey,
conpfeqop,
conppeqop,
@ -606,8 +606,8 @@ CloneForeignKeyConstraints(Oid parentId, Oid relationId, List **cloned)
if (cloned)
{
/*
* Feed back caller about the constraints we created, so that they can
* set up constraint verification.
* Feed back caller about the constraints we created, so that they
* can set up constraint verification.
*/
newc = palloc(sizeof(ClonedConstraint));
newc->relid = relationId;
@ -625,7 +625,7 @@ CloneForeignKeyConstraints(Oid parentId, Oid relationId, List **cloned)
if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
{
PartitionDesc partdesc = RelationGetPartitionDesc(rel);
PartitionDesc partdesc = RelationGetPartitionDesc(rel);
int i;
for (i = 0; i < partdesc->nparts; i++)
@ -634,7 +634,7 @@ CloneForeignKeyConstraints(Oid parentId, Oid relationId, List **cloned)
cloned);
}
heap_close(rel, NoLock); /* keep lock till commit */
heap_close(rel, NoLock); /* keep lock till commit */
heap_close(parentRel, NoLock);
heap_close(pg_constraint, RowShareLock);
}
@ -1020,12 +1020,12 @@ AlterConstraintNamespaces(Oid ownerId, Oid oldNspId,
void
ConstraintSetParentConstraint(Oid childConstrId, Oid parentConstrId)
{
Relation constrRel;
Relation constrRel;
Form_pg_constraint constrForm;
HeapTuple tuple,
newtup;
ObjectAddress depender;
ObjectAddress referenced;
HeapTuple tuple,
newtup;
ObjectAddress depender;
ObjectAddress referenced;
constrRel = heap_open(ConstraintRelationId, RowExclusiveLock);
tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(childConstrId));
@ -1212,8 +1212,8 @@ Oid
get_relation_idx_constraint_oid(Oid relationId, Oid indexId)
{
Relation pg_constraint;
SysScanDesc scan;
ScanKeyData key;
SysScanDesc scan;
ScanKeyData key;
HeapTuple tuple;
Oid constraintId = InvalidOid;
@ -1228,7 +1228,7 @@ get_relation_idx_constraint_oid(Oid relationId, Oid indexId)
true, NULL, 1, &key);
while ((tuple = systable_getnext(scan)) != NULL)
{
Form_pg_constraint constrForm;
Form_pg_constraint constrForm;
constrForm = (Form_pg_constraint) GETSTRUCT(tuple);
if (constrForm->conindid == indexId)

@ -448,7 +448,7 @@ StoreSingleInheritance(Oid relationId, Oid parentOid, int32 seqNumber)
bool
DeleteInheritsTuple(Oid inhrelid, Oid inhparent)
{
bool found = false;
bool found = false;
Relation catalogRelation;
ScanKeyData key;
SysScanDesc scan;

@ -942,7 +942,7 @@ AlterObjectOwner_internal(Relation rel, Oid objectId, Oid new_ownerId)
/* Superusers can bypass permission checks */
if (!superuser())
{
ObjectType objtype = get_object_type(classId, objectId);
ObjectType objtype = get_object_type(classId, objectId);
/* must be owner */
if (!has_privs_of_role(GetUserId(), old_ownerId))

@ -1539,8 +1539,8 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap,
frozenXid, cutoffMulti, mapped_tables);
/*
* If it's a system catalog, queue a sinval message to flush all
* catcaches on the catalog when we reach CommandCounterIncrement.
* If it's a system catalog, queue a sinval message to flush all catcaches
* on the catalog when we reach CommandCounterIncrement.
*/
if (is_system_catalog)
CacheInvalidateCatalog(OIDOldHeap);

@ -2783,7 +2783,7 @@ CopyFrom(CopyState cstate)
slot,
NULL);
if (slot == NULL) /* "do nothing" */
if (slot == NULL) /* "do nothing" */
goto next_tuple;
/* FDW might have changed tuple */

@ -2184,7 +2184,7 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS)
"GRANT" : "REVOKE");
/* object_type */
values[i++] = CStringGetTextDatum(stringify_grant_objtype(
cmd->d.grant.istmt->objtype));
cmd->d.grant.istmt->objtype));
/* schema */
nulls[i++] = true;
/* identity */
@ -2244,7 +2244,7 @@ stringify_grant_objtype(ObjectType objtype)
return "TABLESPACE";
case OBJECT_TYPE:
return "TYPE";
/* these currently aren't used */
/* these currently aren't used */
case OBJECT_ACCESS_METHOD:
case OBJECT_AGGREGATE:
case OBJECT_AMOP:
@ -2326,7 +2326,7 @@ stringify_adefprivs_objtype(ObjectType objtype)
return "TABLESPACES";
case OBJECT_TYPE:
return "TYPES";
/* these currently aren't used */
/* these currently aren't used */
case OBJECT_ACCESS_METHOD:
case OBJECT_AGGREGATE:
case OBJECT_AMOP:

@ -305,7 +305,7 @@ interpret_function_parameter_list(ParseState *pstate,
{
if (objtype == OBJECT_PROCEDURE)
*requiredResultType = RECORDOID;
else if (outCount == 0) /* save first output param's type */
else if (outCount == 0) /* save first output param's type */
*requiredResultType = toid;
outCount++;
}

@ -326,7 +326,7 @@ DefineIndex(Oid relationId,
IndexStmt *stmt,
Oid indexRelationId,
Oid parentIndexId,
Oid parentConstraintId,
Oid parentConstraintId,
bool is_alter_table,
bool check_rights,
bool check_not_in_use,
@ -381,11 +381,11 @@ DefineIndex(Oid relationId,
/*
* Calculate the new list of index columns including both key columns and
* INCLUDE columns. Later we can determine which of these are key columns,
* and which are just part of the INCLUDE list by checking the list
* position. A list item in a position less than ii_NumIndexKeyAttrs is
* part of the key columns, and anything equal to and over is part of the
* INCLUDE columns.
* INCLUDE columns. Later we can determine which of these are key
* columns, and which are just part of the INCLUDE list by checking the
* list position. A list item in a position less than ii_NumIndexKeyAttrs
* is part of the key columns, and anything equal to and over is part of
* the INCLUDE columns.
*/
allIndexParams = list_concat(list_copy(stmt->indexParams),
list_copy(stmt->indexIncludingParams));
@ -431,6 +431,7 @@ DefineIndex(Oid relationId,
/* OK */
break;
case RELKIND_FOREIGN_TABLE:
/*
* Custom error message for FOREIGN TABLE since the term is close
* to a regular table and can confuse the user.
@ -691,13 +692,13 @@ DefineIndex(Oid relationId,
* partition-local index can enforce global uniqueness iff the PK
* value completely determines the partition that a row is in.
*
* Thus, verify that all the columns in the partition key appear
* in the unique key definition.
* Thus, verify that all the columns in the partition key appear in
* the unique key definition.
*/
for (i = 0; i < key->partnatts; i++)
{
bool found = false;
int j;
bool found = false;
int j;
const char *constraint_type;
if (stmt->primary)
@ -722,7 +723,7 @@ DefineIndex(Oid relationId,
errmsg("unsupported %s constraint with partition key definition",
constraint_type),
errdetail("%s constraints cannot be used when partition keys include expressions.",
constraint_type)));
constraint_type)));
for (j = 0; j < indexInfo->ii_NumIndexAttrs; j++)
{
@ -820,8 +821,8 @@ DefineIndex(Oid relationId,
/*
* Make the catalog entries for the index, including constraints. This
* step also actually builds the index, except if caller requested not to
* or in concurrent mode, in which case it'll be done later, or
* doing a partitioned index (because those don't have storage).
* or in concurrent mode, in which case it'll be done later, or doing a
* partitioned index (because those don't have storage).
*/
flags = constr_flags = 0;
if (stmt->isconstraint)
@ -871,8 +872,8 @@ DefineIndex(Oid relationId,
if (partitioned)
{
/*
* Unless caller specified to skip this step (via ONLY), process
* each partition to make sure they all contain a corresponding index.
* Unless caller specified to skip this step (via ONLY), process each
* partition to make sure they all contain a corresponding index.
*
* If we're called internally (no stmt->relation), recurse always.
*/
@ -904,13 +905,13 @@ DefineIndex(Oid relationId,
*/
for (i = 0; i < nparts; i++)
{
Oid childRelid = part_oids[i];
Relation childrel;
List *childidxs;
ListCell *cell;
Oid childRelid = part_oids[i];
Relation childrel;
List *childidxs;
ListCell *cell;
AttrNumber *attmap;
bool found = false;
int maplen;
bool found = false;
int maplen;
childrel = heap_open(childRelid, lockmode);
childidxs = RelationGetIndexList(childrel);
@ -940,7 +941,7 @@ DefineIndex(Oid relationId,
opfamOids,
attmap, maplen))
{
Oid cldConstrOid = InvalidOid;
Oid cldConstrOid = InvalidOid;
/*
* Found a match.
@ -1002,7 +1003,7 @@ DefineIndex(Oid relationId,
childStmt->idxname = NULL;
childStmt->relationId = childRelid;
DefineIndex(childRelid, childStmt,
InvalidOid, /* no predefined OID */
InvalidOid, /* no predefined OID */
indexRelationId, /* this is our child */
createdConstraintId,
is_alter_table, check_rights, check_not_in_use,
@ -1014,9 +1015,8 @@ DefineIndex(Oid relationId,
/*
* The pg_index row we inserted for this index was marked
* indisvalid=true. But if we attached an existing index that
* is invalid, this is incorrect, so update our row to
* invalid too.
* indisvalid=true. But if we attached an existing index that is
* invalid, this is incorrect, so update our row to invalid too.
*/
if (invalidate_parent)
{
@ -1479,7 +1479,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
}
else
{
indexInfo->ii_IndexAttrNumbers[attn] = 0; /* marks expression */
indexInfo->ii_IndexAttrNumbers[attn] = 0; /* marks expression */
indexInfo->ii_Expressions = lappend(indexInfo->ii_Expressions,
expr);
@ -1505,7 +1505,8 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
typeOidP[attn] = atttype;
/*
* Included columns have no collation, no opclass and no ordering options.
* Included columns have no collation, no opclass and no ordering
* options.
*/
if (attn >= nkeycols)
{
@ -2465,8 +2466,8 @@ void
IndexSetParentIndex(Relation partitionIdx, Oid parentOid)
{
Relation pg_inherits;
ScanKeyData key[2];
SysScanDesc scan;
ScanKeyData key[2];
SysScanDesc scan;
Oid partRelid = RelationGetRelid(partitionIdx);
HeapTuple tuple;
bool fix_dependencies;
@ -2496,15 +2497,15 @@ IndexSetParentIndex(Relation partitionIdx, Oid parentOid)
if (parentOid == InvalidOid)
{
/*
* No pg_inherits row, and no parent wanted: nothing to do in
* this case.
* No pg_inherits row, and no parent wanted: nothing to do in this
* case.
*/
fix_dependencies = false;
}
else
{
Datum values[Natts_pg_inherits];
bool isnull[Natts_pg_inherits];
Datum values[Natts_pg_inherits];
bool isnull[Natts_pg_inherits];
/*
* No pg_inherits row exists, and we want a parent for this index,
@ -2525,7 +2526,7 @@ IndexSetParentIndex(Relation partitionIdx, Oid parentOid)
}
else
{
Form_pg_inherits inhForm = (Form_pg_inherits) GETSTRUCT(tuple);
Form_pg_inherits inhForm = (Form_pg_inherits) GETSTRUCT(tuple);
if (parentOid == InvalidOid)
{
@ -2572,14 +2573,14 @@ IndexSetParentIndex(Relation partitionIdx, Oid parentOid)
if (OidIsValid(parentOid))
{
ObjectAddress parentIdx;
ObjectAddress parentIdx;
ObjectAddressSet(parentIdx, RelationRelationId, parentOid);
recordDependencyOn(&partIdx, &parentIdx, DEPENDENCY_INTERNAL_AUTO);
}
else
{
ObjectAddress partitionTbl;
ObjectAddress partitionTbl;
ObjectAddressSet(partitionTbl, RelationRelationId,
partitionIdx->rd_index->indrelid);

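The DefineIndex comment above describes the combined column list: key columns occupy positions below ii_NumIndexKeyAttrs, and everything at or beyond that position belongs to the INCLUDE list. A short sketch of that positional rule over a plain array (the real code walks a List of IndexElem nodes):

#include <stdio.h>

int
main(void)
{
	const char *allIndexParams[] = {"a", "b", "c", "d"};
	int			numIndexAttrs = 4;		/* total columns in the list */
	int			numIndexKeyAttrs = 2;	/* the first two are key columns */

	for (int pos = 0; pos < numIndexAttrs; pos++)
	{
		/* below ii_NumIndexKeyAttrs => key column; at or above => INCLUDE */
		if (pos < numIndexKeyAttrs)
			printf("%s: key column\n", allIndexParams[pos]);
		else
			printf("%s: INCLUDE column\n", allIndexParams[pos]);
	}
	return 0;
}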
@ -181,7 +181,7 @@ typedef struct
bool nowait; /* no wait mode */
Oid viewowner; /* view owner for checking the privilege */
Oid viewoid; /* OID of the view to be locked */
List *ancestor_views; /* OIDs of ancestor views */
List *ancestor_views; /* OIDs of ancestor views */
} LockViewRecurse_context;
static bool

@ -215,7 +215,7 @@ RelationBuildRowSecurity(Relation relation)
HeapTuple tuple;
MemoryContextCopyAndSetIdentifier(rscxt,
RelationGetRelationName(relation));
RelationGetRelationName(relation));
rsdesc = MemoryContextAllocZero(rscxt, sizeof(RowSecurityDesc));
rsdesc->rscxt = rscxt;

@ -450,9 +450,9 @@ PersistHoldablePortal(Portal portal)
PopActiveSnapshot();
/*
* We can now release any subsidiary memory of the portal's context;
* we'll never use it again. The executor already dropped its context,
* but this will clean up anything that glommed onto the portal's context via
* We can now release any subsidiary memory of the portal's context; we'll
* never use it again. The executor already dropped its context, but this
* will clean up anything that glommed onto the portal's context via
* PortalContext.
*/
MemoryContextDeleteChildren(portal->portalContext);

@ -133,7 +133,8 @@ CreateStatistics(CreateStatsStmt *stmt)
* If the node has a name, split it up and determine creation namespace.
* If not (a possibility not considered by the grammar, but one which can
* occur via the "CREATE TABLE ... (LIKE)" command), then we put the
* object in the same namespace as the relation, and cons up a name for it.
* object in the same namespace as the relation, and cons up a name for
* it.
*/
if (stmt->defnames)
namespaceId = QualifiedNameGetCreationNamespace(stmt->defnames,
@ -462,7 +463,7 @@ ChooseExtendedStatisticName(const char *name1, const char *name2,
for (;;)
{
Oid existingstats;
Oid existingstats;
stxname = makeObjectName(name1, name2, modlabel);
@ -500,7 +501,7 @@ ChooseExtendedStatisticNameAddition(List *exprs)
buf[0] = '\0';
foreach(lc, exprs)
{
ColumnRef *cref = (ColumnRef *) lfirst(lc);
ColumnRef *cref = (ColumnRef *) lfirst(lc);
const char *name;
/* It should be one of these, but just skip if it happens not to be */

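ChooseExtendedStatisticName above loops until makeObjectName produces a name that no existing statistics object uses. A self-contained sketch of that retry-with-counter pattern; name_exists is a hypothetical stub standing in for the catalog lookup:

#include <stdio.h>
#include <string.h>

/* hypothetical stand-in for the syscache probe; pretend two names are taken */
static int
name_exists(const char *name)
{
	return strcmp(name, "t_col_stat") == 0 ||
		strcmp(name, "t_col_stat1") == 0;
}

int
main(void)
{
	char		name[64];
	int			pass = 0;

	for (;;)
	{
		if (pass == 0)
			snprintf(name, sizeof(name), "t_col_stat");
		else
			snprintf(name, sizeof(name), "t_col_stat%d", pass);
		if (!name_exists(name))
			break;				/* no collision: keep this name */
		pass++;
	}
	printf("chose: %s\n", name);	/* prints t_col_stat2 */
	return 0;
}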
@ -1634,7 +1634,8 @@ ExecuteTruncateGuts(List *explicit_rels, List *relids, List *relids_logged,
}
/*
* Write a WAL record to allow this set of actions to be logically decoded.
* Write a WAL record to allow this set of actions to be logically
* decoded.
*
* Assemble an array of relids so we can write a single WAL record for the
* whole action.
@ -1648,7 +1649,7 @@ ExecuteTruncateGuts(List *explicit_rels, List *relids, List *relids_logged,
Assert(XLogLogicalInfoActive());
logrelids = palloc(list_length(relids_logged) * sizeof(Oid));
foreach (cell, relids_logged)
foreach(cell, relids_logged)
logrelids[i++] = lfirst_oid(cell);
xlrec.dbId = MyDatabaseId;
@ -5560,8 +5561,8 @@ ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel,
CommandCounterIncrement();
/*
* Did the request for a missing value work? If not we'll have to do
* a rewrite
* Did the request for a missing value work? If not we'll have to do a
* rewrite
*/
if (!rawEnt->missingMode)
tab->rewrite |= AT_REWRITE_DEFAULT_VAL;
@ -7664,9 +7665,9 @@ ATAddForeignKeyConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
ObjectAddressSet(address, ConstraintRelationId, constrOid);
/*
* Create the triggers that will enforce the constraint. We only want
* the action triggers to appear for the parent partitioned relation,
* even though the constraints also exist below.
* Create the triggers that will enforce the constraint. We only want the
* action triggers to appear for the parent partitioned relation, even
* though the constraints also exist below.
*/
createForeignKeyTriggers(rel, RelationGetRelid(pkrel), fkconstraint,
constrOid, indexOid, !recursing);
@ -8793,8 +8794,8 @@ createForeignKeyTriggers(Relation rel, Oid refRelOid, Constraint *fkconstraint,
indexOid);
/*
* For the referencing side, create the check triggers. We only need these
* on the partitions.
* For the referencing side, create the check triggers. We only need
* these on the partitions.
*/
if (rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
createForeignKeyCheckTriggers(RelationGetRelid(rel), refRelOid,
@ -13974,8 +13975,9 @@ QueuePartitionConstraintValidation(List **wqueue, Relation scanrel,
}
/*
* Constraints proved insufficient. For plain relations, queue a validation
* item now; for partitioned tables, recurse to process each partition.
* Constraints proved insufficient. For plain relations, queue a
* validation item now; for partitioned tables, recurse to process each
* partition.
*/
if (scanrel->rd_rel->relkind == RELKIND_RELATION)
{
@ -14300,9 +14302,9 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd)
/*
* If we're attaching a partition other than the default partition and a
* default one exists, then that partition's partition constraint changes,
* so add an entry to the work queue to validate it, too. (We must not
* do this when the partition being attached is the default one; we
* already did it above!)
* so add an entry to the work queue to validate it, too. (We must not do
* this when the partition being attached is the default one; we already
* did it above!)
*/
if (OidIsValid(defaultPartOid))
{
@ -14408,8 +14410,8 @@ AttachPartitionEnsureIndexes(Relation rel, Relation attachrel)
*/
for (i = 0; i < list_length(attachRelIdxs); i++)
{
Oid cldIdxId = RelationGetRelid(attachrelIdxRels[i]);
Oid cldConstrOid = InvalidOid;
Oid cldIdxId = RelationGetRelid(attachrelIdxRels[i]);
Oid cldConstrOid = InvalidOid;
/* does this index have a parent? if so, can't use it */
if (attachrelIdxRels[i]->rd_rel->relispartition)
@ -14693,7 +14695,7 @@ ATExecDetachPartition(Relation rel, RangeVar *name)
continue;
Assert((IndexGetRelation(get_partition_parent(idxid), false) ==
RelationGetRelid(rel)));
RelationGetRelid(rel)));
idx = index_open(idxid, AccessExclusiveLock);
IndexSetParentIndex(idx, InvalidOid);
@ -14722,9 +14724,9 @@ ATExecDetachPartition(Relation rel, RangeVar *name)
*/
struct AttachIndexCallbackState
{
Oid partitionOid;
Oid parentTblOid;
bool lockedParentTbl;
Oid partitionOid;
Oid parentTblOid;
bool lockedParentTbl;
};
static void
@ -14836,7 +14838,8 @@ ATExecAttachPartitionIdx(List **wqueue, Relation parentIdx, RangeVar *name)
cldConstrId = InvalidOid;
/*
* If this partition already has an index attached, refuse the operation.
* If this partition already has an index attached, refuse the
* operation.
*/
refuseDupeIndexAttach(parentIdx, partIdx, partTbl);
@ -14890,8 +14893,8 @@ ATExecAttachPartitionIdx(List **wqueue, Relation parentIdx, RangeVar *name)
errdetail("The index definitions do not match.")));
/*
* If there is a constraint in the parent, make sure there is one
* in the child too.
* If there is a constraint in the parent, make sure there is one in
* the child too.
*/
constraintOid = get_relation_idx_constraint_oid(RelationGetRelid(parentTbl),
RelationGetRelid(parentIdx));
@ -14907,9 +14910,9 @@ ATExecAttachPartitionIdx(List **wqueue, Relation parentIdx, RangeVar *name)
RelationGetRelationName(partIdx),
RelationGetRelationName(parentIdx)),
errdetail("The index \"%s\" belongs to a constraint in table \"%s\" but no constraint exists for index \"%s\".",
RelationGetRelationName(parentIdx),
RelationGetRelationName(parentTbl),
RelationGetRelationName(partIdx))));
RelationGetRelationName(parentIdx),
RelationGetRelationName(parentTbl),
RelationGetRelationName(partIdx))));
}
/* All good -- do it */
@ -14938,10 +14941,10 @@ ATExecAttachPartitionIdx(List **wqueue, Relation parentIdx, RangeVar *name)
static void
refuseDupeIndexAttach(Relation parentIdx, Relation partIdx, Relation partitionTbl)
{
Relation pg_inherits;
ScanKeyData key;
HeapTuple tuple;
SysScanDesc scan;
Relation pg_inherits;
ScanKeyData key;
HeapTuple tuple;
SysScanDesc scan;
pg_inherits = heap_open(InheritsRelationId, AccessShareLock);
ScanKeyInit(&key, Anum_pg_inherits_inhparent,
@ -14951,7 +14954,7 @@ refuseDupeIndexAttach(Relation parentIdx, Relation partIdx, Relation partitionTb
NULL, 1, &key);
while (HeapTupleIsValid(tuple = systable_getnext(scan)))
{
Form_pg_inherits inhForm;
Form_pg_inherits inhForm;
Oid tab;
inhForm = (Form_pg_inherits) GETSTRUCT(tuple);
@ -14979,12 +14982,12 @@ refuseDupeIndexAttach(Relation parentIdx, Relation partIdx, Relation partitionTb
static void
validatePartitionedIndex(Relation partedIdx, Relation partedTbl)
{
Relation inheritsRel;
SysScanDesc scan;
ScanKeyData key;
int tuples = 0;
HeapTuple inhTup;
bool updated = false;
Relation inheritsRel;
SysScanDesc scan;
ScanKeyData key;
int tuples = 0;
HeapTuple inhTup;
bool updated = false;
Assert(partedIdx->rd_rel->relkind == RELKIND_PARTITIONED_INDEX);
@ -15002,11 +15005,11 @@ validatePartitionedIndex(Relation partedIdx, Relation partedTbl)
while ((inhTup = systable_getnext(scan)) != NULL)
{
Form_pg_inherits inhForm = (Form_pg_inherits) GETSTRUCT(inhTup);
HeapTuple indTup;
Form_pg_index indexForm;
HeapTuple indTup;
Form_pg_index indexForm;
indTup = SearchSysCache1(INDEXRELID,
ObjectIdGetDatum(inhForm->inhrelid));
ObjectIdGetDatum(inhForm->inhrelid));
if (!indTup)
elog(ERROR, "cache lookup failed for index %u",
inhForm->inhrelid);

@ -5741,8 +5741,9 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
* oldtup should be non-NULL, whereas for UPDATE events normally both
* oldtup and newtup are non-NULL. But for UPDATE events fired for
* capturing transition tuples during UPDATE partition-key row
* movement, oldtup is NULL when the event is for a row being inserted,
* whereas newtup is NULL when the event is for a row being deleted.
* movement, oldtup is NULL when the event is for a row being
* inserted, whereas newtup is NULL when the event is for a row being
* deleted.
*/
Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table &&
oldtup == NULL));
@ -5769,7 +5770,7 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
}
if (newtup != NULL &&
((event == TRIGGER_EVENT_INSERT && insert_new_table) ||
(event == TRIGGER_EVENT_UPDATE && update_new_table)))
(event == TRIGGER_EVENT_UPDATE && update_new_table)))
{
Tuplestorestate *new_tuplestore;
@ -5791,9 +5792,9 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
/*
* If transition tables are the only reason we're here, return. As
* mentioned above, we can also be here during update tuple routing in
* presence of transition tables, in which case this function is called
* separately for oldtup and newtup, so we expect exactly one of them
* to be NULL.
* presence of transition tables, in which case this function is
* called separately for oldtup and newtup, so we expect exactly one
* of them to be NULL.
*/
if (trigdesc == NULL ||
(event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||

@ -2200,7 +2200,7 @@ ExecEvalFuncExprFusage(ExprState *state, ExprEvalStep *op,
*/
void
ExecEvalFuncExprStrictFusage(ExprState *state, ExprEvalStep *op,
ExprContext *econtext)
ExprContext *econtext)
{
FunctionCallInfo fcinfo = op->d.func.fcinfo_data;

@ -1417,6 +1417,7 @@ ExecGetTriggerResultRel(EState *estate, Oid relid)
rInfo++;
nr--;
}
/*
* Third, search through the result relations that were created during
* tuple routing, if any.

@ -407,10 +407,9 @@ ExecSetExecProcNode(PlanState *node, ExecProcNodeMtd function)
{
/*
* Add a wrapper around the ExecProcNode callback that checks stack depth
* during the first execution and maybe adds an instrumentation
* wrapper. When the callback is changed after execution has already begun
* that means we'll superfluously execute ExecProcNodeFirst, but that seems
* ok.
* during the first execution and maybe adds an instrumentation wrapper.
* When the callback is changed after execution has already begun that
* means we'll superfluously execute ExecProcNodeFirst, but that seems ok.
*/
node->ExecProcNodeReal = function;
node->ExecProcNode = ExecProcNodeFirst;

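ExecSetExecProcNode above installs a one-shot wrapper: callers invoke ExecProcNode, which on the first call runs the stack-depth check (and possibly adds instrumentation) before handing control to ExecProcNodeReal and replacing itself. A minimal sketch of the same function-pointer swap, with deliberately simplified types:

#include <stdio.h>

struct PlanState;
typedef struct PlanState *(*ExecProcNodeMtd) (struct PlanState *node);

typedef struct PlanState
{
	ExecProcNodeMtd ExecProcNode;		/* what callers invoke */
	ExecProcNodeMtd ExecProcNodeReal;	/* the node's actual work */
} PlanState;

static PlanState *
real_proc(PlanState *node)
{
	printf("real work\n");
	return node;
}

static PlanState *
first_proc(PlanState *node)
{
	printf("first call: one-time checks\n");
	node->ExecProcNode = node->ExecProcNodeReal;	/* swap the wrapper out */
	return node->ExecProcNode(node);
}

int
main(void)
{
	PlanState	node = {first_proc, real_proc};

	node.ExecProcNode(&node);	/* checks, then real work */
	node.ExecProcNode(&node);	/* real work only */
	return 0;
}

If the callback is changed after execution has begun, the wrapper simply runs once more, which is the harmless superfluous execution the comment mentions.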
@ -674,7 +674,7 @@ ExecFetchSlotTuple(TupleTableSlot *slot)
if (HeapTupleHeaderGetNatts(slot->tts_tuple->t_data) <
slot->tts_tupleDescriptor->natts)
{
HeapTuple tuple;
HeapTuple tuple;
MemoryContext oldContext = MemoryContextSwitchTo(slot->tts_mcxt);
tuple = heap_expand_tuple(slot->tts_tuple,

@ -2365,7 +2365,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
/* for each grouping set */
for (i = 0; i < phasedata->numsets; i++)
{
int length = phasedata->gset_lengths[i];
int length = phasedata->gset_lengths[i];
if (phasedata->eqfunctions[length - 1] != NULL)
continue;

@ -268,7 +268,7 @@ gather_getnext(GatherState *gatherstate)
if (gatherstate->need_to_scan_locally)
{
EState *estate = gatherstate->ps.state;
EState *estate = gatherstate->ps.state;
/* Install our DSA area while executing the plan. */
estate->es_query_dsa =

@ -628,7 +628,7 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
{
PlanState *outerPlan = outerPlanState(gm_state);
TupleTableSlot *outerTupleSlot;
EState *estate = gm_state->ps.state;
EState *estate = gm_state->ps.state;
/* Install our DSA area while executing the plan. */
estate->es_query_dsa = gm_state->pei ? gm_state->pei->area : NULL;

@ -596,7 +596,8 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags)
List *lclauses;
List *rclauses;
List *hoperators;
TupleDesc outerDesc, innerDesc;
TupleDesc outerDesc,
innerDesc;
ListCell *l;
/* check for unsupported flags */

@ -1436,7 +1436,8 @@ MergeJoinState *
ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags)
{
MergeJoinState *mergestate;
TupleDesc outerDesc, innerDesc;
TupleDesc outerDesc,
innerDesc;
/* check for unsupported flags */
Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));

@ -1088,7 +1088,7 @@ lreplace:;
*/
ExecDelete(mtstate, tupleid, oldtuple, planSlot, epqstate,
estate, &tuple_deleted, false,
false /* canSetTag */, true /* changingPart */);
false /* canSetTag */ , true /* changingPart */ );
/*
* For some reason if DELETE didn't happen (e.g. trigger prevented
@ -1678,8 +1678,8 @@ ExecPrepareTupleRouting(ModifyTableState *mtstate,
HeapTuple tuple;
/*
* Determine the target partition. If ExecFindPartition does not find
* a partition after all, it doesn't return here; otherwise, the returned
* Determine the target partition. If ExecFindPartition does not find a
* partition after all, it doesn't return here; otherwise, the returned
* value is to be used as an index into the arrays for the ResultRelInfo
* and TupleConversionMap for the partition.
*/
@ -2140,7 +2140,7 @@ ExecModifyTable(PlanState *pstate)
slot = ExecDelete(node, tupleid, oldtuple, planSlot,
&node->mt_epqstate, estate,
NULL, true, node->canSetTag,
false /* changingPart */);
false /* changingPart */ );
break;
default:
elog(ERROR, "unknown operation");
@ -2310,7 +2310,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
(operation == CMD_INSERT || update_tuple_routing_needed))
mtstate->mt_partition_tuple_routing =
ExecSetupPartitionTupleRouting(mtstate, rel);
ExecSetupPartitionTupleRouting(mtstate, rel);
/*
* Build state for collecting transition tuples. This requires having a

@ -153,8 +153,8 @@ ExecInitSampleScan(SampleScan *node, EState *estate, int eflags)
RelationGetDescr(scanstate->ss.ss_currentRelation));
/*
* Initialize result slot, type and projection.
* tuple table and result tuple initialization
* Initialize result slot, type and projection. tuple table and result
* tuple initialization
*/
ExecInitResultTupleSlotTL(estate, &scanstate->ss.ps);
ExecAssignScanProjectionInfo(&scanstate->ss);

@ -214,8 +214,8 @@ ExecInitSort(Sort *node, EState *estate, int eflags)
ExecCreateScanSlotFromOuterPlan(estate, &sortstate->ss);
/*
* Initialize return slot and type. No need to initialize projection info because
* this node doesn't do projections.
* Initialize return slot and type. No need to initialize projection info
* because this node doesn't do projections.
*/
ExecInitResultTupleSlotTL(estate, &sortstate->ss.ps);
sortstate->ss.ps.ps_ProjInfo = NULL;

@ -974,7 +974,7 @@ ExecInitSubPlan(SubPlan *subplan, PlanState *parent)
/*
* Create comparator for lookups of rows in the table (potentially
* across-type comparison).
* across-type comparison).
*/
sstate->cur_eq_comp = ExecBuildGroupingEqual(tupDescLeft, tupDescRight,
ncols,

@ -131,8 +131,8 @@ ValuesNext(ValuesScanState *node)
node->ss.ps.subPlan = NIL;
/*
* As the expressions are only ever used once, disable JIT for
* them. This is worthwhile because it's common to insert significant
* As the expressions are only ever used once, disable JIT for them.
* This is worthwhile because it's common to insert significant
* amounts of data via VALUES().
*/
saved_jit_flags = econtext->ecxt_estate->es_jit_flags;

@ -2019,8 +2019,8 @@ llvm_compile_expr(ExprState *state)
isnull;
/*
* At this point aggref->wfuncno is not yet set (it's
* set up in ExecInitWindowAgg() after initializing the
* At this point aggref->wfuncno is not yet set (it's set
* up in ExecInitWindowAgg() after initializing the
* expression). So load it from memory each time round.
*/
v_wfuncnop = l_ptr_const(&wfunc->wfuncno,

@ -262,7 +262,8 @@ static void
k_hashes(bloom_filter *filter, uint32 *hashes, unsigned char *elem, size_t len)
{
uint64 hash;
uint32 x, y;
uint32 x,
y;
uint64 m;
int i;

@ -130,7 +130,7 @@ bool
check_ssl_key_file_permissions(const char *ssl_key_file, bool isServerStart)
{
int loglevel = isServerStart ? FATAL : LOG;
struct stat buf;
struct stat buf;
if (stat(ssl_key_file, &buf) != 0)
{

@ -125,6 +125,7 @@ be_tls_init(bool isServerStart)
if (ssl_passphrase_command[0] && ssl_passphrase_command_supports_reload)
SSL_CTX_set_default_passwd_cb(context, ssl_external_passwd_cb);
else
/*
* If reloading and no external command is configured, override
* OpenSSL's default handling of passphrase-protected files,
@ -1139,8 +1140,8 @@ be_tls_get_certificate_hash(Port *port, size_t *len)
return NULL;
/*
* Get the signature algorithm of the certificate to determine the
* hash algorithm to use for the result.
* Get the signature algorithm of the certificate to determine the hash
* algorithm to use for the result.
*/
if (!OBJ_find_sigid_algs(X509_get_signature_nid(server_cert),
&algo_nid, NULL))

@ -1168,6 +1168,7 @@ bms_prev_member(const Bitmapset *a, int prevbit)
{
int result;
int shift = BITS_PER_BITMAPWORD - 8;
result = wordnum * BITS_PER_BITMAPWORD;
while ((w >> shift) == 0)

@ -216,9 +216,9 @@ nodeTokenType(char *token, int length)
{
/*
* Yes. Figure out whether it is integral or float; this requires
* both a syntax check and a range check. strtoint() can do both for us.
* We know the token will end at a character that strtoint will stop at,
* so we do not need to modify the string.
* both a syntax check and a range check. strtoint() can do both for
* us. We know the token will end at a character that strtoint will
* stop at, so we do not need to modify the string.
*/
char *endptr;

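The nodeTokenType comment above leans on strtoint() doing the syntax check and the range check in a single call. strtoint() is a PostgreSQL-internal helper, so this sketch uses the portable strtol/errno idiom it wraps:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* classify a token: strtol performs both syntax and range checking */
static const char *
token_type(const char *token)
{
	char	   *endptr;

	errno = 0;
	(void) strtol(token, &endptr, 10);
	if (endptr != token && *endptr == '\0' && errno != ERANGE)
		return "integer";
	if (endptr != token && (*endptr == '.' || *endptr == 'e'))
		return "float-ish";		/* crude: a real classifier would use strtod */
	return "other";
}

int
main(void)
{
	printf("%s\n", token_type("12345"));	/* integer */
	printf("%s\n", token_type("3.14"));		/* float-ish */
	printf("%s\n", token_type("99999999999999999999"));		/* other: ERANGE */
	return 0;
}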
@ -963,10 +963,10 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
/*
* We need attr_needed data for building targetlist of a join
* relation representing join between matching partitions for
* partitionwise join. A given attribute of a child will be
* needed in the same highest joinrel where the corresponding
* attribute of parent is needed. Hence it suffices to use the
* same Relids set for parent and child.
* partitionwise join. A given attribute of a child will be needed
* in the same highest joinrel where the corresponding attribute
* of parent is needed. Hence it suffices to use the same Relids
* set for parent and child.
*/
for (attno = rel->min_attr; attno <= rel->max_attr; attno++)
{
@ -2742,11 +2742,10 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
join_search_one_level(root, lev);
/*
* Run generate_partitionwise_join_paths() and
* generate_gather_paths() for each just-processed joinrel. We could
* not do this earlier because both regular and partial paths can get
* added to a particular joinrel at multiple times within
* join_search_one_level.
* Run generate_partitionwise_join_paths() and generate_gather_paths()
* for each just-processed joinrel. We could not do this earlier
* because both regular and partial paths can get added to a
* particular joinrel at multiple times within join_search_one_level.
*
* After that, we're done creating paths for the joinrel, so run
* set_cheapest().

@ -2696,6 +2696,7 @@ match_clause_to_ordering_op(IndexOptInfo *index,
opfamily = index->opfamily[indexcol];
idxcollation = index->indexcollations[indexcol];
/*
* Clause must be a binary opclause.
*/
@ -3945,7 +3946,7 @@ adjust_rowcompare_for_index(RowCompareExpr *clause,
IndexCollMatchesExprColl(index->indexcollations[i],
lfirst_oid(collids_cell)))
break;
break;
}
if (i >= index->ncolumns)
break; /* no match found */

@ -41,9 +41,9 @@ static void populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1,
RelOptInfo *rel2, RelOptInfo *joinrel,
SpecialJoinInfo *sjinfo, List *restrictlist);
static void try_partitionwise_join(PlannerInfo *root, RelOptInfo *rel1,
RelOptInfo *rel2, RelOptInfo *joinrel,
SpecialJoinInfo *parent_sjinfo,
List *parent_restrictlist);
RelOptInfo *rel2, RelOptInfo *joinrel,
SpecialJoinInfo *parent_sjinfo,
List *parent_restrictlist);
static int match_expr_to_partition_keys(Expr *expr, RelOptInfo *rel,
bool strict_op);
@ -1309,8 +1309,8 @@ restriction_is_constant_false(List *restrictlist,
*/
static void
try_partitionwise_join(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
RelOptInfo *joinrel, SpecialJoinInfo *parent_sjinfo,
List *parent_restrictlist)
RelOptInfo *joinrel, SpecialJoinInfo *parent_sjinfo,
List *parent_restrictlist)
{
int nparts;
int cnt_parts;
@ -1338,8 +1338,8 @@ try_partitionwise_join(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
joinrel->part_scheme == rel2->part_scheme);
/*
* Since we allow partitionwise join only when the partition bounds of
* the joining relations exactly match, the partition bounds of the join
* Since we allow partitionwise join only when the partition bounds of the
* joining relations exactly match, the partition bounds of the join
* should match those of the joining relations.
*/
Assert(partition_bounds_equal(joinrel->part_scheme->partnatts,

@ -6797,10 +6797,10 @@ apply_scanjoin_target_to_paths(PlannerInfo *root,
{
/*
* Since we can't generate the final scan/join target, this is our
* last opportunity to use any partial paths that exist. We don't
* do this if the case where the target is parallel-safe, since we
* will be able to generate superior paths by doing it after the
* final scan/join target has been applied.
* last opportunity to use any partial paths that exist. We don't do
* this if the case where the target is parallel-safe, since we will
* be able to generate superior paths by doing it after the final
* scan/join target has been applied.
*
* Note that this may invalidate rel->cheapest_total_path, so we must
* not rely on it after this point without first calling set_cheapest.

@ -1688,9 +1688,9 @@ expand_partitioned_rtentry(PlannerInfo *root, RangeTblEntry *parentrte,
/*
* Note down whether any partition key cols are being updated. Though it's
* the root partitioned table's updatedCols we are interested in, we
* instead use parentrte to get the updatedCols. This is convenient because
* parentrte already has the root partrel's updatedCols translated to match
* the attribute ordering of parentrel.
* instead use parentrte to get the updatedCols. This is convenient
* because parentrte already has the root partrel's updatedCols translated
* to match the attribute ordering of parentrel.
*/
if (!root->partColsUpdated)
root->partColsUpdated =

@ -1421,6 +1421,7 @@ relation_excluded_by_constraints(PlannerInfo *root,
switch (constraint_exclusion)
{
case CONSTRAINT_EXCLUSION_OFF:
/*
* Don't prune if feature turned off -- except if the relation is
* a partition. While partprune.c-style partition pruning is not
@ -1435,6 +1436,7 @@ relation_excluded_by_constraints(PlannerInfo *root,
return false;
case CONSTRAINT_EXCLUSION_PARTITION:
/*
* When constraint_exclusion is set to 'partition' we only handle
* OTHER_MEMBER_RELs, or BASERELs in cases where the result target
@ -1444,11 +1446,11 @@ relation_excluded_by_constraints(PlannerInfo *root,
!(rel->reloptkind == RELOPT_BASEREL &&
root->inhTargetKind != INHKIND_NONE &&
rel->relid == root->parse->resultRelation))
return false;
return false;
break;
case CONSTRAINT_EXCLUSION_ON:
break; /* always try to exclude */
break; /* always try to exclude */
}
/*

@ -77,7 +77,7 @@ static Query *transformExplainStmt(ParseState *pstate,
static Query *transformCreateTableAsStmt(ParseState *pstate,
CreateTableAsStmt *stmt);
static Query *transformCallStmt(ParseState *pstate,
CallStmt *stmt);
CallStmt *stmt);
static void transformLockingClause(ParseState *pstate, Query *qry,
LockingClause *lc, bool pushedDown);
#ifdef RAW_EXPRESSION_COVERAGE_TEST

@ -484,10 +484,10 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
cxt->blist = lappend(cxt->blist, seqstmt);
/*
* Store the identity sequence name that we decided on. ALTER TABLE
* ... ADD COLUMN ... IDENTITY needs this so that it can fill the new
* column with values from the sequence, while the association of the
* sequence with the table is not set until after the ALTER TABLE.
* Store the identity sequence name that we decided on. ALTER TABLE ...
* ADD COLUMN ... IDENTITY needs this so that it can fill the new column
* with values from the sequence, while the association of the sequence
* with the table is not set until after the ALTER TABLE.
*/
column->identitySequence = seqstmt->sequence;
@ -1193,14 +1193,14 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
*/
if (table_like_clause->options & CREATE_TABLE_LIKE_STATISTICS)
{
List *parent_extstats;
ListCell *l;
List *parent_extstats;
ListCell *l;
parent_extstats = RelationGetStatExtList(relation);
foreach(l, parent_extstats)
{
Oid parent_stat_oid = lfirst_oid(l);
Oid parent_stat_oid = lfirst_oid(l);
CreateStatsStmt *stats_stmt;
stats_stmt = generateClonedExtStatsStmt(cxt->relation,
@ -1643,16 +1643,16 @@ static CreateStatsStmt *
generateClonedExtStatsStmt(RangeVar *heapRel, Oid heapRelid,
Oid source_statsid)
{
HeapTuple ht_stats;
HeapTuple ht_stats;
Form_pg_statistic_ext statsrec;
CreateStatsStmt *stats;
List *stat_types = NIL;
List *def_names = NIL;
bool isnull;
Datum datum;
ArrayType *arr;
char *enabled;
int i;
List *stat_types = NIL;
List *def_names = NIL;
bool isnull;
Datum datum;
ArrayType *arr;
char *enabled;
int i;
Assert(OidIsValid(heapRelid));
Assert(heapRel != NULL);

@ -1486,7 +1486,7 @@ match_clause_to_partition_key(RelOptInfo *rel,
*/
if (op_in_opfamily(opclause->opno, partopfamily))
{
Oid oper;
Oid oper;
oper = OidIsValid(commutator) ? commutator : opclause->opno;
get_op_opfamily_properties(oper, partopfamily, false,
@ -1528,11 +1528,11 @@ match_clause_to_partition_key(RelOptInfo *rel,
{
switch (part_scheme->strategy)
{
/*
* For range and list partitioning, we need the ordering
* procedure with lefttype being the partition key's type, and
* righttype the clause's operator's right type.
*/
/*
* For range and list partitioning, we need the ordering
* procedure with lefttype being the partition key's type,
* and righttype the clause's operator's right type.
*/
case PARTITION_STRATEGY_LIST:
case PARTITION_STRATEGY_RANGE:
cmpfn =
@ -1541,10 +1541,10 @@ match_clause_to_partition_key(RelOptInfo *rel,
op_righttype, BTORDER_PROC);
break;
/*
* For hash partitioning, we need the hashing procedure for
* the clause's type.
*/
/*
* For hash partitioning, we need the hashing procedure
* for the clause's type.
*/
case PARTITION_STRATEGY_HASH:
cmpfn =
get_opfamily_proc(part_scheme->partopfamily[partkeyidx],

@ -112,9 +112,9 @@ PGSharedMemoryIsInUse(unsigned long id1, unsigned long id2)
static bool
EnableLockPagesPrivilege(int elevel)
{
HANDLE hToken;
HANDLE hToken;
TOKEN_PRIVILEGES tp;
LUID luid;
LUID luid;
if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &hToken))
{
@ -267,8 +267,8 @@ retry:
size);
/*
* Use the original size, not the rounded-up value, when falling back
* to non-huge pages.
* Use the original size, not the rounded-up value, when
* falling back to non-huge pages.
*/
size = orig_size;
flProtect = PAGE_READWRITE;

@ -243,8 +243,8 @@ perform_base_backup(basebackup_options *opt)
/*
* Once do_pg_start_backup has been called, ensure that any failure causes
* us to abort the backup so we don't "leak" a backup counter. For this
* reason, *all* functionality between do_pg_start_backup() and
* the end of do_pg_stop_backup() should be inside the error cleanup block!
* reason, *all* functionality between do_pg_start_backup() and the end of
* do_pg_stop_backup() should be inside the error cleanup block!
*/
PG_ENSURE_ERROR_CLEANUP(base_backup_cleanup, (Datum) 0);
@ -598,7 +598,7 @@ perform_base_backup(basebackup_options *opt)
{
if (total_checksum_failures > 1)
{
char buf[64];
char buf[64];
snprintf(buf, sizeof(buf), INT64_FORMAT, total_checksum_failures);
@ -1015,15 +1015,15 @@ sendDir(const char *path, int basepathlen, bool sizeonly, List *tablespaces,
char pathbuf[MAXPGPATH * 2];
struct stat statbuf;
int64 size = 0;
const char *lastDir; /* Split last dir from parent path. */
bool isDbDir = false; /* Does this directory contain relations? */
const char *lastDir; /* Split last dir from parent path. */
bool isDbDir = false; /* Does this directory contain relations? */
/*
* Determine if the current path is a database directory that can
* contain relations.
* Determine if the current path is a database directory that can contain
* relations.
*
* Start by finding the location of the delimiter between the parent
* path and the current path.
* Start by finding the location of the delimiter between the parent path
* and the current path.
*/
lastDir = last_dir_separator(path);
@ -1032,7 +1032,7 @@ sendDir(const char *path, int basepathlen, bool sizeonly, List *tablespaces,
strspn(lastDir + 1, "0123456789") == strlen(lastDir + 1))
{
/* Part of path that contains the parent directory. */
int parentPathLen = lastDir - path;
int parentPathLen = lastDir - path;
/*
* Mark path as a database directory if the parent path is either
@ -1051,7 +1051,7 @@ sendDir(const char *path, int basepathlen, bool sizeonly, List *tablespaces,
{
int excludeIdx;
bool excludeFound;
ForkNumber relForkNum; /* Type of fork if file is a relation */
ForkNumber relForkNum; /* Type of fork if file is a relation */
int relOidChars; /* Chars in filename that are the rel oid */
/* Skip special stuff */
@ -1104,8 +1104,8 @@ sendDir(const char *path, int basepathlen, bool sizeonly, List *tablespaces,
/* Never exclude init forks */
if (relForkNum != INIT_FORKNUM)
{
char initForkFile[MAXPGPATH];
char relOid[OIDCHARS + 1];
char initForkFile[MAXPGPATH];
char relOid[OIDCHARS + 1];
/*
* If any other type of fork, check if there is an init fork
@ -1417,10 +1417,10 @@ sendFile(const char *readfilename, const char *tarfilename, struct stat *statbuf
while ((cnt = fread(buf, 1, Min(sizeof(buf), statbuf->st_size - len), fp)) > 0)
{
/*
* The checksums are verified at block level, so we iterate over
* the buffer in chunks of BLCKSZ, after making sure that
* TAR_SEND_SIZE/buf is divisible by BLCKSZ and we read a multiple
* of BLCKSZ bytes.
* The checksums are verified at block level, so we iterate over the
* buffer in chunks of BLCKSZ, after making sure that
* TAR_SEND_SIZE/buf is divisible by BLCKSZ and we read a multiple of
* BLCKSZ bytes.
*/
Assert(TAR_SEND_SIZE % BLCKSZ == 0);
@ -1445,9 +1445,8 @@ sendFile(const char *readfilename, const char *tarfilename, struct stat *statbuf
* start of the base backup. Otherwise, they might have been
* written only halfway and the checksum would not be valid.
* However, replaying WAL would reinstate the correct page in
* this case.
* We also skip completely new pages, since they don't have
* a checksum yet.
* this case. We also skip completely new pages, since they
* don't have a checksum yet.
*/
if (!PageIsNew(page) && PageGetLSN(page) < startptr)
{

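The sendFile comments above describe the loop shape: the read buffer is a multiple of BLCKSZ, so it is walked one page at a time, skipping brand-new pages (no checksum yet) and pages whose LSN is past the backup start (they may be half-written, and WAL replay reinstates them). A schematic sketch of that control flow; the three page routines here are toy stand-ins, not the real PostgreSQL functions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLCKSZ 8192

/* toy stand-ins for PageIsNew / PageGetLSN / pg_checksum_page */
static int	page_is_new(const char *page) { return page[0] == 0; }
static uint64_t page_get_lsn(const char *page) { uint64_t l; memcpy(&l, page, sizeof(l)); return l; }
static uint16_t page_checksum(const char *page, uint32_t blkno) { return (uint16_t) (page[8] ^ blkno); }

static int
verify_buffer(const char *buf, size_t cnt, uint32_t blkno, uint64_t startptr)
{
	int			failures = 0;

	/* cnt is a multiple of BLCKSZ, so step through the buffer page-wise */
	for (size_t off = 0; off < cnt; off += BLCKSZ, blkno++)
	{
		const char *page = buf + off;

		/* skip new pages and pages written after the backup started */
		if (page_is_new(page) || page_get_lsn(page) >= startptr)
			continue;
		if (page_checksum(page, blkno) != 0)	/* 0 = pretend stored value */
			failures++;
	}
	return failures;
}

int
main(void)
{
	static char buf[2 * BLCKSZ];	/* two zeroed ("new") pages */

	printf("failures: %d\n", verify_buffer(buf, sizeof(buf), 0, 1000));
	return 0;
}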
@ -54,7 +54,7 @@ static WalReceiverConn *libpqrcv_connect(const char *conninfo,
static void libpqrcv_check_conninfo(const char *conninfo);
static char *libpqrcv_get_conninfo(WalReceiverConn *conn);
static void libpqrcv_get_senderinfo(WalReceiverConn *conn,
char **sender_host, int *sender_port);
char **sender_host, int *sender_port);
static char *libpqrcv_identify_system(WalReceiverConn *conn,
TimeLineID *primary_tli,
int *server_version);
@ -291,9 +291,9 @@ libpqrcv_get_conninfo(WalReceiverConn *conn)
*/
static void
libpqrcv_get_senderinfo(WalReceiverConn *conn, char **sender_host,
int *sender_port)
int *sender_port)
{
char *ret = NULL;
char *ret = NULL;
*sender_host = NULL;
*sender_port = 0;

@ -63,7 +63,7 @@ static void commit_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
static void change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
Relation relation, ReorderBufferChange *change);
static void truncate_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
int nrelations, Relation relations[], ReorderBufferChange *change);
int nrelations, Relation relations[], ReorderBufferChange *change);
static void message_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
XLogRecPtr message_lsn, bool transactional,
const char *prefix, Size message_size, const char *message);

@ -305,7 +305,7 @@ logicalrep_write_truncate(StringInfo out,
bool cascade, bool restart_seqs)
{
int i;
uint8 flags = 0;
uint8 flags = 0;
pq_sendbyte(out, 'T'); /* action TRUNCATE */
@ -332,7 +332,7 @@ logicalrep_read_truncate(StringInfo in,
int i;
int nrelids;
List *relids = NIL;
uint8 flags;
uint8 flags;
nrelids = pq_getmsgint(in, 4);

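logicalrep_write_truncate above sends the action byte 'T' and then folds the two TRUNCATE options into a single flags byte; logicalrep_read_truncate unfolds them the same way. A sketch of that bit-packing -- the bit assignments below are illustrative, not a claim about the actual wire values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_CASCADE		(1 << 0)	/* illustrative bit positions */
#define FLAG_RESTART_SEQS	(1 << 1)

int
main(void)
{
	bool		cascade = true;
	bool		restart_seqs = false;
	uint8_t		flags = 0;

	/* writer side: fold the booleans into one byte */
	if (cascade)
		flags |= FLAG_CASCADE;
	if (restart_seqs)
		flags |= FLAG_RESTART_SEQS;

	/* reader side: unfold them again */
	printf("cascade=%d restart_seqs=%d\n",
		   (flags & FLAG_CASCADE) != 0,
		   (flags & FLAG_RESTART_SEQS) != 0);
	return 0;
}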
@ -1493,36 +1493,36 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
break;
case REORDER_BUFFER_CHANGE_TRUNCATE:
{
int i;
int nrelids = change->data.truncate.nrelids;
int nrelations = 0;
Relation *relations;
relations = palloc0(nrelids * sizeof(Relation));
for (i = 0; i < nrelids; i++)
{
Oid relid = change->data.truncate.relids[i];
Relation relation;
relation = RelationIdGetRelation(relid);
if (relation == NULL)
elog(ERROR, "could not open relation with OID %u", relid);
if (!RelationIsLogicallyLogged(relation))
continue;
relations[nrelations++] = relation;
}
rb->apply_truncate(rb, txn, nrelations, relations, change);
for (i = 0; i < nrelations; i++)
RelationClose(relations[i]);
break;
}
int i;
int nrelids = change->data.truncate.nrelids;
int nrelations = 0;
Relation *relations;
relations = palloc0(nrelids * sizeof(Relation));
for (i = 0; i < nrelids; i++)
{
Oid relid = change->data.truncate.relids[i];
Relation relation;
relation = RelationIdGetRelation(relid);
if (relation == NULL)
elog(ERROR, "could not open relation with OID %u", relid);
if (!RelationIsLogicallyLogged(relation))
continue;
relations[nrelations++] = relation;
}
rb->apply_truncate(rb, txn, nrelations, relations, change);
for (i = 0; i < nrelations; i++)
RelationClose(relations[i]);
break;
}
case REORDER_BUFFER_CHANGE_MESSAGE:
rb->message(rb, txn, change->lsn, true,
@ -1744,7 +1744,7 @@ ReorderBufferAbortOld(ReorderBuffer *rb, TransactionId oldestRunningXid)
if (txn->serialized && txn->final_lsn == 0)
{
ReorderBufferChange *last =
dlist_tail_element(ReorderBufferChange, node, &txn->changes);
dlist_tail_element(ReorderBufferChange, node, &txn->changes);
txn->final_lsn = last->lsn;
}
@ -2660,9 +2660,9 @@ ReorderBufferSerializedPath(char *path, ReplicationSlot *slot, TransactionId xid
XLogSegNoOffsetToRecPtr(segno, 0, recptr, wal_segment_size);
snprintf(path, MAXPGPATH, "pg_replslot/%s/xid-%u-lsn-%X-%X.snap",
NameStr(MyReplicationSlot->data.name),
xid,
(uint32) (recptr >> 32), (uint32) recptr);
NameStr(MyReplicationSlot->data.name),
xid,
(uint32) (recptr >> 32), (uint32) recptr);
}
/*

@ -899,14 +899,14 @@ apply_handle_delete(StringInfo s)
static void
apply_handle_truncate(StringInfo s)
{
bool cascade = false;
bool restart_seqs = false;
List *remote_relids = NIL;
List *remote_rels = NIL;
List *rels = NIL;
List *relids = NIL;
List *relids_logged = NIL;
ListCell *lc;
bool cascade = false;
bool restart_seqs = false;
List *remote_relids = NIL;
List *remote_rels = NIL;
List *rels = NIL;
List *relids = NIL;
List *relids_logged = NIL;
ListCell *lc;
ensure_transaction();
@ -936,9 +936,9 @@ apply_handle_truncate(StringInfo s)
}
/*
* Even if we used CASCADE on the upstream master we explicitly
* default to replaying changes without further cascading.
* This might be later changeable with a user specified option.
* Even if we used CASCADE on the upstream master we explicitly default to
* replaying changes without further cascading. This might be later
* changeable with a user specified option.
*/
ExecuteTruncateGuts(rels, relids, relids_logged, DROP_RESTRICT, restart_seqs);

@ -40,8 +40,8 @@ static void pgoutput_change(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn, Relation rel,
ReorderBufferChange *change);
static void pgoutput_truncate(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn, int nrelations, Relation relations[],
ReorderBufferChange *change);
ReorderBufferTXN *txn, int nrelations, Relation relations[],
ReorderBufferChange *change);
static bool pgoutput_origin_filter(LogicalDecodingContext *ctx,
RepOriginId origin_id);

@ -342,8 +342,8 @@ static XLogRecPtr
pg_logical_replication_slot_advance(XLogRecPtr startlsn, XLogRecPtr moveto)
{
LogicalDecodingContext *ctx;
ResourceOwner old_resowner = CurrentResourceOwner;
XLogRecPtr retlsn = InvalidXLogRecPtr;
ResourceOwner old_resowner = CurrentResourceOwner;
XLogRecPtr retlsn = InvalidXLogRecPtr;
PG_TRY();
{

@ -1461,8 +1461,8 @@ pg_stat_get_wal_receiver(PG_FUNCTION_ARGS)
{
/*
* Only superusers and members of pg_read_all_stats can see details.
* Other users only get the pid value
* to know whether it is a WAL receiver, but no details.
* Other users only get the pid value to know whether it is a WAL
* receiver, but no details.
*/
MemSet(&nulls[1], true, sizeof(bool) * (tupdesc->natts - 1));
}

@ -1153,7 +1153,7 @@ static void
WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid,
bool last_write)
{
TimestampTz now;
TimestampTz now;
/* output previously gathered data in a CopyData packet */
pq_putmessage_noblock('d', ctx->out->data, ctx->out->len);
@ -3247,9 +3247,9 @@ pg_stat_get_wal_senders(PG_FUNCTION_ARGS)
if (!is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_STATS))
{
/*
* Only superusers and members of pg_read_all_stats can see details.
* Other users only get the pid value to know it's a walsender,
* but no details.
* Only superusers and members of pg_read_all_stats can see
* details. Other users only get the pid value to know it's a
* walsender, but no details.
*/
MemSet(&nulls[1], true, PG_STAT_GET_WAL_SENDERS_COLS - 1);
}

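Both stats functions above use the same masking trick for unprivileged callers: the pid column stays visible and every other output column is nulled with one MemSet over the nulls array. A small sketch, with plain memset in place of MemSet:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NATTS 5					/* pretend the view has five columns */

int
main(void)
{
	bool		nulls[NATTS] = {false};

	/* keep column 0 (pid); null out columns 1 .. NATTS-1 in one call */
	memset(&nulls[1], true, sizeof(bool) * (NATTS - 1));

	for (int i = 0; i < NATTS; i++)
		printf("col %d: %s\n", i, nulls[i] ? "null" : "visible");
	return 0;
}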
@ -213,9 +213,9 @@ MakeNewSharedSegment(BufFile *buffile, int segment)
/*
* It is possible that there are files left over from before a crash
* restart with the same name. In order for BufFileOpenShared()
* not to get confused about how many segments there are, we'll unlink
* the next segment number if it already exists.
* restart with the same name. In order for BufFileOpenShared() not to
* get confused about how many segments there are, we'll unlink the next
* segment number if it already exists.
*/
SharedSegmentName(name, buffile->name, segment + 1);
SharedFileSetDelete(buffile->fileset, name, true);

@ -1203,9 +1203,10 @@ shm_mq_inc_bytes_read(shm_mq *mq, Size n)
/*
* Separate prior reads of mq_ring from the increment of mq_bytes_read
* which follows. This pairs with the full barrier in shm_mq_send_bytes().
* We only need a read barrier here because the increment of mq_bytes_read
* is actually a read followed by a dependent write.
* which follows. This pairs with the full barrier in
* shm_mq_send_bytes(). We only need a read barrier here because the
* increment of mq_bytes_read is actually a read followed by a dependent
* write.
*/
pg_read_barrier();

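The shm_mq_inc_bytes_read comment above argues a read barrier suffices because the counter update is a read followed by a dependent write, and the barrier keeps the earlier ring-buffer reads ahead of it, pairing with the sender's full barrier. A compressed single-threaded sketch of the ordering, using a C11 acquire fence as a rough stand-in for pg_read_barrier():

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t mq_bytes_read;
static char mq_ring[64];

static void
inc_bytes_read(uint64_t n)
{
	/*
	 * Reads from mq_ring issued before this fence must complete before
	 * the counter increment below becomes visible; this plays the role
	 * of pg_read_barrier() in the real code.
	 */
	atomic_thread_fence(memory_order_acquire);

	/* the "read followed by a dependent write" the comment describes */
	atomic_store_explicit(&mq_bytes_read,
						  atomic_load_explicit(&mq_bytes_read,
											   memory_order_relaxed) + n,
						  memory_order_release);
}

int
main(void)
{
	mq_ring[0] = 'x';			/* pretend one byte was consumed */
	inc_bytes_read(1);
	printf("bytes read: %llu\n",
		   (unsigned long long) atomic_load(&mq_bytes_read));
	return 0;
}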
@@ -792,9 +792,9 @@ standard_ProcessUtility(PlannedStmt *pstmt,
 * intended effect!
 */
 PreventInTransactionBlock(isTopLevel,
-(stmt->kind == REINDEX_OBJECT_SCHEMA) ? "REINDEX SCHEMA" :
-(stmt->kind == REINDEX_OBJECT_SYSTEM) ? "REINDEX SYSTEM" :
-"REINDEX DATABASE");
+(stmt->kind == REINDEX_OBJECT_SCHEMA) ? "REINDEX SCHEMA" :
+(stmt->kind == REINDEX_OBJECT_SYSTEM) ? "REINDEX SYSTEM" :
+"REINDEX DATABASE");
 ReindexMultipleTables(stmt->name, stmt->kind, stmt->options);
 break;
 default:
@@ -1291,7 +1291,7 @@ ProcessUtilitySlow(ParseState *pstate,
 if (stmt->concurrent)
 PreventInTransactionBlock(isTopLevel,
-"CREATE INDEX CONCURRENTLY");
+"CREATE INDEX CONCURRENTLY");
 /*
 * Look up the relation OID just once, right here at the
@@ -1700,7 +1700,7 @@ ExecDropStmt(DropStmt *stmt, bool isTopLevel)
 case OBJECT_INDEX:
 if (stmt->concurrent)
 PreventInTransactionBlock(isTopLevel,
-"DROP INDEX CONCURRENTLY");
+"DROP INDEX CONCURRENTLY");
 /* fall through */
 case OBJECT_TABLE:

@@ -660,7 +660,7 @@ Datum
 websearch_to_tsquery_byid(PG_FUNCTION_ARGS)
 {
 text *in = PG_GETARG_TEXT_PP(1);
-MorphOpaque data;
+MorphOpaque data;
 TSQuery query = NULL;
 data.cfg_id = PG_GETARG_OID(0);

@@ -187,8 +187,8 @@ indexam_property(FunctionCallInfo fcinfo,
 }
 /*
-* At this point, either index_oid == InvalidOid or it's a valid index OID.
-* Also, after this test and the one below, either attno == 0 for
+* At this point, either index_oid == InvalidOid or it's a valid index
+* OID. Also, after this test and the one below, either attno == 0 for
 * index-wide or AM-wide tests, or it's a valid column number in a valid
 * index.
 */
@@ -276,6 +276,7 @@ indexam_property(FunctionCallInfo fcinfo,
 break;
 case AMPROP_ORDERABLE:
+
 /*
 * generic assumption is that nonkey columns are not orderable
 */
@@ -293,8 +294,9 @@ indexam_property(FunctionCallInfo fcinfo,
 * getting there from just the index column type seems like a
 * lot of work. So instead we expect the AM to handle this in
 * its amproperty routine. The generic result is to return
-* false if the AM says it never supports this, or if this is a
-* nonkey column, and null otherwise (meaning we don't know).
+* false if the AM says it never supports this, or if this is
+* a nonkey column, and null otherwise (meaning we don't
+* know).
 */
 if (!iskey || !routine->amcanorderbyop)
 {
@@ -314,8 +316,8 @@ indexam_property(FunctionCallInfo fcinfo,
 {
 /*
 * If possible, the AM should handle this test in its
-* amproperty function without opening the rel. But this is the
-* generic fallback if it does not.
+* amproperty function without opening the rel. But this
+* is the generic fallback if it does not.
 */
 Relation indexrel = index_open(index_oid, AccessShareLock);
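
Several of these indexam_property() hunks revolve around one design point: the generic fallbacks must distinguish a definite "false" (the AM says never, or the column is a nonkey column) from "unknown", which is reported as SQL NULL. A tiny sketch of that tri-state convention (the enum is illustrative; the real code returns a nullable Datum):

    #include <stdbool.h>

    typedef enum
    {
        PROP_FALSE,             /* definitely not supported */
        PROP_TRUE,
        PROP_NULL               /* we don't know; report SQL NULL */
    } prop_result;

    /* Mirrors the distance-orderable fallback described above. */
    static prop_result
    order_by_op_fallback(bool iskey, bool amcanorderbyop)
    {
        if (!iskey || !amcanorderbyop)
            return PROP_FALSE;
        return PROP_NULL;
    }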

@@ -3905,7 +3905,7 @@ do_to_timestamp(text *date_txt, text *fmt,
 DateTimeParseError(DTERR_TZDISP_OVERFLOW, date_str, "timestamp");
 tz = psprintf("%c%02d:%02d",
-tmfc.tzsign > 0 ? '+' : '-', tmfc.tzh, tmfc.tzm);
+tmfc.tzsign > 0 ? '+' : '-', tmfc.tzh, tmfc.tzm);
 tm->tm_zone = tz;
 }
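
For reference, the realigned psprintf() arguments build a numeric zone string of the form "+05:30" from the parsed sign/hour/minute fields. The same formatting in standalone C (field names are illustrative):

    #include <stdio.h>

    /* Render a parsed UTC offset the way do_to_timestamp() does. */
    static void
    format_tz(char *buf, size_t buflen, int tzsign, int tzh, int tzm)
    {
        snprintf(buf, buflen, "%c%02d:%02d",
                 tzsign > 0 ? '+' : '-', tzh, tzm);
    }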

@@ -686,10 +686,10 @@ spg_box_quad_leaf_consistent(PG_FUNCTION_ARGS)
 /* Perform the required comparison(s) */
 for (i = 0; i < in->nkeys; i++)
 {
-StrategyNumber strategy = in->scankeys[i].sk_strategy;
-BOX *box = spg_box_quad_get_scankey_bbox(&in->scankeys[i],
-&out->recheck);
-Datum query = BoxPGetDatum(box);
+StrategyNumber strategy = in->scankeys[i].sk_strategy;
+BOX *box = spg_box_quad_get_scankey_bbox(&in->scankeys[i],
+&out->recheck);
+Datum query = BoxPGetDatum(box);
 switch (strategy)
 {
@@ -790,7 +790,7 @@ spg_bbox_quad_config(PG_FUNCTION_ARGS)
 Datum
 spg_poly_quad_compress(PG_FUNCTION_ARGS)
 {
-POLYGON *polygon = PG_GETARG_POLYGON_P(0);
+POLYGON *polygon = PG_GETARG_POLYGON_P(0);
 BOX *box;
 box = box_copy(&polygon->boundbox);

@@ -1861,8 +1861,8 @@ JsonbExtractScalar(JsonbContainer *jbc, JsonbValue *res)
 return NULL;
 /*
-* A root scalar is stored as an array of one element, so we get the
-* array and then its first (and only) member.
+* A root scalar is stored as an array of one element, so we get the array
+* and then its first (and only) member.
 */
 it = JsonbIteratorInit(jbc);
@@ -1871,11 +1871,11 @@ JsonbExtractScalar(JsonbContainer *jbc, JsonbValue *res)
 Assert(tmp.val.array.nElems == 1 && tmp.val.array.rawScalar);
 tok = JsonbIteratorNext(&it, res, true);
-Assert (tok == WJB_ELEM);
+Assert(tok == WJB_ELEM);
 Assert(IsAJsonbScalar(res));
 tok = JsonbIteratorNext(&it, &tmp, true);
-Assert (tok == WJB_END_ARRAY);
+Assert(tok == WJB_END_ARRAY);
 tok = JsonbIteratorNext(&it, &tmp, true);
 Assert(tok == WJB_DONE);
@@ -1912,7 +1912,8 @@ jsonb_numeric(PG_FUNCTION_ARGS)
 errmsg("jsonb value must be numeric")));
 /*
-* v.val.numeric points into jsonb body, so we need to make a copy to return
+* v.val.numeric points into jsonb body, so we need to make a copy to
+* return
 */
 retValue = DatumGetNumericCopy(NumericGetDatum(v.val.numeric));
@@ -1925,7 +1926,7 @@ Datum
 jsonb_int2(PG_FUNCTION_ARGS)
 {
 Jsonb *in = PG_GETARG_JSONB_P(0);
-JsonbValue v;
+JsonbValue v;
 Datum retValue;
 if (!JsonbExtractScalar(&in->root, &v) || v.type != jbvNumeric)
@@ -1945,7 +1946,7 @@ Datum
 jsonb_int4(PG_FUNCTION_ARGS)
 {
 Jsonb *in = PG_GETARG_JSONB_P(0);
-JsonbValue v;
+JsonbValue v;
 Datum retValue;
 if (!JsonbExtractScalar(&in->root, &v) || v.type != jbvNumeric)
@@ -1965,7 +1966,7 @@ Datum
 jsonb_int8(PG_FUNCTION_ARGS)
 {
 Jsonb *in = PG_GETARG_JSONB_P(0);
-JsonbValue v;
+JsonbValue v;
 Datum retValue;
 if (!JsonbExtractScalar(&in->root, &v) || v.type != jbvNumeric)
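
The JsonbExtractScalar() hunks document the storage convention these conversions rely on: a root-level scalar is wrapped in a one-element "raw scalar" array, so the iterator must yield exactly BEGIN_ARRAY, ELEM, END_ARRAY, DONE. A sketch of that token walk using the iterator calls visible above (backend-only code; the Asserts from the hunk stand in for error handling):

    #include "postgres.h"
    #include "utils/jsonb.h"

    /* Pull the single scalar out of a root-scalar jsonb container. */
    static void
    extract_scalar_sketch(JsonbContainer *jbc, JsonbValue *res)
    {
        JsonbIterator *it = JsonbIteratorInit(jbc);
        JsonbValue  tmp;

        (void) JsonbIteratorNext(&it, &tmp, true);  /* WJB_BEGIN_ARRAY */
        (void) JsonbIteratorNext(&it, res, true);   /* WJB_ELEM: the scalar */
        (void) JsonbIteratorNext(&it, &tmp, true);  /* WJB_END_ARRAY */
        (void) JsonbIteratorNext(&it, &tmp, true);  /* WJB_DONE */
    }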

@@ -60,7 +60,8 @@ typedef struct IterateJsonStringValuesState
 JsonIterateStringValuesAction action; /* an action that will be applied
 * to each json value */
 void *action_state; /* any necessary context for iteration */
-uint32 flags; /* what kind of elements from a json we want to iterate */
+uint32 flags; /* what kind of elements from a json we want
+* to iterate */
 } IterateJsonStringValuesState;
 /* state for transform_json_string_values function */
@@ -4950,19 +4951,19 @@ setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
 uint32
 parse_jsonb_index_flags(Jsonb *jb)
 {
-JsonbIterator *it;
-JsonbValue v;
-JsonbIteratorToken type;
-uint32 flags = 0;
+JsonbIterator *it;
+JsonbValue v;
+JsonbIteratorToken type;
+uint32 flags = 0;
 it = JsonbIteratorInit(&jb->root);
 type = JsonbIteratorNext(&it, &v, false);
 /*
-* We iterate over array (scalar internally is represented as array, so, we
-* will accept it too) to check all its elements. Flag names are chosen
-* the same as jsonb_typeof uses.
+* We iterate over array (scalar internally is represented as array, so,
+* we will accept it too) to check all its elements. Flag names are
+* chosen the same as jsonb_typeof uses.
 */
 if (type != WJB_BEGIN_ARRAY)
 ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -4977,7 +4978,7 @@ parse_jsonb_index_flags(Jsonb *jb)
 errhint("Possible values are: \"string\", \"numeric\", \"boolean\", \"key\" and \"all\"")));
 if (v.val.string.len == 3 &&
-pg_strncasecmp(v.val.string.val, "all", 3) == 0)
+pg_strncasecmp(v.val.string.val, "all", 3) == 0)
 flags |= jtiAll;
 else if (v.val.string.len == 3 &&
 pg_strncasecmp(v.val.string.val, "key", 3) == 0)
@@ -5045,7 +5046,7 @@ iterate_jsonb_values(Jsonb *jb, uint32 flags, void *state,
 }
 /* JsonbValue is a value of object or element of array */
-switch(v.type)
+switch (v.type)
 {
 case jbvString:
 if (flags & jtiString)
@@ -5054,10 +5055,10 @@ iterate_jsonb_values(Jsonb *jb, uint32 flags, void *state,
 case jbvNumeric:
 if (flags & jtiNumeric)
 {
-char *val;
+char *val;
 val = DatumGetCString(DirectFunctionCall1(numeric_out,
-NumericGetDatum(v.val.numeric)));
+NumericGetDatum(v.val.numeric)));
 action(state, val, strlen(val));
 pfree(val);
@@ -5112,7 +5113,7 @@ iterate_values_scalar(void *state, char *token, JsonTokenType tokentype)
 {
 IterateJsonStringValuesState *_state = (IterateJsonStringValuesState *) state;
-switch(tokentype)
+switch (tokentype)
 {
 case JSON_TOKEN_STRING:
 if (_state->flags & jtiString)
@@ -5140,7 +5141,8 @@ iterate_values_object_field_start(void *state, char *fname, bool isnull)
 if (_state->flags & jtiKey)
 {
-char *val = pstrdup(fname);
+char *val = pstrdup(fname);
+
 _state->action(_state->action_state, val, strlen(val));
 }
 }
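
parse_jsonb_index_flags() reduces an array of flag-name strings to a bitmask, and iterate_jsonb_values() then tests those bits per value type. A standalone sketch of the name-to-bit step (bit values are illustrative; the real jtiString/jtiNumeric/... constants live with the jsonfuncs code):

    #include <stddef.h>
    #include <stdint.h>
    #include <strings.h>

    #define JTI_STRING  (1u << 0)
    #define JTI_NUMERIC (1u << 1)
    #define JTI_BOOL    (1u << 2)
    #define JTI_KEY     (1u << 3)
    #define JTI_ALL     (JTI_STRING | JTI_NUMERIC | JTI_BOOL | JTI_KEY)

    /* Translate one flag name, as the loop above does per array element;
     * returns 0 for an unrecognized name. */
    static uint32_t
    flag_bit(const char *name, size_t len)
    {
        if (len == 3 && strncasecmp(name, "all", 3) == 0)
            return JTI_ALL;
        if (len == 3 && strncasecmp(name, "key", 3) == 0)
            return JTI_KEY;
        if (len == 6 && strncasecmp(name, "string", 6) == 0)
            return JTI_STRING;
        if (len == 7 && strncasecmp(name, "numeric", 7) == 0)
            return JTI_NUMERIC;
        if (len == 7 && strncasecmp(name, "boolean", 7) == 0)
            return JTI_BOOL;
        return 0;
    }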

@@ -63,9 +63,9 @@ typedef enum
 * *strval, *lenval and *weight are filled in when return value is PT_VAL
 *
 */
-typedef ts_tokentype (*ts_tokenizer)(TSQueryParserState state, int8 *operator,
-int *lenval, char **strval,
-int16 *weight, bool *prefix);
+typedef ts_tokentype (*ts_tokenizer) (TSQueryParserState state, int8 *operator,
+int *lenval, char **strval,
+int16 *weight, bool *prefix);
 struct TSQueryParserStateData
 {
@@ -233,7 +233,7 @@ parse_phrase_operator(TSQueryParserState pstate, int16 *distance)
 static bool
 parse_or_operator(TSQueryParserState pstate)
 {
-char *ptr = pstate->buf;
+char *ptr = pstate->buf;
 if (pstate->in_quotes)
 return false;
@@ -245,26 +245,26 @@ parse_or_operator(TSQueryParserState pstate)
 ptr += 2;
 /*
-* it shouldn't be a part of any word but somewhere later it should be some
-* operand
+* it shouldn't be a part of any word but somewhere later it should be
+* some operand
 */
-if (*ptr == '\0') /* no operand */
+if (*ptr == '\0') /* no operand */
 return false;
 /* it shouldn't be a part of any word */
-if (t_iseq(ptr, '-') || t_iseq(ptr, '_') || t_isalpha(ptr) || t_isdigit(ptr))
+if (t_iseq(ptr, '-') || t_iseq(ptr, '_') || t_isalpha(ptr) || t_isdigit(ptr))
 return false;
-for(;;)
+for (;;)
 {
 ptr += pg_mblen(ptr);
-if (*ptr == '\0') /* got end of string without operand */
+if (*ptr == '\0') /* got end of string without operand */
 return false;
 /*
-* Suppose, we found an operand, but could be a not correct operand. So
-* we still treat OR literal as operation with possibly incorrect
+* Suppose, we found an operand, but could be a not correct operand.
+* So we still treat OR literal as operation with possibly incorrect
 * operand and will not search it as lexeme
 */
 if (!t_isspace(ptr))
@@ -312,7 +312,10 @@ gettoken_query_standard(TSQueryParserState state, int8 *operator,
 }
 else if (!t_isspace(state->buf))
 {
-/* We rely on the tsvector parser to parse the value for us */
+/*
+* We rely on the tsvector parser to parse the value for
+* us
+*/
 reset_tsvector_parser(state->valstate, state->buf);
 if (gettoken_tsvector(state->valstate, strval, lenval,
 NULL, NULL, &state->buf))
@@ -437,7 +440,10 @@ gettoken_query_websearch(TSQueryParserState state, int8 *operator,
 }
 else if (!t_isspace(state->buf))
 {
-/* We rely on the tsvector parser to parse the value for us */
+/*
+* We rely on the tsvector parser to parse the value for
+* us
+*/
 reset_tsvector_parser(state->valstate, state->buf);
 if (gettoken_tsvector(state->valstate, strval, lenval,
 NULL, NULL, &state->buf))
@@ -464,8 +470,8 @@ gettoken_query_websearch(TSQueryParserState state, int8 *operator,
 if (!state->in_quotes)
 {
 /*
-* put implicit AND after an operand
-* and handle this quote in WAITOPERAND
+* put implicit AND after an operand and handle this
+* quote in WAITOPERAND
 */
 state->state = WAITOPERAND;
 *operator = OP_AND;
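
parse_or_operator() accepts "OR" as an operator only when it stands alone: not inside quotes, not continuing into a word character, and eventually followed by an operand. A standalone ASCII-only sketch of the delimiter test (the real code uses pg_mblen() and the t_is*() classifiers for multibyte text, and the forward scan for the operand is elided here):

    #include <ctype.h>
    #include <stdbool.h>

    /* Could the two characters at p be a free-standing "or"? */
    static bool
    is_or_operator(const char *p, bool in_quotes)
    {
        if (in_quotes)
            return false;
        if (tolower((unsigned char) p[0]) != 'o' ||
            tolower((unsigned char) p[1]) != 'r')
            return false;
        p += 2;
        if (*p == '\0')         /* no operand can follow */
            return false;
        /* must not be part of a word: "or_", "or-", "orbit", "or2", ... */
        if (*p == '-' || *p == '_' ||
            isalpha((unsigned char) *p) || isdigit((unsigned char) *p))
            return false;
        return true;
    }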

@@ -743,7 +743,7 @@ RelationBuildRuleLock(Relation relation)
 ALLOCSET_SMALL_SIZES);
 relation->rd_rulescxt = rulescxt;
 MemoryContextCopyAndSetIdentifier(rulescxt,
-RelationGetRelationName(relation));
+RelationGetRelationName(relation));
 /*
 * allocate an array to hold the rewrite rules (the array is extended if
@@ -1400,7 +1400,7 @@ RelationInitIndexAccessInfo(Relation relation)
 ALLOCSET_SMALL_SIZES);
 relation->rd_indexcxt = indexcxt;
 MemoryContextCopyAndSetIdentifier(indexcxt,
-RelationGetRelationName(relation));
+RelationGetRelationName(relation));
 /*
 * Now we can fetch the index AM's API struct
@@ -4678,16 +4678,17 @@ RelationGetIndexPredicate(Relation relation)
 expensive, so we don't attempt it by default.
 * 2. "recheck_on_update" index option explicitly set by user, which overrides 1)
 */
-static bool IsProjectionFunctionalIndex(Relation index, IndexInfo* ii)
+static bool
+IsProjectionFunctionalIndex(Relation index, IndexInfo *ii)
 {
-bool is_projection = false;
+bool is_projection = false;
 if (ii->ii_Expressions)
 {
-HeapTuple tuple;
-Datum reloptions;
-bool isnull;
-QualCost index_expr_cost;
+HeapTuple tuple;
+Datum reloptions;
+bool isnull;
+QualCost index_expr_cost;
 /* by default functional index is considered as non-injective */
 is_projection = true;
@@ -4704,7 +4705,7 @@ static bool IsProjectionFunctionalIndex(Relation index, IndexInfo* ii)
 * inserting a new index entry for the changed value.
 */
 if ((index_expr_cost.startup + index_expr_cost.per_tuple) >
-HEURISTIC_MAX_HOT_RECHECK_EXPR_COST)
+HEURISTIC_MAX_HOT_RECHECK_EXPR_COST)
 is_projection = false;
 tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(RelationGetRelid(index)));
@@ -4758,7 +4759,7 @@ Bitmapset *
 RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
 {
 Bitmapset *indexattrs; /* columns used in non-projection indexes */
-Bitmapset *projindexattrs; /* columns used in projection indexes */
+Bitmapset *projindexattrs; /* columns used in projection indexes */
 Bitmapset *uindexattrs; /* columns in unique indexes */
 Bitmapset *pkindexattrs; /* columns in the primary index */
 Bitmapset *idindexattrs; /* columns in the replica identity */
@@ -4769,7 +4770,7 @@ RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
 Oid relreplindex;
 ListCell *l;
 MemoryContext oldcxt;
-int indexno;
+int indexno;
 /* Quick exit if we already computed the result. */
 if (relation->rd_indexattr != NULL)
@@ -5479,7 +5480,7 @@ load_relcache_init_file(bool shared)
 ALLOCSET_SMALL_SIZES);
 rel->rd_indexcxt = indexcxt;
 MemoryContextCopyAndSetIdentifier(indexcxt,
-RelationGetRelationName(rel));
+RelationGetRelationName(rel));
 /*
 * Now we can fetch the index AM's API struct. (We can't store
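
IsProjectionFunctionalIndex() gates HOT-update rechecking on planner cost: if evaluating the index expression costs more than HEURISTIC_MAX_HOT_RECHECK_EXPR_COST, recomputing it on every update is presumed worse than just inserting a new index entry, so the index is not treated as a projection index. The comparison in isolation (struct and threshold value are illustrative stand-ins for QualCost and the backend's constant):

    #include <stdbool.h>

    typedef struct
    {
        double  startup;        /* one-time evaluation cost */
        double  per_tuple;      /* cost per tuple */
    } qual_cost;

    #define MAX_HOT_RECHECK_EXPR_COST 1000.0    /* illustrative threshold */

    /* Too expensive to recompute per HOT update? Then fall back to a
     * plain (non-projection) index treatment. */
    static bool
    recheck_too_costly(qual_cost c)
    {
        return (c.startup + c.per_tuple) > MAX_HOT_RECHECK_EXPR_COST;
    }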
