Manual cleanup of pgindent results.

Fix some places where pgindent did silly stuff, often because project
style wasn't followed to begin with.  (I've not touched the atomics
headers, though.)
Tom Lane 10 years ago
parent 17b48a1a9f
commit 2aa0476dc3
10 changed files:

  76  contrib/pg_audit/pg_audit.c
  15  src/backend/access/tablesample/bernoulli.c
   9  src/backend/access/tablesample/tablesample.c
   4  src/backend/executor/execUtils.c
   8  src/backend/executor/nodeAgg.c
   8  src/backend/executor/nodeHash.c
  25  src/backend/optimizer/plan/planner.c
   4  src/backend/rewrite/rowsecurity.c
   2  src/backend/utils/adt/jsonb.c
   4  src/backend/utils/adt/ruleutils.c

@@ -212,19 +212,19 @@ typedef struct
 	int64		statementId;	/* Simple counter */
 	int64		substatementId; /* Simple counter */
-	LogStmtLevel logStmtLevel;	/* From GetCommandLogLevel when possible, */
-	/* generated when not. */
+	LogStmtLevel logStmtLevel;	/* From GetCommandLogLevel when possible,
+								 * generated when not. */
 	NodeTag		commandTag;		/* same here */
 	const char *command;		/* same here */
-	const char *objectType;		/* From event trigger when possible */
-	/* generated when not. */
+	const char *objectType;		/* From event trigger when possible, generated
+								 * when not. */
 	char	   *objectName;		/* Fully qualified object identification */
 	const char *commandText;	/* sourceText / queryString */
 	ParamListInfo paramList;	/* QueryDesc/ProcessUtility parameters */
 	bool		granted;		/* Audit role has object permissions? */
-	bool		logged;			/* Track if we have logged this event, used */
-	/* post-ProcessUtility to make sure we log */
+	bool		logged;			/* Track if we have logged this event, used
+								 * post-ProcessUtility to make sure we log */
 	bool		statementLogged;	/* Track if we have logged the statement */
 } AuditEvent;
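
Every change in this hunk is the same repair: pgindent had turned the continuation of a wrapped comment into a second, free-standing /* ... */ comment. Project style keeps one comment per item, with continuation lines aligned under the opening marker, as in this illustrative snippet (not from the patch):

	typedef struct
	{
		int		counter;	/* a wrapped comment keeps its single opening
							 * marker and continues on aligned lines */
	} Example;
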
@@ -467,7 +467,7 @@ log_audit_event(AuditEventStackItem *stackItem)
 	/* Classify the statement using log stmt level and the command tag */
 	switch (stackItem->auditEvent.logStmtLevel)
 	{
-			/* All mods go in WRITE class, execpt EXECUTE */
+			/* All mods go in WRITE class, except EXECUTE */
 		case LOGSTMT_MOD:
 			className = CLASS_WRITE;
 			class = LOG_WRITE;
@@ -553,13 +553,14 @@ log_audit_event(AuditEventStackItem *stackItem)
 			break;
 	}
-	/*
+	/*----------
 	 * Only log the statement if:
 	 *
-	 * 1. If object was selected for audit logging (granted) 2. The statement
-	 * belongs to a class that is being logged
+	 * 1. If object was selected for audit logging (granted), or
+	 * 2. The statement belongs to a class that is being logged
 	 *
 	 * If neither of these is true, return.
+	 *----------
 	 */
 	if (!stackItem->auditEvent.granted && !(auditLogBitmap & class))
 		return;
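
The dash fence added here (/*---------- to open, *---------- before the close) marks a comment as hand-formatted; pgindent leaves such comments alone instead of reflowing them, which is what had previously collapsed this numbered list into run-on text. A minimal illustration of the convention:

	/*----------
	 * Only log when:
	 * 1. the first condition holds, or
	 * 2. the second condition holds
	 *----------
	 */
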
@@ -979,57 +980,39 @@ log_select_dml(Oid auditOid, List *rangeTabls)
 		switch (rte->relkind)
 		{
 			case RELKIND_RELATION:
-				auditEventStack->auditEvent.objectType =
-					OBJECT_TYPE_TABLE;
+				auditEventStack->auditEvent.objectType = OBJECT_TYPE_TABLE;
 				break;
 			case RELKIND_INDEX:
-				auditEventStack->auditEvent.objectType =
-					OBJECT_TYPE_INDEX;
+				auditEventStack->auditEvent.objectType = OBJECT_TYPE_INDEX;
 				break;
 			case RELKIND_SEQUENCE:
-				auditEventStack->auditEvent.objectType =
-					OBJECT_TYPE_SEQUENCE;
+				auditEventStack->auditEvent.objectType = OBJECT_TYPE_SEQUENCE;
 				break;
 			case RELKIND_TOASTVALUE:
-				auditEventStack->auditEvent.objectType =
-					OBJECT_TYPE_TOASTVALUE;
+				auditEventStack->auditEvent.objectType = OBJECT_TYPE_TOASTVALUE;
 				break;
 			case RELKIND_VIEW:
-				auditEventStack->auditEvent.objectType =
-					OBJECT_TYPE_VIEW;
+				auditEventStack->auditEvent.objectType = OBJECT_TYPE_VIEW;
 				break;
 			case RELKIND_COMPOSITE_TYPE:
-				auditEventStack->auditEvent.objectType =
-					OBJECT_TYPE_COMPOSITE_TYPE;
+				auditEventStack->auditEvent.objectType = OBJECT_TYPE_COMPOSITE_TYPE;
 				break;
 			case RELKIND_FOREIGN_TABLE:
-				auditEventStack->auditEvent.objectType =
-					OBJECT_TYPE_FOREIGN_TABLE;
+				auditEventStack->auditEvent.objectType = OBJECT_TYPE_FOREIGN_TABLE;
 				break;
 			case RELKIND_MATVIEW:
-				auditEventStack->auditEvent.objectType =
-					OBJECT_TYPE_MATVIEW;
+				auditEventStack->auditEvent.objectType = OBJECT_TYPE_MATVIEW;
 				break;
 			default:
-				auditEventStack->auditEvent.objectType =
-					OBJECT_TYPE_UNKNOWN;
+				auditEventStack->auditEvent.objectType = OBJECT_TYPE_UNKNOWN;
 				break;
 		}
@@ -1043,9 +1026,7 @@ log_select_dml(Oid auditOid, List *rangeTabls)
 		/* Perform object auditing only if the audit role is valid */
 		if (auditOid != InvalidOid)
 		{
-			AclMode		auditPerms =
-			(ACL_SELECT | ACL_UPDATE | ACL_INSERT | ACL_DELETE) &
-			rte->requiredPerms;
+			AclMode		auditPerms = (ACL_SELECT | ACL_UPDATE | ACL_INSERT | ACL_DELETE) & rte->requiredPerms;
 
 			/*
 			 * If any of the required permissions for the relation are granted
@@ -1166,7 +1147,6 @@ log_function_execute(Oid objectId)
 	stackItem->auditEvent.commandTag = T_DoStmt;
 	stackItem->auditEvent.command = COMMAND_EXECUTE;
 	stackItem->auditEvent.objectType = OBJECT_TYPE_FUNCTION;
-
 	stackItem->auditEvent.commandText = stackItem->next->auditEvent.commandText;
 
 	log_audit_event(stackItem);
@@ -1459,8 +1439,7 @@ pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
 	/* Supply object name and type for audit event */
 	auditEventStack->auditEvent.objectType =
 		SPI_getvalue(spiTuple, spiTupDesc, 1);
-
 	auditEventStack->auditEvent.objectName =
 		SPI_getvalue(spiTuple, spiTupDesc, 2);
@@ -1545,8 +1524,7 @@ pg_audit_sql_drop(PG_FUNCTION_ARGS)
 		spiTuple = SPI_tuptable->vals[row];
-
 		auditEventStack->auditEvent.objectType =
 			SPI_getvalue(spiTuple, spiTupDesc, 1);
 		auditEventStack->auditEvent.objectName =
 			SPI_getvalue(spiTuple, spiTupDesc, 2);
@@ -1603,16 +1581,14 @@ check_pg_audit_log(char **newVal, void **extra, GucSource source)
 	foreach(lt, flagRawList)
 	{
+		char	   *token = (char *) lfirst(lt);
 		bool		subtract = false;
 		int			class;
 
-		/* Retrieve a token */
-		char	   *token = (char *) lfirst(lt);
-
 		/* If token is preceded by -, then the token is subtractive */
-		if (strstr(token, "-") == token)
+		if (token[0] == '-')
 		{
-			token = token + 1;
+			token++;
 			subtract = true;
 		}
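
The old strstr(token, "-") == token idiom only asks whether the string starts with '-', which token[0] == '-' expresses directly and without scanning the whole token. A standalone sketch of the two equivalent forms (hypothetical main, not part of the patch):

	#include <stdio.h>
	#include <string.h>

	int
	main(void)
	{
		const char *token = "-read";

		/* both tests detect a leading '-'; the second is direct and O(1) */
		if (strstr(token, "-") == token)
			printf("subtractive (strstr form)\n");
		if (token[0] == '-')
			printf("subtractive (direct form)\n");
		return 0;
	}
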

@@ -80,8 +80,7 @@ Datum
 tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
 {
 	TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-	BernoulliSamplerData *sampler =
-	(BernoulliSamplerData *) tsdesc->tsmdata;
+	BernoulliSamplerData *sampler = (BernoulliSamplerData *) tsdesc->tsmdata;
 
 	/*
 	 * Bernoulli sampling scans all blocks on the table and supports syncscan
@@ -117,10 +116,10 @@ tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
  * tuples have same probability of being returned the visible and invisible
  * tuples will be returned in same ratio as they have in the actual table.
  * This means that there is no skew towards either visible or invisible tuples
- * and the number returned visible tuples to from the executor node is the
- * fraction of visible tuples which was specified in input.
+ * and the number of visible tuples returned from the executor node should
+ * match the fraction of visible tuples which was specified by user.
  *
- * This is faster than doing the coinflip in the examinetuple because we don't
+ * This is faster than doing the coinflip in examinetuple because we don't
  * have to do visibility checks on uninteresting tuples.
  *
  * If we reach end of the block return InvalidOffsetNumber which tells
@@ -131,8 +130,7 @@ tsm_bernoulli_nexttuple(PG_FUNCTION_ARGS)
 {
 	TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
 	OffsetNumber maxoffset = PG_GETARG_UINT16(2);
-	BernoulliSamplerData *sampler =
-	(BernoulliSamplerData *) tsdesc->tsmdata;
+	BernoulliSamplerData *sampler = (BernoulliSamplerData *) tsdesc->tsmdata;
 	OffsetNumber tupoffset = sampler->lt;
 	float4		probability = sampler->probability;
@@ -185,8 +183,7 @@ Datum
 tsm_bernoulli_reset(PG_FUNCTION_ARGS)
 {
 	TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-	BernoulliSamplerData *sampler =
-	(BernoulliSamplerData *) tsdesc->tsmdata;
+	BernoulliSamplerData *sampler = (BernoulliSamplerData *) tsdesc->tsmdata;
 
 	sampler->blockno = InvalidBlockNumber;
 	sampler->lt = InvalidOffsetNumber;
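
The comments fixed above describe per-tuple Bernoulli sampling: every tuple survives an independent coinflip with the requested probability, so visible and invisible tuples are filtered at the same rate. A standalone sketch of that idea (hypothetical helper, not the actual executor code):

	#include <stdio.h>
	#include <stdlib.h>

	/* keep a row with the given probability; one independent draw per row */
	static int
	bernoulli_keep(double probability)
	{
		return ((double) rand() / (double) RAND_MAX) < probability;
	}

	int
	main(void)
	{
		int			kept = 0;

		srand(42);				/* a REPEATABLE-style fixed seed */
		for (int row = 0; row < 100000; row++)
			if (bernoulli_keep(0.1))
				kept++;
		/* expect roughly 10% of rows to survive */
		printf("kept %d of 100000\n", kept);
		return 0;
	}
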

@@ -78,9 +78,12 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
 	fcinfo.argnull[0] = false;
 
 	/*
-	 * Second arg for init function is always REPEATABLE When
-	 * tablesample->repeatable is NULL then REPEATABLE clause was not
-	 * specified. When specified, the expression cannot evaluate to NULL.
+	 * Second arg for init function is always REPEATABLE.
+	 *
+	 * If tablesample->repeatable is NULL then REPEATABLE clause was not
+	 * specified, and we insert a random value as default.
+	 *
+	 * When specified, the expression cannot evaluate to NULL.
 	 */
 	if (tablesample->repeatable)
 	{
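
The rewritten comment spells out the cases for the sampler's second argument: it is always the REPEATABLE seed, a random default is supplied when the clause is absent, and an explicit expression may not evaluate to NULL. A condensed, self-contained sketch of that branch (hypothetical types and names, not the executor's):

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	/* hypothetical stand-in for the clause node */
	typedef struct
	{
		const char *repeatable;	/* NULL when no REPEATABLE clause was given */
	} SampleClause;

	static uint32_t
	choose_seed(const SampleClause *clause)
	{
		if (clause->repeatable)
			return (uint32_t) strtoul(clause->repeatable, NULL, 10);	/* explicit seed */
		return (uint32_t) rand();	/* no clause: random default */
	}

	int
	main(void)
	{
		SampleClause with = {"12345"};
		SampleClause without = {NULL};

		printf("explicit seed: %u\n", choose_seed(&with));
		printf("random seed:   %u\n", choose_seed(&without));
		return 0;
	}
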

@@ -645,10 +645,12 @@ get_last_attnums(Node *node, ProjectionInfo *projInfo)
 	 * overall targetlist's econtext. GroupingFunc arguments are never
 	 * evaluated at all.
 	 */
-	if (IsA(node, Aggref) ||IsA(node, GroupingFunc))
+	if (IsA(node, Aggref))
 		return false;
 	if (IsA(node, WindowFunc))
 		return false;
+	if (IsA(node, GroupingFunc))
+		return false;
 	return expression_tree_walker(node, get_last_attnums,
 								  (void *) projInfo);
 }
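
get_last_attnums() follows the standard expression_tree_walker() pattern: return false to stop descending into a node's children, or hand the node back to the walker to recurse. The reshuffled tests above all mean "do not look inside aggregate, window, or grouping function arguments". The general shape of such a walker (a sketch; my_walker is a stand-in name, and the real function also records Var attribute numbers):

	#include "postgres.h"
	#include "nodes/nodeFuncs.h"

	static bool
	my_walker(Node *node, void *context)
	{
		if (node == NULL)
			return false;		/* nothing to do for empty subtrees */
		if (IsA(node, Aggref))
			return false;		/* don't descend into aggregate arguments */
		if (IsA(node, WindowFunc))
			return false;		/* nor window function arguments */
		if (IsA(node, GroupingFunc))
			return false;		/* nor GroupingFunc arguments */
		/* otherwise visit all children with the same walker */
		return expression_tree_walker(node, my_walker, context);
	}
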

@@ -1519,8 +1519,9 @@ agg_retrieve_direct(AggState *aggstate)
 	/*
 	 * get state info from node
 	 *
-	 * econtext is the per-output-tuple expression context tmpcontext is the
-	 * per-input-tuple expression context
+	 * econtext is the per-output-tuple expression context
+	 *
+	 * tmpcontext is the per-input-tuple expression context
 	 */
 	econtext = aggstate->ss.ps.ps_ExprContext;
 	tmpcontext = aggstate->tmpcontext;
@@ -1609,7 +1610,7 @@ agg_retrieve_direct(AggState *aggstate)
 		else
 			nextSetSize = 0;
-		/*-
+		/*----------
 		 * If a subgroup for the current grouping set is present, project it.
 		 *
 		 * We have a new group if:
@@ -1624,6 +1625,7 @@ agg_retrieve_direct(AggState *aggstate)
 		 *	  AND
 		 *	  - the previous and pending rows differ on the grouping columns
 		 *		of the next grouping set
+		 *----------
 		 */
 		if (aggstate->input_done ||
 			(node->aggstrategy == AGG_SORTED &&

@@ -527,8 +527,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
 		 * Buckets are simple pointers to hashjoin tuples, while tupsize
 		 * includes the pointer, hash code, and MinimalTupleData.  So buckets
 		 * should never really exceed 25% of work_mem (even for
-		 * NTUP_PER_BUCKET=1); except maybe * for work_mem values that are not
-		 * 2^N bytes, where we might get more * because of doubling. So let's
+		 * NTUP_PER_BUCKET=1); except maybe for work_mem values that are not
+		 * 2^N bytes, where we might get more because of doubling. So let's
 		 * look for 50% here.
 		 */
 		Assert(bucket_bytes <= hash_table_bytes / 2);
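
The repaired comment's arithmetic is easy to check: with 8-byte pointers, each bucket slot costs 8 bytes, while each stored tuple costs at least the pointer plus a 4-byte hash code plus the MinimalTupleData header. At NTUP_PER_BUCKET = 1 that keeps the bucket array well under a quarter of the total, and rounding the bucket count up to a power of two can at most double its share, hence the 50% assertion. A back-of-envelope check (assumed sizes, not the real structs):

	#include <stdio.h>

	int
	main(void)
	{
		/* assumed sizes: 8-byte pointer, 4-byte hash, ~24-byte tuple header */
		double		bucket_ptr = 8.0;
		double		tupsize = 8.0 + 4.0 + 24.0;

		/* one tuple per bucket: bucket array's share of total memory */
		printf("bucket share: %.0f%%\n",
			   100.0 * bucket_ptr / (bucket_ptr + tupsize));
		/* doubling the bucket count for a power of 2 can double that share */
		printf("worst case:   %.0f%%\n",
			   100.0 * 2.0 * bucket_ptr / (2.0 * bucket_ptr + tupsize));
		return 0;
	}
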
@@ -691,9 +691,9 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
 			if (batchno == curbatch)
 			{
 				/* keep tuple in memory - copy it into the new chunk */
-				HashJoinTuple copyTuple =
-				(HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
+				HashJoinTuple copyTuple;
 
+				copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
 				memcpy(copyTuple, hashTuple, hashTupleSize);
 
 				/* and add it back to the appropriate bucket */

@@ -1918,10 +1918,10 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 			 * whether HAVING succeeds.  Furthermore, there cannot be any
 			 * variables in either HAVING or the targetlist, so we
 			 * actually do not need the FROM table at all!	We can just
-			 * throw away the plan-so-far and generate a Result node.
-			 * This is a sufficiently unusual corner case that it's not
-			 * worth contorting the structure of this routine to avoid
-			 * having to generate the plan in the first place.
+			 * throw away the plan-so-far and generate a Result node. This
+			 * is a sufficiently unusual corner case that it's not worth
+			 * contorting the structure of this routine to avoid having to
+			 * generate the plan in the first place.
 			 */
 			result_plan = (Plan *) make_result(root,
 											   tlist,
@@ -3157,22 +3157,23 @@ extract_rollup_sets(List *groupingSets)
 	if (!lc1)
 		return list_make1(groupingSets);
-	/*
+	/*----------
 	 * We don't strictly need to remove duplicate sets here, but if we don't,
 	 * they tend to become scattered through the result, which is a bit
-	 * confusing (and irritating if we ever decide to optimize them out). So
-	 * we remove them here and add them back after.
+	 * confusing (and irritating if we ever decide to optimize them out).
+	 * So we remove them here and add them back after.
 	 *
 	 * For each non-duplicate set, we fill in the following:
 	 *
-	 * orig_sets[i] = list of the original set lists set_masks[i] = bitmapset
-	 * for testing inclusion adjacency[i] = array [n, v1, v2, ... vn] of
-	 * adjacency indices
+	 * orig_sets[i] = list of the original set lists
+	 * set_masks[i] = bitmapset for testing inclusion
+	 * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
 	 *
 	 * chains[i] will be the result group this set is assigned to.
 	 *
-	 * We index all of these from 1 rather than 0 because it is convenient to
-	 * leave 0 free for the NIL node in the graph algorithm.
+	 * We index all of these from 1 rather than 0 because it is convenient
+	 * to leave 0 free for the NIL node in the graph algorithm.
+	 *----------
 	 */
 	orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
 	set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
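
The reformatted list documents three parallel arrays; the adjacency encoding in particular packs a counted neighbor list into a plain int array: element 0 holds the count n, followed by the n neighbor indices. For example (illustrative values only):

	#include <stdio.h>

	int
	main(void)
	{
		/* adjacency[i] stores {n, v1, ..., vn}: n neighbors, then their indices */
		int			adjacency_3[] = {3, 1, 4, 7};	/* hypothetical: set 3 touches 1, 4, 7 */

		for (int j = 1; j <= adjacency_3[0]; j++)
			printf("set 3 is adjacent to set %d\n", adjacency_3[j]);
		return 0;
	}
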

@@ -596,8 +596,8 @@ process_policies(Query *root, List *policies, int rt_index, Expr **qual_eval,
 		*qual_eval = (Expr *) linitial(quals);
 
 	/*
-	 * Similairly, if more than one WITH CHECK qual is returned, then they
-	 * need to be combined together.
+	 * Similarly, if more than one WITH CHECK qual is returned, then they need
+	 * to be combined together.
 	 *
 	 * with_check_quals is allowed to be NIL here since this might not be the
 	 * resultRelation (see above).

@@ -584,7 +584,7 @@ add_indent(StringInfo out, bool indent, int level)
  *
  * Given the datatype OID, return its JsonbTypeCategory, as well as the type's
  * output function OID.  If the returned category is JSONBTYPE_JSONCAST,
- * we return the OID of the relevant cast function instead.
+ * we return the OID of the relevant cast function instead.
 */
 static void
 jsonb_categorize_type(Oid typoid,

@@ -106,8 +106,8 @@ typedef struct
 	int			wrapColumn;		/* max line length, or -1 for no limit */
 	int			indentLevel;	/* current indent level for prettyprint */
 	bool		varprefix;		/* TRUE to print prefixes on Vars */
-	ParseExprKind special_exprkind;		/* set only for exprkinds needing */
-	/* special handling */
+	ParseExprKind special_exprkind;		/* set only for exprkinds needing
+										 * special handling */
 } deparse_context;
 
 /*
