Diffstat (limited to 'src/backend/commands')
-rw-r--r--  src/backend/commands/aggregatecmds.c    24
-rw-r--r--  src/backend/commands/analyze.c          17
-rw-r--r--  src/backend/commands/cluster.c           6
-rw-r--r--  src/backend/commands/comment.c          57
-rw-r--r--  src/backend/commands/copy.c            181
-rw-r--r--  src/backend/commands/dbcommands.c       76
-rw-r--r--  src/backend/commands/define.c            9
-rw-r--r--  src/backend/commands/explain.c          10
-rw-r--r--  src/backend/commands/functioncmds.c      8
-rw-r--r--  src/backend/commands/indexcmds.c        56
-rw-r--r--  src/backend/commands/opclasscmds.c      20
-rw-r--r--  src/backend/commands/operatorcmds.c     18
-rw-r--r--  src/backend/commands/portalcmds.c        6
-rw-r--r--  src/backend/commands/prepare.c          40
-rw-r--r--  src/backend/commands/proclang.c         10
-rw-r--r--  src/backend/commands/sequence.c         16
-rw-r--r--  src/backend/commands/tablecmds.c       170
-rw-r--r--  src/backend/commands/tablespace.c       10
-rw-r--r--  src/backend/commands/trigger.c          67
-rw-r--r--  src/backend/commands/typecmds.c         18
-rw-r--r--  src/backend/commands/user.c             32
-rw-r--r--  src/backend/commands/vacuum.c          108
-rw-r--r--  src/backend/commands/vacuumlazy.c       31
-rw-r--r--  src/backend/commands/variable.c          4
-rw-r--r--  src/backend/commands/view.c              4
25 files changed, 506 insertions(+), 492 deletions(-)
diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c
index 7923199f63..e072a23f27 100644
--- a/src/backend/commands/aggregatecmds.c
+++ b/src/backend/commands/aggregatecmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.40 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.41 2006/10/04 00:29:50 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -58,7 +58,7 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters)
TypeName *transType = NULL;
char *initval = NULL;
Oid *aggArgTypes;
- int numArgs;
+ int numArgs;
Oid transTypeId;
ListCell *pl;
@@ -122,8 +122,8 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters)
if (oldstyle)
{
/*
- * Old style: use basetype parameter. This supports aggregates
- * of zero or one input, with input type ANY meaning zero inputs.
+ * Old style: use basetype parameter. This supports aggregates of
+ * zero or one input, with input type ANY meaning zero inputs.
*
* Historically we allowed the command to look like basetype = 'ANY'
* so we must do a case-insensitive comparison for the name ANY. Ugh.
@@ -150,8 +150,8 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters)
/*
* New style: args is a list of TypeNames (possibly zero of 'em).
*/
- ListCell *lc;
- int i = 0;
+ ListCell *lc;
+ int i = 0;
if (baseType != NULL)
ereport(ERROR,
@@ -162,7 +162,7 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters)
aggArgTypes = (Oid *) palloc(sizeof(Oid) * numArgs);
foreach(lc, args)
{
- TypeName *curTypeName = (TypeName *) lfirst(lc);
+ TypeName *curTypeName = (TypeName *) lfirst(lc);
aggArgTypes[i++] = typenameTypeId(NULL, curTypeName);
}
@@ -171,9 +171,9 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters)
/*
* look up the aggregate's transtype.
*
- * transtype can't be a pseudo-type, since we need to be
- * able to store values of the transtype. However, we can allow
- * polymorphic transtype in some cases (AggregateCreate will check).
+ * transtype can't be a pseudo-type, since we need to be able to store
+ * values of the transtype. However, we can allow polymorphic transtype
+ * in some cases (AggregateCreate will check).
*/
transTypeId = typenameTypeId(NULL, transType);
if (get_typtype(transTypeId) == 'p' &&
@@ -189,7 +189,7 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters)
*/
AggregateCreate(aggName, /* aggregate name */
aggNamespace, /* namespace */
- aggArgTypes, /* input data type(s) */
+ aggArgTypes, /* input data type(s) */
numArgs,
transfuncName, /* step function name */
finalfuncName, /* final function name */
@@ -289,7 +289,7 @@ RenameAggregate(List *name, List *args, const char *newname)
errmsg("function %s already exists in schema \"%s\"",
funcname_signature_string(newname,
procForm->pronargs,
- procForm->proargtypes.values),
+ procForm->proargtypes.values),
get_namespace_name(namespaceOid))));
/* must be owner */
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index f9e41e3531..1ce768f046 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.98 2006/09/17 22:50:31 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.99 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -129,12 +129,11 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
CHECK_FOR_INTERRUPTS();
/*
- * Open the relation, getting ShareUpdateExclusiveLock to ensure that
- * two ANALYZEs don't run on it concurrently. (This also locks out
- * a concurrent VACUUM, which doesn't matter much at the moment but
- * might matter if we ever try to accumulate stats on dead tuples.)
- * If the rel has been dropped since we last saw it, we don't need
- * to process it.
+ * Open the relation, getting ShareUpdateExclusiveLock to ensure that two
+ * ANALYZEs don't run on it concurrently. (This also locks out a
+ * concurrent VACUUM, which doesn't matter much at the moment but might
+ * matter if we ever try to accumulate stats on dead tuples.) If the rel
+ * has been dropped since we last saw it, we don't need to process it.
*/
onerel = try_relation_open(relid, ShareUpdateExclusiveLock);
if (!onerel)
@@ -216,8 +215,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
if (i == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- col, RelationGetRelationName(onerel))));
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ col, RelationGetRelationName(onerel))));
vacattrstats[tcnt] = examine_attribute(onerel, i);
if (vacattrstats[tcnt] != NULL)
tcnt++;
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index bd2301dd62..665c66cad5 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.153 2006/08/18 16:09:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.154 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -288,7 +288,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
tuple = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(rvtc->indexOid),
0, 0, 0);
- if (!HeapTupleIsValid(tuple)) /* probably can't happen */
+ if (!HeapTupleIsValid(tuple)) /* probably can't happen */
{
relation_close(OldHeap, AccessExclusiveLock);
return;
@@ -350,7 +350,7 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck)
errmsg("cannot cluster on partial index \"%s\"",
RelationGetRelationName(OldIndex))));
- if (!OldIndex->rd_am->amclusterable)
+ if (!OldIndex->rd_am->amclusterable)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot cluster on index \"%s\" because access method does not support clustering",
diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c
index f54243a495..df4416d37c 100644
--- a/src/backend/commands/comment.c
+++ b/src/backend/commands/comment.c
@@ -7,7 +7,7 @@
* Copyright (c) 1996-2006, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.91 2006/09/05 21:08:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.92 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -261,11 +261,12 @@ CreateComments(Oid oid, Oid classoid, int32 subid, char *comment)
* If the comment given is null or an empty string, instead delete any
* existing comment for the specified key.
*/
-void CreateSharedComments(Oid oid, Oid classoid, char *comment)
+void
+CreateSharedComments(Oid oid, Oid classoid, char *comment)
{
Relation shdescription;
- ScanKeyData skey[2];
- SysScanDesc sd;
+ ScanKeyData skey[2];
+ SysScanDesc sd;
HeapTuple oldtuple;
HeapTuple newtuple = NULL;
Datum values[Natts_pg_shdescription];
@@ -294,18 +295,18 @@ void CreateSharedComments(Oid oid, Oid classoid, char *comment)
/* Use the index to search for a matching old tuple */
ScanKeyInit(&skey[0],
- Anum_pg_shdescription_objoid,
- BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(oid));
+ Anum_pg_shdescription_objoid,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(oid));
ScanKeyInit(&skey[1],
- Anum_pg_shdescription_classoid,
- BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(classoid));
+ Anum_pg_shdescription_classoid,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(classoid));
shdescription = heap_open(SharedDescriptionRelationId, RowExclusiveLock);
sd = systable_beginscan(shdescription, SharedDescriptionObjIndexId, true,
- SnapshotNow, 2, skey);
+ SnapshotNow, 2, skey);
while ((oldtuple = systable_getnext(sd)) != NULL)
{
@@ -316,11 +317,11 @@ void CreateSharedComments(Oid oid, Oid classoid, char *comment)
else
{
newtuple = heap_modifytuple(oldtuple, RelationGetDescr(shdescription),
- values, nulls, replaces);
+ values, nulls, replaces);
simple_heap_update(shdescription, &oldtuple->t_self, newtuple);
}
- break; /* Assume there can be only one match */
+ break; /* Assume there can be only one match */
}
systable_endscan(sd);
@@ -330,7 +331,7 @@ void CreateSharedComments(Oid oid, Oid classoid, char *comment)
if (newtuple == NULL && comment != NULL)
{
newtuple = heap_formtuple(RelationGetDescr(shdescription),
- values, nulls);
+ values, nulls);
simple_heap_insert(shdescription, newtuple);
}
@@ -405,25 +406,25 @@ void
DeleteSharedComments(Oid oid, Oid classoid)
{
Relation shdescription;
- ScanKeyData skey[2];
- SysScanDesc sd;
+ ScanKeyData skey[2];
+ SysScanDesc sd;
HeapTuple oldtuple;
/* Use the index to search for all matching old tuples */
ScanKeyInit(&skey[0],
- Anum_pg_shdescription_objoid,
- BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(oid));
+ Anum_pg_shdescription_objoid,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(oid));
ScanKeyInit(&skey[1],
- Anum_pg_shdescription_classoid,
- BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(classoid));
+ Anum_pg_shdescription_classoid,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(classoid));
shdescription = heap_open(SharedDescriptionRelationId, RowExclusiveLock);
sd = systable_beginscan(shdescription, SharedDescriptionObjIndexId, true,
- SnapshotNow, 2, skey);
+ SnapshotNow, 2, skey);
while ((oldtuple = systable_getnext(sd)) != NULL)
simple_heap_delete(shdescription, &oldtuple->t_self);
@@ -620,8 +621,8 @@ CommentDatabase(List *qualname, char *comment)
static void
CommentTablespace(List *qualname, char *comment)
{
- char *tablespace;
- Oid oid;
+ char *tablespace;
+ Oid oid;
if (list_length(qualname) != 1)
ereport(ERROR,
@@ -657,8 +658,8 @@ CommentTablespace(List *qualname, char *comment)
static void
CommentRole(List *qualname, char *comment)
{
- char *role;
- Oid oid;
+ char *role;
+ Oid oid;
if (list_length(qualname) != 1)
ereport(ERROR,
@@ -672,7 +673,7 @@ CommentRole(List *qualname, char *comment)
if (!has_privs_of_role(GetUserId(), oid))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be member of role \"%s\" to comment upon it", role)));
+ errmsg("must be member of role \"%s\" to comment upon it", role)));
/* Call CreateSharedComments() to create/drop the comments */
CreateSharedComments(oid, AuthIdRelationId, comment);
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 23d9742234..0cbfc237ff 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.271 2006/08/31 03:17:50 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.272 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -88,8 +88,8 @@ typedef struct CopyStateData
/* low-level state data */
CopyDest copy_dest; /* type of copy source/destination */
FILE *copy_file; /* used if copy_dest == COPY_FILE */
- StringInfo fe_msgbuf; /* used for all dests during COPY TO, only
- * for dest == COPY_NEW_FE in COPY FROM */
+ StringInfo fe_msgbuf; /* used for all dests during COPY TO, only for
+ * dest == COPY_NEW_FE in COPY FROM */
bool fe_copy; /* true for all FE copy dests */
bool fe_eof; /* true if detected end of copy data */
EolType eol_type; /* EOL type of input */
@@ -109,7 +109,7 @@ typedef struct CopyStateData
bool header_line; /* CSV header line? */
char *null_print; /* NULL marker string (server encoding!) */
int null_print_len; /* length of same */
- char *null_print_client; /* same converted to client encoding */
+ char *null_print_client; /* same converted to client encoding */
char *delim; /* column delimiter (must be 1 byte) */
char *quote; /* CSV quote char (must be 1 byte) */
char *escape; /* CSV escape char (must be 1 byte) */
@@ -125,8 +125,8 @@ typedef struct CopyStateData
/*
* Working state for COPY TO
*/
- FmgrInfo *out_functions; /* lookup info for output functions */
- MemoryContext rowcontext; /* per-row evaluation context */
+ FmgrInfo *out_functions; /* lookup info for output functions */
+ MemoryContext rowcontext; /* per-row evaluation context */
/*
* These variables are used to reduce overhead in textual COPY FROM.
@@ -177,7 +177,7 @@ typedef struct
* function call overhead in tight COPY loops.
*
* We must use "if (1)" because "do {} while(0)" overrides the continue/break
- * processing. See http://www.cit.gu.edu.au/~anthony/info/C/C.macros.
+ * processing. See http://www.cit.gu.edu.au/~anthony/info/C/C.macros.
*/
/*
@@ -243,7 +243,7 @@ static const char BinarySignature[11] = "PGCOPY\n\377\r\n\0";
static void DoCopyTo(CopyState cstate);
static void CopyTo(CopyState cstate);
static void CopyOneRowTo(CopyState cstate, Oid tupleOid,
- Datum *values, bool *nulls);
+ Datum *values, bool *nulls);
static void CopyFrom(CopyState cstate);
static bool CopyReadLine(CopyState cstate);
static bool CopyReadLineText(CopyState cstate);
@@ -259,7 +259,7 @@ static void CopyAttributeOutText(CopyState cstate, char *string);
static void CopyAttributeOutCSV(CopyState cstate, char *string,
bool use_quote, bool single_attr);
static List *CopyGetAttnums(TupleDesc tupDesc, Relation rel,
- List *attnamelist);
+ List *attnamelist);
static char *limit_printout_length(const char *str);
/* Low-level communications functions */
@@ -863,10 +863,10 @@ DoCopy(const CopyStmt *stmt)
/* Disallow end-of-line characters */
if (strchr(cstate->delim, '\r') != NULL ||
- strchr(cstate->delim, '\n') != NULL)
+ strchr(cstate->delim, '\n') != NULL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("COPY delimiter cannot be newline or carriage return")));
+ errmsg("COPY delimiter cannot be newline or carriage return")));
if (strchr(cstate->null_print, '\r') != NULL ||
strchr(cstate->null_print, '\n') != NULL)
@@ -956,7 +956,7 @@ DoCopy(const CopyStmt *stmt)
/* Open and lock the relation, using the appropriate lock type. */
cstate->rel = heap_openrv(stmt->relation,
- (is_from ? RowExclusiveLock : AccessShareLock));
+ (is_from ? RowExclusiveLock : AccessShareLock));
/* Check relation permissions. */
aclresult = pg_class_aclcheck(RelationGetRelid(cstate->rel),
@@ -1009,7 +1009,7 @@ DoCopy(const CopyStmt *stmt)
* rewriting or planning. Do that now.
*
* Because the planner is not cool about not scribbling on its input,
- * we make a preliminary copy of the source querytree. This prevents
+ * we make a preliminary copy of the source querytree. This prevents
* problems in the case that the COPY is in a portal or plpgsql
* function and is executed repeatedly. (See also the same hack in
* EXPLAIN, DECLARE CURSOR and PREPARE.) XXX the planner really
@@ -1124,8 +1124,8 @@ DoCopy(const CopyStmt *stmt)
/*
* Set up encoding conversion info. Even if the client and server
- * encodings are the same, we must apply pg_client_to_server() to
- * validate data in multibyte encodings.
+ * encodings are the same, we must apply pg_client_to_server() to validate
+ * data in multibyte encodings.
*/
cstate->client_encoding = pg_get_client_encoding();
cstate->need_transcoding =
@@ -1139,7 +1139,8 @@ DoCopy(const CopyStmt *stmt)
if (is_from) /* copy from file to database */
CopyFrom(cstate);
- else /* copy from database to file */
+ else
+ /* copy from database to file */
DoCopyTo(cstate);
/*
@@ -1210,12 +1211,12 @@ DoCopyTo(CopyState cstate)
}
else
{
- mode_t oumask; /* Pre-existing umask value */
+ mode_t oumask; /* Pre-existing umask value */
struct stat st;
/*
- * Prevent write to relative path ... too easy to shoot oneself in
- * the foot by overwriting a database file ...
+ * Prevent write to relative path ... too easy to shoot oneself in the
+ * foot by overwriting a database file ...
*/
if (!is_absolute_path(cstate->filename))
ereport(ERROR,
@@ -1351,7 +1352,7 @@ CopyTo(CopyState cstate)
*/
if (cstate->need_transcoding)
cstate->null_print_client = pg_server_to_client(cstate->null_print,
- cstate->null_print_len);
+ cstate->null_print_len);
/* if a header has been requested send the line */
if (cstate->header_line)
@@ -1508,7 +1509,7 @@ CopyOneRowTo(CopyState cstate, Oid tupleOid, Datum *values, bool *nulls)
CopySendEndOfRow(cstate);
MemoryContextSwitchTo(oldcontext);
-
+
cstate->processed++;
}
@@ -2237,6 +2238,7 @@ CopyReadLineText(CopyState cstate)
bool hit_eof = false;
bool result = false;
char mblen_str[2];
+
/* CSV variables */
bool first_char_in_line = true;
bool in_quote = false,
@@ -2268,10 +2270,10 @@ CopyReadLineText(CopyState cstate)
* assumed the same in frontend and backend encodings.
*
* For speed, we try to move data from raw_buf to line_buf in chunks
- * rather than one character at a time. raw_buf_ptr points to the next
+ * rather than one character at a time. raw_buf_ptr points to the next
* character to examine; any characters from raw_buf_index to raw_buf_ptr
- * have been determined to be part of the line, but not yet transferred
- * to line_buf.
+ * have been determined to be part of the line, but not yet transferred to
+ * line_buf.
*
* For a little extra speed within the loop, we copy raw_buf and
* raw_buf_len into local variables.
@@ -2286,14 +2288,14 @@ CopyReadLineText(CopyState cstate)
char c;
/*
- * Load more data if needed. Ideally we would just force four bytes
- * of read-ahead and avoid the many calls to
- * IF_NEED_REFILL_AND_NOT_EOF_CONTINUE(), but the COPY_OLD_FE
- * protocol does not allow us to read too far ahead or we might
- * read into the next data, so we read-ahead only as far we know
- * we can. One optimization would be to read-ahead four byte here
- * if cstate->copy_dest != COPY_OLD_FE, but it hardly seems worth it,
- * considering the size of the buffer.
+ * Load more data if needed. Ideally we would just force four bytes
+ * of read-ahead and avoid the many calls to
+ * IF_NEED_REFILL_AND_NOT_EOF_CONTINUE(), but the COPY_OLD_FE protocol
+ * does not allow us to read too far ahead or we might read into the
+ * next data, so we read-ahead only as far we know we can. One
+ * optimization would be to read-ahead four byte here if
+ * cstate->copy_dest != COPY_OLD_FE, but it hardly seems worth it,
+ * considering the size of the buffer.
*/
if (raw_buf_ptr >= copy_buf_len || need_data)
{
@@ -2328,12 +2330,12 @@ CopyReadLineText(CopyState cstate)
{
/*
* If character is '\\' or '\r', we may need to look ahead below.
- * Force fetch of the next character if we don't already have it. We
- * need to do this before changing CSV state, in case one of these
- * characters is also the quote or escape character.
+ * Force fetch of the next character if we don't already have it.
+ * We need to do this before changing CSV state, in case one of
+ * these characters is also the quote or escape character.
*
- * Note: old-protocol does not like forced prefetch, but it's OK here
- * since we cannot validly be at EOF.
+ * Note: old-protocol does not like forced prefetch, but it's OK
+ * here since we cannot validly be at EOF.
*/
if (c == '\\' || c == '\r')
{
@@ -2341,12 +2343,12 @@ CopyReadLineText(CopyState cstate)
}
/*
- * Dealing with quotes and escapes here is mildly tricky. If the quote
- * char is also the escape char, there's no problem - we just use the
- * char as a toggle. If they are different, we need to ensure that we
- * only take account of an escape inside a quoted field and
- * immediately preceding a quote char, and not the second in a
- * escape-escape sequence.
+ * Dealing with quotes and escapes here is mildly tricky. If the
+ * quote char is also the escape char, there's no problem - we
+ * just use the char as a toggle. If they are different, we need
+ * to ensure that we only take account of an escape inside a
+ * quoted field and immediately preceding a quote char, and not
+ * the second in a escape-escape sequence.
*/
if (in_quote && c == escapec)
last_was_esc = !last_was_esc;
@@ -2357,9 +2359,9 @@ CopyReadLineText(CopyState cstate)
/*
* Updating the line count for embedded CR and/or LF chars is
- * necessarily a little fragile - this test is probably about the best
- * we can do. (XXX it's arguable whether we should do this at all ---
- * is cur_lineno a physical or logical count?)
+ * necessarily a little fragile - this test is probably about the
+ * best we can do. (XXX it's arguable whether we should do this
+ * at all --- is cur_lineno a physical or logical count?)
*/
if (in_quote && c == (cstate->eol_type == EOL_NL ? '\n' : '\r'))
cstate->cur_lineno++;
@@ -2394,12 +2396,13 @@ CopyReadLineText(CopyState cstate)
if (cstate->eol_type == EOL_CRNL)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg(!cstate->csv_mode ?
+ errmsg(!cstate->csv_mode ?
"literal carriage return found in data" :
- "unquoted carriage return found in data"),
+ "unquoted carriage return found in data"),
errhint(!cstate->csv_mode ?
- "Use \"\\r\" to represent carriage return." :
- "Use quoted CSV field to represent carriage return.")));
+ "Use \"\\r\" to represent carriage return." :
+ "Use quoted CSV field to represent carriage return.")));
+
/*
* if we got here, it is the first line and we didn't find
* \n, so don't consume the peeked character
@@ -2410,12 +2413,12 @@ CopyReadLineText(CopyState cstate)
else if (cstate->eol_type == EOL_NL)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg(!cstate->csv_mode ?
+ errmsg(!cstate->csv_mode ?
"literal carriage return found in data" :
"unquoted carriage return found in data"),
errhint(!cstate->csv_mode ?
- "Use \"\\r\" to represent carriage return." :
- "Use quoted CSV field to represent carriage return.")));
+ "Use \"\\r\" to represent carriage return." :
+ "Use quoted CSV field to represent carriage return.")));
/* If reach here, we have found the line terminator */
break;
}
@@ -2431,15 +2434,15 @@ CopyReadLineText(CopyState cstate)
"unquoted newline found in data"),
errhint(!cstate->csv_mode ?
"Use \"\\n\" to represent newline." :
- "Use quoted CSV field to represent newline.")));
+ "Use quoted CSV field to represent newline.")));
cstate->eol_type = EOL_NL; /* in case not set yet */
/* If reach here, we have found the line terminator */
break;
}
/*
- * In CSV mode, we only recognize \. alone on a line. This is
- * because \. is a valid CSV data value.
+ * In CSV mode, we only recognize \. alone on a line. This is because
+ * \. is a valid CSV data value.
*/
if (c == '\\' && (!cstate->csv_mode || first_char_in_line))
{
@@ -2529,23 +2532,24 @@ CopyReadLineText(CopyState cstate)
break;
}
else if (!cstate->csv_mode)
+
/*
- * If we are here, it means we found a backslash followed by
- * something other than a period. In non-CSV mode, anything
- * after a backslash is special, so we skip over that second
- * character too. If we didn't do that \\. would be
- * considered an eof-of copy, while in non-CVS mode it is a
- * literal backslash followed by a period. In CSV mode,
- * backslashes are not special, so we want to process the
- * character after the backslash just like a normal character,
- * so we don't increment in those cases.
+ * If we are here, it means we found a backslash followed by
+ * something other than a period. In non-CSV mode, anything
+ * after a backslash is special, so we skip over that second
+ * character too. If we didn't do that \\. would be
+ * considered an eof-of copy, while in non-CVS mode it is a
+ * literal backslash followed by a period. In CSV mode,
+ * backslashes are not special, so we want to process the
+ * character after the backslash just like a normal character,
+ * so we don't increment in those cases.
*/
raw_buf_ptr++;
}
/*
- * This label is for CSV cases where \. appears at the start of a line,
- * but there is more text after it, meaning it was a data value.
+ * This label is for CSV cases where \. appears at the start of a
+ * line, but there is more text after it, meaning it was a data value.
* We are more strict for \. in CSV mode because \. could be a data
* value, while in non-CSV mode, \. cannot be a data value.
*/
@@ -2554,9 +2558,9 @@ not_end_of_copy:
/*
* Process all bytes of a multi-byte character as a group.
*
- * We only support multi-byte sequences where the first byte
- * has the high-bit set, so as an optimization we can avoid
- * this block entirely if it is not set.
+ * We only support multi-byte sequences where the first byte has the
+ * high-bit set, so as an optimization we can avoid this block
+ * entirely if it is not set.
*/
if (cstate->encoding_embeds_ascii && IS_HIGHBIT_SET(c))
{
@@ -3040,10 +3044,10 @@ CopyAttributeOutText(CopyState cstate, char *string)
/*
* We have to grovel through the string searching for control characters
* and instances of the delimiter character. In most cases, though, these
- * are infrequent. To avoid overhead from calling CopySendData once per
- * character, we dump out all characters between replaceable characters
- * in a single call. The loop invariant is that the data from "start"
- * to "ptr" can be sent literally, but hasn't yet been.
+ * are infrequent. To avoid overhead from calling CopySendData once per
+ * character, we dump out all characters between replaceable characters in
+ * a single call. The loop invariant is that the data from "start" to
+ * "ptr" can be sent literally, but hasn't yet been.
*/
start = ptr;
while ((c = *ptr) != '\0')
@@ -3090,7 +3094,7 @@ CopyAttributeOutText(CopyState cstate, char *string)
{
DUMPSOFAR();
CopySendChar(cstate, '\\');
- start = ptr; /* we include char in next run */
+ start = ptr; /* we include char in next run */
}
/*
@@ -3139,14 +3143,13 @@ CopyAttributeOutCSV(CopyState cstate, char *string,
if (!use_quote)
{
/*
- * Because '\.' can be a data value, quote it if it appears
- * alone on a line so it is not interpreted as the end-of-data
- * marker.
+ * Because '\.' can be a data value, quote it if it appears alone on a
+ * line so it is not interpreted as the end-of-data marker.
*/
if (single_attr && strcmp(ptr, "\\.") == 0)
- use_quote = true;
- else
- {
+ use_quote = true;
+ else
+ {
char *tptr = ptr;
while ((c = *tptr) != '\0')
@@ -3251,14 +3254,14 @@ CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist)
{
if (rel != NULL)
ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- name, RelationGetRelationName(rel))));
+ (errcode(ERRCODE_UNDEFINED_COLUMN),
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ name, RelationGetRelationName(rel))));
else
ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" does not exist",
- name)));
+ (errcode(ERRCODE_UNDEFINED_COLUMN),
+ errmsg("column \"%s\" does not exist",
+ name)));
}
/* Check for duplicates */
if (list_member_int(attnums, attnum))
@@ -3289,7 +3292,7 @@ copy_dest_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
static void
copy_dest_receive(TupleTableSlot *slot, DestReceiver *self)
{
- DR_copy *myState = (DR_copy *) self;
+ DR_copy *myState = (DR_copy *) self;
CopyState cstate = myState->cstate;
/* Make sure the tuple is fully deconstructed */
@@ -3323,7 +3326,7 @@ copy_dest_destroy(DestReceiver *self)
DestReceiver *
CreateCopyDestReceiver(void)
{
- DR_copy *self = (DR_copy *) palloc(sizeof(DR_copy));
+ DR_copy *self = (DR_copy *) palloc(sizeof(DR_copy));
self->pub.receiveSlot = copy_dest_receive;
self->pub.rStartup = copy_dest_startup;
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 853c7e6626..d7d4cdbfbc 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.184 2006/07/14 14:52:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.185 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -326,9 +326,9 @@ createdb(const CreatedbStmt *stmt)
}
/*
- * Check for db name conflict. This is just to give a more friendly
- * error message than "unique index violation". There's a race condition
- * but we're willing to accept the less friendly message in that case.
+ * Check for db name conflict. This is just to give a more friendly error
+ * message than "unique index violation". There's a race condition but
+ * we're willing to accept the less friendly message in that case.
*/
if (OidIsValid(get_database_oid(dbname)))
ereport(ERROR,
@@ -336,10 +336,10 @@ createdb(const CreatedbStmt *stmt)
errmsg("database \"%s\" already exists", dbname)));
/*
- * Insert a new tuple into pg_database. This establishes our ownership
- * of the new database name (anyone else trying to insert the same name
- * will block on the unique index, and fail after we commit). It also
- * assigns the OID that the new database will have.
+ * Insert a new tuple into pg_database. This establishes our ownership of
+ * the new database name (anyone else trying to insert the same name will
+ * block on the unique index, and fail after we commit). It also assigns
+ * the OID that the new database will have.
*/
pg_database_rel = heap_open(DatabaseRelationId, RowExclusiveLock);
@@ -361,9 +361,9 @@ createdb(const CreatedbStmt *stmt)
/*
* We deliberately set datconfig and datacl to defaults (NULL), rather
- * than copying them from the template database. Copying datacl would
- * be a bad idea when the owner is not the same as the template's
- * owner. It's more debatable whether datconfig should be copied.
+ * than copying them from the template database. Copying datacl would be
+ * a bad idea when the owner is not the same as the template's owner. It's
+ * more debatable whether datconfig should be copied.
*/
new_record_nulls[Anum_pg_database_datconfig - 1] = 'n';
new_record_nulls[Anum_pg_database_datacl - 1] = 'n';
@@ -497,8 +497,8 @@ createdb(const CreatedbStmt *stmt)
RequestCheckpoint(true, false);
/*
- * Close pg_database, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_database, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(pg_database_rel, NoLock);
@@ -543,8 +543,8 @@ dropdb(const char *dbname, bool missing_ok)
errmsg("cannot drop the currently open database")));
/*
- * Look up the target database's OID, and get exclusive lock on it.
- * We need this to ensure that no new backend starts up in the target
+ * Look up the target database's OID, and get exclusive lock on it. We
+ * need this to ensure that no new backend starts up in the target
* database while we are deleting it (see postinit.c), and that no one is
* using it as a CREATE DATABASE template or trying to delete it for
* themselves.
@@ -589,8 +589,8 @@ dropdb(const char *dbname, bool missing_ok)
errmsg("cannot drop a template database")));
/*
- * Check for active backends in the target database. (Because we hold
- * the database lock, no new ones can start after this.)
+ * Check for active backends in the target database. (Because we hold the
+ * database lock, no new ones can start after this.)
*/
if (DatabaseHasActiveBackends(db_id, false))
ereport(ERROR,
@@ -647,8 +647,8 @@ dropdb(const char *dbname, bool missing_ok)
remove_dbtablespaces(db_id);
/*
- * Close pg_database, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_database, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(pgdbrel, NoLock);
@@ -670,8 +670,8 @@ RenameDatabase(const char *oldname, const char *newname)
Relation rel;
/*
- * Look up the target database's OID, and get exclusive lock on it.
- * We need this for the same reasons as DROP DATABASE.
+ * Look up the target database's OID, and get exclusive lock on it. We
+ * need this for the same reasons as DROP DATABASE.
*/
rel = heap_open(DatabaseRelationId, RowExclusiveLock);
@@ -693,8 +693,8 @@ RenameDatabase(const char *oldname, const char *newname)
errmsg("current database may not be renamed")));
/*
- * Make sure the database does not have active sessions. This is the
- * same concern as above, but applied to other sessions.
+ * Make sure the database does not have active sessions. This is the same
+ * concern as above, but applied to other sessions.
*/
if (DatabaseHasActiveBackends(db_id, false))
ereport(ERROR,
@@ -730,8 +730,8 @@ RenameDatabase(const char *oldname, const char *newname)
CatalogUpdateIndexes(rel, newtup);
/*
- * Close pg_database, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_database, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(rel, NoLock);
@@ -1067,9 +1067,9 @@ get_db_info(const char *name, LOCKMODE lockmode,
relation = heap_open(DatabaseRelationId, AccessShareLock);
/*
- * Loop covers the rare case where the database is renamed before we
- * can lock it. We try again just in case we can find a new one of
- * the same name.
+ * Loop covers the rare case where the database is renamed before we can
+ * lock it. We try again just in case we can find a new one of the same
+ * name.
*/
for (;;)
{
@@ -1079,8 +1079,8 @@ get_db_info(const char *name, LOCKMODE lockmode,
Oid dbOid;
/*
- * there's no syscache for database-indexed-by-name,
- * so must do it the hard way
+ * there's no syscache for database-indexed-by-name, so must do it the
+ * hard way
*/
ScanKeyInit(&scanKey,
Anum_pg_database_datname,
@@ -1110,9 +1110,9 @@ get_db_info(const char *name, LOCKMODE lockmode,
LockSharedObject(DatabaseRelationId, dbOid, 0, lockmode);
/*
- * And now, re-fetch the tuple by OID. If it's still there and
- * still the same name, we win; else, drop the lock and loop
- * back to try again.
+ * And now, re-fetch the tuple by OID. If it's still there and still
+ * the same name, we win; else, drop the lock and loop back to try
+ * again.
*/
tuple = SearchSysCache(DATABASEOID,
ObjectIdGetDatum(dbOid),
@@ -1267,8 +1267,8 @@ get_database_oid(const char *dbname)
Oid oid;
/*
- * There's no syscache for pg_database indexed by name,
- * so we must look the hard way.
+ * There's no syscache for pg_database indexed by name, so we must look
+ * the hard way.
*/
pg_database = heap_open(DatabaseRelationId, AccessShareLock);
ScanKeyInit(&entry[0],
@@ -1399,15 +1399,15 @@ dbase_desc(StringInfo buf, uint8 xl_info, char *rec)
xl_dbase_create_rec *xlrec = (xl_dbase_create_rec *) rec;
appendStringInfo(buf, "create db: copy dir %u/%u to %u/%u",
- xlrec->src_db_id, xlrec->src_tablespace_id,
- xlrec->db_id, xlrec->tablespace_id);
+ xlrec->src_db_id, xlrec->src_tablespace_id,
+ xlrec->db_id, xlrec->tablespace_id);
}
else if (info == XLOG_DBASE_DROP)
{
xl_dbase_drop_rec *xlrec = (xl_dbase_drop_rec *) rec;
appendStringInfo(buf, "drop db: dir %u/%u",
- xlrec->db_id, xlrec->tablespace_id);
+ xlrec->db_id, xlrec->tablespace_id);
}
else
appendStringInfo(buf, "UNKNOWN");
diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c
index 149e1b6dae..697678c4aa 100644
--- a/src/backend/commands/define.c
+++ b/src/backend/commands/define.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.97 2006/07/03 22:45:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.98 2006/10/04 00:29:51 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -131,6 +131,7 @@ defGetBoolean(DefElem *def)
*/
if (def->arg == NULL)
return true;
+
/*
* Allow 0, 1, "true", "false"
*/
@@ -150,7 +151,7 @@ defGetBoolean(DefElem *def)
break;
default:
{
- char *sval = defGetString(def);
+ char *sval = defGetString(def);
if (pg_strcasecmp(sval, "true") == 0)
return true;
@@ -310,9 +311,9 @@ defGetTypeLength(DefElem *def)
DefElem *
defWithOids(bool value)
{
- DefElem *f = makeNode(DefElem);
+ DefElem *f = makeNode(DefElem);
f->defname = "oids";
- f->arg = (Node *)makeInteger(value);
+ f->arg = (Node *) makeInteger(value);
return f;
}
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 48db000ea9..00ada3fc72 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.151 2006/09/06 20:40:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.152 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,7 +41,7 @@ typedef struct ExplainState
} ExplainState;
static void ExplainOneQuery(Query *query, ExplainStmt *stmt,
- ParamListInfo params, TupOutputState *tstate);
+ ParamListInfo params, TupOutputState *tstate);
static double elapsed_time(instr_time *starttime);
static void explain_outNode(StringInfo str,
Plan *plan, PlanState *planstate,
@@ -760,7 +760,7 @@ explain_outNode(StringInfo str,
* The tidquals list has OR semantics, so be sure to show it
* as an OR condition.
*/
- List *tidquals = ((TidScan *) plan)->tidquals;
+ List *tidquals = ((TidScan *) plan)->tidquals;
if (list_length(tidquals) > 1)
tidquals = list_make1(make_orclause(tidquals));
@@ -928,8 +928,8 @@ explain_outNode(StringInfo str,
/*
* Ordinarily we don't pass down our own outer_plan value to our
* child nodes, but in an Append we must, since we might be
- * looking at an appendrel indexscan with outer references
- * from the member scans.
+ * looking at an appendrel indexscan with outer references from
+ * the member scans.
*/
explain_outNode(str, subnode,
appendstate->appendplans[j],
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index 615d4c93b2..29954a27a8 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.78 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.79 2006/10/04 00:29:51 momjian Exp $
*
* DESCRIPTION
* These routines take the parse tree and pick out the
@@ -686,7 +686,7 @@ RemoveFunction(RemoveFuncStmt *stmt)
* Find the function, do permissions and validity checks
*/
funcOid = LookupFuncNameTypeNames(functionName, argTypes, stmt->missing_ok);
- if (!OidIsValid(funcOid))
+ if (!OidIsValid(funcOid))
{
/* can only get here if stmt->missing_ok */
ereport(NOTICE,
@@ -1394,7 +1394,7 @@ DropCast(DropCastStmt *stmt)
0, 0);
if (!HeapTupleIsValid(tuple))
{
- if (! stmt->missing_ok)
+ if (!stmt->missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("cast from type %s to type %s does not exist",
@@ -1402,7 +1402,7 @@ DropCast(DropCastStmt *stmt)
TypeNameToString(stmt->targettype))));
else
ereport(NOTICE,
- (errmsg("cast from type %s to type %s does not exist ... skipping",
+ (errmsg("cast from type %s to type %s does not exist ... skipping",
TypeNameToString(stmt->sourcetype),
TypeNameToString(stmt->targettype))));
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 81246768bc..5f54f66f59 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.148 2006/08/27 19:14:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.149 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -125,8 +125,8 @@ DefineIndex(RangeVar *heapRelation,
LockRelId heaprelid;
LOCKTAG heaplocktag;
Snapshot snapshot;
- Relation pg_index;
- HeapTuple indexTuple;
+ Relation pg_index;
+ HeapTuple indexTuple;
Form_pg_index indexForm;
/*
@@ -450,18 +450,18 @@ DefineIndex(RangeVar *heapRelation,
* for an overview of how this works)
*
* We must commit our current transaction so that the index becomes
- * visible; then start another. Note that all the data structures
- * we just built are lost in the commit. The only data we keep past
- * here are the relation IDs.
+ * visible; then start another. Note that all the data structures we just
+ * built are lost in the commit. The only data we keep past here are the
+ * relation IDs.
*
* Before committing, get a session-level lock on the table, to ensure
- * that neither it nor the index can be dropped before we finish.
- * This cannot block, even if someone else is waiting for access, because
- * we already have the same lock within our transaction.
+ * that neither it nor the index can be dropped before we finish. This
+ * cannot block, even if someone else is waiting for access, because we
+ * already have the same lock within our transaction.
*
* Note: we don't currently bother with a session lock on the index,
- * because there are no operations that could change its state while
- * we hold lock on the parent table. This might need to change later.
+ * because there are no operations that could change its state while we
+ * hold lock on the parent table. This might need to change later.
*/
heaprelid = rel->rd_lockInfo.lockRelId;
LockRelationIdForSession(&heaprelid, ShareUpdateExclusiveLock);
@@ -471,15 +471,15 @@ DefineIndex(RangeVar *heapRelation,
/*
* Now we must wait until no running transaction could have the table open
- * with the old list of indexes. To do this, inquire which xacts currently
- * would conflict with ShareLock on the table -- ie, which ones have
- * a lock that permits writing the table. Then wait for each of these
- * xacts to commit or abort. Note we do not need to worry about xacts
- * that open the table for writing after this point; they will see the
- * new index when they open it.
+ * with the old list of indexes. To do this, inquire which xacts
+ * currently would conflict with ShareLock on the table -- ie, which ones
+ * have a lock that permits writing the table. Then wait for each of
+ * these xacts to commit or abort. Note we do not need to worry about
+ * xacts that open the table for writing after this point; they will see
+ * the new index when they open it.
*
- * Note: GetLockConflicts() never reports our own xid,
- * hence we need not check for that.
+ * Note: GetLockConflicts() never reports our own xid, hence we need not
+ * check for that.
*/
SET_LOCKTAG_RELATION(heaplocktag, heaprelid.dbId, heaprelid.relId);
old_xact_list = GetLockConflicts(&heaplocktag, ShareLock);
@@ -493,12 +493,12 @@ DefineIndex(RangeVar *heapRelation,
/*
* Now take the "reference snapshot" that will be used by validate_index()
- * to filter candidate tuples. All other transactions running at this
+ * to filter candidate tuples. All other transactions running at this
* time will have to be out-waited before we can commit, because we can't
* guarantee that tuples deleted just before this will be in the index.
*
- * We also set ActiveSnapshot to this snap, since functions in indexes
- * may need a snapshot.
+ * We also set ActiveSnapshot to this snap, since functions in indexes may
+ * need a snapshot.
*/
snapshot = CopySnapshot(GetTransactionSnapshot());
ActiveSnapshot = snapshot;
@@ -510,13 +510,13 @@ DefineIndex(RangeVar *heapRelation,
/*
* The index is now valid in the sense that it contains all currently
- * interesting tuples. But since it might not contain tuples deleted
- * just before the reference snap was taken, we have to wait out any
- * transactions older than the reference snap. We can do this by
- * waiting for each xact explicitly listed in the snap.
+ * interesting tuples. But since it might not contain tuples deleted just
+ * before the reference snap was taken, we have to wait out any
+ * transactions older than the reference snap. We can do this by waiting
+ * for each xact explicitly listed in the snap.
*
- * Note: GetSnapshotData() never stores our own xid into a snap,
- * hence we need not check for that.
+ * Note: GetSnapshotData() never stores our own xid into a snap, hence we
+ * need not check for that.
*/
for (ixcnt = 0; ixcnt < snapshot->xcnt; ixcnt++)
XactLockTableWait(snapshot->xip[ixcnt]);
diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c
index 5d77e056e1..4407e2785c 100644
--- a/src/backend/commands/opclasscmds.c
+++ b/src/backend/commands/opclasscmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.48 2006/07/18 17:42:00 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.49 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -128,7 +128,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
* A minimum expectation therefore is that the caller have execute
* privilege with grant option. Since we don't have a way to make the
* opclass go away if the grant option is revoked, we choose instead to
- * require ownership of the functions. It's also not entirely clear what
+ * require ownership of the functions. It's also not entirely clear what
* permissions should be required on the datatype, but ownership seems
* like a safe choice.
*
@@ -699,7 +699,7 @@ RemoveOpClass(RemoveOpClassStmt *stmt)
opcID = OpclassnameGetOpcid(amID, opcname);
if (!OidIsValid(opcID))
{
- if (! stmt -> missing_ok )
+ if (!stmt->missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator class \"%s\" does not exist for access method \"%s\"",
@@ -708,10 +708,10 @@ RemoveOpClass(RemoveOpClassStmt *stmt)
ereport(NOTICE,
(errmsg("operator class \"%s\" does not exist for access method \"%s\"",
opcname, stmt->amname)));
-
+
return;
}
-
+
tuple = SearchSysCache(CLAOID,
ObjectIdGetDatum(opcID),
0, 0, 0);
@@ -719,19 +719,19 @@ RemoveOpClass(RemoveOpClassStmt *stmt)
if (!HeapTupleIsValid(tuple))
{
-
- if (! stmt->missing_ok )
+
+ if (!stmt->missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator class \"%s\" does not exist for access method \"%s\"",
- NameListToString(stmt->opclassname), stmt->amname)));
+ NameListToString(stmt->opclassname), stmt->amname)));
else
ereport(NOTICE,
(errmsg("operator class \"%s\" does not exist for access method \"%s\"",
- NameListToString(stmt->opclassname), stmt->amname)));
+ NameListToString(stmt->opclassname), stmt->amname)));
return;
}
-
+
opcID = HeapTupleGetOid(tuple);
/* Permission check: must own opclass or its namespace */
diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c
index 2aed55ed45..76884e8cd8 100644
--- a/src/backend/commands/operatorcmds.c
+++ b/src/backend/commands/operatorcmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.32 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.33 2006/10/04 00:29:51 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -214,14 +214,14 @@ RemoveOperator(RemoveFuncStmt *stmt)
operOid = LookupOperNameTypeNames(NULL, operatorName,
typeName1, typeName2,
stmt->missing_ok, -1);
-
- if (stmt->missing_ok &&!OidIsValid(operOid) )
- {
- ereport(NOTICE,
- (errmsg("operator %s does not exist, skipping",
- NameListToString(operatorName))));
- return;
- }
+
+ if (stmt->missing_ok && !OidIsValid(operOid))
+ {
+ ereport(NOTICE,
+ (errmsg("operator %s does not exist, skipping",
+ NameListToString(operatorName))));
+ return;
+ }
tup = SearchSysCache(OPEROID,
ObjectIdGetDatum(operOid),
diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c
index 48833e7dc0..8907aac8a9 100644
--- a/src/backend/commands/portalcmds.c
+++ b/src/backend/commands/portalcmds.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.55 2006/09/07 22:52:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.56 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -108,8 +108,8 @@ PerformCursorOpen(DeclareCursorStmt *stmt, ParamListInfo params)
plan = copyObject(plan);
/*
- * XXX: debug_query_string is wrong here: the user might have
- * submitted multiple semicolon delimited queries.
+ * XXX: debug_query_string is wrong here: the user might have submitted
+ * multiple semicolon delimited queries.
*/
PortalDefineQuery(portal,
NULL,
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index 46824a48e5..6b7c11a189 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -10,7 +10,7 @@
* Copyright (c) 2002-2006, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.65 2006/09/27 18:40:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.66 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -162,7 +162,7 @@ ExecuteQuery(ExecuteStmt *stmt, ParamListInfo params,
portal = CreateNewPortal();
/* Don't display the portal in pg_cursors, it is for internal use only */
portal->visible = false;
-
+
/*
* For CREATE TABLE / AS EXECUTE, make a copy of the stored query so that
* we can modify its destination (yech, but this has always been ugly).
@@ -251,7 +251,7 @@ EvaluateParams(EState *estate, List *params, List *argtypes)
/* sizeof(ParamListInfoData) includes the first array element */
paramLI = (ParamListInfo) palloc(sizeof(ParamListInfoData) +
- (nargs - 1) * sizeof(ParamExternData));
+ (nargs - 1) *sizeof(ParamExternData));
paramLI->numParams = nargs;
forboth(le, exprstates, la, argtypes)
@@ -674,22 +674,21 @@ ExplainExecuteQuery(ExplainStmt *stmt, ParamListInfo params,
Datum
pg_prepared_statement(PG_FUNCTION_ARGS)
{
- FuncCallContext *funcctx;
- HASH_SEQ_STATUS *hash_seq;
- PreparedStatement *prep_stmt;
+ FuncCallContext *funcctx;
+ HASH_SEQ_STATUS *hash_seq;
+ PreparedStatement *prep_stmt;
/* stuff done only on the first call of the function */
if (SRF_IS_FIRSTCALL())
{
- TupleDesc tupdesc;
- MemoryContext oldcontext;
+ TupleDesc tupdesc;
+ MemoryContext oldcontext;
/* create a function context for cross-call persistence */
funcctx = SRF_FIRSTCALL_INIT();
/*
- * switch to memory context appropriate for multiple function
- * calls
+ * switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
@@ -704,9 +703,8 @@ pg_prepared_statement(PG_FUNCTION_ARGS)
funcctx->user_fctx = NULL;
/*
- * build tupdesc for result tuples. This must match the
- * definition of the pg_prepared_statements view in
- * system_views.sql
+ * build tupdesc for result tuples. This must match the definition of
+ * the pg_prepared_statements view in system_views.sql
*/
tupdesc = CreateTemplateTupleDesc(5, false);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
@@ -735,21 +733,21 @@ pg_prepared_statement(PG_FUNCTION_ARGS)
prep_stmt = hash_seq_search(hash_seq);
if (prep_stmt)
{
- Datum result;
- HeapTuple tuple;
- Datum values[5];
- bool nulls[5];
+ Datum result;
+ HeapTuple tuple;
+ Datum values[5];
+ bool nulls[5];
MemSet(nulls, 0, sizeof(nulls));
values[0] = DirectFunctionCall1(textin,
- CStringGetDatum(prep_stmt->stmt_name));
+ CStringGetDatum(prep_stmt->stmt_name));
if (prep_stmt->query_string == NULL)
nulls[1] = true;
else
values[1] = DirectFunctionCall1(textin,
- CStringGetDatum(prep_stmt->query_string));
+ CStringGetDatum(prep_stmt->query_string));
values[2] = TimestampTzGetDatum(prep_stmt->prepare_time);
values[3] = build_regtype_array(prep_stmt->argtype_list);
@@ -783,8 +781,8 @@ build_regtype_array(List *oid_list)
i = 0;
foreach(lc, oid_list)
{
- Oid oid;
- Datum oid_str;
+ Oid oid;
+ Datum oid_str;
oid = lfirst_oid(lc);
oid_str = DirectFunctionCall1(oidout, ObjectIdGetDatum(oid));
diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c
index f91242470a..7906f587b7 100644
--- a/src/backend/commands/proclang.c
+++ b/src/backend/commands/proclang.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.68 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.69 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -395,15 +395,15 @@ DropProceduralLanguage(DropPLangStmt *stmt)
0, 0, 0);
if (!HeapTupleIsValid(langTup))
{
- if (! stmt->missing_ok)
+ if (!stmt->missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("language \"%s\" does not exist", languageName)));
- else
+ else
ereport(NOTICE,
- (errmsg("language \"%s\" does not exist, skipping",
+ (errmsg("language \"%s\" does not exist, skipping",
languageName)));
-
+
return;
}
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 865c2f60fe..9d769cb052 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.139 2006/08/21 00:57:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.140 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -85,7 +85,7 @@ static Relation open_share_lock(SeqTable seq);
static void init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel);
static Form_pg_sequence read_info(SeqTable elm, Relation rel, Buffer *buf);
static void init_params(List *options, bool isInit,
- Form_pg_sequence new, List **owned_by);
+ Form_pg_sequence new, List **owned_by);
static void do_setval(Oid relid, int64 next, bool iscalled);
static void process_owned_by(Relation seqrel, List *owned_by);
@@ -862,7 +862,7 @@ open_share_lock(SeqTable seq)
static void
init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel)
{
- SeqTable elm;
+ SeqTable elm;
Relation seqrel;
/* Look to see if we already have a seqtable entry for relation */
@@ -1180,7 +1180,7 @@ process_owned_by(Relation seqrel, List *owned_by)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("invalid OWNED BY option"),
- errhint("Specify OWNED BY table.column or OWNED BY NONE.")));
+ errhint("Specify OWNED BY table.column or OWNED BY NONE.")));
tablerel = NULL;
attnum = 0;
}
@@ -1209,7 +1209,7 @@ process_owned_by(Relation seqrel, List *owned_by)
if (seqrel->rd_rel->relowner != tablerel->rd_rel->relowner)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("sequence must have same owner as table it is owned by")));
+ errmsg("sequence must have same owner as table it is owned by")));
if (RelationGetNamespace(seqrel) != RelationGetNamespace(tablerel))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
@@ -1225,8 +1225,8 @@ process_owned_by(Relation seqrel, List *owned_by)
}
/*
- * OK, we are ready to update pg_depend. First remove any existing
- * AUTO dependencies for the sequence, then optionally add a new one.
+ * OK, we are ready to update pg_depend. First remove any existing AUTO
+ * dependencies for the sequence, then optionally add a new one.
*/
markSequenceUnowned(RelationGetRelid(seqrel));
@@ -1304,5 +1304,5 @@ seq_desc(StringInfo buf, uint8 xl_info, char *rec)
}
appendStringInfo(buf, "rel %u/%u/%u",
- xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode);
+ xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode);
}
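
The sequence.c changes are again pure re-indentation, but they sit in process_owned_by, the routine behind the OWNED BY clause whose checks (same owner and same schema as the owning table) appear in the hunks above. A small sketch with hypothetical object names:

    CREATE TABLE invoices (id integer);
    CREATE SEQUENCE invoices_id_seq OWNED BY invoices.id;
    -- per the hint above, the alternative spelling detaches it again:
    ALTER SEQUENCE invoices_id_seq OWNED BY NONE;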
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 45167b816a..04c2a03aa8 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.202 2006/09/04 21:15:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.203 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -166,7 +166,7 @@ static void MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel)
static bool change_varattnos_walker(Node *node, const AttrNumber *newattno);
static void StoreCatalogInheritance(Oid relationId, List *supers);
static void StoreCatalogInheritance1(Oid relationId, Oid parentOid,
- int16 seqNumber, Relation catalogRelation);
+ int16 seqNumber, Relation catalogRelation);
static int findAttrByName(const char *attributeName, List *schema);
static void setRelhassubclassInRelation(Oid relationId, bool relhassubclass);
static void AlterIndexNamespaces(Relation classRel, Relation rel,
@@ -566,18 +566,18 @@ ExecuteTruncate(TruncateStmt *stmt)
}
/*
- * In CASCADE mode, suck in all referencing relations as well. This
- * requires multiple iterations to find indirectly-dependent relations.
- * At each phase, we need to exclusive-lock new rels before looking
- * for their dependencies, else we might miss something. Also, we
- * check each rel as soon as we open it, to avoid a faux pas such as
- * holding lock for a long time on a rel we have no permissions for.
+ * In CASCADE mode, suck in all referencing relations as well. This
+ * requires multiple iterations to find indirectly-dependent relations. At
+ * each phase, we need to exclusive-lock new rels before looking for their
+ * dependencies, else we might miss something. Also, we check each rel as
+ * soon as we open it, to avoid a faux pas such as holding lock for a long
+ * time on a rel we have no permissions for.
*/
if (stmt->behavior == DROP_CASCADE)
{
for (;;)
{
- List *newrelids;
+ List *newrelids;
newrelids = heap_truncate_find_FKs(relids);
if (newrelids == NIL)
@@ -585,7 +585,7 @@ ExecuteTruncate(TruncateStmt *stmt)
foreach(cell, newrelids)
{
- Oid relid = lfirst_oid(cell);
+ Oid relid = lfirst_oid(cell);
Relation rel;
rel = heap_open(relid, AccessExclusiveLock);
@@ -601,8 +601,8 @@ ExecuteTruncate(TruncateStmt *stmt)
/*
* Check foreign key references. In CASCADE mode, this should be
- * unnecessary since we just pulled in all the references; but as
- * a cross-check, do it anyway if in an Assert-enabled build.
+ * unnecessary since we just pulled in all the references; but as a
+ * cross-check, do it anyway if in an Assert-enabled build.
*/
#ifdef USE_ASSERT_CHECKING
heap_truncate_check_FKs(rels, false);
@@ -612,9 +612,9 @@ ExecuteTruncate(TruncateStmt *stmt)
#endif
/*
- * Also check for pending AFTER trigger events on the target relations.
- * We can't just leave those be, since they will try to fetch tuples
- * that the TRUNCATE removes.
+ * Also check for pending AFTER trigger events on the target relations. We
+ * can't just leave those be, since they will try to fetch tuples that the
+ * TRUNCATE removes.
*/
AfterTriggerCheckTruncate(relids);
@@ -657,7 +657,7 @@ ExecuteTruncate(TruncateStmt *stmt)
}
/*
- * Check that a given rel is safe to truncate. Subroutine for ExecuteTruncate
+ * Check that a given rel is safe to truncate. Subroutine for ExecuteTruncate
*/
static void
truncate_check_rel(Relation rel)
@@ -681,9 +681,8 @@ truncate_check_rel(Relation rel)
RelationGetRelationName(rel))));
/*
- * We can never allow truncation of shared or nailed-in-cache
- * relations, because we can't support changing their relfilenode
- * values.
+ * We can never allow truncation of shared or nailed-in-cache relations,
+ * because we can't support changing their relfilenode values.
*/
if (rel->rd_rel->relisshared || rel->rd_isnailed)
ereport(ERROR,
@@ -692,13 +691,13 @@ truncate_check_rel(Relation rel)
RelationGetRelationName(rel))));
/*
- * Don't allow truncate on temp tables of other backends ... their
- * local buffer manager is not going to cope.
+ * Don't allow truncate on temp tables of other backends ... their local
+ * buffer manager is not going to cope.
*/
if (isOtherTempNamespace(RelationGetNamespace(rel)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot truncate temporary tables of other sessions")));
+ errmsg("cannot truncate temporary tables of other sessions")));
}
/*----------
@@ -1141,16 +1140,20 @@ change_varattnos_of_a_node(Node *node, const AttrNumber *newattno)
AttrNumber *
varattnos_map(TupleDesc old, TupleDesc new)
{
- int i,j;
- AttrNumber *attmap = palloc0(sizeof(AttrNumber)*old->natts);
- for (i=1; i <= old->natts; i++) {
- if (old->attrs[i-1]->attisdropped) {
- attmap[i-1] = 0;
+ int i,
+ j;
+ AttrNumber *attmap = palloc0(sizeof(AttrNumber) * old->natts);
+
+ for (i = 1; i <= old->natts; i++)
+ {
+ if (old->attrs[i - 1]->attisdropped)
+ {
+ attmap[i - 1] = 0;
continue;
}
- for (j=1; j<= new->natts; j++)
- if (!strcmp(NameStr(old->attrs[i-1]->attname), NameStr(new->attrs[j-1]->attname)))
- attmap[i-1] = j;
+ for (j = 1; j <= new->natts; j++)
+ if (!strcmp(NameStr(old->attrs[i - 1]->attname), NameStr(new->attrs[j - 1]->attname)))
+ attmap[i - 1] = j;
}
return attmap;
}
@@ -1160,16 +1163,19 @@ varattnos_map(TupleDesc old, TupleDesc new)
* ColumnDefs
*/
AttrNumber *
-varattnos_map_schema(TupleDesc old, List *schema)
+varattnos_map_schema(TupleDesc old, List *schema)
{
- int i;
- AttrNumber *attmap = palloc0(sizeof(AttrNumber)*old->natts);
- for (i=1; i <= old->natts; i++) {
- if (old->attrs[i-1]->attisdropped) {
- attmap[i-1] = 0;
+ int i;
+ AttrNumber *attmap = palloc0(sizeof(AttrNumber) * old->natts);
+
+ for (i = 1; i <= old->natts; i++)
+ {
+ if (old->attrs[i - 1]->attisdropped)
+ {
+ attmap[i - 1] = 0;
continue;
}
- attmap[i-1] = findAttrByName(NameStr(old->attrs[i-1]->attname), schema);
+ attmap[i - 1] = findAttrByName(NameStr(old->attrs[i - 1]->attname), schema);
}
return attmap;
}
@@ -1244,14 +1250,14 @@ StoreCatalogInheritance(Oid relationId, List *supers)
static void
StoreCatalogInheritance1(Oid relationId, Oid parentOid,
- int16 seqNumber, Relation relation)
+ int16 seqNumber, Relation relation)
{
- Datum datum[Natts_pg_inherits];
- char nullarr[Natts_pg_inherits];
- ObjectAddress childobject,
- parentobject;
- HeapTuple tuple;
- TupleDesc desc = RelationGetDescr(relation);
+ Datum datum[Natts_pg_inherits];
+ char nullarr[Natts_pg_inherits];
+ ObjectAddress childobject,
+ parentobject;
+ HeapTuple tuple;
+ TupleDesc desc = RelationGetDescr(relation);
datum[0] = ObjectIdGetDatum(relationId); /* inhrel */
datum[1] = ObjectIdGetDatum(parentOid); /* inhparent */
@@ -2100,8 +2106,8 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
ATPrepSetTableSpace(tab, rel, cmd->name);
pass = AT_PASS_MISC; /* doesn't actually matter */
break;
- case AT_SetRelOptions: /* SET (...) */
- case AT_ResetRelOptions: /* RESET (...) */
+ case AT_SetRelOptions: /* SET (...) */
+ case AT_ResetRelOptions: /* RESET (...) */
ATSimplePermissionsRelationOrIndex(rel);
/* This command never recurses */
/* No command-specific prep needed */
@@ -2274,10 +2280,10 @@ ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd)
* Nothing to do here; Phase 3 does the work
*/
break;
- case AT_SetRelOptions: /* SET (...) */
+ case AT_SetRelOptions: /* SET (...) */
ATExecSetRelOptions(rel, (List *) cmd->def, false);
break;
- case AT_ResetRelOptions: /* RESET (...) */
+ case AT_ResetRelOptions: /* RESET (...) */
ATExecSetRelOptions(rel, (List *) cmd->def, true);
break;
case AT_EnableTrig: /* ENABLE TRIGGER name */
@@ -2564,8 +2570,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
/*
* If we are rebuilding the tuples OR if we added any new NOT NULL
* constraints, check all not-null constraints. This is a bit of
- * overkill but it minimizes risk of bugs, and heap_attisnull is
- * a pretty cheap test anyway.
+ * overkill but it minimizes risk of bugs, and heap_attisnull is a
+ * pretty cheap test anyway.
*/
for (i = 0; i < newTupDesc->natts; i++)
{
@@ -2679,13 +2685,13 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
foreach(l, notnull_attrs)
{
- int attn = lfirst_int(l);
+ int attn = lfirst_int(l);
- if (heap_attisnull(tuple, attn+1))
+ if (heap_attisnull(tuple, attn + 1))
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("column \"%s\" contains null values",
- NameStr(newTupDesc->attrs[attn]->attname))));
+ NameStr(newTupDesc->attrs[attn]->attname))));
}
foreach(l, tab->constraints)
@@ -5105,7 +5111,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
if (!list_member_oid(tab->changedConstraintOids,
foundObject.objectId))
{
- char *defstring = pg_get_constraintdef_string(foundObject.objectId);
+ char *defstring = pg_get_constraintdef_string(foundObject.objectId);
/*
* Put NORMAL dependencies at the front of the list and
@@ -5302,10 +5308,10 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab)
/*
* Now we can drop the existing constraints and indexes --- constraints
* first, since some of them might depend on the indexes. In fact, we
- * have to delete FOREIGN KEY constraints before UNIQUE constraints,
- * but we already ordered the constraint list to ensure that would happen.
- * It should be okay to use DROP_RESTRICT here, since nothing else should
- * be depending on these objects.
+ * have to delete FOREIGN KEY constraints before UNIQUE constraints, but
+ * we already ordered the constraint list to ensure that would happen. It
+ * should be okay to use DROP_RESTRICT here, since nothing else should be
+ * depending on these objects.
*/
foreach(l, tab->changedConstraintOids)
{
@@ -5482,17 +5488,17 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing)
tuple_class->relowner != newOwnerId)
{
/* if it's an owned sequence, disallow changing it by itself */
- Oid tableId;
- int32 colId;
+ Oid tableId;
+ int32 colId;
if (sequenceIsOwned(relationOid, &tableId, &colId))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot change owner of sequence \"%s\"",
NameStr(tuple_class->relname)),
- errdetail("Sequence \"%s\" is linked to table \"%s\".",
- NameStr(tuple_class->relname),
- get_rel_name(tableId))));
+ errdetail("Sequence \"%s\" is linked to table \"%s\".",
+ NameStr(tuple_class->relname),
+ get_rel_name(tableId))));
}
break;
case RELKIND_TOASTVALUE:
@@ -6051,12 +6057,12 @@ ATExecEnableDisableTrigger(Relation rel, char *trigname,
}
static char *
-decompile_conbin(HeapTuple contup, TupleDesc tupdesc)
+decompile_conbin(HeapTuple contup, TupleDesc tupdesc)
{
- Form_pg_constraint con;
- bool isnull;
- Datum attr;
- Datum expr;
+ Form_pg_constraint con;
+ bool isnull;
+ Datum attr;
+ Datum expr;
con = (Form_pg_constraint) GETSTRUCT(contup);
attr = heap_getattr(contup, Anum_pg_constraint_conbin, tupdesc, &isnull);
@@ -6107,7 +6113,7 @@ ATExecAddInherits(Relation child_rel, RangeVar *parent)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("table \"%s\" without OIDs cannot inherit from table \"%s\" with OIDs",
- RelationGetRelationName(child_rel), parent->relname)));
+ RelationGetRelationName(child_rel), parent->relname)));
/*
* Don't allow any duplicates in the list of parents. We scan through the
@@ -6140,8 +6146,8 @@ ATExecAddInherits(Relation child_rel, RangeVar *parent)
heap_close(catalogRelation, RowExclusiveLock);
/*
- * If the new parent is found in our list of inheritors, we have a circular
- * structure
+ * If the new parent is found in our list of inheritors, we have a
+ * circular structure
*/
children = find_all_inheritors(RelationGetRelid(child_rel));
@@ -6183,12 +6189,12 @@ ATExecAddInherits(Relation child_rel, RangeVar *parent)
static void
MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel)
{
- Relation attrdesc;
- AttrNumber parent_attno;
- int parent_natts;
- TupleDesc tupleDesc;
+ Relation attrdesc;
+ AttrNumber parent_attno;
+ int parent_natts;
+ TupleDesc tupleDesc;
TupleConstr *constr;
- HeapTuple tuple;
+ HeapTuple tuple;
tupleDesc = RelationGetDescr(parent_rel);
parent_natts = tupleDesc->natts;
@@ -6221,13 +6227,13 @@ MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("child table \"%s\" has different type for column \"%s\"",
- RelationGetRelationName(child_rel), NameStr(attribute->attname))));
+ RelationGetRelationName(child_rel), NameStr(attribute->attname))));
if (attribute->attnotnull && !childatt->attnotnull)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("column \"%s\" in child table must be NOT NULL",
- NameStr(attribute->attname))));
+ errmsg("column \"%s\" in child table must be NOT NULL",
+ NameStr(attribute->attname))));
childatt->attinhcount++;
simple_heap_update(attrdesc, &tuple->t_self, tuple);
@@ -6555,13 +6561,13 @@ AlterTableNamespace(RangeVar *relation, const char *newschema)
/* if it's an owned sequence, disallow moving it by itself */
if (rel->rd_rel->relkind == RELKIND_SEQUENCE)
{
- Oid tableId;
- int32 colId;
+ Oid tableId;
+ int32 colId;
if (sequenceIsOwned(relid, &tableId, &colId))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot move an owned sequence into another schema"),
+ errmsg("cannot move an owned sequence into another schema"),
errdetail("Sequence \"%s\" is linked to table \"%s\".",
RelationGetRelationName(rel),
get_rel_name(tableId))));
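
The tablecmds.c hunks reflow the comments in ExecuteTruncate (in CASCADE mode every table referencing the target, directly or indirectly, is pulled in before the foreign-key and pending AFTER-trigger checks run) and in the owned-sequence restrictions of ATExecChangeOwner and AlterTableNamespace. Illustrative commands, with hypothetical names:

    -- without CASCADE this is rejected while other tables still reference "orders"
    TRUNCATE TABLE orders CASCADE;

    -- an owned sequence follows its table; moving or re-owning it by itself is rejected
    ALTER TABLE orders SET SCHEMA archive;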
diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c
index e53ae3b61b..50d5e7d84e 100644
--- a/src/backend/commands/tablespace.c
+++ b/src/backend/commands/tablespace.c
@@ -37,7 +37,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.38 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.39 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -402,7 +402,7 @@ DropTableSpace(DropTableSpaceStmt *stmt)
if (!HeapTupleIsValid(tuple))
{
- if ( ! stmt->missing_ok )
+ if (!stmt->missing_ok)
{
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
@@ -481,8 +481,8 @@ DropTableSpace(DropTableSpaceStmt *stmt)
}
/*
- * Note: because we checked that the tablespace was empty, there should
- * be no need to worry about flushing shared buffers or free space map
+ * Note: because we checked that the tablespace was empty, there should be
+ * no need to worry about flushing shared buffers or free space map
* entries for relations in the tablespace.
*/
@@ -1069,7 +1069,7 @@ tblspc_desc(StringInfo buf, uint8 xl_info, char *rec)
xl_tblspc_create_rec *xlrec = (xl_tblspc_create_rec *) rec;
appendStringInfo(buf, "create ts: %u \"%s\"",
- xlrec->ts_id, xlrec->ts_path);
+ xlrec->ts_id, xlrec->ts_path);
}
else if (info == XLOG_TBLSPC_DROP)
{
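
DropTableSpace gets the same !stmt->missing_ok spelling cleanup; that flag backs the IF EXISTS variant (tablespace name hypothetical):

    DROP TABLESPACE IF EXISTS fastdisk;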
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 6d63356e10..1ed15614ce 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.208 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.209 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -480,7 +480,7 @@ DropTrigger(Oid relid, const char *trigname, DropBehavior behavior,
if (!HeapTupleIsValid(tup))
{
- if (! missing_ok)
+ if (!missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("trigger \"%s\" for table \"%s\" does not exist",
@@ -856,8 +856,8 @@ RelationBuildTriggers(Relation relation)
/*
* Note: since we scan the triggers using TriggerRelidNameIndexId, we will
* be reading the triggers in name order, except possibly during
- * emergency-recovery operations (ie, IgnoreSystemIndexes). This in
- * turn ensures that triggers will be fired in name order.
+ * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
+ * ensures that triggers will be fired in name order.
*/
ScanKeyInit(&skey,
Anum_pg_trigger_tgrelid,
@@ -2940,26 +2940,30 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
- constraint->catalogname, constraint->schemaname,
+ constraint->catalogname, constraint->schemaname,
constraint->relname)));
}
- /*
+ /*
* If we're given the schema name with the constraint, look only
- * in that schema. If given a bare constraint name, use the
+ * in that schema. If given a bare constraint name, use the
* search path to find the first matching constraint.
*/
- if (constraint->schemaname) {
- Oid namespaceId = LookupExplicitNamespace(constraint->schemaname);
+ if (constraint->schemaname)
+ {
+ Oid namespaceId = LookupExplicitNamespace(constraint->schemaname);
+
namespaceSearchList = list_make1_oid(namespaceId);
- } else {
+ }
+ else
+ {
namespaceSearchList = fetch_search_path(true);
}
found = false;
foreach(namespaceSearchCell, namespaceSearchList)
{
- Oid searchNamespaceId = lfirst_oid(namespaceSearchCell);
+ Oid searchNamespaceId = lfirst_oid(namespaceSearchCell);
/*
* Setup to scan pg_trigger by tgconstrname ...
@@ -2978,18 +2982,18 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
{
Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
- Oid constraintNamespaceId;
+ Oid constraintNamespaceId;
/*
* Foreign key constraints have triggers on both the
- * parent and child tables. Since these tables may be
- * in different schemas we must pick the child table
- * because that table "owns" the constraint.
+ * parent and child tables. Since these tables may be in
+ * different schemas we must pick the child table because
+ * that table "owns" the constraint.
*
* Referential triggers on the parent table other than
- * NOACTION_DEL and NOACTION_UPD are ignored below, so
- * it is possible to not check them here, but it seems
- * safer to always check.
+ * NOACTION_DEL and NOACTION_UPD are ignored below, so it
+ * is possible to not check them here, but it seems safer
+ * to always check.
*/
if (pg_trigger->tgfoid == F_RI_FKEY_NOACTION_DEL ||
pg_trigger->tgfoid == F_RI_FKEY_NOACTION_UPD ||
@@ -3006,16 +3010,16 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
constraintNamespaceId = get_rel_namespace(pg_trigger->tgrelid);
/*
- * If this constraint is not in the schema we're
- * currently searching for, keep looking.
+ * If this constraint is not in the schema we're currently
+ * searching for, keep looking.
*/
if (constraintNamespaceId != searchNamespaceId)
continue;
/*
- * If we found some, check that they fit the deferrability but
- * skip referential action ones, since they are silently never
- * deferrable.
+ * If we found some, check that they fit the deferrability
+ * but skip referential action ones, since they are
+ * silently never deferrable.
*/
if (pg_trigger->tgfoid != F_RI_FKEY_RESTRICT_UPD &&
pg_trigger->tgfoid != F_RI_FKEY_RESTRICT_DEL &&
@@ -3029,8 +3033,8 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
if (stmt->deferred && !pg_trigger->tgdeferrable)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("constraint \"%s\" is not deferrable",
- constraint->relname)));
+ errmsg("constraint \"%s\" is not deferrable",
+ constraint->relname)));
oidlist = lappend_oid(oidlist, HeapTupleGetOid(htup));
}
found = true;
@@ -3147,9 +3151,9 @@ AfterTriggerCheckTruncate(List *relids)
event = event->ate_next)
{
/*
- * We can ignore completed events. (Even if a DONE flag is rolled
- * back by subxact abort, it's OK because the effects of the
- * TRUNCATE must get rolled back too.)
+ * We can ignore completed events. (Even if a DONE flag is rolled
+ * back by subxact abort, it's OK because the effects of the TRUNCATE
+ * must get rolled back too.)
*/
if (event->ate_event & AFTER_TRIGGER_DONE)
continue;
@@ -3162,10 +3166,9 @@ AfterTriggerCheckTruncate(List *relids)
}
/*
- * Also scan events queued by incomplete queries. This could only
- * matter if a TRUNCATE is executed by a function or trigger within
- * an updating query on the same relation, which is pretty perverse,
- * but let's check.
+ * Also scan events queued by incomplete queries. This could only matter
+ * if a TRUNCATE is executed by a function or trigger within an updating
+ * query on the same relation, which is pretty perverse, but let's check.
*/
for (depth = 0; depth <= afterTriggers->query_depth; depth++)
{
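
The trigger.c hunks touch DropTrigger's missing_ok path and the constraint lookup in AfterTriggerSetState, which honours an optional schema qualification and insists that the named constraint be deferrable. A sketch with hypothetical names:

    DROP TRIGGER IF EXISTS audit_trg ON orders;
    -- a schema-qualified name restricts the search to that schema,
    -- and the constraint itself must have been declared DEFERRABLE
    SET CONSTRAINTS public.orders_customer_fkey DEFERRED;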
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index 6dfa6296d5..53cca73a9d 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.96 2006/07/31 20:09:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.97 2006/10/04 00:29:51 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -343,12 +343,12 @@ DefineType(List *names, List *parameters)
analyzeOid = findTypeAnalyzeFunction(analyzeName, typoid);
/*
- * Check permissions on functions. We choose to require the creator/owner
- * of a type to also own the underlying functions. Since creating a type
+ * Check permissions on functions. We choose to require the creator/owner
+ * of a type to also own the underlying functions. Since creating a type
* is tantamount to granting public execute access on the functions, the
- * minimum sane check would be for execute-with-grant-option. But we don't
- * have a way to make the type go away if the grant option is revoked, so
- * ownership seems better.
+ * minimum sane check would be for execute-with-grant-option. But we
+ * don't have a way to make the type go away if the grant option is
+ * revoked, so ownership seems better.
*/
if (inputOid && !pg_proc_ownercheck(inputOid, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC,
@@ -587,8 +587,8 @@ DefineDomain(CreateDomainStmt *stmt)
/*
* Base type must be a plain base type or another domain. Domains over
- * pseudotypes would create a security hole. Domains over composite
- * types might be made to work in the future, but not today.
+ * pseudotypes would create a security hole. Domains over composite types
+ * might be made to work in the future, but not today.
*/
typtype = baseType->typtype;
if (typtype != 'b' && typtype != 'd')
@@ -1840,7 +1840,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate function in check constraint")));
+ errmsg("cannot use aggregate function in check constraint")));
/*
* Convert to string form for storage.
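
typecmds.c reflows the comments on the type and domain permission checks and on domainAddConstraint, which rejects aggregate functions inside a domain CHECK. For instance (domain names hypothetical):

    CREATE DOMAIN order_qty AS integer CHECK (VALUE > 0);   -- accepted
    -- rejected with "cannot use aggregate function in check constraint":
    -- CREATE DOMAIN bad_qty AS integer CHECK (VALUE < max(VALUE));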
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 589111713b..2e9b27a598 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.173 2006/07/13 16:49:14 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.174 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -377,8 +377,8 @@ CreateRole(CreateRoleStmt *stmt)
GetUserId(), false);
/*
- * Close pg_authid, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_authid, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(pg_authid_rel, NoLock);
@@ -696,8 +696,8 @@ AlterRole(AlterRoleStmt *stmt)
false);
/*
- * Close pg_authid, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_authid, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(pg_authid_rel, NoLock);
@@ -845,7 +845,7 @@ DropRole(DropRoleStmt *stmt)
else
{
ereport(NOTICE,
- (errmsg("role \"%s\" does not exist, skipping",
+ (errmsg("role \"%s\" does not exist, skipping",
role)));
}
@@ -1075,8 +1075,8 @@ RenameRole(const char *oldname, const char *newname)
ReleaseSysCache(oldtuple);
/*
- * Close pg_authid, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_authid, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(rel, NoLock);
@@ -1132,8 +1132,8 @@ GrantRole(GrantRoleStmt *stmt)
}
/*
- * Close pg_authid, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_authid, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(pg_authid_rel, NoLock);
@@ -1149,7 +1149,7 @@ GrantRole(GrantRoleStmt *stmt)
* Drop the objects owned by a given list of roles.
*/
void
-DropOwnedObjects(DropOwnedStmt * stmt)
+DropOwnedObjects(DropOwnedStmt *stmt)
{
List *role_ids = roleNamesToIds(stmt->roles);
ListCell *cell;
@@ -1175,7 +1175,7 @@ DropOwnedObjects(DropOwnedStmt * stmt)
* Give the objects owned by a given list of roles away to another user.
*/
void
-ReassignOwnedObjects(ReassignOwnedStmt * stmt)
+ReassignOwnedObjects(ReassignOwnedStmt *stmt)
{
List *role_ids = roleNamesToIds(stmt->roles);
ListCell *cell;
@@ -1360,8 +1360,8 @@ AddRoleMems(const char *rolename, Oid roleid,
}
/*
- * Close pg_authmem, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_authmem, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(pg_authmem_rel, NoLock);
}
@@ -1473,8 +1473,8 @@ DelRoleMems(const char *rolename, Oid roleid,
}
/*
- * Close pg_authmem, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_authmem, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(pg_authmem_rel, NoLock);
}
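
In user.c the reformatted comments sit around DropRole's skip notice and the DropOwnedObjects/ReassignOwnedObjects entry points. A typical sequence at the SQL level (role names hypothetical):

    DROP ROLE IF EXISTS app_reader;
    REASSIGN OWNED BY app_writer TO postgres;
    DROP OWNED BY app_writer;
    DROP ROLE app_writer;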
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 5a6d5a04b0..e9f0bf363e 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.340 2006/09/21 20:31:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.341 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -125,7 +125,7 @@ typedef struct VRelStats
Size min_tlen;
Size max_tlen;
bool hasindex;
- TransactionId minxid; /* Minimum Xid present anywhere on table */
+ TransactionId minxid; /* Minimum Xid present anywhere on table */
/* vtlinks array for tuple chain following - sorted by new_tid */
int num_vtlinks;
VTupleLink vtlinks;
@@ -238,7 +238,7 @@ static int vac_cmp_blk(const void *left, const void *right);
static int vac_cmp_offno(const void *left, const void *right);
static int vac_cmp_vtlinks(const void *left, const void *right);
static bool enough_space(VacPage vacpage, Size len);
-static Size PageGetFreeSpaceWithFillFactor(Relation relation, Page page);
+static Size PageGetFreeSpaceWithFillFactor(Relation relation, Page page);
/****************************************************************************
@@ -320,8 +320,8 @@ vacuum(VacuumStmt *vacstmt, List *relids)
errhint("Use VACUUM FULL, then VACUUM FREEZE.")));
/*
- * Send info about dead objects to the statistics collector, unless
- * we are in autovacuum --- autovacuum.c does this for itself.
+ * Send info about dead objects to the statistics collector, unless we are
+ * in autovacuum --- autovacuum.c does this for itself.
*/
if (vacstmt->vacuum && !IsAutoVacuumProcess())
pgstat_vacuum_tabstat();
@@ -481,20 +481,21 @@ vacuum(VacuumStmt *vacstmt, List *relids)
* PostgresMain().
*/
StartTransactionCommand();
+
/*
- * Re-establish the transaction snapshot. This is wasted effort
- * when we are called as a normal utility command, because the
- * new transaction will be dropped immediately by PostgresMain();
- * but it's necessary if we are called from autovacuum because
- * autovacuum might continue on to do an ANALYZE-only call.
+ * Re-establish the transaction snapshot. This is wasted effort when
+ * we are called as a normal utility command, because the new
+ * transaction will be dropped immediately by PostgresMain(); but it's
+ * necessary if we are called from autovacuum because autovacuum might
+ * continue on to do an ANALYZE-only call.
*/
ActiveSnapshot = CopySnapshot(GetTransactionSnapshot());
}
if (vacstmt->vacuum)
{
- TransactionId minxid,
- vacuumxid;
+ TransactionId minxid,
+ vacuumxid;
/*
* If it was a database-wide VACUUM, print FSM usage statistics (we
@@ -593,9 +594,9 @@ vacuum_set_xid_limits(VacuumStmt *vacstmt, bool sharedRel,
TransactionId limit;
/*
- * We can always ignore processes running lazy vacuum. This is because we
+ * We can always ignore processes running lazy vacuum. This is because we
* use these values only for deciding which tuples we must keep in the
- * tables. Since lazy vacuum doesn't write its xid to the table, it's
+ * tables. Since lazy vacuum doesn't write its xid to the table, it's
* safe to ignore it. In theory it could be problematic to ignore lazy
* vacuums on a full vacuum, but keep in mind that only one vacuum process
* can be working on a particular table at any time, and that each vacuum
@@ -704,6 +705,7 @@ vac_update_relstats(Oid relid, BlockNumber num_pages, double num_tuples,
pgcform->relhasindex = hasindex;
dirty = true;
}
+
/*
* If we have discovered that there are no indexes, then there's no
* primary key either. This could be done more thoroughly...
@@ -740,11 +742,11 @@ vac_update_relstats(Oid relid, BlockNumber num_pages, double num_tuples,
/*
* vac_update_dbminxid() -- update the minimum Xid present in one database
*
- * Update pg_database's datminxid and datvacuumxid, and the flat-file copy
- * of it. datminxid is updated to the minimum of all relminxid found in
- * pg_class. datvacuumxid is updated to the minimum of all relvacuumxid
- * found in pg_class. The values are also returned in minxid and
- * vacuumxid, respectively.
+ * Update pg_database's datminxid and datvacuumxid, and the flat-file copy
+ * of it. datminxid is updated to the minimum of all relminxid found in
+ * pg_class. datvacuumxid is updated to the minimum of all relvacuumxid
+ * found in pg_class. The values are also returned in minxid and
+ * vacuumxid, respectively.
*
* We violate transaction semantics here by overwriting the database's
* existing pg_database tuple with the new values. This is reasonably
@@ -760,15 +762,15 @@ vac_update_dbminxid(Oid dbid, TransactionId *minxid, TransactionId *vacuumxid)
HeapTuple tuple;
Form_pg_database dbform;
Relation relation;
- SysScanDesc scan;
+ SysScanDesc scan;
HeapTuple classTup;
- TransactionId newMinXid = InvalidTransactionId;
- TransactionId newVacXid = InvalidTransactionId;
+ TransactionId newMinXid = InvalidTransactionId;
+ TransactionId newVacXid = InvalidTransactionId;
bool dirty = false;
- /*
- * We must seqscan pg_class to find the minimum Xid, because there
- * is no index that can help us here.
+ /*
+ * We must seqscan pg_class to find the minimum Xid, because there is no
+ * index that can help us here.
*/
relation = heap_open(RelationRelationId, AccessShareLock);
@@ -845,7 +847,7 @@ vac_update_dbminxid(Oid dbid, TransactionId *minxid, TransactionId *vacuumxid)
*vacuumxid = newVacXid;
/* Mark the flat-file copy of pg_database for update at commit */
- database_file_update_needed();
+ database_file_update_needed();
}
@@ -970,14 +972,14 @@ vac_truncate_clog(TransactionId myminxid, TransactionId myvacxid)
* XXX -- the test we use here is fairly arbitrary. Note that in the
* autovacuum database-wide code, a template database is always processed
* with VACUUM FREEZE, so we can be sure that it will be truly frozen so
- * it won't be need to be processed here again soon.
+ * it won't be need to be processed here again soon.
*
* FIXME -- here we could get into a kind of loop if the database being
* chosen is not actually a template database, because we'll not freeze
* it, so its age may not really decrease if there are any live
* non-freezable tuples. Consider forcing a vacuum freeze if autovacuum
- * is invoked by a backend. On the other hand, forcing a vacuum freeze
- * on a user database may not a be a very polite thing to do.
+ * is invoked by a backend. On the other hand, forcing a vacuum freeze on
+ * a user database may not a be a very polite thing to do.
*/
if (!AutoVacuumingActive() && age > (int32) ((MaxTransactionId >> 3) * 3))
SendPostmasterSignal(PMSIGNAL_START_AUTOVAC);
@@ -1022,18 +1024,18 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
else
{
/*
- * During a lazy VACUUM we do not run any user-supplied functions,
- * and so it should be safe to not create a transaction snapshot.
+ * During a lazy VACUUM we do not run any user-supplied functions, and
+ * so it should be safe to not create a transaction snapshot.
*
* We can furthermore set the inVacuum flag, which lets other
* concurrent VACUUMs know that they can ignore this one while
* determining their OldestXmin. (The reason we don't set inVacuum
* during a full VACUUM is exactly that we may have to run user-
- * defined functions for functional indexes, and we want to make
- * sure that if they use the snapshot set above, any tuples it
- * requires can't get removed from other tables. An index function
- * that depends on the contents of other tables is arguably broken,
- * but we won't break it here by violating transaction semantics.)
+ * defined functions for functional indexes, and we want to make sure
+ * that if they use the snapshot set above, any tuples it requires
+ * can't get removed from other tables. An index function that
+ * depends on the contents of other tables is arguably broken, but we
+ * won't break it here by violating transaction semantics.)
*
* Note: the inVacuum flag remains set until CommitTransaction or
* AbortTransaction. We don't want to clear it until we reset
@@ -1059,8 +1061,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
/*
* Open the relation and get the appropriate lock on it.
*
- * There's a race condition here: the rel may have gone away since
- * the last time we saw it. If so, we don't need to vacuum it.
+ * There's a race condition here: the rel may have gone away since the
+ * last time we saw it. If so, we don't need to vacuum it.
*/
onerel = try_relation_open(relid, lmode);
@@ -1116,7 +1118,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
{
relation_close(onerel, lmode);
CommitTransactionCommand();
- return; /* assume no long-lived data in temp tables */
+ return; /* assume no long-lived data in temp tables */
}
/*
@@ -1207,7 +1209,7 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
i;
VRelStats *vacrelstats;
TransactionId FreezeLimit,
- OldestXmin;
+ OldestXmin;
vacuum_set_xid_limits(vacstmt, onerel->rd_rel->relisshared,
&OldestXmin, &FreezeLimit);
@@ -1221,13 +1223,13 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
vacrelstats->hasindex = false;
/*
- * Set initial minimum Xid, which will be updated if a smaller Xid is found
- * in the relation by scan_heap.
+ * Set initial minimum Xid, which will be updated if a smaller Xid is
+ * found in the relation by scan_heap.
*
* We use RecentXmin here (the minimum Xid that belongs to a transaction
* that is still open according to our snapshot), because it is the
- * earliest transaction that could insert new tuples in the table after our
- * VACUUM is done.
+ * earliest transaction that could insert new tuples in the table after
+ * our VACUUM is done.
*/
vacrelstats->minxid = RecentXmin;
@@ -1557,7 +1559,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
}
else
{
- TransactionId min;
+ TransactionId min;
num_tuples += 1;
notup = false;
@@ -1566,7 +1568,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
if (tuple.t_len > max_tlen)
max_tlen = tuple.t_len;
- /*
+ /*
* If the tuple is alive, we consider it for the "minxid"
* calculations.
*/
@@ -1710,23 +1712,23 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
TransactionId
vactuple_get_minxid(HeapTuple tuple)
{
- TransactionId min = InvalidTransactionId;
+ TransactionId min = InvalidTransactionId;
- /*
- * Initialize calculations with Xmin. NB -- may be FrozenXid and
- * we don't want that one.
+ /*
+ * Initialize calculations with Xmin. NB -- may be FrozenXid and we don't
+ * want that one.
*/
if (TransactionIdIsNormal(HeapTupleHeaderGetXmin(tuple->t_data)))
min = HeapTupleHeaderGetXmin(tuple->t_data);
/*
* If Xmax is not marked INVALID, we assume it's valid without making
- * further checks on it --- it must be recently obsoleted or still running,
- * else HeapTupleSatisfiesVacuum would have deemed it removable.
+ * further checks on it --- it must be recently obsoleted or still
+ * running, else HeapTupleSatisfiesVacuum would have deemed it removable.
*/
if (!(tuple->t_data->t_infomask | HEAP_XMAX_INVALID))
{
- TransactionId xmax = HeapTupleHeaderGetXmax(tuple->t_data);
+ TransactionId xmax = HeapTupleHeaderGetXmax(tuple->t_data);
/* If xmax is a plain Xid, consider it by itself */
if (!(tuple->t_data->t_infomask | HEAP_XMAX_IS_MULTI))
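
The vacuum.c hunks only rewrap comments around the xid-limit bookkeeping shared by full and lazy VACUUM, including the hint quoted above about falling back to VACUUM FULL followed by VACUUM FREEZE. At the SQL level (table name hypothetical):

    VACUUM ANALYZE mytable;    -- plain VACUUM, the lazy path in vacuumlazy.c
    VACUUM FULL mytable;       -- the old-style compacting vacuum in full_vacuum_rel
    VACUUM FREEZE mytable;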
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index c839b951d9..c89dc20404 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -36,7 +36,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.79 2006/09/21 20:31:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.80 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -90,7 +90,7 @@ typedef struct LVRelStats
int num_free_pages; /* current # of entries */
int max_free_pages; /* # slots allocated in array */
PageFreeSpaceInfo *free_pages; /* array or heap of blkno/avail */
- BlockNumber tot_free_pages; /* total pages with >= threshold space */
+ BlockNumber tot_free_pages; /* total pages with >= threshold space */
} LVRelStats;
@@ -103,15 +103,15 @@ static void lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
TransactionId OldestXmin);
static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
static void lazy_vacuum_index(Relation indrel,
- IndexBulkDeleteResult **stats,
- LVRelStats *vacrelstats);
+ IndexBulkDeleteResult **stats,
+ LVRelStats *vacrelstats);
static void lazy_cleanup_index(Relation indrel,
- IndexBulkDeleteResult *stats,
- LVRelStats *vacrelstats);
+ IndexBulkDeleteResult *stats,
+ LVRelStats *vacrelstats);
static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
int tupindex, LVRelStats *vacrelstats);
static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats,
- TransactionId OldestXmin);
+ TransactionId OldestXmin);
static BlockNumber count_nondeletable_pages(Relation onerel,
LVRelStats *vacrelstats, TransactionId OldestXmin);
static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks);
@@ -143,7 +143,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
int nindexes;
BlockNumber possibly_freeable;
TransactionId OldestXmin,
- FreezeLimit;
+ FreezeLimit;
if (vacstmt->verbose)
elevel = INFO;
@@ -160,8 +160,8 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
vacrelstats->threshold = GetAvgFSMRequestSize(&onerel->rd_node);
/*
- * Set initial minimum Xid, which will be updated if a smaller Xid is found
- * in the relation by lazy_scan_heap.
+ * Set initial minimum Xid, which will be updated if a smaller Xid is
+ * found in the relation by lazy_scan_heap.
*
* We use RecentXmin here (the minimum Xid that belongs to a transaction
* that is still open according to our snapshot), because it is the
@@ -440,7 +440,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
num_tuples += 1;
hastup = true;
- /*
+ /*
* If the tuple is alive, we consider it for the "minxid"
* calculations.
*/
@@ -472,8 +472,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* If we remembered any tuples for deletion, then the page will be
* visited again by lazy_vacuum_heap, which will compute and record
* its post-compaction free space. If not, then we're done with this
- * page, so remember its free space as-is. (This path will always
- * be taken if there are no indexes.)
+ * page, so remember its free space as-is. (This path will always be
+ * taken if there are no indexes.)
*/
if (vacrelstats->num_dead_tuples == prev_dead_count)
{
@@ -803,11 +803,12 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats,
}
}
vacrelstats->num_free_pages = j;
+
/*
* If tot_free_pages was more than num_free_pages, we can't tell for sure
* what its correct value is now, because we don't know which of the
- * forgotten pages are getting truncated. Conservatively set it equal
- * to num_free_pages.
+ * forgotten pages are getting truncated. Conservatively set it equal to
+ * num_free_pages.
*/
vacrelstats->tot_free_pages = j;
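
vacuumlazy.c gets matching comment rewraps in its free-space accounting; as the earlier hunk shows, vacstmt->verbose raises the message level to INFO so the per-table details are reported, e.g. (table name hypothetical):

    VACUUM VERBOSE mytable;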
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index 2e270cde56..c910f6376f 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.118 2006/07/14 14:52:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.119 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -585,7 +585,7 @@ assign_client_encoding(const char *value, bool doit, GucSource source)
* limit on names, so we can tell whether we're being passed an initial
* role name or a saved/restored value. (NOTE: we rely on guc.c to have
* properly truncated any incoming value, but not to truncate already-stored
- * values. See GUC_IS_NAME processing.)
+ * values. See GUC_IS_NAME processing.)
*/
extern char *session_authorization_string; /* in guc.c */
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index df7f479f31..a79bd3cd4f 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.97 2006/08/21 00:57:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.98 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -119,7 +119,7 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace)
def->colname = pstrdup(tle->resname);
def->typename = makeTypeNameFromOid(exprType((Node *) tle->expr),
- exprTypmod((Node *) tle->expr));
+ exprTypmod((Node *) tle->expr));
def->inhcount = 0;
def->is_local = true;
def->is_not_null = false;
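
Finally, view.c: the column-definition loop in DefineVirtualRelation shown above is what CREATE [OR REPLACE] VIEW runs for each target-list entry. A hypothetical example:

    CREATE OR REPLACE VIEW active_orders AS
        SELECT id, customer, total
        FROM orders
        WHERE closed_at IS NULL;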